repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/http-utils/src/json.rs
libs/http-utils/src/json.rs
use anyhow::Context; use bytes::Buf; use hyper::{Body, Request, Response, StatusCode, header}; use serde::{Deserialize, Serialize}; use super::error::ApiError; /// Parse a json request body and deserialize it to the type `T`. pub async fn json_request<T: for<'de> Deserialize<'de>>( request: &mut Request<Body>, ) -> Result<T, ApiError> { let body = hyper::body::aggregate(request.body_mut()) .await .context("Failed to read request body") .map_err(ApiError::BadRequest)?; if body.remaining() == 0 { return Err(ApiError::BadRequest(anyhow::anyhow!( "missing request body" ))); } let mut deser = serde_json::de::Deserializer::from_reader(body.reader()); serde_path_to_error::deserialize(&mut deser) // intentionally stringify because the debug version is not helpful in python logs .map_err(|e| anyhow::anyhow!("Failed to parse json request: {e}")) .map_err(ApiError::BadRequest) } /// Parse a json request body and deserialize it to the type `T`. If the body is empty, return `T::default`. pub async fn json_request_maybe<T: for<'de> Deserialize<'de> + Default>( request: &mut Request<Body>, ) -> Result<T, ApiError> { let body = hyper::body::aggregate(request.body_mut()) .await .context("Failed to read request body") .map_err(ApiError::BadRequest)?; if body.remaining() == 0 { return Ok(T::default()); } let mut deser = serde_json::de::Deserializer::from_reader(body.reader()); serde_path_to_error::deserialize(&mut deser) // intentionally stringify because the debug version is not helpful in python logs .map_err(|e| anyhow::anyhow!("Failed to parse json request: {e}")) .map_err(ApiError::BadRequest) } pub fn json_response<T: Serialize>( status: StatusCode, data: T, ) -> Result<Response<Body>, ApiError> { let json = serde_json::to_string(&data) .context("Failed to serialize JSON response") .map_err(ApiError::InternalServerError)?; let response = Response::builder() .status(status) .header(header::CONTENT_TYPE, "application/json") .body(Body::from(json)) .map_err(|e| 
ApiError::InternalServerError(e.into()))?; Ok(response) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/http-utils/src/error.rs
libs/http-utils/src/error.rs
use std::borrow::Cow;
use std::error::Error as StdError;

use hyper::{Body, Response, StatusCode, header};
use serde::{Deserialize, Serialize};
use thiserror::Error;
use tracing::{error, info, warn};
use utils::auth::AuthError;

/// Unified error type returned by HTTP request handlers.
///
/// Each variant maps to a specific HTTP status code in [`ApiError::into_response`],
/// and to a logging severity in [`api_error_handler`].
#[derive(Debug, Error)]
pub enum ApiError {
    // Debug-formatted on purpose: the Display text is what clients see,
    // and the cause chain is useful there (see `into_response`).
    #[error("Bad request: {0:#?}")]
    BadRequest(anyhow::Error),

    #[error("Forbidden: {0}")]
    Forbidden(String),

    #[error("Unauthorized: {0}")]
    Unauthorized(String),

    #[error("NotFound: {0}")]
    NotFound(Box<dyn StdError + Send + Sync + 'static>),

    #[error("Conflict: {0}")]
    Conflict(String),

    #[error("Precondition failed: {0}")]
    PreconditionFailed(Box<str>),

    #[error("Resource temporarily unavailable: {0}")]
    ResourceUnavailable(Cow<'static, str>),

    #[error("Too many requests: {0}")]
    TooManyRequests(Cow<'static, str>),

    #[error("Shutting down")]
    ShuttingDown,

    #[error("Timeout")]
    Timeout(Cow<'static, str>),

    #[error("Request cancelled")]
    Cancelled,

    // Transparent: the wrapped error's own message is shown directly.
    #[error(transparent)]
    InternalServerError(anyhow::Error),
}

impl ApiError {
    /// Convert the error into an HTTP response with a JSON `HttpErrorBody`
    /// payload and the status code appropriate for the variant.
    pub fn into_response(self) -> Response<Body> {
        match self {
            ApiError::BadRequest(err) => HttpErrorBody::response_from_msg_and_status(
                format!("{err:#?}"), // use debug printing so that we give the cause
                StatusCode::BAD_REQUEST,
            ),
            ApiError::Forbidden(_) => {
                HttpErrorBody::response_from_msg_and_status(self.to_string(), StatusCode::FORBIDDEN)
            }
            ApiError::Unauthorized(_) => HttpErrorBody::response_from_msg_and_status(
                self.to_string(),
                StatusCode::UNAUTHORIZED,
            ),
            ApiError::NotFound(_) => {
                HttpErrorBody::response_from_msg_and_status(self.to_string(), StatusCode::NOT_FOUND)
            }
            ApiError::Conflict(_) => {
                HttpErrorBody::response_from_msg_and_status(self.to_string(), StatusCode::CONFLICT)
            }
            ApiError::PreconditionFailed(_) => HttpErrorBody::response_from_msg_and_status(
                self.to_string(),
                StatusCode::PRECONDITION_FAILED,
            ),
            ApiError::ShuttingDown => HttpErrorBody::response_from_msg_and_status(
                "Shutting down".to_string(),
                StatusCode::SERVICE_UNAVAILABLE,
            ),
            ApiError::ResourceUnavailable(err) => HttpErrorBody::response_from_msg_and_status(
                err.to_string(),
                StatusCode::SERVICE_UNAVAILABLE,
            ),
            ApiError::TooManyRequests(err) => HttpErrorBody::response_from_msg_and_status(
                err.to_string(),
                StatusCode::TOO_MANY_REQUESTS,
            ),
            ApiError::Timeout(err) => HttpErrorBody::response_from_msg_and_status(
                err.to_string(),
                StatusCode::REQUEST_TIMEOUT,
            ),
            // NOTE: cancelled requests are surfaced as 500, not 499/408.
            ApiError::Cancelled => HttpErrorBody::response_from_msg_and_status(
                self.to_string(),
                StatusCode::INTERNAL_SERVER_ERROR,
            ),
            ApiError::InternalServerError(err) => HttpErrorBody::response_from_msg_and_status(
                format!("{err:#}"), // use alternative formatting so that we give the cause without backtrace
                StatusCode::INTERNAL_SERVER_ERROR,
            ),
        }
    }
}

impl From<AuthError> for ApiError {
    fn from(_value: AuthError) -> Self {
        // Don't pass on the value of the AuthError as a precautionary measure.
        // Being intentionally vague in public error communication hurts debugability
        // but it is more secure.
        ApiError::Forbidden("JWT authentication error".to_string())
    }
}

/// JSON body shape used for all error responses: `{"msg": "..."}`.
#[derive(Serialize, Deserialize)]
pub struct HttpErrorBody {
    pub msg: String,
}

impl HttpErrorBody {
    /// Wrap a plain message into the error-body struct.
    pub fn from_msg(msg: String) -> Self {
        HttpErrorBody { msg }
    }

    /// Build a complete JSON error response from a message and status code.
    pub fn response_from_msg_and_status(msg: String, status: StatusCode) -> Response<Body> {
        HttpErrorBody { msg }.to_response(status)
    }

    /// Serialize `self` into a `Response` with the given status.
    pub fn to_response(&self, status: StatusCode) -> Response<Body> {
        Response::builder()
            .status(status)
            .header(header::CONTENT_TYPE, "application/json")
            // we do not have nested maps with non string keys so serialization shouldn't fail
            .body(Body::from(serde_json::to_string(self).unwrap()))
            .unwrap()
    }
}

/// Last-resort routerify error handler: unwraps `ApiError`s produced by
/// handlers, and turns anything else into a logged 500.
pub async fn route_error_handler(err: routerify::RouteError) -> Response<Body> {
    match err.downcast::<ApiError>() {
        Ok(api_error) => api_error_handler(*api_error),
        Err(other_error) => {
            // We expect all the request handlers to return an ApiError, so this should
            // not be reached. But just in case.
            error!("Error processing HTTP request: {other_error:?}");
            HttpErrorBody::response_from_msg_and_status(
                other_error.to_string(),
                StatusCode::INTERNAL_SERVER_ERROR,
            )
        }
    }
}

/// Log an `ApiError` at a severity matching its variant, then convert it
/// into the client-facing response.
pub fn api_error_handler(api_error: ApiError) -> Response<Body> {
    // Print a stack trace for Internal Server errors
    match api_error {
        ApiError::Forbidden(_) | ApiError::Unauthorized(_) => {
            warn!("Error processing HTTP request: {api_error:#}")
        }
        ApiError::ResourceUnavailable(_) => info!("Error processing HTTP request: {api_error:#}"),
        ApiError::NotFound(_) => info!("Error processing HTTP request: {api_error:#}"),
        // Debug format (`:?`) here so anyhow includes the backtrace in the log.
        ApiError::InternalServerError(_) => error!("Error processing HTTP request: {api_error:?}"),
        ApiError::ShuttingDown => info!("Shut down while processing HTTP request"),
        ApiError::Timeout(_) => info!("Timeout while processing HTTP request: {api_error:#}"),
        ApiError::Cancelled => info!("Request cancelled while processing HTTP request"),
        _ => info!("Error processing HTTP request: {api_error:#}"),
    }
    api_error.into_response()
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/http-utils/src/tls_certs.rs
libs/http-utils/src/tls_certs.rs
use std::{sync::Arc, time::Duration};

use anyhow::Context;
use arc_swap::ArcSwap;
use camino::Utf8Path;
use metrics::{IntCounterVec, UIntGaugeVec, register_int_counter_vec, register_uint_gauge_vec};
use once_cell::sync::Lazy;
use rustls::{
    pki_types::{CertificateDer, PrivateKeyDer, UnixTime},
    server::{ClientHello, ResolvesServerCert},
    sign::CertifiedKey,
};
use x509_cert::der::Reader;

/// Read a PEM file and return every certificate in it as DER.
///
/// # Errors
/// Fails if the file cannot be read or any PEM entry fails to parse.
pub async fn load_cert_chain(filename: &Utf8Path) -> anyhow::Result<Vec<CertificateDer<'static>>> {
    let cert_data = tokio::fs::read(filename)
        .await
        // with_context: only build the message on the error path
        .with_context(|| format!("failed reading certificate file {filename:?}"))?;
    let mut reader = std::io::Cursor::new(&cert_data);

    let cert_chain = rustls_pemfile::certs(&mut reader)
        .collect::<Result<Vec<_>, _>>()
        .with_context(|| format!("failed parsing certificate from file {filename:?}"))?;

    Ok(cert_chain)
}

/// Read a PEM file and return the first private key found in it.
///
/// # Errors
/// Fails if the file cannot be read, cannot be parsed, or contains no key.
pub async fn load_private_key(filename: &Utf8Path) -> anyhow::Result<PrivateKeyDer<'static>> {
    let key_data = tokio::fs::read(filename)
        .await
        .with_context(|| format!("failed reading private key file {filename:?}"))?;
    let mut reader = std::io::Cursor::new(&key_data);

    let key = rustls_pemfile::private_key(&mut reader)
        .with_context(|| format!("failed parsing private key from file {filename:?}"))?;

    // ok_or_else: avoid eagerly formatting the error message on the happy path
    key.ok_or_else(|| anyhow::anyhow!("no private key found in {}", filename.as_str()))
}

/// Load a certificate chain and private key from disk and combine them into
/// a rustls [`CertifiedKey`], verifying that the key matches the certificate.
pub async fn load_certified_key(
    key_filename: &Utf8Path,
    cert_filename: &Utf8Path,
) -> anyhow::Result<CertifiedKey> {
    let cert_chain = load_cert_chain(cert_filename).await?;
    let key = load_private_key(key_filename).await?;

    let key = rustls::crypto::ring::default_provider()
        .key_provider
        .load_private_key(key)?;

    let certified_key = CertifiedKey::new(cert_chain, key);
    certified_key.keys_match()?;
    Ok(certified_key)
}

/// rustls's CertifiedKey with extra parsed fields used for metrics.
struct ParsedCertifiedKey {
    certified_key: CertifiedKey,
    // `notAfter` of the end-entity certificate, exported via the
    // expiration-time gauge.
    expiration_time: UnixTime,
}

/// Parse expiration time from an X509 certificate.
fn parse_expiration_time(cert: &CertificateDer<'_>) -> anyhow::Result<UnixTime> {
    let parsed_cert = x509_cert::der::SliceReader::new(cert)
        .context("Failed to parse certificate")?
        .decode::<x509_cert::Certificate>()
        .context("Failed to parse certificate")?;

    Ok(UnixTime::since_unix_epoch(
        parsed_cert
            .tbs_certificate
            .validity
            .not_after
            .to_unix_duration(),
    ))
}

/// Load key + certificate from disk and also extract the expiration time of
/// the end-entity certificate for metrics.
async fn load_and_parse_certified_key(
    key_filename: &Utf8Path,
    cert_filename: &Utf8Path,
) -> anyhow::Result<ParsedCertifiedKey> {
    let certified_key = load_certified_key(key_filename, cert_filename).await?;
    let expiration_time = parse_expiration_time(certified_key.end_entity_cert()?)?;
    Ok(ParsedCertifiedKey {
        certified_key,
        expiration_time,
    })
}

static CERT_EXPIRATION_TIME: Lazy<UIntGaugeVec> = Lazy::new(|| {
    register_uint_gauge_vec!(
        "tls_certs_expiration_time_seconds",
        "Expiration time of the loaded certificate since unix epoch in seconds",
        &["resolver_name"]
    )
    .expect("failed to define a metric")
});

static CERT_RELOAD_STARTED_COUNTER: Lazy<IntCounterVec> = Lazy::new(|| {
    register_int_counter_vec!(
        "tls_certs_reload_started_total",
        "Number of certificate reload loop iterations started",
        &["resolver_name"]
    )
    .expect("failed to define a metric")
});

static CERT_RELOAD_UPDATED_COUNTER: Lazy<IntCounterVec> = Lazy::new(|| {
    register_int_counter_vec!(
        "tls_certs_reload_updated_total",
        "Number of times the certificate was updated to the new one",
        &["resolver_name"]
    )
    .expect("failed to define a metric")
});

static CERT_RELOAD_FAILED_COUNTER: Lazy<IntCounterVec> = Lazy::new(|| {
    register_int_counter_vec!(
        "tls_certs_reload_failed_total",
        "Number of times the certificate reload failed",
        &["resolver_name"]
    )
    .expect("failed to define a metric")
});

/// Implementation of [`rustls::server::ResolvesServerCert`] which reloads certificates from
/// the disk periodically.
// Server certificate resolver whose key/cert pair is swapped atomically by a
// background reload task (see `new`).
#[derive(Debug)]
pub struct ReloadingCertificateResolver {
    certified_key: ArcSwap<CertifiedKey>,
}

impl ReloadingCertificateResolver {
    /// Creates a new Resolver by loading certificate and private key from FS and
    /// creating tokio::task to reload them with provided reload_period.
    /// resolver_name is used as metric's label.
    pub async fn new(
        resolver_name: &str,
        key_filename: &Utf8Path,
        cert_filename: &Utf8Path,
        reload_period: Duration,
    ) -> anyhow::Result<Arc<Self>> {
        // Create metrics for current resolver.
        let cert_expiration_time = CERT_EXPIRATION_TIME.with_label_values(&[resolver_name]);
        let cert_reload_started_counter =
            CERT_RELOAD_STARTED_COUNTER.with_label_values(&[resolver_name]);
        let cert_reload_updated_counter =
            CERT_RELOAD_UPDATED_COUNTER.with_label_values(&[resolver_name]);
        let cert_reload_failed_counter =
            CERT_RELOAD_FAILED_COUNTER.with_label_values(&[resolver_name]);

        // Initial load is done synchronously: `new` fails if the files are
        // unreadable, so callers never get a resolver without a certificate.
        let parsed_key = load_and_parse_certified_key(key_filename, cert_filename).await?;

        let this = Arc::new(Self {
            certified_key: ArcSwap::from_pointee(parsed_key.certified_key),
        });
        cert_expiration_time.set(parsed_key.expiration_time.as_secs());

        tokio::spawn({
            // Hold only a Weak reference in the task so the task does not keep
            // the resolver alive; the task exits once all strong refs drop.
            let weak_this = Arc::downgrade(&this);
            let key_filename = key_filename.to_owned();
            let cert_filename = cert_filename.to_owned();
            async move {
                // First tick fires one reload_period from now, not immediately
                // (we just loaded the certificate above).
                let start = tokio::time::Instant::now() + reload_period;
                let mut interval = tokio::time::interval_at(start, reload_period);
                let mut last_reload_failed = false;
                loop {
                    interval.tick().await;
                    let this = match weak_this.upgrade() {
                        Some(this) => this,
                        None => break, // Resolver has been destroyed, exit.
                    };
                    cert_reload_started_counter.inc();
                    match load_and_parse_certified_key(&key_filename, &cert_filename).await {
                        Ok(parsed_key) => {
                            // Only swap (and bump metrics) when the certificate
                            // bytes actually changed.
                            if parsed_key.certified_key.cert == this.certified_key.load().cert {
                                tracing::debug!("Certificate has not changed since last reloading");
                            } else {
                                tracing::info!("Certificate has been reloaded");
                                this.certified_key.store(Arc::new(parsed_key.certified_key));
                                cert_expiration_time.set(parsed_key.expiration_time.as_secs());
                                cert_reload_updated_counter.inc();
                            }
                            last_reload_failed = false;
                        }
                        Err(err) => {
                            cert_reload_failed_counter.inc();
                            // Note: Reloading certs may fail if it conflicts with the script updating
                            // the files at the same time. Warn only if the error is persistent.
                            if last_reload_failed {
                                tracing::warn!("Error reloading certificate: {err:#}");
                            } else {
                                tracing::info!("Error reloading certificate: {err:#}");
                            }
                            last_reload_failed = true;
                        }
                    }
                }
            }
        });

        Ok(this)
    }
}

impl ResolvesServerCert for ReloadingCertificateResolver {
    // Called by rustls for every TLS handshake; `load_full` takes an atomic
    // snapshot of the current certificate.
    fn resolve(&self, _client_hello: ClientHello<'_>) -> Option<Arc<CertifiedKey>> {
        Some(self.certified_key.load_full())
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/http-utils/src/server.rs
libs/http-utils/src/server.rs
use std::{error::Error, sync::Arc}; use futures::StreamExt; use futures::stream::FuturesUnordered; use hyper0::Body; use hyper0::server::conn::Http; use metrics::{IntCounterVec, register_int_counter_vec}; use once_cell::sync::Lazy; use routerify::{RequestService, RequestServiceBuilder}; use tokio::io::{AsyncRead, AsyncWrite}; use tokio_rustls::TlsAcceptor; use tokio_util::sync::CancellationToken; use tracing::{error, info}; use crate::error::ApiError; /// A simple HTTP server over hyper library. /// You may want to use it instead of [`hyper0::server::Server`] because: /// 1. hyper0's Server was removed from hyper v1. /// It's recommended to replace hyepr0's Server with a manual loop, which is done here. /// 2. hyper0's Server doesn't support TLS out of the box, and there is no way /// to support it efficiently with the Accept trait that hyper0's Server uses. /// That's one of the reasons why it was removed from v1. /// <https://github.com/hyperium/hyper/blob/115339d3df50f20c8717680aa35f48858e9a6205/docs/ROADMAP.md#higher-level-client-and-server-problems> pub struct Server { request_service: Arc<RequestServiceBuilder<Body, ApiError>>, listener: tokio::net::TcpListener, tls_acceptor: Option<TlsAcceptor>, } static CONNECTION_STARTED_COUNT: Lazy<IntCounterVec> = Lazy::new(|| { register_int_counter_vec!( "http_server_connection_started_total", "Number of established http/https connections", &["scheme"] ) .expect("failed to define a metric") }); static CONNECTION_ERROR_COUNT: Lazy<IntCounterVec> = Lazy::new(|| { register_int_counter_vec!( "http_server_connection_errors_total", "Number of occured connection errors by type", &["type"] ) .expect("failed to define a metric") }); impl Server { pub fn new( request_service: Arc<RequestServiceBuilder<Body, ApiError>>, listener: std::net::TcpListener, tls_acceptor: Option<TlsAcceptor>, ) -> anyhow::Result<Self> { // Note: caller of from_std is responsible for setting nonblocking mode. 
listener.set_nonblocking(true)?; let listener = tokio::net::TcpListener::from_std(listener)?; Ok(Self { request_service, listener, tls_acceptor, }) } pub async fn serve(self, cancel: CancellationToken) -> anyhow::Result<()> { fn suppress_io_error(err: &std::io::Error) -> bool { use std::io::ErrorKind::*; matches!(err.kind(), ConnectionReset | ConnectionAborted | BrokenPipe) } fn suppress_hyper_error(err: &hyper0::Error) -> bool { if err.is_incomplete_message() || err.is_closed() || err.is_timeout() { return true; } if let Some(inner) = err.source() && let Some(io) = inner.downcast_ref::<std::io::Error>() { return suppress_io_error(io); } false } let tcp_error_cnt = CONNECTION_ERROR_COUNT.with_label_values(&["tcp"]); let tls_error_cnt = CONNECTION_ERROR_COUNT.with_label_values(&["tls"]); let http_error_cnt = CONNECTION_ERROR_COUNT.with_label_values(&["http"]); let https_error_cnt = CONNECTION_ERROR_COUNT.with_label_values(&["https"]); let panic_error_cnt = CONNECTION_ERROR_COUNT.with_label_values(&["panic"]); let http_connection_cnt = CONNECTION_STARTED_COUNT.with_label_values(&["http"]); let https_connection_cnt = CONNECTION_STARTED_COUNT.with_label_values(&["https"]); let mut connections = FuturesUnordered::new(); loop { tokio::select! { stream = self.listener.accept() => { let (tcp_stream, remote_addr) = match stream { Ok(stream) => stream, Err(err) => { tcp_error_cnt.inc(); if !suppress_io_error(&err) { info!("Failed to accept TCP connection: {err:#}"); } continue; } }; let service = self.request_service.build(remote_addr); let tls_acceptor = self.tls_acceptor.clone(); let cancel = cancel.clone(); let tls_error_cnt = tls_error_cnt.clone(); let http_error_cnt = http_error_cnt.clone(); let https_error_cnt = https_error_cnt.clone(); let http_connection_cnt = http_connection_cnt.clone(); let https_connection_cnt = https_connection_cnt.clone(); connections.push(tokio::spawn( async move { match tls_acceptor { Some(tls_acceptor) => { // Handle HTTPS connection. 
https_connection_cnt.inc(); let tls_stream = tokio::select! { tls_stream = tls_acceptor.accept(tcp_stream) => tls_stream, _ = cancel.cancelled() => return, }; let tls_stream = match tls_stream { Ok(tls_stream) => tls_stream, Err(err) => { tls_error_cnt.inc(); if !suppress_io_error(&err) { info!(%remote_addr, "Failed to accept TLS connection: {err:#}"); } return; } }; if let Err(err) = Self::serve_connection(tls_stream, service, cancel).await { https_error_cnt.inc(); if !suppress_hyper_error(&err) { info!(%remote_addr, "Failed to serve HTTPS connection: {err:#}"); } } } None => { // Handle HTTP connection. http_connection_cnt.inc(); if let Err(err) = Self::serve_connection(tcp_stream, service, cancel).await { http_error_cnt.inc(); if !suppress_hyper_error(&err) { info!(%remote_addr, "Failed to serve HTTP connection: {err:#}"); } } } }; })); } Some(conn) = connections.next() => { if let Err(err) = conn { panic_error_cnt.inc(); error!("Connection panicked: {err:#}"); } } _ = cancel.cancelled() => { // Wait for graceful shutdown of all connections. while let Some(conn) = connections.next().await { if let Err(err) = conn { panic_error_cnt.inc(); error!("Connection panicked: {err:#}"); } } break; } } } Ok(()) } /// Serves HTTP connection with graceful shutdown. async fn serve_connection<I>( io: I, service: RequestService<Body, ApiError>, cancel: CancellationToken, ) -> Result<(), hyper0::Error> where I: AsyncRead + AsyncWrite + Unpin + Send + 'static, { let mut conn = Http::new().serve_connection(io, service).with_upgrades(); tokio::select! { res = &mut conn => res, _ = cancel.cancelled() => { Pin::new(&mut conn).graceful_shutdown(); // Note: connection should still be awaited for graceful shutdown to complete. conn.await } } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/http-utils/src/request.rs
libs/http-utils/src/request.rs
use core::fmt;
use std::borrow::Cow;
use std::str::FromStr;

use anyhow::anyhow;
use hyper::body::HttpBody;
use hyper::{Body, Request};
use routerify::ext::RequestExt;

use super::error::ApiError;

/// Fetch a required routerify path parameter, or 400 if it is absent.
pub fn get_request_param<'a>(
    request: &'a Request<Body>,
    param_name: &str,
) -> Result<&'a str, ApiError> {
    match request.param(param_name) {
        Some(arg) => Ok(arg),
        None => Err(ApiError::BadRequest(anyhow!(
            "no {param_name} specified in path param",
        ))),
    }
}

/// Fetch a required path parameter and parse it via `FromStr`.
/// NOTE: the parse error itself is discarded; only the param name is reported.
pub fn parse_request_param<T: FromStr>(
    request: &Request<Body>,
    param_name: &str,
) -> Result<T, ApiError> {
    match get_request_param(request, param_name)?.parse() {
        Ok(v) => Ok(v),
        Err(_) => Err(ApiError::BadRequest(anyhow!(
            "failed to parse {param_name}",
        ))),
    }
}

/// Fetch an optional query-string parameter.
///
/// Duplicate occurrences are tolerated as long as every occurrence has the
/// same value; differing duplicates produce a 400.
pub fn get_query_param<'a>(
    request: &'a Request<Body>,
    param_name: &str,
) -> Result<Option<Cow<'a, str>>, ApiError> {
    let query = match request.uri().query() {
        Some(q) => q,
        None => return Ok(None),
    };
    let values = url::form_urlencoded::parse(query.as_bytes())
        .filter_map(|(k, v)| if k == param_name { Some(v) } else { None })
        // we call .next() twice below. If it's None the first time, .fuse() ensures it's None afterwards
        .fuse();

    // Work around an issue with Alloy's pyroscope scrape where the "seconds"
    // parameter is added several times. https://github.com/grafana/alloy/issues/3026
    // TODO: revert after Alloy is fixed.
    //
    // All items are wrapped in Ok before reduce, so inside the closure `i` is
    // always Ok (the unwrap below cannot panic); `acc` becomes Err on the
    // first mismatching duplicate and then sticks.
    let value1 = values
        .map(Ok)
        .reduce(|acc, i| {
            match acc {
                Err(_) => acc,
                // It's okay to have duplicates as along as they have the same value.
                Ok(ref a) if a == &i.unwrap() => acc,
                _ => Err(ApiError::BadRequest(anyhow!(
                    "param {param_name} specified more than once"
                ))),
            }
        })
        .transpose()?;

    // Strict single-occurrence check, disabled by the workaround above:
    // if values.next().is_some() {
    //     return Err(ApiError::BadRequest(anyhow!(
    //         "param {param_name} specified more than once"
    //     )));
    // }

    Ok(value1)
}

/// Fetch a required query-string parameter, or 400 if absent.
pub fn must_get_query_param<'a>(
    request: &'a Request<Body>,
    param_name: &str,
) -> Result<Cow<'a, str>, ApiError> {
    get_query_param(request, param_name)?.ok_or_else(|| {
        ApiError::BadRequest(anyhow!("no {param_name} specified in query parameters"))
    })
}

/// Fetch an optional query parameter and parse it; parse failures become 400
/// and include the underlying error message.
pub fn parse_query_param<E: fmt::Display, T: FromStr<Err = E>>(
    request: &Request<Body>,
    param_name: &str,
) -> Result<Option<T>, ApiError> {
    get_query_param(request, param_name)?
        .map(|v| {
            v.parse().map_err(|e| {
                ApiError::BadRequest(anyhow!("cannot parse query param {param_name}: {e}"))
            })
        })
        .transpose()
}

/// Fetch and parse a required query parameter.
pub fn must_parse_query_param<E: fmt::Display, T: FromStr<Err = E>>(
    request: &Request<Body>,
    param_name: &str,
) -> Result<T, ApiError> {
    parse_query_param(request, param_name)?.ok_or_else(|| {
        ApiError::BadRequest(anyhow!("no {param_name} specified in query parameters"))
    })
}

/// Reject requests that carry a body: 400 on the first data frame, Ok if the
/// body stream is already exhausted/empty.
pub async fn ensure_no_body(request: &mut Request<Body>) -> Result<(), ApiError> {
    match request.body_mut().data().await {
        Some(_) => Err(ApiError::BadRequest(anyhow!("Unexpected request body"))),
        None => Ok(()),
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // Covers the duplicate-tolerance workaround in `get_query_param`:
    // single value, identical duplicates, missing param, differing duplicates.
    #[test]
    fn test_get_query_param_duplicate() {
        let req = Request::builder()
            .uri("http://localhost:12345/testuri?testparam=1")
            .body(hyper::Body::empty())
            .unwrap();
        let value = get_query_param(&req, "testparam").unwrap();
        assert_eq!(value.unwrap(), "1");

        let req = Request::builder()
            .uri("http://localhost:12345/testuri?testparam=1&testparam=1")
            .body(hyper::Body::empty())
            .unwrap();
        let value = get_query_param(&req, "testparam").unwrap();
        assert_eq!(value.unwrap(), "1");

        let req = Request::builder()
            .uri("http://localhost:12345/testuri")
            .body(hyper::Body::empty())
            .unwrap();
        let value = get_query_param(&req, "testparam").unwrap();
        assert!(value.is_none());

        let req = Request::builder()
            .uri("http://localhost:12345/testuri?testparam=1&testparam=2&testparam=3")
            .body(hyper::Body::empty())
            .unwrap();
        let value = get_query_param(&req, "testparam");
        assert!(value.is_err());
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/http-utils/src/endpoint.rs
libs/http-utils/src/endpoint.rs
use std::future::Future; use std::io::Write as _; use std::str::FromStr; use std::time::Duration; use anyhow::{Context, anyhow}; use bytes::{Bytes, BytesMut}; use hyper::header::{AUTHORIZATION, CONTENT_DISPOSITION, CONTENT_TYPE, HeaderName}; use hyper::http::HeaderValue; use hyper::{Body, Method, Request, Response}; use jsonwebtoken::TokenData; use metrics::{Encoder, IntCounter, TextEncoder, register_int_counter}; use once_cell::sync::Lazy; use pprof::ProfilerGuardBuilder; use pprof::protos::Message as _; use routerify::ext::RequestExt; use routerify::{Middleware, RequestInfo, Router, RouterBuilder}; use tokio::sync::{Mutex, Notify, mpsc}; use tokio_stream::wrappers::ReceiverStream; use tokio_util::io::ReaderStream; use tracing::{Instrument, debug, info, info_span, warn}; use utils::auth::{AuthError, Claims, SwappableJwtAuth}; use utils::metrics_collector::{METRICS_COLLECTOR, METRICS_STALE_MILLIS}; use crate::error::{ApiError, api_error_handler, route_error_handler}; use crate::request::{get_query_param, parse_query_param}; static SERVE_METRICS_COUNT: Lazy<IntCounter> = Lazy::new(|| { register_int_counter!( "libmetrics_metric_handler_requests_total", "Number of metric requests made" ) .expect("failed to define a metric") }); static X_REQUEST_ID_HEADER_STR: &str = "x-request-id"; static X_REQUEST_ID_HEADER: HeaderName = HeaderName::from_static(X_REQUEST_ID_HEADER_STR); #[derive(Debug, Default, Clone)] struct RequestId(String); /// Adds a tracing info_span! instrumentation around the handler events, /// logs the request start and end events for non-GET requests and non-200 responses. /// /// Usage: Replace `my_handler` with `|r| request_span(r, my_handler)` /// /// Use this to distinguish between logs of different HTTP requests: every request handler wrapped /// with this will get request info logged in the wrapping span, including the unique request ID. /// /// This also handles errors, logging them and converting them to an HTTP error response. 
/// /// NB: If the client disconnects, Hyper will drop the Future, without polling it to /// completion. In other words, the handler must be async cancellation safe! request_span /// prints a warning to the log when that happens, so that you have some trace of it in /// the log. /// /// /// There could be other ways to implement similar functionality: /// /// * procmacros placed on top of all handler methods /// With all the drawbacks of procmacros, brings no difference implementation-wise, /// and little code reduction compared to the existing approach. /// /// * Another `TraitExt` with e.g. the `get_with_span`, `post_with_span` methods to do similar logic, /// implemented for [`RouterBuilder`]. /// Could be simpler, but we don't want to depend on [`routerify`] more, targeting to use other library later. /// /// * In theory, a span guard could've been created in a pre-request middleware and placed into a global collection, to be dropped /// later, in a post-response middleware. /// Due to suspendable nature of the futures, would give contradictive results which is exactly the opposite of what `tracing-futures` /// tries to achive with its `.instrument` used in the current approach. /// /// If needed, a declarative macro to substitute the |r| ... closure boilerplate could be introduced. pub async fn request_span<R, H>(request: Request<Body>, handler: H) -> R::Output where R: Future<Output = Result<Response<Body>, ApiError>> + Send + 'static, H: FnOnce(Request<Body>) -> R + Send + Sync + 'static, { let request_id = request.context::<RequestId>().unwrap_or_default().0; let method = request.method(); let path = request.uri().path(); let request_span = info_span!("request", %method, %path, %request_id); let log_quietly = method == Method::GET; async move { let cancellation_guard = RequestCancelled::warn_when_dropped_without_responding(); if log_quietly { debug!("Handling request"); } else { info!("Handling request"); } // No special handling for panics here. 
There's a `tracing_panic_hook` from another // module to do that globally. let res = handler(request).await; cancellation_guard.disarm(); // Log the result if needed. // // We also convert any errors into an Ok response with HTTP error code here. // `make_router` sets a last-resort error handler that would do the same, but // we prefer to do it here, before we exit the request span, so that the error // is still logged with the span. // // (Because we convert errors to Ok response, we never actually return an error, // and we could declare the function to return the never type (`!`). However, // using `routerify::RouterBuilder` requires a proper error type.) match res { Ok(response) => { let response_status = response.status(); if log_quietly && response_status.is_success() { debug!("Request handled, status: {response_status}"); } else { info!("Request handled, status: {response_status}"); } Ok(response) } Err(err) => Ok(api_error_handler(err)), } } .instrument(request_span) .await } /// Drop guard to WARN in case the request was dropped before completion. struct RequestCancelled { warn: Option<tracing::Span>, } impl RequestCancelled { /// Create the drop guard using the [`tracing::Span::current`] as the span. fn warn_when_dropped_without_responding() -> Self { RequestCancelled { warn: Some(tracing::Span::current()), } } /// Consume the drop guard without logging anything. fn disarm(mut self) { self.warn = None; } } impl Drop for RequestCancelled { fn drop(&mut self) { if std::thread::panicking() { // we are unwinding due to panicking, assume we are not dropped for cancellation } else if let Some(span) = self.warn.take() { // the span has all of the info already, but the outer `.instrument(span)` has already // been dropped, so we need to manually re-enter it for this message. // // this is what the instrument would do before polling so it is fine. 
let _g = span.entered(); warn!("request was dropped before completing"); } } } /// An [`std::io::Write`] implementation on top of a channel sending [`bytes::Bytes`] chunks. pub struct ChannelWriter { buffer: BytesMut, pub tx: mpsc::Sender<std::io::Result<Bytes>>, written: usize, /// Time spent waiting for the channel to make progress. It is not the same as time to upload a /// buffer because we cannot know anything about that, but this should allow us to understand /// the actual time taken without the time spent `std::thread::park`ed. wait_time: std::time::Duration, } impl ChannelWriter { pub fn new(buf_len: usize, tx: mpsc::Sender<std::io::Result<Bytes>>) -> Self { assert_ne!(buf_len, 0); ChannelWriter { // split about half off the buffer from the start, because we flush depending on // capacity. first flush will come sooner than without this, but now resizes will // have better chance of picking up the "other" half. not guaranteed of course. buffer: BytesMut::with_capacity(buf_len).split_off(buf_len / 2), tx, written: 0, wait_time: std::time::Duration::ZERO, } } pub fn flush0(&mut self) -> std::io::Result<usize> { let n = self.buffer.len(); if n == 0 { return Ok(0); } tracing::trace!(n, "flushing"); let ready = self.buffer.split().freeze(); let wait_started_at = std::time::Instant::now(); // not ideal to call from blocking code to block_on, but we are sure that this // operation does not spawn_blocking other tasks let res: Result<(), ()> = tokio::runtime::Handle::current().block_on(async { self.tx.send(Ok(ready)).await.map_err(|_| ())?; // throttle sending to allow reuse of our buffer in `write`. self.tx.reserve().await.map_err(|_| ())?; // now the response task has picked up the buffer and hopefully started // sending it to the client. 
Ok(()) }); self.wait_time += wait_started_at.elapsed(); if res.is_err() { return Err(std::io::ErrorKind::BrokenPipe.into()); } self.written += n; Ok(n) } pub fn flushed_bytes(&self) -> usize { self.written } pub fn wait_time(&self) -> std::time::Duration { self.wait_time } } impl std::io::Write for ChannelWriter { fn write(&mut self, mut buf: &[u8]) -> std::io::Result<usize> { let remaining = self.buffer.capacity() - self.buffer.len(); let out_of_space = remaining < buf.len(); let original_len = buf.len(); if out_of_space { let can_still_fit = buf.len() - remaining; self.buffer.extend_from_slice(&buf[..can_still_fit]); buf = &buf[can_still_fit..]; self.flush0()?; } // assume that this will often under normal operation just move the pointer back to the // beginning of allocation, because previous split off parts are already sent and // dropped. self.buffer.extend_from_slice(buf); Ok(original_len) } fn flush(&mut self) -> std::io::Result<()> { self.flush0().map(|_| ()) } } pub async fn prometheus_metrics_handler( req: Request<Body>, force_metric_collection_on_scrape: bool, ) -> Result<Response<Body>, ApiError> { SERVE_METRICS_COUNT.inc(); // HADRON let requested_use_latest = parse_query_param(&req, "use_latest")?; let use_latest = match requested_use_latest { None => force_metric_collection_on_scrape, Some(true) => true, Some(false) => { if force_metric_collection_on_scrape { // We don't cache in this case true } else { false } } }; let started_at = std::time::Instant::now(); let (tx, rx) = mpsc::channel(1); let body = Body::wrap_stream(ReceiverStream::new(rx)); let mut writer = ChannelWriter::new(128 * 1024, tx); let encoder = TextEncoder::new(); let response = Response::builder() .status(200) .header(CONTENT_TYPE, encoder.format_type()) .body(body) .unwrap(); let span = info_span!("blocking"); tokio::task::spawn_blocking(move || { // there are situations where we lose scraped metrics under load, try to gather some clues // since all nodes are queried this, keep the 
message count low. let spawned_at = std::time::Instant::now(); let _span = span.entered(); // HADRON let collected = if use_latest { // Skip caching the results if we always force metric collection on scrape. METRICS_COLLECTOR.run_once(!force_metric_collection_on_scrape) } else { METRICS_COLLECTOR.last_collected() }; let gathered_at = std::time::Instant::now(); let res = encoder .encode(&collected.metrics, &mut writer) .and_then(|_| writer.flush().map_err(|e| e.into())); // this instant is not when we finally got the full response sent, sending is done by hyper // in another task. let encoded_at = std::time::Instant::now(); let spawned_in = spawned_at - started_at; let collected_in = gathered_at - spawned_at; // remove the wait time here in case the tcp connection was clogged let encoded_in = encoded_at - gathered_at - writer.wait_time(); let total = encoded_at - started_at; // HADRON let staleness_ms = (encoded_at - collected.collected_at).as_millis(); METRICS_STALE_MILLIS.set(staleness_ms as i64); match res { Ok(()) => { tracing::info!( bytes = writer.flushed_bytes(), total_ms = total.as_millis(), spawning_ms = spawned_in.as_millis(), collection_ms = collected_in.as_millis(), encoding_ms = encoded_in.as_millis(), stalenss_ms = staleness_ms, "responded /metrics" ); } Err(e) => { // there is a chance that this error is not the BrokenPipe we generate in the writer // for "closed connection", but it is highly unlikely. tracing::warn!( after_bytes = writer.flushed_bytes(), total_ms = total.as_millis(), spawning_ms = spawned_in.as_millis(), collection_ms = collected_in.as_millis(), encoding_ms = encoded_in.as_millis(), "failed to write out /metrics response: {e:?}" ); // semantics of this error are quite... unclear. we want to error the stream out to // abort the response to somehow notify the client that we failed. // // though, most likely the reason for failure is that the receiver is already gone. 
drop( writer .tx .blocking_send(Err(std::io::ErrorKind::BrokenPipe.into())), ); } } }); Ok(response) } /// Generates CPU profiles. pub async fn profile_cpu_handler(req: Request<Body>) -> Result<Response<Body>, ApiError> { enum Format { Pprof, Svg, } // Parameters. let format = match get_query_param(&req, "format")?.as_deref() { None => Format::Pprof, Some("pprof") => Format::Pprof, Some("svg") => Format::Svg, Some(format) => return Err(ApiError::BadRequest(anyhow!("invalid format {format}"))), }; let seconds = match parse_query_param(&req, "seconds")? { None => 5, Some(seconds @ 1..=60) => seconds, Some(_) => return Err(ApiError::BadRequest(anyhow!("duration must be 1-60 secs"))), }; let frequency_hz = match parse_query_param(&req, "frequency")? { None => 99, Some(1001..) => return Err(ApiError::BadRequest(anyhow!("frequency must be <=1000 Hz"))), Some(frequency) => frequency, }; let force: bool = parse_query_param(&req, "force")?.unwrap_or_default(); // Take the profile. static PROFILE_LOCK: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(())); static PROFILE_CANCEL: Lazy<Notify> = Lazy::new(Notify::new); let report = { // Only allow one profiler at a time. If force is true, cancel a running profile (e.g. a // Grafana continuous profile). We use a try_lock() loop when cancelling instead of waiting // for a lock(), to avoid races where the notify isn't currently awaited. let _lock = loop { match PROFILE_LOCK.try_lock() { Ok(lock) => break lock, Err(_) if force => PROFILE_CANCEL.notify_waiters(), Err(_) => { return Err(ApiError::Conflict( "profiler already running (use ?force=true to cancel it)".into(), )); } } tokio::time::sleep(Duration::from_millis(1)).await; // don't busy-wait }; let guard = ProfilerGuardBuilder::default() .frequency(frequency_hz) .blocklist(&["libc", "libgcc", "pthread", "vdso"]) .build() .map_err(|err| ApiError::InternalServerError(err.into()))?; tokio::select! 
{ _ = tokio::time::sleep(Duration::from_secs(seconds)) => {}, _ = PROFILE_CANCEL.notified() => {}, }; guard .report() .build() .map_err(|err| ApiError::InternalServerError(err.into()))? }; // Return the report in the requested format. match format { Format::Pprof => { let body = report .pprof() .map_err(|err| ApiError::InternalServerError(err.into()))? .encode_to_vec(); Response::builder() .status(200) .header(CONTENT_TYPE, "application/octet-stream") .header(CONTENT_DISPOSITION, "attachment; filename=\"profile.pb\"") .body(Body::from(body)) .map_err(|err| ApiError::InternalServerError(err.into())) } Format::Svg => { let mut body = Vec::new(); report .flamegraph(&mut body) .map_err(|err| ApiError::InternalServerError(err.into()))?; Response::builder() .status(200) .header(CONTENT_TYPE, "image/svg+xml") .body(Body::from(body)) .map_err(|err| ApiError::InternalServerError(err.into())) } } } /// Generates heap profiles. /// /// This only works with jemalloc on Linux. pub async fn profile_heap_handler(req: Request<Body>) -> Result<Response<Body>, ApiError> { enum Format { Jemalloc, Pprof, Svg, } // Parameters. let format = match get_query_param(&req, "format")?.as_deref() { None => Format::Pprof, Some("jemalloc") => Format::Jemalloc, Some("pprof") => Format::Pprof, Some("svg") => Format::Svg, Some(format) => return Err(ApiError::BadRequest(anyhow!("invalid format {format}"))), }; // Obtain profiler handle. let mut prof_ctl = jemalloc_pprof::PROF_CTL .as_ref() .ok_or(ApiError::InternalServerError(anyhow!( "heap profiling not enabled" )))? .lock() .await; if !prof_ctl.activated() { return Err(ApiError::InternalServerError(anyhow!( "heap profiling not enabled" ))); } // Take and return the profile. match format { Format::Jemalloc => { // NB: file is an open handle to a tempfile that's already deleted. let file = tokio::task::spawn_blocking(move || prof_ctl.dump()) .await .map_err(|join_err| ApiError::InternalServerError(join_err.into()))? 
.map_err(ApiError::InternalServerError)?; let stream = ReaderStream::new(tokio::fs::File::from_std(file)); Response::builder() .status(200) .header(CONTENT_TYPE, "application/octet-stream") .header(CONTENT_DISPOSITION, "attachment; filename=\"heap.dump\"") .body(Body::wrap_stream(stream)) .map_err(|err| ApiError::InternalServerError(err.into())) } Format::Pprof => { let data = tokio::task::spawn_blocking(move || prof_ctl.dump_pprof()) .await .map_err(|join_err| ApiError::InternalServerError(join_err.into()))? .map_err(ApiError::InternalServerError)?; Response::builder() .status(200) .header(CONTENT_TYPE, "application/octet-stream") .header(CONTENT_DISPOSITION, "attachment; filename=\"heap.pb.gz\"") .body(Body::from(data)) .map_err(|err| ApiError::InternalServerError(err.into())) } Format::Svg => { let svg = tokio::task::spawn_blocking(move || prof_ctl.dump_flamegraph()) .await .map_err(|join_err| ApiError::InternalServerError(join_err.into()))? .map_err(ApiError::InternalServerError)?; Response::builder() .status(200) .header(CONTENT_TYPE, "image/svg+xml") .body(Body::from(svg)) .map_err(|err| ApiError::InternalServerError(err.into())) } } } pub fn add_request_id_middleware<B: hyper::body::HttpBody + Send + Sync + 'static>() -> Middleware<B, ApiError> { Middleware::pre(move |req| async move { let request_id = match req.headers().get(&X_REQUEST_ID_HEADER) { Some(request_id) => request_id .to_str() .expect("extract request id value") .to_owned(), None => { let request_id = uuid::Uuid::new_v4(); request_id.to_string() } }; req.set_context(RequestId(request_id)); Ok(req) }) } async fn add_request_id_header_to_response( mut res: Response<Body>, req_info: RequestInfo, ) -> Result<Response<Body>, ApiError> { if let Some(request_id) = req_info.context::<RequestId>() && let Ok(request_header_value) = HeaderValue::from_str(&request_id.0) { res.headers_mut() .insert(&X_REQUEST_ID_HEADER, request_header_value); }; Ok(res) } pub fn make_router() -> RouterBuilder<hyper::Body, 
ApiError> { Router::builder() .middleware(add_request_id_middleware()) .middleware(Middleware::post_with_info( add_request_id_header_to_response, )) .err_handler(route_error_handler) } pub fn attach_openapi_ui( router_builder: RouterBuilder<hyper::Body, ApiError>, spec: &'static [u8], spec_mount_path: &'static str, ui_mount_path: &'static str, ) -> RouterBuilder<hyper::Body, ApiError> { router_builder .get(spec_mount_path, move |r| request_span(r, move |_| async move { Ok(Response::builder().body(Body::from(spec)).unwrap()) }) ) .get(ui_mount_path, move |r| request_span(r, move |_| async move { Ok(Response::builder().body(Body::from(format!(r#" <!DOCTYPE html> <html lang="en"> <head> <title>rweb</title> <link href="https://cdn.jsdelivr.net/npm/swagger-ui-dist@3/swagger-ui.css" rel="stylesheet"> </head> <body> <div id="swagger-ui"></div> <script src="https://cdn.jsdelivr.net/npm/swagger-ui-dist@3/swagger-ui-bundle.js" charset="UTF-8"> </script> <script> window.onload = function() {{ const ui = SwaggerUIBundle({{ "dom_id": "\#swagger-ui", presets: [ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ], layout: "BaseLayout", deepLinking: true, showExtensions: true, showCommonExtensions: true, url: "{spec_mount_path}", }}) window.ui = ui; }}; </script> </body> </html> "#))).unwrap()) }) ) } fn parse_token(header_value: &str) -> Result<&str, ApiError> { // header must be in form Bearer <token> let (prefix, token) = header_value .split_once(' ') .ok_or_else(|| ApiError::Unauthorized("malformed authorization header".to_string()))?; if prefix != "Bearer" { return Err(ApiError::Unauthorized( "malformed authorization header".to_string(), )); } Ok(token) } pub fn auth_middleware<B: hyper::body::HttpBody + Send + Sync + 'static>( provide_auth: fn(&Request<Body>) -> Option<&SwappableJwtAuth>, ) -> Middleware<B, ApiError> { Middleware::pre(move |req| async move { if let Some(auth) = provide_auth(&req) { match req.headers().get(AUTHORIZATION) { Some(value) => 
{ let header_value = value.to_str().map_err(|_| { ApiError::Unauthorized("malformed authorization header".to_string()) })?; let token = parse_token(header_value)?; let data: TokenData<Claims> = auth.decode(token).map_err(|err| { warn!("Authentication error: {err}"); // Rely on From<AuthError> for ApiError impl err })?; req.set_context(data.claims); } None => { return Err(ApiError::Unauthorized( "missing authorization header".to_string(), )); } } } Ok(req) }) } pub fn add_response_header_middleware<B>( header: &str, value: &str, ) -> anyhow::Result<Middleware<B, ApiError>> where B: hyper::body::HttpBody + Send + Sync + 'static, { let name = HeaderName::from_str(header).with_context(|| format!("invalid header name: {header}"))?; let value = HeaderValue::from_str(value).with_context(|| format!("invalid header value: {value}"))?; Ok(Middleware::post_with_info( move |mut response, request_info| { let name = name.clone(); let value = value.clone(); async move { let headers = response.headers_mut(); if headers.contains_key(&name) { warn!( "{} response already contains header {:?}", request_info.uri(), &name, ); } else { headers.insert(name, value); } Ok(response) } }, )) } pub fn check_permission_with( req: &Request<Body>, check_permission: impl Fn(&Claims) -> Result<(), AuthError>, ) -> Result<(), ApiError> { match req.context::<Claims>() { Some(claims) => Ok(check_permission(&claims) .map_err(|_err| ApiError::Forbidden("JWT authentication error".to_string()))?), None => Ok(()), // claims is None because auth is disabled } } #[cfg(test)] mod tests { use std::future::poll_fn; use std::net::{IpAddr, SocketAddr}; use hyper::service::Service; use routerify::RequestServiceBuilder; use super::*; #[tokio::test] async fn test_request_id_returned() { let builder = RequestServiceBuilder::new(make_router().build().unwrap()).unwrap(); let remote_addr = SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), 80); let mut service = builder.build(remote_addr); if let Err(e) = 
poll_fn(|ctx| service.poll_ready(ctx)).await { panic!("request service is not ready: {e:?}"); } let mut req: Request<Body> = Request::default(); req.headers_mut() .append(&X_REQUEST_ID_HEADER, HeaderValue::from_str("42").unwrap()); let resp: Response<hyper::body::Body> = service.call(req).await.unwrap(); let header_val = resp.headers().get(&X_REQUEST_ID_HEADER).unwrap(); assert!(header_val == "42", "response header mismatch"); } #[tokio::test] async fn test_request_id_empty() { let builder = RequestServiceBuilder::new(make_router().build().unwrap()).unwrap(); let remote_addr = SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), 80); let mut service = builder.build(remote_addr); if let Err(e) = poll_fn(|ctx| service.poll_ready(ctx)).await { panic!("request service is not ready: {e:?}"); } let req: Request<Body> = Request::default(); let resp: Response<hyper::body::Body> = service.call(req).await.unwrap(); let header_val = resp.headers().get(&X_REQUEST_ID_HEADER); assert_ne!(header_val, None, "response header should NOT be empty"); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/desim/src/lib.rs
libs/desim/src/lib.rs
pub mod chan; pub mod executor; pub mod network; pub mod node_os; pub mod options; pub mod proto; pub mod time; pub mod world;
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/desim/src/world.rs
libs/desim/src/world.rs
use std::ops::DerefMut; use std::sync::{Arc, mpsc}; use parking_lot::Mutex; use rand::SeedableRng; use rand::rngs::StdRng; use super::chan::Chan; use super::network::TCP; use super::node_os::NodeOs; use crate::executor::{ExternalHandle, Runtime}; use crate::network::NetworkTask; use crate::options::NetworkOptions; use crate::proto::{NodeEvent, SimEvent}; use crate::time::Timing; pub type NodeId = u32; /// World contains simulation state. pub struct World { nodes: Mutex<Vec<Arc<Node>>>, /// Random number generator. rng: Mutex<StdRng>, /// Internal event log. events: Mutex<Vec<SimEvent>>, /// Separate task that processes all network messages. network_task: Arc<NetworkTask>, /// Runtime for running threads and moving time. runtime: Mutex<Runtime>, /// To get current time. timing: Arc<Timing>, } impl World { pub fn new(seed: u64, options: Arc<NetworkOptions>) -> World { let timing = Arc::new(Timing::new()); let mut runtime = Runtime::new(timing.clone()); let (tx, rx) = mpsc::channel(); runtime.spawn(move || { // create and start network background thread, and send it back via the channel NetworkTask::start_new(options, tx) }); // wait for the network task to start while runtime.step() {} let network_task = rx.recv().unwrap(); World { nodes: Mutex::new(Vec::new()), rng: Mutex::new(StdRng::seed_from_u64(seed)), events: Mutex::new(Vec::new()), network_task, runtime: Mutex::new(runtime), timing, } } pub fn step(&self) -> bool { self.runtime.lock().step() } pub fn get_thread_step_count(&self) -> u64 { self.runtime.lock().step_counter } /// Create a new random number generator. pub fn new_rng(&self) -> StdRng { let mut rng = self.rng.lock(); StdRng::from_rng(rng.deref_mut()) } /// Create a new node. pub fn new_node(self: &Arc<Self>) -> Arc<Node> { let mut nodes = self.nodes.lock(); let id = nodes.len() as NodeId; let node = Arc::new(Node::new(id, self.clone(), self.new_rng())); nodes.push(node.clone()); node } /// Get an internal node state by id. 
fn get_node(&self, id: NodeId) -> Option<Arc<Node>> { let nodes = self.nodes.lock(); let num = id as usize; if num < nodes.len() { Some(nodes[num].clone()) } else { None } } pub fn stop_all(&self) { self.runtime.lock().crash_all_threads(); } /// Returns a writable end of a TCP connection, to send src->dst messages. pub fn open_tcp(self: &Arc<World>, dst: NodeId) -> TCP { // TODO: replace unwrap() with /dev/null socket. let dst = self.get_node(dst).unwrap(); let dst_accept = dst.node_events.lock().clone(); let rng = self.new_rng(); self.network_task.start_new_connection(rng, dst_accept) } /// Get current time. pub fn now(&self) -> u64 { self.timing.now() } /// Get a copy of the internal clock. pub fn clock(&self) -> Arc<Timing> { self.timing.clone() } pub fn add_event(&self, node: NodeId, data: String) { let time = self.now(); self.events.lock().push(SimEvent { time, node, data }); } pub fn take_events(&self) -> Vec<SimEvent> { let mut events = self.events.lock(); let mut res = Vec::new(); std::mem::swap(&mut res, &mut events); res } pub fn deallocate(&self) { self.stop_all(); self.timing.clear(); self.nodes.lock().clear(); } } /// Internal node state. pub struct Node { pub id: NodeId, node_events: Mutex<Chan<NodeEvent>>, world: Arc<World>, pub(crate) rng: Mutex<StdRng>, } impl Node { pub fn new(id: NodeId, world: Arc<World>, rng: StdRng) -> Node { Node { id, node_events: Mutex::new(Chan::new()), world, rng: Mutex::new(rng), } } /// Spawn a new thread with this node context. pub fn launch(self: &Arc<Self>, f: impl FnOnce(NodeOs) + Send + 'static) -> ExternalHandle { let node = self.clone(); let world = self.world.clone(); self.world.runtime.lock().spawn(move || { f(NodeOs::new(world, node.clone())); }) } /// Returns a channel to receive Accepts and internal messages. pub fn node_events(&self) -> Chan<NodeEvent> { self.node_events.lock().clone() } /// This will drop all in-flight Accept messages. 
pub fn replug_node_events(&self, chan: Chan<NodeEvent>) { *self.node_events.lock() = chan; } /// Append event to the world's log. pub fn log_event(&self, data: String) { self.world.add_event(self.id, data) } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/desim/src/chan.rs
libs/desim/src/chan.rs
use std::collections::VecDeque; use std::sync::Arc; use parking_lot::{Mutex, MutexGuard}; use crate::executor::{self, PollSome, Waker}; /// FIFO channel with blocking send and receive. Can be cloned and shared between threads. /// Blocking functions should be used only from threads that are managed by the executor. pub struct Chan<T> { shared: Arc<State<T>>, } impl<T> Clone for Chan<T> { fn clone(&self) -> Self { Chan { shared: self.shared.clone(), } } } impl<T> Default for Chan<T> { fn default() -> Self { Self::new() } } impl<T> Chan<T> { pub fn new() -> Chan<T> { Chan { shared: Arc::new(State { queue: Mutex::new(VecDeque::new()), waker: Waker::new(), }), } } /// Get a message from the front of the queue, block if the queue is empty. /// If not called from the executor thread, it can block forever. pub fn recv(&self) -> T { self.shared.recv() } /// Panic if the queue is empty. pub fn must_recv(&self) -> T { self.shared .try_recv() .expect("message should've been ready") } /// Get a message from the front of the queue, return None if the queue is empty. /// Never blocks. pub fn try_recv(&self) -> Option<T> { self.shared.try_recv() } /// Send a message to the back of the queue. pub fn send(&self, t: T) { self.shared.send(t); } } struct State<T> { queue: Mutex<VecDeque<T>>, waker: Waker, } impl<T> State<T> { fn send(&self, t: T) { self.queue.lock().push_back(t); self.waker.wake_all(); } fn try_recv(&self) -> Option<T> { let mut q = self.queue.lock(); q.pop_front() } fn recv(&self) -> T { // interrupt the receiver to prevent consuming everything at once executor::yield_me(0); let mut queue = self.queue.lock(); if let Some(t) = queue.pop_front() { return t; } loop { self.waker.wake_me_later(); if let Some(t) = queue.pop_front() { return t; } MutexGuard::unlocked(&mut queue, || { executor::yield_me(-1); }); } } } impl<T> PollSome for Chan<T> { /// Schedules a wakeup for the current thread. 
fn wake_me(&self) { self.shared.waker.wake_me_later(); } /// Checks if chan has any pending messages. fn has_some(&self) -> bool { !self.shared.queue.lock().is_empty() } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/desim/src/network.rs
libs/desim/src/network.rs
use std::cmp::Ordering; use std::collections::{BinaryHeap, VecDeque}; use std::fmt::{self, Debug}; use std::ops::DerefMut; use std::sync::{Arc, mpsc}; use parking_lot::lock_api::{MappedMutexGuard, MutexGuard}; use parking_lot::{Mutex, RawMutex}; use rand::rngs::StdRng; use tracing::debug; use super::chan::Chan; use super::proto::AnyMessage; use crate::executor::{self, ThreadContext}; use crate::options::NetworkOptions; use crate::proto::{NetEvent, NodeEvent}; pub struct NetworkTask { options: Arc<NetworkOptions>, connections: Mutex<Vec<VirtualConnection>>, /// min-heap of connections having something to deliver. events: Mutex<BinaryHeap<Event>>, task_context: Arc<ThreadContext>, } impl NetworkTask { pub fn start_new(options: Arc<NetworkOptions>, tx: mpsc::Sender<Arc<NetworkTask>>) { let ctx = executor::get_thread_ctx(); let task = Arc::new(Self { options, connections: Mutex::new(Vec::new()), events: Mutex::new(BinaryHeap::new()), task_context: ctx, }); // send the task upstream tx.send(task.clone()).unwrap(); // start the task task.start(); } pub fn start_new_connection(self: &Arc<Self>, rng: StdRng, dst_accept: Chan<NodeEvent>) -> TCP { let now = executor::now(); let connection_id = self.connections.lock().len(); let vc = VirtualConnection { connection_id, dst_accept, dst_sockets: [Chan::new(), Chan::new()], state: Mutex::new(ConnectionState { buffers: [NetworkBuffer::new(None), NetworkBuffer::new(Some(now))], rng, }), }; vc.schedule_timeout(self); vc.send_connect(self); let recv_chan = vc.dst_sockets[0].clone(); self.connections.lock().push(vc); TCP { net: self.clone(), conn_id: connection_id, dir: 0, recv_chan, } } } // private functions impl NetworkTask { /// Schedule to wakeup network task (self) `after_ms` later to deliver /// messages of connection `id`. 
fn schedule(&self, id: usize, after_ms: u64) { self.events.lock().push(Event { time: executor::now() + after_ms, conn_id: id, }); self.task_context.schedule_wakeup(after_ms); } /// Get locked connection `id`. fn get(&self, id: usize) -> MappedMutexGuard<'_, RawMutex, VirtualConnection> { MutexGuard::map(self.connections.lock(), |connections| { connections.get_mut(id).unwrap() }) } fn collect_pending_events(&self, now: u64, vec: &mut Vec<Event>) { vec.clear(); let mut events = self.events.lock(); while let Some(event) = events.peek() { if event.time > now { break; } let event = events.pop().unwrap(); vec.push(event); } } fn start(self: &Arc<Self>) { debug!("started network task"); let mut events = Vec::new(); loop { let now = executor::now(); self.collect_pending_events(now, &mut events); for event in events.drain(..) { let conn = self.get(event.conn_id); conn.process(self); } // block until wakeup executor::yield_me(-1); } } } // 0 - from node(0) to node(1) // 1 - from node(1) to node(0) type MessageDirection = u8; fn sender_str(dir: MessageDirection) -> &'static str { match dir { 0 => "client", 1 => "server", _ => unreachable!(), } } fn receiver_str(dir: MessageDirection) -> &'static str { match dir { 0 => "server", 1 => "client", _ => unreachable!(), } } /// Virtual connection between two nodes. /// Node 0 is the creator of the connection (client), /// and node 1 is the acceptor (server). struct VirtualConnection { connection_id: usize, /// one-off chan, used to deliver Accept message to dst dst_accept: Chan<NodeEvent>, /// message sinks dst_sockets: [Chan<NetEvent>; 2], state: Mutex<ConnectionState>, } struct ConnectionState { buffers: [NetworkBuffer; 2], rng: StdRng, } impl VirtualConnection { /// Notify the future about the possible timeout. fn schedule_timeout(&self, net: &NetworkTask) { if let Some(timeout) = net.options.keepalive_timeout { net.schedule(self.connection_id, timeout); } } /// Send the handshake (Accept) to the server. 
fn send_connect(&self, net: &NetworkTask) { let now = executor::now(); let mut state = self.state.lock(); let delay = net.options.connect_delay.delay(&mut state.rng); let buffer = &mut state.buffers[0]; assert!(buffer.buf.is_empty()); assert!(!buffer.recv_closed); assert!(!buffer.send_closed); assert!(buffer.last_recv.is_none()); let delay = if let Some(ms) = delay { ms } else { debug!("NET: TCP #{} dropped connect", self.connection_id); buffer.send_closed = true; return; }; // Send a message into the future. buffer .buf .push_back((now + delay, AnyMessage::InternalConnect)); net.schedule(self.connection_id, delay); } /// Transmit some of the messages from the buffer to the nodes. fn process(&self, net: &Arc<NetworkTask>) { let now = executor::now(); let mut state = self.state.lock(); for direction in 0..2 { self.process_direction( net, state.deref_mut(), now, direction as MessageDirection, &self.dst_sockets[direction ^ 1], ); } // Close the one side of the connection by timeout if the node // has not received any messages for a long time. if let Some(timeout) = net.options.keepalive_timeout { let mut to_close = [false, false]; for direction in 0..2 { let buffer = &mut state.buffers[direction]; if buffer.recv_closed { continue; } if let Some(last_recv) = buffer.last_recv { if now - last_recv >= timeout { debug!( "NET: connection {} timed out at {}", self.connection_id, receiver_str(direction as MessageDirection) ); let node_idx = direction ^ 1; to_close[node_idx] = true; } } } drop(state); for (node_idx, should_close) in to_close.iter().enumerate() { if *should_close { self.close(node_idx); } } } } /// Process messages in the buffer in the given direction. 
fn process_direction( &self, net: &Arc<NetworkTask>, state: &mut ConnectionState, now: u64, direction: MessageDirection, to_socket: &Chan<NetEvent>, ) { let buffer = &mut state.buffers[direction as usize]; if buffer.recv_closed { assert!(buffer.buf.is_empty()); } while !buffer.buf.is_empty() && buffer.buf.front().unwrap().0 <= now { let msg = buffer.buf.pop_front().unwrap().1; buffer.last_recv = Some(now); self.schedule_timeout(net); if let AnyMessage::InternalConnect = msg { // TODO: assert to_socket is the server let server_to_client = TCP { net: net.clone(), conn_id: self.connection_id, dir: direction ^ 1, recv_chan: to_socket.clone(), }; // special case, we need to deliver new connection to a separate channel self.dst_accept.send(NodeEvent::Accept(server_to_client)); } else { to_socket.send(NetEvent::Message(msg)); } } } /// Try to send a message to the buffer, optionally dropping it and /// determining delivery timestamp. fn send(&self, net: &NetworkTask, direction: MessageDirection, msg: AnyMessage) { let now = executor::now(); let mut state = self.state.lock(); let (delay, close) = if let Some(ms) = net.options.send_delay.delay(&mut state.rng) { (ms, false) } else { (0, true) }; let buffer = &mut state.buffers[direction as usize]; if buffer.send_closed { debug!( "NET: TCP #{} dropped message {:?} (broken pipe)", self.connection_id, msg ); return; } if close { debug!( "NET: TCP #{} dropped message {:?} (pipe just broke)", self.connection_id, msg ); buffer.send_closed = true; return; } if buffer.recv_closed { debug!( "NET: TCP #{} dropped message {:?} (recv closed)", self.connection_id, msg ); return; } // Send a message into the future. buffer.buf.push_back((now + delay, msg)); net.schedule(self.connection_id, delay); } /// Close the connection. Only one side of the connection will be closed, /// and no further messages will be delivered. The other side will not be notified. 
fn close(&self, node_idx: usize) {
    let mut state = self.state.lock();
    // Node `node_idx` receives from buffer `1 ^ node_idx`.
    let recv_buffer = &mut state.buffers[1 ^ node_idx];
    if recv_buffer.recv_closed {
        debug!(
            "NET: TCP #{} closed twice at {}",
            self.connection_id,
            sender_str(node_idx as MessageDirection),
        );
        return;
    }
    debug!(
        "NET: TCP #{} closed at {}",
        self.connection_id,
        sender_str(node_idx as MessageDirection),
    );
    recv_buffer.recv_closed = true;
    // Drop anything still in flight towards the closing node.
    for msg in recv_buffer.buf.drain(..) {
        debug!(
            "NET: TCP #{} dropped message {:?} (closed)",
            self.connection_id, msg
        );
    }
    // The closing node will not send anything anymore either.
    let send_buffer = &mut state.buffers[node_idx];
    send_buffer.send_closed = true;
    drop(state);

    // TODO: notify the other side?
    self.dst_sockets[node_idx].send(NetEvent::Closed);
}
}

struct NetworkBuffer {
    /// Messages paired with time of delivery
    buf: VecDeque<(u64, AnyMessage)>,
    /// True if the connection is closed on the receiving side,
    /// i.e. no more messages from the buffer will be delivered.
    recv_closed: bool,
    /// True if the connection is closed on the sending side,
    /// i.e. no more messages will be added to the buffer.
    send_closed: bool,
    /// Last time a message was delivered from the buffer.
    /// If None, it means that the server is the receiver and
    /// it has not yet aware of this connection (i.e. has not
    /// received the Accept).
    last_recv: Option<u64>,
}

impl NetworkBuffer {
    /// Fresh, open buffer; `last_recv` seeds the keepalive clock (None until
    /// the server has accepted the connection).
    fn new(last_recv: Option<u64>) -> Self {
        Self {
            buf: VecDeque::new(),
            recv_closed: false,
            send_closed: false,
            last_recv,
        }
    }
}

/// Single end of a bidirectional network stream without reordering (TCP-like).
/// Reads are implemented using channels, writes go to the buffer inside VirtualConnection.
pub struct TCP {
    net: Arc<NetworkTask>,
    conn_id: usize,
    dir: MessageDirection,
    recv_chan: Chan<NetEvent>,
}

impl Debug for TCP {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "TCP #{} ({})", self.conn_id, sender_str(self.dir),)
    }
}

impl TCP {
    /// Send a message to the other side. It's guaranteed that it will not arrive
    /// before the arrival of all messages sent earlier.
    pub fn send(&self, msg: AnyMessage) {
        let conn = self.net.get(self.conn_id);
        conn.send(&self.net, self.dir, msg);
    }

    /// Get a channel to receive incoming messages.
    pub fn recv_chan(&self) -> Chan<NetEvent> {
        self.recv_chan.clone()
    }

    /// Id of the underlying virtual connection.
    pub fn connection_id(&self) -> usize {
        self.conn_id
    }

    /// Close this endpoint's side of the connection.
    // NOTE(review): `dir` is passed as the node index here — these appear to
    // coincide by construction; confirm against VirtualConnection::close.
    pub fn close(&self) {
        let conn = self.net.get(self.conn_id);
        conn.close(self.dir as usize);
    }
}

/// A scheduled "process this connection" wakeup for the network task.
struct Event {
    time: u64,
    conn_id: usize,
}

// BinaryHeap is a max-heap, and we want a min-heap. Reverse the ordering here
// to get that.
impl PartialOrd for Event {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Event {
    fn cmp(&self, other: &Self) -> Ordering {
        // Intentionally inverted (other vs self) so the smallest
        // (time, conn_id) pair surfaces first.
        (other.time, other.conn_id).cmp(&(self.time, self.conn_id))
    }
}

impl PartialEq for Event {
    fn eq(&self, other: &Self) -> bool {
        (other.time, other.conn_id) == (self.time, self.conn_id)
    }
}

impl Eq for Event {}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/desim/src/time.rs
libs/desim/src/time.rs
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::ops::DerefMut;
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, AtomicU64};

use parking_lot::Mutex;
use tracing::trace;

use crate::executor::ThreadContext;

/// Holds current time and all pending wakeup events.
pub struct Timing {
    /// Current world's time.
    current_time: AtomicU64,
    /// Pending timers.
    queue: Mutex<BinaryHeap<Pending>>,
    /// Global nonce. Makes picking events from binary heap queue deterministic
    /// by appending a number to events with the same timestamp.
    nonce: AtomicU32,
    /// Used to schedule fake events.
    fake_context: Arc<ThreadContext>,
}

impl Default for Timing {
    fn default() -> Self {
        Self::new()
    }
}

impl Timing {
    /// Create a new empty clock with time set to 0.
    pub fn new() -> Timing {
        Timing {
            current_time: AtomicU64::new(0),
            queue: Mutex::new(BinaryHeap::new()),
            nonce: AtomicU32::new(0),
            fake_context: Arc::new(ThreadContext::new()),
        }
    }

    /// Return the current world's time.
    pub fn now(&self) -> u64 {
        self.current_time.load(std::sync::atomic::Ordering::SeqCst)
    }

    /// Tick-tock the global clock. Return the event ready to be processed
    /// or move the clock forward and then return the event.
    pub(crate) fn step(&self) -> Option<Arc<ThreadContext>> {
        let mut queue = self.queue.lock();

        if queue.is_empty() {
            // no future events
            return None;
        }

        if !self.is_event_ready(queue.deref_mut()) {
            // Nothing is due yet: fast-forward the clock to the earliest
            // pending event, which then must be ready by construction.
            let next_time = queue.peek().unwrap().time;
            self.current_time
                .store(next_time, std::sync::atomic::Ordering::SeqCst);
            trace!("rewind time to {}", next_time);
            assert!(self.is_event_ready(queue.deref_mut()));
        }

        Some(queue.pop().unwrap().wake_context)
    }

    /// Append an event to the queue, to wakeup the thread in `ms` milliseconds.
pub(crate) fn schedule_wakeup(&self, ms: u64, wake_context: Arc<ThreadContext>) { self.nonce.fetch_add(1, std::sync::atomic::Ordering::SeqCst); let nonce = self.nonce.load(std::sync::atomic::Ordering::SeqCst); self.queue.lock().push(Pending { time: self.now() + ms, nonce, wake_context, }) } /// Append a fake event to the queue, to prevent clocks from skipping this time. pub fn schedule_fake(&self, ms: u64) { self.queue.lock().push(Pending { time: self.now() + ms, nonce: 0, wake_context: self.fake_context.clone(), }); } /// Return true if there is a ready event. fn is_event_ready(&self, queue: &mut BinaryHeap<Pending>) -> bool { queue.peek().is_some_and(|x| x.time <= self.now()) } /// Clear all pending events. pub(crate) fn clear(&self) { self.queue.lock().clear(); } } struct Pending { time: u64, nonce: u32, wake_context: Arc<ThreadContext>, } // BinaryHeap is a max-heap, and we want a min-heap. Reverse the ordering here // to get that. impl PartialOrd for Pending { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for Pending { fn cmp(&self, other: &Self) -> Ordering { (other.time, other.nonce).cmp(&(self.time, self.nonce)) } } impl PartialEq for Pending { fn eq(&self, other: &Self) -> bool { (other.time, other.nonce) == (self.time, self.nonce) } } impl Eq for Pending {}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/desim/src/executor.rs
libs/desim/src/executor.rs
use std::panic::AssertUnwindSafe;
use std::sync::atomic::{AtomicBool, AtomicU8, AtomicU32, Ordering};
use std::sync::{Arc, OnceLock, mpsc};
use std::thread::JoinHandle;

use tracing::{debug, error, trace};

use crate::time::Timing;

/// Stores status of the running threads. Threads are registered in the runtime upon creation
/// and deregistered upon termination.
pub struct Runtime {
    // stores handles to all threads that are currently running
    threads: Vec<ThreadHandle>,
    // stores current time and pending wakeups
    clock: Arc<Timing>,
    // thread counter
    thread_counter: AtomicU32,
    // Thread step counter -- how many times all threads has been actually
    // stepped (note that all world/time/executor/thread have slightly different
    // meaning of steps). For observability.
    pub step_counter: u64,
}

impl Runtime {
    /// Init new runtime, no running threads.
    pub fn new(clock: Arc<Timing>) -> Self {
        Self {
            threads: Vec::new(),
            clock,
            thread_counter: AtomicU32::new(0),
            step_counter: 0,
        }
    }

    /// Spawn a new thread and register it in the runtime.
pub fn spawn<F>(&mut self, f: F) -> ExternalHandle
where
    F: FnOnce() + Send + 'static,
{
    // Channel used by the new OS thread to hand its context back to us.
    let (tx, rx) = mpsc::channel();

    let clock = self.clock.clone();
    let tid = self.thread_counter.fetch_add(1, Ordering::SeqCst);
    debug!("spawning thread-{}", tid);

    let join = std::thread::spawn(move || {
        let _guard = tracing::info_span!("", tid).entered();
        let res = std::panic::catch_unwind(AssertUnwindSafe(|| {
            with_thread_context(|ctx| {
                assert!(ctx.clock.set(clock).is_ok());
                ctx.id.store(tid, Ordering::SeqCst);
                tx.send(ctx.clone()).expect("failed to send thread context");
                // suspend thread to put it to `threads` in sleeping state
                ctx.yield_me(0);
            });

            // start user-provided function
            f();
        }));
        debug!("thread finished");

        if let Err(e) = res {
            with_thread_context(|ctx| {
                // Unexpected panics kill the whole process; panics after
                // crash_stop()/exit() are part of the simulation protocol.
                if !ctx.allow_panic.load(std::sync::atomic::Ordering::SeqCst) {
                    error!("thread panicked, terminating the process: {:?}", e);
                    std::process::exit(1);
                }

                debug!("thread panicked: {:?}", e);
                let mut result = ctx.result.lock();
                // -1 means "no result recorded yet"; don't overwrite an
                // explicit exit code.
                if result.0 == -1 {
                    *result = (256, format!("thread panicked: {e:?}"));
                }
            });
        }

        with_thread_context(|ctx| {
            ctx.finish_me();
        });
    });

    // Wait for the new thread to park itself before registering it.
    let ctx = rx.recv().expect("failed to receive thread context");
    let handle = ThreadHandle::new(ctx.clone(), join);
    self.threads.push(handle);

    ExternalHandle { ctx }
}

/// Returns true if there are any unfinished activity, such as running thread or pending events.
/// Otherwise returns false, which means all threads are blocked forever.
pub fn step(&mut self) -> bool {
    trace!("runtime step");

    // have we run any thread?
    let mut ran = false;

    self.threads.retain(|thread: &ThreadHandle| {
        // Consume the thread's wakeup flag; only flagged threads get to run.
        let res = thread.ctx.wakeup.compare_exchange(
            PENDING_WAKEUP,
            NO_WAKEUP,
            Ordering::SeqCst,
            Ordering::SeqCst,
        );
        if res.is_err() {
            // thread has no pending wakeups, leaving as is
            return true;
        }
        ran = true;

        trace!("entering thread-{}", thread.ctx.tid());
        let status = thread.step();
        self.step_counter += 1;
        trace!(
            "out of thread-{} with status {:?}",
            thread.ctx.tid(),
            status
        );

        if status == Status::Sleep {
            true
        } else {
            trace!("thread has finished");
            // removing the thread from the list
            false
        }
    });

    if !ran {
        trace!("no threads were run, stepping clock");
        // No runnable thread: advance simulated time and wake the owner of
        // the earliest timer, if any.
        if let Some(ctx_to_wake) = self.clock.step() {
            trace!("waking up thread-{}", ctx_to_wake.tid());
            ctx_to_wake.inc_wake();
        } else {
            return false;
        }
    }

    true
}

/// Kill all threads. This is done by setting a flag in each thread context and waking it up.
pub fn crash_all_threads(&mut self) {
    for thread in self.threads.iter() {
        thread.ctx.crash_stop();
    }

    // all threads should be finished after a few steps
    while !self.threads.is_empty() {
        self.step();
    }
}
}

impl Drop for Runtime {
    fn drop(&mut self) {
        debug!("dropping the runtime");
        self.crash_all_threads();
    }
}

/// Handle to a simulated thread, usable from outside the simulation.
#[derive(Clone)]
pub struct ExternalHandle {
    ctx: Arc<ThreadContext>,
}

impl ExternalHandle {
    /// Returns true if thread has finished execution.
    pub fn is_finished(&self) -> bool {
        let status = self.ctx.mutex.lock();
        *status == Status::Finished
    }

    /// Returns exitcode and message, which is available after thread has finished execution.
    pub fn result(&self) -> (i32, String) {
        let result = self.ctx.result.lock();
        result.clone()
    }

    /// Returns thread id.
    pub fn id(&self) -> u32 {
        self.ctx.id.load(Ordering::SeqCst)
    }

    /// Sets a flag to crash thread on the next wakeup.
    pub fn crash_stop(&self) {
        self.ctx.crash_stop();
    }
}

/// Runtime-internal handle pairing a context with the real OS join handle.
struct ThreadHandle {
    ctx: Arc<ThreadContext>,
    _join: JoinHandle<()>,
}

impl ThreadHandle {
    /// Create a new [`ThreadHandle`] and wait until thread will enter [`Status::Sleep`] state.
    fn new(ctx: Arc<ThreadContext>, join: JoinHandle<()>) -> Self {
        let mut status = ctx.mutex.lock();
        // wait until thread will go into the first yield
        while *status != Status::Sleep {
            ctx.condvar.wait(&mut status);
        }
        drop(status);

        Self { ctx, _join: join }
    }

    /// Allows thread to execute one step of its execution.
    /// Returns [`Status`] of the thread after the step.
    fn step(&self) -> Status {
        let mut status = self.ctx.mutex.lock();
        assert!(matches!(*status, Status::Sleep));

        // Hand control to the thread and block until it yields or finishes.
        *status = Status::Running;
        self.ctx.condvar.notify_all();

        while *status == Status::Running {
            self.ctx.condvar.wait(&mut status);
        }

        *status
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Status {
    /// Thread is running.
    Running,
    /// Waiting for event to complete, will be resumed by the executor step, once wakeup flag is set.
    Sleep,
    /// Thread finished execution.
    Finished,
}

// Values of ThreadContext::wakeup.
const NO_WAKEUP: u8 = 0;
const PENDING_WAKEUP: u8 = 1;

pub struct ThreadContext {
    id: AtomicU32,
    // used to block thread until it is woken up
    mutex: parking_lot::Mutex<Status>,
    condvar: parking_lot::Condvar,
    // used as a flag to indicate runtime that thread is ready to be woken up
    wakeup: AtomicU8,
    clock: OnceLock<Arc<Timing>>,
    // execution result, set by exit() call
    result: parking_lot::Mutex<(i32, String)>,
    // determines if process should be killed on receiving panic
    allow_panic: AtomicBool,
    // acts as a signal that thread should crash itself on the next wakeup
    crash_request: AtomicBool,
}

impl ThreadContext {
    pub(crate) fn new() -> Self {
        Self {
            id: AtomicU32::new(0),
            mutex: parking_lot::Mutex::new(Status::Running),
            condvar: parking_lot::Condvar::new(),
            wakeup: AtomicU8::new(NO_WAKEUP),
            clock: OnceLock::new(),
            // -1 exit code means "no result recorded yet"
            result: parking_lot::Mutex::new((-1, String::new())),
            allow_panic: AtomicBool::new(false),
            crash_request: AtomicBool::new(false),
        }
    }
}

// Functions for executor to control thread execution.
impl ThreadContext {
    /// Set atomic flag to indicate that thread is ready to be woken up.
    fn inc_wake(&self) {
        self.wakeup.store(PENDING_WAKEUP, Ordering::SeqCst);
    }

    /// Internal function used for event queues.
    pub(crate) fn schedule_wakeup(self: &Arc<Self>, after_ms: u64) {
        self.clock
            .get()
            .unwrap()
            .schedule_wakeup(after_ms, self.clone());
    }

    fn tid(&self) -> u32 {
        self.id.load(Ordering::SeqCst)
    }

    fn crash_stop(&self) {
        let status = self.mutex.lock();
        if *status == Status::Finished {
            debug!(
                "trying to crash thread-{}, which is already finished",
                self.tid()
            );
            return;
        }
        assert!(matches!(*status, Status::Sleep));
        drop(status);

        // Allow the induced panic, request the crash, and wake the thread.
        self.allow_panic.store(true, Ordering::SeqCst);
        self.crash_request.store(true, Ordering::SeqCst);
        // set a wakeup
        self.inc_wake();
        // it will panic on the next wakeup
    }
}

// Internal functions.
impl ThreadContext {
    /// Blocks thread until it's woken up by the executor. If `after_ms` is 0, is will be
    /// woken on the next step. If `after_ms` > 0, wakeup is scheduled after that time.
    /// Otherwise wakeup is not scheduled inside `yield_me`, and should be arranged before
    /// calling this function.
    fn yield_me(self: &Arc<Self>, after_ms: i64) {
        let mut status = self.mutex.lock();
        assert!(matches!(*status, Status::Running));

        match after_ms.cmp(&0) {
            std::cmp::Ordering::Less => {
                // block until something wakes us up
            }
            std::cmp::Ordering::Equal => {
                // tell executor that we are ready to be woken up
                self.inc_wake();
            }
            std::cmp::Ordering::Greater => {
                // schedule wakeup
                self.clock
                    .get()
                    .unwrap()
                    .schedule_wakeup(after_ms as u64, self.clone());
            }
        }

        // Park: hand control back to the executor...
        *status = Status::Sleep;
        self.condvar.notify_all();

        // ...and wait until executor wakes us up
        while *status != Status::Running {
            self.condvar.wait(&mut status);
        }

        if self.crash_request.load(Ordering::SeqCst) {
            panic!("crashed by request");
        }
    }

    /// Called only once, exactly before thread finishes execution.
    fn finish_me(&self) {
        let mut status = self.mutex.lock();
        assert!(matches!(*status, Status::Running));

        *status = Status::Finished;
        {
            let mut result = self.result.lock();
            if result.0 == -1 {
                *result = (0, "finished normally".to_owned());
            }
        }
        self.condvar.notify_all();
    }
}

/// Invokes the given closure with a reference to the current thread [`ThreadContext`].
#[inline(always)]
fn with_thread_context<T>(f: impl FnOnce(&Arc<ThreadContext>) -> T) -> T {
    thread_local!(static THREAD_DATA: Arc<ThreadContext> = Arc::new(ThreadContext::new()));
    THREAD_DATA.with(f)
}

/// Waker is used to wake up threads that are blocked on condition.
/// It keeps track of contexts [`Arc<ThreadContext>`] and can increment the counter
/// of several contexts to send a notification.
pub struct Waker {
    // contexts that are waiting for a notification
    contexts: parking_lot::Mutex<smallvec::SmallVec<[Arc<ThreadContext>; 8]>>,
}

impl Default for Waker {
    fn default() -> Self {
        Self::new()
    }
}

impl Waker {
    pub fn new() -> Self {
        Self {
            contexts: parking_lot::Mutex::new(smallvec::SmallVec::new()),
        }
    }

    /// Subscribe current thread to receive a wake notification later.
    pub fn wake_me_later(&self) {
        with_thread_context(|ctx| {
            self.contexts.lock().push(ctx.clone());
        });
    }

    /// Wake up all threads that are waiting for a notification and clear the list.
    pub fn wake_all(&self) {
        let mut v = self.contexts.lock();
        for ctx in v.iter() {
            ctx.inc_wake();
        }
        v.clear();
    }
}

/// See [`ThreadContext::yield_me`].
pub fn yield_me(after_ms: i64) {
    with_thread_context(|ctx| ctx.yield_me(after_ms))
}

/// Get current time.
pub fn now() -> u64 {
    with_thread_context(|ctx| ctx.clock.get().unwrap().now())
}

/// Terminate the current simulated thread with the given exit code and
/// message. Implemented as an allowed panic that unwinds out of the thread.
pub fn exit(code: i32, msg: String) -> ! {
    with_thread_context(|ctx| {
        ctx.allow_panic.store(true, Ordering::SeqCst);
        let mut result = ctx.result.lock();
        *result = (code, msg);
        panic!("exit");
    })
}

pub(crate) fn get_thread_ctx() -> Arc<ThreadContext> {
    with_thread_context(|ctx| ctx.clone())
}

/// Trait for polling channels until they have something.
pub trait PollSome {
    /// Schedule wakeup for message arrival.
    fn wake_me(&self);

    /// Check if channel has a ready message.
    fn has_some(&self) -> bool;
}

/// Blocks current thread until one of the channels has a ready message. Returns
/// index of the channel that has a message. If timeout is reached, returns None.
///
/// Negative timeout means block forever. Zero timeout means check channels and return
/// immediately. Positive timeout means block until timeout is reached.
pub fn epoll_chans(chans: &[Box<dyn PollSome>], timeout: i64) -> Option<usize> {
    // deadline is unused when timeout < 0
    let deadline = if timeout < 0 {
        0
    } else {
        now() + timeout as u64
    };

    loop {
        // Re-subscribe on every iteration: wakeups are one-shot.
        for chan in chans {
            chan.wake_me()
        }

        for (i, chan) in chans.iter().enumerate() {
            if chan.has_some() {
                return Some(i);
            }
        }

        if timeout < 0 {
            // block until wakeup
            yield_me(-1);
        } else {
            let current_time = now();
            if current_time >= deadline {
                return None;
            }

            yield_me((deadline - current_time) as i64);
        }
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/desim/src/options.rs
libs/desim/src/options.rs
use rand::Rng; use rand::rngs::StdRng; /// Describes random delays and failures. Delay will be uniformly distributed in [min, max]. /// Connection failure will occur with the probablity fail_prob. #[derive(Clone, Debug)] pub struct Delay { pub min: u64, pub max: u64, pub fail_prob: f64, // [0; 1] } impl Delay { /// Create a struct with no delay, no failures. pub fn empty() -> Delay { Delay { min: 0, max: 0, fail_prob: 0.0, } } /// Create a struct with a fixed delay. pub fn fixed(ms: u64) -> Delay { Delay { min: ms, max: ms, fail_prob: 0.0, } } /// Generate a random delay in range [min, max]. Return None if the /// message should be dropped. pub fn delay(&self, rng: &mut StdRng) -> Option<u64> { if rng.random_bool(self.fail_prob) { return None; } Some(rng.random_range(self.min..=self.max)) } } /// Describes network settings. All network packets will be subjected to the same delays and failures. #[derive(Clone, Debug)] pub struct NetworkOptions { /// Connection will be automatically closed after this timeout if no data is received. pub keepalive_timeout: Option<u64>, /// New connections will be delayed by this amount of time. pub connect_delay: Delay, /// Each message will be delayed by this amount of time. pub send_delay: Delay, }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/desim/src/node_os.rs
libs/desim/src/node_os.rs
use std::sync::Arc;

use rand::Rng;

use super::chan::Chan;
use super::network::TCP;
use super::world::{Node, NodeId, World};
use crate::proto::NodeEvent;

/// Abstraction with all functions (aka syscalls) available to the node.
#[derive(Clone)]
pub struct NodeOs {
    world: Arc<World>,
    internal: Arc<Node>,
}

impl NodeOs {
    pub fn new(world: Arc<World>, internal: Arc<Node>) -> NodeOs {
        NodeOs { world, internal }
    }

    /// Get the node id.
    pub fn id(&self) -> NodeId {
        self.internal.id
    }

    /// Opens a bidirectional connection with the other node. Always successful.
    pub fn open_tcp(&self, dst: NodeId) -> TCP {
        self.world.open_tcp(dst)
    }

    /// Returns a channel to receive node events (socket Accept and internal messages).
    pub fn node_events(&self) -> Chan<NodeEvent> {
        self.internal.node_events()
    }

    /// Get current time.
    pub fn now(&self) -> u64 {
        self.world.now()
    }

    /// Generate a random number in range [0, max).
    // Note: `random_range(0..max)` panics when `max` is 0.
    pub fn random(&self, max: u64) -> u64 {
        self.internal.rng.lock().random_range(0..max)
    }

    /// Append a new event to the world event log.
    pub fn log_event(&self, data: String) {
        self.internal.log_event(data)
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/desim/src/proto.rs
libs/desim/src/proto.rs
use std::fmt::Debug;

use bytes::Bytes;
use utils::lsn::Lsn;

use crate::network::TCP;
use crate::world::NodeId;

/// Internal node events.
#[derive(Debug)]
pub enum NodeEvent {
    Accept(TCP),
    Internal(AnyMessage),
}

/// Events that are coming from a network socket.
#[derive(Clone, Debug)]
pub enum NetEvent {
    Message(AnyMessage),
    Closed,
}

/// Custom events generated throughout the simulation. Can be used by the test to verify the correctness.
#[derive(Debug)]
pub struct SimEvent {
    pub time: u64,
    pub node: NodeId,
    pub data: String,
}

/// Umbrella type for all possible flavours of messages. These events can be sent over network
/// or to an internal node events channel.
#[derive(Clone)]
pub enum AnyMessage {
    /// Not used, empty placeholder.
    None,
    /// Used internally for notifying node about new incoming connection.
    InternalConnect,
    Just32(u32),
    ReplCell(ReplCell),
    Bytes(Bytes),
    LSN(u64),
}

// Manual Debug impl so that payloads render compactly (hex for bytes,
// formatted Lsn for LSN values).
impl Debug for AnyMessage {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            AnyMessage::None => write!(f, "None"),
            AnyMessage::InternalConnect => write!(f, "InternalConnect"),
            AnyMessage::Just32(v) => write!(f, "Just32({v})"),
            AnyMessage::ReplCell(v) => write!(f, "ReplCell({v:?})"),
            AnyMessage::Bytes(v) => write!(f, "Bytes({})", hex::encode(v)),
            AnyMessage::LSN(v) => write!(f, "LSN({})", Lsn(*v)),
        }
    }
}

/// Used in reliable_copy_test.rs
#[derive(Clone, Debug)]
pub struct ReplCell {
    pub value: u32,
    pub client_id: u32,
    pub seqno: u32,
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/desim/tests/reliable_copy_test.rs
libs/desim/tests/reliable_copy_test.rs
//! Simple test to verify that simulator is working. #[cfg(test)] mod reliable_copy_test { use std::sync::Arc; use anyhow::Result; use desim::executor::{self, PollSome}; use desim::node_os::NodeOs; use desim::options::{Delay, NetworkOptions}; use desim::proto::{AnyMessage, NetEvent, NodeEvent, ReplCell}; use desim::world::{NodeId, World}; use parking_lot::Mutex; use tracing::info; /// Disk storage trait and implementation. pub trait Storage<T> { fn flush_pos(&self) -> u32; fn flush(&mut self) -> Result<()>; fn write(&mut self, t: T); } #[derive(Clone)] pub struct SharedStorage<T> { pub state: Arc<Mutex<InMemoryStorage<T>>>, } impl<T> SharedStorage<T> { pub fn new() -> Self { Self { state: Arc::new(Mutex::new(InMemoryStorage::new())), } } } impl<T> Storage<T> for SharedStorage<T> { fn flush_pos(&self) -> u32 { self.state.lock().flush_pos } fn flush(&mut self) -> Result<()> { executor::yield_me(0); self.state.lock().flush() } fn write(&mut self, t: T) { executor::yield_me(0); self.state.lock().write(t); } } pub struct InMemoryStorage<T> { pub data: Vec<T>, pub flush_pos: u32, } impl<T> InMemoryStorage<T> { pub fn new() -> Self { Self { data: Vec::new(), flush_pos: 0, } } pub fn flush(&mut self) -> Result<()> { self.flush_pos = self.data.len() as u32; Ok(()) } pub fn write(&mut self, t: T) { self.data.push(t); } } /// Server implementation. 
pub fn run_server(os: NodeOs, mut storage: Box<dyn Storage<u32>>) {
    info!("started server");

    let node_events = os.node_events();
    // epoll_vec[0] is the accept/internal channel; epoll_vec[i] (i >= 1)
    // mirrors sockets[i - 1].
    let mut epoll_vec: Vec<Box<dyn PollSome>> = vec![Box::new(node_events.clone())];
    let mut sockets = vec![];

    loop {
        let index = executor::epoll_chans(&epoll_vec, -1).unwrap();

        if index == 0 {
            let node_event = node_events.must_recv();
            info!("got node event: {:?}", node_event);
            if let NodeEvent::Accept(tcp) = node_event {
                // Tell the new client how much is already durable.
                tcp.send(AnyMessage::Just32(storage.flush_pos()));
                epoll_vec.push(Box::new(tcp.recv_chan()));
                sockets.push(tcp);
            }
            continue;
        }

        let recv_chan = sockets[index - 1].recv_chan();
        let socket = &sockets[index - 1];

        let event = recv_chan.must_recv();
        info!("got event: {:?}", event);
        if let NetEvent::Message(AnyMessage::ReplCell(cell)) = event {
            // Only accept the next-expected cell; duplicates and gaps are
            // ignored (the client retries until acked).
            if cell.seqno != storage.flush_pos() {
                info!("got out of order data: {:?}", cell);
                continue;
            }
            storage.write(cell.value);
            storage.flush().unwrap();
            // Ack with the new durable position.
            socket.send(AnyMessage::Just32(storage.flush_pos()));
        }
    }
}

/// Client copies all data from array to the remote node.
pub fn run_client(os: NodeOs, data: &[ReplCell], dst: NodeId) {
    info!("started client");

    let mut delivered = 0;

    let mut sock = os.open_tcp(dst);
    let mut recv_chan = sock.recv_chan();
    while delivered < data.len() {
        // (Re)send the next undelivered cell, then wait for an ack or a
        // connection loss.
        let num = &data[delivered];
        info!("sending data: {:?}", num.clone());
        sock.send(AnyMessage::ReplCell(num.clone()));

        // loop {
        let event = recv_chan.recv();
        match event {
            NetEvent::Message(AnyMessage::Just32(flush_pos)) => {
                if flush_pos == 1 + delivered as u32 {
                    delivered += 1;
                }
            }
            NetEvent::Closed => {
                info!("connection closed, reestablishing");
                sock = os.open_tcp(dst);
                recv_chan = sock.recv_chan();
            }
            _ => {}
        }
        // }
    }

    // Re-send everything over a fresh connection. The server ignores cells
    // whose seqno is below its flush position, so this only produces
    // duplicates. NOTE(review): appears intentional, to exercise duplicate
    // delivery handling — confirm.
    let sock = os.open_tcp(dst);
    for num in data {
        info!("sending data: {:?}", num.clone());
        sock.send(AnyMessage::ReplCell(num.clone()));
    }

    info!("sent all data and finished client");
}

/// Run test simulations.
#[test]
fn sim_example_reliable_copy() {
    utils::logging::init(
        utils::logging::LogFormat::Test,
        utils::logging::TracingErrorLayerEnablement::Disabled,
        utils::logging::Output::Stdout,
    )
    .expect("logging init failed");

    // Aggressive delays with a 40% drop probability to exercise retries.
    let delay = Delay {
        min: 1,
        max: 60,
        fail_prob: 0.4,
    };

    let network = NetworkOptions {
        keepalive_timeout: Some(50),
        connect_delay: delay.clone(),
        send_delay: delay.clone(),
    };

    // Run the same scenario under several RNG seeds for coverage.
    for seed in 0..20 {
        let u32_data: [u32; 5] = [1, 2, 3, 4, 5];
        let data = u32_to_cells(&u32_data, 1);
        let world = Arc::new(World::new(seed, Arc::new(network.clone())));

        start_simulation(Options {
            world,
            time_limit: 1_000_000,
            client_fn: Box::new(move |os, server_id| run_client(os, &data, server_id)),
            u32_data,
        });
    }
}

/// Parameters of a single simulation run.
pub struct Options {
    pub world: Arc<World>,
    pub time_limit: u64,
    // Expected final contents of the server's storage.
    pub u32_data: [u32; 5],
    pub client_fn: Box<dyn FnOnce(NodeOs, u32) + Send + 'static>,
}

pub fn start_simulation(options: Options) {
    let world = options.world;

    let client_node = world.new_node();
    let server_node = world.new_node();
    let server_id = server_node.id;

    // start the client thread
    client_node.launch(move |os| {
        let client_fn = options.client_fn;
        client_fn(os, server_id);
    });

    // start the server thread
    let shared_storage = SharedStorage::new();
    let server_storage = shared_storage.clone();
    server_node.launch(move |os| run_server(os, Box::new(server_storage)));

    // Step the world until everything is idle or the time limit is hit.
    while world.step() && world.now() < options.time_limit {}

    let disk_data = shared_storage.state.lock().data.clone();
    assert!(verify_data(&disk_data, &options.u32_data[..]));
}

/// Wrap raw values into sequenced replication cells for one client.
pub fn u32_to_cells(data: &[u32], client_id: u32) -> Vec<ReplCell> {
    let mut res = Vec::new();
    for (i, _) in data.iter().enumerate() {
        res.push(ReplCell {
            client_id,
            seqno: i as u32,
            value: data[i],
        });
    }
    res
}

/// Element-wise equality check between stored and expected data.
fn verify_data(disk_data: &[u32], data: &[u32]) -> bool {
    if disk_data.len() != data.len() {
        return false;
    }
    for i in 0..data.len() {
        if disk_data[i] != data[i] {
            return false;
        }
    }
    true
}
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/metrics/src/wrappers.rs
libs/metrics/src/wrappers.rs
use std::io::{Read, Result, Write}; /// A wrapper for an object implementing [Read] /// which allows a closure to observe the amount of bytes read. /// This is useful in conjunction with metrics (e.g. [IntCounter](crate::IntCounter)). /// /// Example: /// /// ``` /// # use std::io::{Result, Read}; /// # use metrics::{register_int_counter, IntCounter}; /// # use metrics::CountedReader; /// # use once_cell::sync::Lazy; /// # /// # static INT_COUNTER: Lazy<IntCounter> = Lazy::new( || { register_int_counter!( /// # "int_counter", /// # "let's count something!" /// # ).unwrap() /// # }); /// # /// fn do_some_reads(stream: impl Read, count: usize) -> Result<Vec<u8>> { /// let mut reader = CountedReader::new(stream, |cnt| { /// // bump a counter each time we do a read /// INT_COUNTER.inc_by(cnt as u64); /// }); /// /// let mut proto_header = [0; 8]; /// reader.read_exact(&mut proto_header)?; /// assert!(&proto_header == b"deadbeef"); /// /// let mut payload = vec![0; count]; /// reader.read_exact(&mut payload)?; /// Ok(payload) /// } /// ``` /// /// NB: rapid concurrent bumping of an atomic counter might incur /// a performance penalty. Please make sure to amortize the amount /// of atomic operations by either using [BufReader](std::io::BufReader) /// or choosing a non-atomic (thread local) counter. 
pub struct CountedReader<'a, T> { reader: T, update_counter: Box<dyn FnMut(usize) + Sync + Send + 'a>, } impl<'a, T> CountedReader<'a, T> { pub fn new(reader: T, update_counter: impl FnMut(usize) + Sync + Send + 'a) -> Self { Self { reader, update_counter: Box::new(update_counter), } } /// Get an immutable reference to the underlying [Read] implementor pub fn inner(&self) -> &T { &self.reader } /// Get a mutable reference to the underlying [Read] implementor pub fn inner_mut(&mut self) -> &mut T { &mut self.reader } /// Consume the wrapper and return the underlying [Read] implementor pub fn into_inner(self) -> T { self.reader } } impl<T: Read> Read for CountedReader<'_, T> { fn read(&mut self, buf: &mut [u8]) -> Result<usize> { let count = self.reader.read(buf)?; (self.update_counter)(count); Ok(count) } } /// A wrapper for an object implementing [Write] /// which allows a closure to observe the amount of bytes written. /// This is useful in conjunction with metrics (e.g. [IntCounter](crate::IntCounter)). /// /// Example: /// /// ``` /// # use std::io::{Result, Write}; /// # use metrics::{register_int_counter, IntCounter}; /// # use metrics::CountedWriter; /// # use once_cell::sync::Lazy; /// # /// # static INT_COUNTER: Lazy<IntCounter> = Lazy::new( || { register_int_counter!( /// # "int_counter", /// # "let's count something!" /// # ).unwrap() /// # }); /// # /// fn do_some_writes(stream: impl Write, payload: &[u8]) -> Result<()> { /// let mut writer = CountedWriter::new(stream, |cnt| { /// // bump a counter each time we do a write /// INT_COUNTER.inc_by(cnt as u64); /// }); /// /// let proto_header = b"deadbeef"; /// writer.write_all(proto_header)?; /// writer.write_all(payload) /// } /// ``` /// /// NB: rapid concurrent bumping of an atomic counter might incur /// a performance penalty. Please make sure to amortize the amount /// of atomic operations by either using [BufWriter](std::io::BufWriter) /// or choosing a non-atomic (thread local) counter. 
pub struct CountedWriter<'a, T> { writer: T, update_counter: Box<dyn FnMut(usize) + Sync + Send + 'a>, } impl<'a, T> CountedWriter<'a, T> { pub fn new(writer: T, update_counter: impl FnMut(usize) + Sync + Send + 'a) -> Self { Self { writer, update_counter: Box::new(update_counter), } } /// Get an immutable reference to the underlying [Write] implementor pub fn inner(&self) -> &T { &self.writer } /// Get a mutable reference to the underlying [Write] implementor pub fn inner_mut(&mut self) -> &mut T { &mut self.writer } /// Consume the wrapper and return the underlying [Write] implementor pub fn into_inner(self) -> T { self.writer } } impl<T: Write> Write for CountedWriter<'_, T> { fn write(&mut self, buf: &[u8]) -> Result<usize> { let count = self.writer.write(buf)?; (self.update_counter)(count); Ok(count) } fn flush(&mut self) -> Result<()> { self.writer.flush() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_counted_reader() { let stream = [0; 16]; let mut total = 0; let mut reader = CountedReader::new(stream.as_ref(), |cnt| { total += cnt; }); let mut buffer = [0; 8]; reader.read_exact(&mut buffer).unwrap(); reader.read_exact(&mut buffer).unwrap(); drop(reader); assert_eq!(total, stream.len()); } #[test] fn test_counted_writer() { let mut stream = [0; 16]; let mut total = 0; let mut writer = CountedWriter::new(stream.as_mut(), |cnt| { total += cnt; }); let buffer = [0; 8]; writer.write_all(&buffer).unwrap(); writer.write_all(&buffer).unwrap(); drop(writer); assert_eq!(total, stream.len()); } // This mimics the constraints of std::thread::spawn fn assert_send_sync(_x: impl Sync + Send + 'static) {} #[test] fn test_send_sync_counted_reader() { let stream: &[u8] = &[]; let mut reader = CountedReader::new(stream, |_| {}); assert_send_sync(move || { reader.read_exact(&mut []).unwrap(); }); } #[test] fn test_send_sync_counted_writer() { let stream = Vec::<u8>::new(); let mut writer = CountedWriter::new(stream, |_| {}); assert_send_sync(move || { 
writer.write_all(&[]).unwrap(); }); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/metrics/src/lib.rs
libs/metrics/src/lib.rs
//! We re-export those from prometheus crate to //! make sure that we use the same dep version everywhere. //! Otherwise, we might not see all metrics registered via //! a default registry. #![deny(clippy::undocumented_unsafe_blocks)] use std::sync::RwLock; use measured::label::{LabelGroupSet, LabelGroupVisitor, LabelName, NoLabels}; use measured::metric::counter::CounterState; use measured::metric::gauge::GaugeState; use measured::metric::group::Encoding; use measured::metric::name::{MetricName, MetricNameEncoder}; use measured::metric::{MetricEncoding, MetricFamilyEncoding, MetricType}; use measured::{FixedCardinalityLabel, LabelGroup, MetricGroup}; use once_cell::sync::Lazy; use prometheus::Registry; use prometheus::core::{ Atomic, AtomicU64, Collector, GenericCounter, GenericCounterVec, GenericGauge, GenericGaugeVec, }; pub use prometheus::local::LocalHistogram; pub use prometheus::{ Counter, CounterVec, Encoder, Error, Gauge, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge, IntGaugeVec, TextEncoder, core, default_registry, exponential_buckets, linear_buckets, opts, proto, register, register_counter_vec, register_gauge, register_gauge_vec, register_histogram, register_histogram_vec, register_int_counter, register_int_counter_vec, register_int_gauge, register_int_gauge_vec, }; pub mod launch_timestamp; mod wrappers; pub use prometheus; pub use wrappers::{CountedReader, CountedWriter}; mod hll; pub use hll::{HyperLogLog, HyperLogLogState, HyperLogLogVec}; #[cfg(target_os = "linux")] pub mod more_process_metrics; pub type UIntGauge = GenericGauge<AtomicU64>; pub type UIntGaugeVec = GenericGaugeVec<AtomicU64>; #[macro_export] macro_rules! register_uint_gauge_vec { ($NAME:expr, $HELP:expr, $LABELS_NAMES:expr $(,)?) => {{ let gauge_vec = UIntGaugeVec::new($crate::opts!($NAME, $HELP), $LABELS_NAMES).unwrap(); $crate::register(Box::new(gauge_vec.clone())).map(|_| gauge_vec) }}; } #[macro_export] macro_rules! 
register_uint_gauge { ($NAME:expr, $HELP:expr $(,)?) => {{ let gauge = $crate::UIntGauge::new($NAME, $HELP).unwrap(); $crate::register(Box::new(gauge.clone())).map(|_| gauge) }}; } /// Special internal registry, to collect metrics independently from the default registry. /// Was introduced to fix deadlock with lazy registration of metrics in the default registry. static INTERNAL_REGISTRY: Lazy<Registry> = Lazy::new(Registry::new); /// Register a collector in the internal registry. MUST be called before the first call to `gather()`. /// /// Otherwise, we can have a deadlock in the `gather()` call, trying to register a new collector /// while holding the lock. pub fn register_internal(c: Box<dyn Collector>) -> prometheus::Result<()> { INTERNAL_REGISTRY.register(c) } /// Gathers all Prometheus metrics and records the I/O stats just before that. /// /// Metrics gathering is a relatively simple and standalone operation, so /// it might be fine to do it this way to keep things simple. pub fn gather() -> Vec<prometheus::proto::MetricFamily> { update_rusage_metrics(); let mut mfs = prometheus::gather(); let mut internal_mfs = INTERNAL_REGISTRY.gather(); mfs.append(&mut internal_mfs); mfs } static DISK_IO_BYTES: Lazy<IntGaugeVec> = Lazy::new(|| { register_int_gauge_vec!( "libmetrics_disk_io_bytes_total", "Bytes written and read from disk, grouped by the operation (read|write)", &["io_operation"] ) .expect("Failed to register disk i/o bytes int gauge vec") }); static MAXRSS_KB: Lazy<IntGauge> = Lazy::new(|| { register_int_gauge!( "libmetrics_maxrss_kb", "Memory usage (Maximum Resident Set Size)" ) .expect("Failed to register maxrss_kb int gauge") }); /// Most common fsync latency is 50 µs - 100 µs, but it can be much higher, /// especially during many concurrent disk operations. pub const DISK_FSYNC_SECONDS_BUCKETS: &[f64] = &[0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0, 30.0]; /// Constructs histogram buckets that are powers of two starting at 1 (i.e. 
2^0), covering the end /// points. For example, passing start=5,end=20 yields 4,8,16,32 as does start=4,end=32. pub fn pow2_buckets(start: usize, end: usize) -> Vec<f64> { assert_ne!(start, 0); assert!(start <= end); let start = match start.checked_next_power_of_two() { Some(n) if n == start => n, // start already power of two Some(n) => n >> 1, // power of two below start None => panic!("start too large"), }; let end = end.checked_next_power_of_two().expect("end too large"); std::iter::successors(Some(start), |n| n.checked_mul(2)) .take_while(|n| n <= &end) .map(|n| n as f64) .collect() } pub struct InfoMetric<L: LabelGroup, M: MetricType = GaugeState> { label: RwLock<L>, metric: M, } impl<L: LabelGroup> InfoMetric<L> { pub fn new(label: L) -> Self { Self::with_metric(label, GaugeState::new(1)) } } impl<L: LabelGroup + Default> Default for InfoMetric<L, GaugeState> { fn default() -> Self { InfoMetric::new(L::default()) } } impl<L: LabelGroup, M: MetricType<Metadata = ()>> InfoMetric<L, M> { pub fn with_metric(label: L, metric: M) -> Self { Self { label: RwLock::new(label), metric, } } pub fn set_label(&self, label: L) { *self.label.write().unwrap() = label; } } impl<L, M, E> MetricFamilyEncoding<E> for InfoMetric<L, M> where L: LabelGroup, M: MetricEncoding<E, Metadata = ()>, E: Encoding, { fn collect_family_into( &self, name: impl measured::metric::name::MetricNameEncoder, enc: &mut E, ) -> Result<(), E::Err> { M::write_type(&name, enc)?; self.metric .collect_into(&(), &*self.label.read().unwrap(), name, enc) } } pub struct BuildInfo { pub revision: &'static str, pub build_tag: &'static str, } impl LabelGroup for BuildInfo { fn visit_values(&self, v: &mut impl LabelGroupVisitor) { const REVISION: &LabelName = LabelName::from_str("revision"); v.write_value(REVISION, &self.revision); const BUILD_TAG: &LabelName = LabelName::from_str("build_tag"); v.write_value(BUILD_TAG, &self.build_tag); } } #[derive(MetricGroup)] #[metric(new(build_info: BuildInfo))] pub struct 
NeonMetrics { #[cfg(target_os = "linux")] #[metric(namespace = "process")] #[metric(init = measured_process::ProcessCollector::for_self())] process: measured_process::ProcessCollector, #[metric(namespace = "libmetrics")] #[metric(init = LibMetrics::new(build_info))] libmetrics: LibMetrics, } #[derive(MetricGroup)] #[metric(new(build_info: BuildInfo))] pub struct LibMetrics { #[metric(init = InfoMetric::new(build_info))] build_info: InfoMetric<BuildInfo>, #[metric(flatten)] rusage: Rusage, serve_count: CollectionCounter, } fn write_gauge<Enc: Encoding>( x: i64, labels: impl LabelGroup, name: impl MetricNameEncoder, enc: &mut Enc, ) -> Result<(), Enc::Err> where GaugeState: MetricEncoding<Enc>, { GaugeState::new(x).collect_into(&(), labels, name, enc) } #[derive(Default)] struct Rusage; #[derive(FixedCardinalityLabel, Clone, Copy)] #[label(singleton = "io_operation")] enum IoOp { Read, Write, } impl<T: Encoding> MetricGroup<T> for Rusage where GaugeState: MetricEncoding<T>, { fn collect_group_into(&self, enc: &mut T) -> Result<(), T::Err> { const DISK_IO: &MetricName = MetricName::from_str("disk_io_bytes_total"); const MAXRSS: &MetricName = MetricName::from_str("maxrss_kb"); let ru = get_rusage_stats(); enc.write_help( DISK_IO, "Bytes written and read from disk, grouped by the operation (read|write)", )?; GaugeState::write_type(DISK_IO, enc)?; write_gauge(ru.ru_inblock * BYTES_IN_BLOCK, IoOp::Read, DISK_IO, enc)?; write_gauge(ru.ru_oublock * BYTES_IN_BLOCK, IoOp::Write, DISK_IO, enc)?; enc.write_help(MAXRSS, "Memory usage (Maximum Resident Set Size)")?; GaugeState::write_type(MAXRSS, enc)?; write_gauge(ru.ru_maxrss, IoOp::Read, MAXRSS, enc)?; Ok(()) } } #[derive(Default)] struct CollectionCounter(CounterState); impl<T: Encoding> MetricFamilyEncoding<T> for CollectionCounter where CounterState: MetricEncoding<T>, { fn collect_family_into( &self, name: impl measured::metric::name::MetricNameEncoder, enc: &mut T, ) -> Result<(), T::Err> { self.0.inc(); 
enc.write_help(&name, "Number of metric requests made")?; self.0.collect_into(&(), NoLabels, name, enc) } } pub fn set_build_info_metric(revision: &str, build_tag: &str) { let metric = register_int_gauge_vec!( "libmetrics_build_info", "Build/version information", &["revision", "build_tag"] ) .expect("Failed to register build info metric"); metric.with_label_values(&[revision, build_tag]).set(1); } const BYTES_IN_BLOCK: i64 = 512; // Records I/O stats in a "cross-platform" way. // Compiles both on macOS and Linux, but current macOS implementation always returns 0 as values for I/O stats. // An alternative is to read procfs (`/proc/[pid]/io`) which does not work under macOS at all, hence abandoned. // // Uses https://www.freebsd.org/cgi/man.cgi?query=getrusage to retrieve the number of block operations // performed by the process. // We know the size of the block, so we can determine the I/O bytes out of it. // The value might be not 100% exact, but should be fine for Prometheus metrics in this case. fn update_rusage_metrics() { let rusage_stats = get_rusage_stats(); DISK_IO_BYTES .with_label_values(&["read"]) .set(rusage_stats.ru_inblock * BYTES_IN_BLOCK); DISK_IO_BYTES .with_label_values(&["write"]) .set(rusage_stats.ru_oublock * BYTES_IN_BLOCK); // On macOS, the unit of maxrss is bytes; on Linux, it's kilobytes. https://stackoverflow.com/a/59915669 #[cfg(target_os = "macos")] { MAXRSS_KB.set(rusage_stats.ru_maxrss / 1024); } #[cfg(not(target_os = "macos"))] { MAXRSS_KB.set(rusage_stats.ru_maxrss); } } fn get_rusage_stats() -> libc::rusage { let mut rusage = std::mem::MaybeUninit::uninit(); // SAFETY: kernel will initialize the struct for us unsafe { let ret = libc::getrusage(libc::RUSAGE_SELF, rusage.as_mut_ptr()); assert!(ret == 0, "getrusage failed: bad args"); rusage.assume_init() } } /// Create an [`IntCounterPairVec`] and registers to default registry. #[macro_export(local_inner_macros)] macro_rules! 
register_int_counter_pair_vec { ($NAME1:expr, $HELP1:expr, $NAME2:expr, $HELP2:expr, $LABELS_NAMES:expr $(,)?) => {{ match ( $crate::register_int_counter_vec!($NAME1, $HELP1, $LABELS_NAMES), $crate::register_int_counter_vec!($NAME2, $HELP2, $LABELS_NAMES), ) { (Ok(inc), Ok(dec)) => Ok($crate::IntCounterPairVec::new(inc, dec)), (Err(e), _) | (_, Err(e)) => Err(e), } }}; } /// Create an [`IntCounterPair`] and registers to default registry. #[macro_export(local_inner_macros)] macro_rules! register_int_counter_pair { ($NAME1:expr, $HELP1:expr, $NAME2:expr, $HELP2:expr $(,)?) => {{ match ( $crate::register_int_counter!($NAME1, $HELP1), $crate::register_int_counter!($NAME2, $HELP2), ) { (Ok(inc), Ok(dec)) => Ok($crate::IntCounterPair::new(inc, dec)), (Err(e), _) | (_, Err(e)) => Err(e), } }}; } /// A Pair of [`GenericCounterVec`]s. Like an [`GenericGaugeVec`] but will always observe changes pub struct GenericCounterPairVec<P: Atomic> { inc: GenericCounterVec<P>, dec: GenericCounterVec<P>, } /// A Pair of [`GenericCounter`]s. Like an [`GenericGauge`] but will always observe changes pub struct GenericCounterPair<P: Atomic> { inc: GenericCounter<P>, dec: GenericCounter<P>, } impl<P: Atomic> GenericCounterPairVec<P> { pub fn new(inc: GenericCounterVec<P>, dec: GenericCounterVec<P>) -> Self { Self { inc, dec } } /// `get_metric_with_label_values` returns the [`GenericCounterPair<P>`] for the given slice /// of label values (same order as the VariableLabels in Desc). If that combination of /// label values is accessed for the first time, a new [`GenericCounterPair<P>`] is created. /// /// An error is returned if the number of label values is not the same as the /// number of VariableLabels in Desc. 
pub fn get_metric_with_label_values( &self, vals: &[&str], ) -> prometheus::Result<GenericCounterPair<P>> { Ok(GenericCounterPair { inc: self.inc.get_metric_with_label_values(vals)?, dec: self.dec.get_metric_with_label_values(vals)?, }) } /// `with_label_values` works as `get_metric_with_label_values`, but panics if an error /// occurs. pub fn with_label_values(&self, vals: &[&str]) -> GenericCounterPair<P> { self.get_metric_with_label_values(vals).unwrap() } pub fn remove_label_values(&self, res: &mut [prometheus::Result<()>; 2], vals: &[&str]) { res[0] = self.inc.remove_label_values(vals); res[1] = self.dec.remove_label_values(vals); } } impl<P: Atomic> GenericCounterPair<P> { pub fn new(inc: GenericCounter<P>, dec: GenericCounter<P>) -> Self { Self { inc, dec } } /// Increment the gauge by 1, returning a guard that decrements by 1 on drop. pub fn guard(&self) -> GenericCounterPairGuard<P> { self.inc.inc(); GenericCounterPairGuard(self.dec.clone()) } /// Increment the gauge by n, returning a guard that decrements by n on drop. pub fn guard_by(&self, n: P::T) -> GenericCounterPairGuardBy<P> { self.inc.inc_by(n); GenericCounterPairGuardBy(self.dec.clone(), n) } /// Increase the gauge by 1. #[inline] pub fn inc(&self) { self.inc.inc(); } /// Decrease the gauge by 1. #[inline] pub fn dec(&self) { self.dec.inc(); } /// Add the given value to the gauge. (The value can be /// negative, resulting in a decrement of the gauge.) #[inline] pub fn inc_by(&self, v: P::T) { self.inc.inc_by(v); } /// Subtract the given value from the gauge. (The value can be /// negative, resulting in an increment of the gauge.) 
#[inline] pub fn dec_by(&self, v: P::T) { self.dec.inc_by(v); } } impl<P: Atomic> Clone for GenericCounterPair<P> { fn clone(&self) -> Self { Self { inc: self.inc.clone(), dec: self.dec.clone(), } } } /// Guard returned by [`GenericCounterPair::guard`] pub struct GenericCounterPairGuard<P: Atomic>(GenericCounter<P>); impl<P: Atomic> Drop for GenericCounterPairGuard<P> { fn drop(&mut self) { self.0.inc(); } } /// Guard returned by [`GenericCounterPair::guard_by`] pub struct GenericCounterPairGuardBy<P: Atomic>(GenericCounter<P>, P::T); impl<P: Atomic> Drop for GenericCounterPairGuardBy<P> { fn drop(&mut self) { self.0.inc_by(self.1); } } /// A Pair of [`IntCounterVec`]s. Like an [`IntGaugeVec`] but will always observe changes pub type IntCounterPairVec = GenericCounterPairVec<AtomicU64>; /// A Pair of [`IntCounter`]s. Like an [`IntGauge`] but will always observe changes pub type IntCounterPair = GenericCounterPair<AtomicU64>; /// A guard for [`IntCounterPair`] that will decrement the gauge on drop pub type IntCounterPairGuard = GenericCounterPairGuard<AtomicU64>; pub trait CounterPairAssoc { const INC_NAME: &'static MetricName; const DEC_NAME: &'static MetricName; const INC_HELP: &'static str; const DEC_HELP: &'static str; type LabelGroupSet: LabelGroupSet; } pub struct CounterPairVec<A: CounterPairAssoc> { vec: measured::metric::MetricVec<MeasuredCounterPairState, A::LabelGroupSet>, } impl<A: CounterPairAssoc> Default for CounterPairVec<A> where A::LabelGroupSet: Default, { fn default() -> Self { Self { vec: Default::default(), } } } impl<A: CounterPairAssoc> CounterPairVec<A> { pub fn guard( &self, labels: <A::LabelGroupSet as LabelGroupSet>::Group<'_>, ) -> MeasuredCounterPairGuard<'_, A> { let id = self.vec.with_labels(labels); self.vec.get_metric(id).inc.inc(); MeasuredCounterPairGuard { vec: &self.vec, id } } pub fn inc(&self, labels: <A::LabelGroupSet as LabelGroupSet>::Group<'_>) { let id = self.vec.with_labels(labels); self.vec.get_metric(id).inc.inc(); } 
pub fn dec(&self, labels: <A::LabelGroupSet as LabelGroupSet>::Group<'_>) { let id = self.vec.with_labels(labels); self.vec.get_metric(id).dec.inc(); } pub fn remove_metric( &self, labels: <A::LabelGroupSet as LabelGroupSet>::Group<'_>, ) -> Option<MeasuredCounterPairState> { let id = self.vec.with_labels(labels); self.vec.remove_metric(id) } pub fn sample(&self, labels: <A::LabelGroupSet as LabelGroupSet>::Group<'_>) -> u64 { let id = self.vec.with_labels(labels); let metric = self.vec.get_metric(id); let inc = metric.inc.count.load(std::sync::atomic::Ordering::Relaxed); let dec = metric.dec.count.load(std::sync::atomic::Ordering::Relaxed); inc.saturating_sub(dec) } } impl<T, A> ::measured::metric::group::MetricGroup<T> for CounterPairVec<A> where T: ::measured::metric::group::Encoding, A: CounterPairAssoc, ::measured::metric::counter::CounterState: ::measured::metric::MetricEncoding<T>, { fn collect_group_into(&self, enc: &mut T) -> Result<(), T::Err> { // write decrement first to avoid a race condition where inc - dec < 0 T::write_help(enc, A::DEC_NAME, A::DEC_HELP)?; self.vec .collect_family_into(A::DEC_NAME, &mut Dec(&mut *enc))?; T::write_help(enc, A::INC_NAME, A::INC_HELP)?; self.vec .collect_family_into(A::INC_NAME, &mut Inc(&mut *enc))?; Ok(()) } } #[derive(MetricGroup, Default)] pub struct MeasuredCounterPairState { pub inc: CounterState, pub dec: CounterState, } impl measured::metric::MetricType for MeasuredCounterPairState { type Metadata = (); } pub struct MeasuredCounterPairGuard<'a, A: CounterPairAssoc> { vec: &'a measured::metric::MetricVec<MeasuredCounterPairState, A::LabelGroupSet>, id: measured::metric::LabelId<A::LabelGroupSet>, } impl<A: CounterPairAssoc> Drop for MeasuredCounterPairGuard<'_, A> { fn drop(&mut self) { self.vec.get_metric(self.id).dec.inc(); } } /// [`MetricEncoding`] for [`MeasuredCounterPairState`] that only writes the inc counter to the inner encoder. 
struct Inc<T>(T); /// [`MetricEncoding`] for [`MeasuredCounterPairState`] that only writes the dec counter to the inner encoder. struct Dec<T>(T); impl<T: Encoding> Encoding for Inc<T> { type Err = T::Err; fn write_help(&mut self, name: impl MetricNameEncoder, help: &str) -> Result<(), Self::Err> { self.0.write_help(name, help) } } impl<T: Encoding> MetricEncoding<Inc<T>> for MeasuredCounterPairState where CounterState: MetricEncoding<T>, { fn write_type(name: impl MetricNameEncoder, enc: &mut Inc<T>) -> Result<(), T::Err> { CounterState::write_type(name, &mut enc.0) } fn collect_into( &self, metadata: &(), labels: impl LabelGroup, name: impl MetricNameEncoder, enc: &mut Inc<T>, ) -> Result<(), T::Err> { self.inc.collect_into(metadata, labels, name, &mut enc.0) } } impl<T: Encoding> Encoding for Dec<T> { type Err = T::Err; fn write_help(&mut self, name: impl MetricNameEncoder, help: &str) -> Result<(), Self::Err> { self.0.write_help(name, help) } } /// Write the dec counter to the encoder impl<T: Encoding> MetricEncoding<Dec<T>> for MeasuredCounterPairState where CounterState: MetricEncoding<T>, { fn write_type(name: impl MetricNameEncoder, enc: &mut Dec<T>) -> Result<(), T::Err> { CounterState::write_type(name, &mut enc.0) } fn collect_into( &self, metadata: &(), labels: impl LabelGroup, name: impl MetricNameEncoder, enc: &mut Dec<T>, ) -> Result<(), T::Err> { self.dec.collect_into(metadata, labels, name, &mut enc.0) } } #[cfg(test)] mod tests { use super::*; const POW2_BUCKETS_MAX: usize = 1 << (usize::BITS - 1); #[test] fn pow2_buckets_cases() { assert_eq!(pow2_buckets(1, 1), vec![1.0]); assert_eq!(pow2_buckets(1, 2), vec![1.0, 2.0]); assert_eq!(pow2_buckets(1, 3), vec![1.0, 2.0, 4.0]); assert_eq!(pow2_buckets(1, 4), vec![1.0, 2.0, 4.0]); assert_eq!(pow2_buckets(1, 5), vec![1.0, 2.0, 4.0, 8.0]); assert_eq!(pow2_buckets(1, 6), vec![1.0, 2.0, 4.0, 8.0]); assert_eq!(pow2_buckets(1, 7), vec![1.0, 2.0, 4.0, 8.0]); assert_eq!(pow2_buckets(1, 8), vec![1.0, 2.0, 4.0, 
8.0]); assert_eq!( pow2_buckets(1, 200), vec![1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0] ); assert_eq!(pow2_buckets(1, 8), vec![1.0, 2.0, 4.0, 8.0]); assert_eq!(pow2_buckets(2, 8), vec![2.0, 4.0, 8.0]); assert_eq!(pow2_buckets(3, 8), vec![2.0, 4.0, 8.0]); assert_eq!(pow2_buckets(4, 8), vec![4.0, 8.0]); assert_eq!(pow2_buckets(5, 8), vec![4.0, 8.0]); assert_eq!(pow2_buckets(6, 8), vec![4.0, 8.0]); assert_eq!(pow2_buckets(7, 8), vec![4.0, 8.0]); assert_eq!(pow2_buckets(8, 8), vec![8.0]); assert_eq!(pow2_buckets(20, 200), vec![16.0, 32.0, 64.0, 128.0, 256.0]); // Largest valid values. assert_eq!( pow2_buckets(1, POW2_BUCKETS_MAX).len(), usize::BITS as usize ); assert_eq!(pow2_buckets(POW2_BUCKETS_MAX, POW2_BUCKETS_MAX).len(), 1); } #[test] #[should_panic] fn pow2_buckets_zero_start() { pow2_buckets(0, 1); } #[test] #[should_panic] fn pow2_buckets_end_lt_start() { pow2_buckets(2, 1); } #[test] #[should_panic] fn pow2_buckets_end_overflow_min() { pow2_buckets(1, POW2_BUCKETS_MAX + 1); } #[test] #[should_panic] fn pow2_buckets_end_overflow_max() { pow2_buckets(1, usize::MAX); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/metrics/src/more_process_metrics.rs
libs/metrics/src/more_process_metrics.rs
//! process metrics that the [`::prometheus`] crate doesn't provide. // This module has heavy inspiration from the prometheus crate's `process_collector.rs`. use once_cell::sync::Lazy; use prometheus::Gauge; use crate::UIntGauge; pub struct Collector { descs: Vec<prometheus::core::Desc>, vmlck: crate::UIntGauge, cpu_seconds_highres: Gauge, } const NMETRICS: usize = 2; static CLK_TCK_F64: Lazy<f64> = Lazy::new(|| { // SAFETY: libc::sysconf is safe, it merely returns a value. let long = unsafe { libc::sysconf(libc::_SC_CLK_TCK) }; if long == -1 { panic!("sysconf(_SC_CLK_TCK) failed"); } let convertible_to_f64: i32 = i32::try_from(long).expect("sysconf(_SC_CLK_TCK) is larger than i32"); convertible_to_f64 as f64 }); impl prometheus::core::Collector for Collector { fn desc(&self) -> Vec<&prometheus::core::Desc> { self.descs.iter().collect() } fn collect(&self) -> Vec<prometheus::proto::MetricFamily> { let Ok(myself) = procfs::process::Process::myself() else { return vec![]; }; let mut mfs = Vec::with_capacity(NMETRICS); if let Ok(status) = myself.status() { if let Some(vmlck) = status.vmlck { self.vmlck.set(vmlck); mfs.extend(self.vmlck.collect()) } } if let Ok(stat) = myself.stat() { let cpu_seconds = stat.utime + stat.stime; self.cpu_seconds_highres .set(cpu_seconds as f64 / *CLK_TCK_F64); mfs.extend(self.cpu_seconds_highres.collect()); } mfs } } impl Collector { pub fn new() -> Self { let mut descs = Vec::new(); let vmlck = UIntGauge::new("libmetrics_process_status_vmlck", "/proc/self/status vmlck").unwrap(); descs.extend( prometheus::core::Collector::desc(&vmlck) .into_iter() .cloned(), ); let cpu_seconds_highres = Gauge::new( "libmetrics_process_cpu_seconds_highres", "Total user and system CPU time spent in seconds.\ Sub-second resolution, hence better than `process_cpu_seconds_total`.", ) .unwrap(); descs.extend( prometheus::core::Collector::desc(&cpu_seconds_highres) .into_iter() .cloned(), ); Self { descs, vmlck, cpu_seconds_highres, } } } impl Default for 
Collector { fn default() -> Self { Self::new() } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/metrics/src/hll.rs
libs/metrics/src/hll.rs
//! HyperLogLog is an algorithm for the count-distinct problem, //! approximating the number of distinct elements in a multiset. //! Calculating the exact cardinality of the distinct elements //! of a multiset requires an amount of memory proportional to //! the cardinality, which is impractical for very large data sets. //! Probabilistic cardinality estimators, such as the HyperLogLog algorithm, //! use significantly less memory than this, but can only approximate the cardinality. use std::hash::{BuildHasher, BuildHasherDefault, Hash}; use std::sync::atomic::AtomicU8; use measured::LabelGroup; use measured::label::{LabelGroupVisitor, LabelName, LabelValue, LabelVisitor}; use measured::metric::counter::CounterState; use measured::metric::name::MetricNameEncoder; use measured::metric::{Metric, MetricType, MetricVec}; use measured::text::TextEncoder; use twox_hash::xxh3; /// Create an [`HyperLogLogVec`] and registers to default registry. #[macro_export(local_inner_macros)] macro_rules! register_hll_vec { ($N:literal, $OPTS:expr, $LABELS_NAMES:expr $(,)?) => {{ let hll_vec = $crate::HyperLogLogVec::<$N>::new($OPTS, $LABELS_NAMES).unwrap(); $crate::register(Box::new(hll_vec.clone())).map(|_| hll_vec) }}; ($N:literal, $NAME:expr, $HELP:expr, $LABELS_NAMES:expr $(,)?) => {{ $crate::register_hll_vec!($N, $crate::opts!($NAME, $HELP), $LABELS_NAMES) }}; } /// Create an [`HyperLogLog`] and registers to default registry. #[macro_export(local_inner_macros)] macro_rules! register_hll { ($N:literal, $OPTS:expr $(,)?) => {{ let hll = $crate::HyperLogLog::<$N>::with_opts($OPTS).unwrap(); $crate::register(Box::new(hll.clone())).map(|_| hll) }}; ($N:literal, $NAME:expr, $HELP:expr $(,)?) => {{ $crate::register_hll!($N, $crate::opts!($NAME, $HELP)) }}; } /// HLL is a probabilistic cardinality measure. 
/// /// How to use this time-series for a metric name `my_metrics_total_hll`: /// /// ```promql /// # harmonic mean /// 1 / ( /// sum ( /// 2 ^ -( /// # HLL merge operation /// max (my_metrics_total_hll{}) by (hll_shard, other_labels...) /// ) /// ) without (hll_shard) /// ) /// * alpha /// * shards_count /// * shards_count /// ``` /// /// If you want an estimate over time, you can use the following query: /// /// ```promql /// # harmonic mean /// 1 / ( /// sum ( /// 2 ^ -( /// # HLL merge operation /// max ( /// max_over_time(my_metrics_total_hll{}[$__rate_interval]) /// ) by (hll_shard, other_labels...) /// ) /// ) without (hll_shard) /// ) /// * alpha /// * shards_count /// * shards_count /// ``` /// /// In the case of low cardinality, you might want to use the linear counting approximation: /// /// ```promql /// # LinearCounting(m, V) = m log (m / V) /// shards_count * ln(shards_count / /// # calculate V = how many shards contain a 0 /// count(max (proxy_connecting_endpoints{}) by (hll_shard, protocol) == 0) without (hll_shard) /// ) /// ``` /// /// See <https://en.wikipedia.org/wiki/HyperLogLog#Practical_considerations> for estimates on alpha pub type HyperLogLogVec<L, const N: usize> = MetricVec<HyperLogLogState<N>, L>; pub type HyperLogLog<const N: usize> = Metric<HyperLogLogState<N>>; pub struct HyperLogLogState<const N: usize> { shards: [AtomicU8; N], } impl<const N: usize> Default for HyperLogLogState<N> { fn default() -> Self { #[allow(clippy::declare_interior_mutable_const)] const ZERO: AtomicU8 = AtomicU8::new(0); Self { shards: [ZERO; N] } } } impl<const N: usize> MetricType for HyperLogLogState<N> { type Metadata = (); } impl<const N: usize> HyperLogLogState<N> { pub fn measure(&self, item: &(impl Hash + ?Sized)) { // changing the hasher will break compatibility with previous measurements. 
self.record(BuildHasherDefault::<xxh3::Hash64>::default().hash_one(item)); } fn record(&self, hash: u64) { let p = N.ilog2() as u8; let j = hash & (N as u64 - 1); let rho = (hash >> p).leading_zeros() as u8 + 1 - p; self.shards[j as usize].fetch_max(rho, std::sync::atomic::Ordering::Relaxed); } fn take_sample(&self) -> [u8; N] { self.shards.each_ref().map(|x| { // We reset the counter to 0 so we can perform a cardinality measure over any time slice in prometheus. // This seems like it would be a race condition, // but HLL is not impacted by a write in one shard happening in between. // This is because in PromQL we will be implementing a harmonic mean of all buckets. // we will also merge samples in a time series using `max by (hll_shard)`. // TODO: maybe we shouldn't reset this on every collect, instead, only after a time window. // this would mean that a dev port-forwarding the metrics url won't break the sampling. x.swap(0, std::sync::atomic::Ordering::Relaxed) }) } } impl<W: std::io::Write, const N: usize> measured::metric::MetricEncoding<TextEncoder<W>> for HyperLogLogState<N> { fn write_type( name: impl MetricNameEncoder, enc: &mut TextEncoder<W>, ) -> Result<(), std::io::Error> { enc.write_type(&name, measured::text::MetricType::Gauge) } fn collect_into( &self, _: &(), labels: impl LabelGroup, name: impl MetricNameEncoder, enc: &mut TextEncoder<W>, ) -> Result<(), std::io::Error> { struct I64(i64); impl LabelValue for I64 { fn visit<V: LabelVisitor>(&self, v: V) -> V::Output { v.write_int(self.0) } } struct HllShardLabel { hll_shard: i64, } impl LabelGroup for HllShardLabel { fn visit_values(&self, v: &mut impl LabelGroupVisitor) { const LE: &LabelName = LabelName::from_str("hll_shard"); v.write_value(LE, &I64(self.hll_shard)); } } self.take_sample() .into_iter() .enumerate() .try_for_each(|(hll_shard, val)| { CounterState::new(val as u64).collect_into( &(), labels.by_ref().compose_with(HllShardLabel { hll_shard: hll_shard as i64, }), name.by_ref(), enc, ) }) 
} } #[cfg(test)] mod tests { use std::collections::HashSet; use measured::FixedCardinalityLabel; use measured::label::StaticLabelSet; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; use rand_distr::{Distribution, Zipf}; use crate::HyperLogLogVec; #[derive(FixedCardinalityLabel, Clone, Copy)] #[label(singleton = "x")] enum Label { A, B, } fn collect(hll: &HyperLogLogVec<StaticLabelSet<Label>, 32>) -> ([u8; 32], [u8; 32]) { // cannot go through the `hll.collect_family_into` interface yet... // need to see if I can fix the conflicting impls problem in measured. ( hll.get_metric(hll.with_labels(Label::A)).take_sample(), hll.get_metric(hll.with_labels(Label::B)).take_sample(), ) } fn get_cardinality(samples: &[[u8; 32]]) -> f64 { let mut buckets = [0.0; 32]; for &sample in samples { for (i, m) in sample.into_iter().enumerate() { buckets[i] = f64::max(buckets[i], m as f64); } } buckets .into_iter() .map(|f| 2.0f64.powf(-f)) .sum::<f64>() .recip() * 0.697 * 32.0 * 32.0 } fn test_cardinality(n: usize, dist: impl Distribution<f64>) -> ([usize; 3], [f64; 3]) { let hll = HyperLogLogVec::<StaticLabelSet<Label>, 32>::new(); let mut iter = StdRng::seed_from_u64(0x2024_0112).sample_iter(dist); let mut set_a = HashSet::new(); let mut set_b = HashSet::new(); for x in iter.by_ref().take(n) { set_a.insert(x.to_bits()); hll.get_metric(hll.with_labels(Label::A)) .measure(&x.to_bits()); } for x in iter.by_ref().take(n) { set_b.insert(x.to_bits()); hll.get_metric(hll.with_labels(Label::B)) .measure(&x.to_bits()); } let merge = &set_a | &set_b; let (a, b) = collect(&hll); let len = get_cardinality(&[a, b]); let len_a = get_cardinality(&[a]); let len_b = get_cardinality(&[b]); ([merge.len(), set_a.len(), set_b.len()], [len, len_a, len_b]) } #[test] fn test_cardinality_small() { let (actual, estimate) = test_cardinality(100, Zipf::new(100.0, 1.2f64).unwrap()); assert_eq!(actual, [46, 30, 32]); assert!(51.3 < estimate[0] && estimate[0] < 51.4); assert!(44.0 < estimate[1] && estimate[1] 
< 44.1); assert!(39.0 < estimate[2] && estimate[2] < 39.1); } #[test] fn test_cardinality_medium() { let (actual, estimate) = test_cardinality(10000, Zipf::new(10000.0, 1.2f64).unwrap()); assert_eq!(actual, [2529, 1618, 1629]); assert!(2309.1 < estimate[0] && estimate[0] < 2309.2); assert!(1566.6 < estimate[1] && estimate[1] < 1566.7); assert!(1629.5 < estimate[2] && estimate[2] < 1629.6); } #[test] fn test_cardinality_large() { let (actual, estimate) = test_cardinality(1_000_000, Zipf::new(1_000_000.0, 1.2f64).unwrap()); assert_eq!(actual, [129077, 79579, 79630]); assert!(126067.2 < estimate[0] && estimate[0] < 126067.3); assert!(83076.8 < estimate[1] && estimate[1] < 83076.9); assert!(64251.2 < estimate[2] && estimate[2] < 64251.3); } #[test] fn test_cardinality_small2() { let (actual, estimate) = test_cardinality(100, Zipf::new(200.0, 0.8f64).unwrap()); assert_eq!(actual, [92, 58, 60]); assert!(116.1 < estimate[0] && estimate[0] < 116.2); assert!(81.7 < estimate[1] && estimate[1] < 81.8); assert!(69.3 < estimate[2] && estimate[2] < 69.4); } #[test] fn test_cardinality_medium2() { let (actual, estimate) = test_cardinality(10000, Zipf::new(20000.0, 0.8f64).unwrap()); assert_eq!(actual, [8201, 5131, 5051]); assert!(6846.4 < estimate[0] && estimate[0] < 6846.5); assert!(5239.1 < estimate[1] && estimate[1] < 5239.2); assert!(4292.8 < estimate[2] && estimate[2] < 4292.9); } #[test] fn test_cardinality_large2() { let (actual, estimate) = test_cardinality(1_000_000, Zipf::new(2_000_000.0, 0.8f64).unwrap()); assert_eq!(actual, [777847, 482069, 482246]); assert!(699437.4 < estimate[0] && estimate[0] < 699437.5); assert!(374948.9 < estimate[1] && estimate[1] < 374949.0); assert!(434609.7 < estimate[2] && estimate[2] < 434609.8); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/metrics/src/launch_timestamp.rs
libs/metrics/src/launch_timestamp.rs
//! A timestamp captured at process startup to identify restarts of the process, e.g., in logs and metrics. use std::fmt::Display; use chrono::Utc; use super::register_uint_gauge; pub struct LaunchTimestamp(chrono::DateTime<Utc>); impl LaunchTimestamp { pub fn generate() -> Self { LaunchTimestamp(Utc::now()) } } impl Display for LaunchTimestamp { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.0) } } pub fn set_launch_timestamp_metric(launch_ts: &'static LaunchTimestamp) { let millis_since_epoch: u64 = launch_ts .0 .timestamp_millis() .try_into() .expect("we're after the epoch, this should be positive"); let metric = register_uint_gauge!( "libmetrics_launch_timestamp", "Timestamp (millis since epoch) at wich the process launched." ) .unwrap(); metric.set(millis_since_epoch); }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_initdb/src/lib.rs
libs/postgres_initdb/src/lib.rs
//! The canonical way we run `initdb` in Neon. //! //! initdb has implicit defaults that are dependent on the environment, e.g., locales & collations. //! //! This module's job is to eliminate the environment-dependence as much as possible. use std::fmt; use camino::Utf8Path; use postgres_versioninfo::PgMajorVersion; pub struct RunInitdbArgs<'a> { pub superuser: &'a str, pub locale: &'a str, pub initdb_bin: &'a Utf8Path, pub pg_version: PgMajorVersion, pub library_search_path: &'a Utf8Path, pub pgdata: &'a Utf8Path, } #[derive(thiserror::Error, Debug)] pub enum Error { Spawn(std::io::Error), Failed { status: std::process::ExitStatus, stderr: Vec<u8>, }, WaitOutput(std::io::Error), Other(anyhow::Error), } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Error::Spawn(e) => write!(f, "Error spawning command: {e:?}"), Error::Failed { status, stderr } => write!( f, "Command failed with status {:?}: {}", status, String::from_utf8_lossy(stderr) ), Error::WaitOutput(e) => write!(f, "Error waiting for command output: {e:?}"), Error::Other(e) => write!(f, "Error: {e:?}"), } } } pub async fn do_run_initdb(args: RunInitdbArgs<'_>) -> Result<(), Error> { let RunInitdbArgs { superuser, locale, initdb_bin: initdb_bin_path, pg_version, library_search_path, pgdata, } = args; let mut initdb_command = tokio::process::Command::new(initdb_bin_path); initdb_command .args(["--pgdata", pgdata.as_ref()]) .args(["--username", superuser]) .args(["--encoding", "utf8"]) .args(["--locale", locale]) .arg("--no-instructions") .arg("--no-sync") .env_clear() .env("LD_LIBRARY_PATH", library_search_path) .env("DYLD_LIBRARY_PATH", library_search_path) .env( "ASAN_OPTIONS", std::env::var("ASAN_OPTIONS").unwrap_or_default(), ) .env( "UBSAN_OPTIONS", std::env::var("UBSAN_OPTIONS").unwrap_or_default(), ) .stdin(std::process::Stdio::null()) // stdout invocation produces the same output every time, we don't need it .stdout(std::process::Stdio::null()) // we 
would be interested in the stderr output, if there was any .stderr(std::process::Stdio::piped()); // Before version 14, only the libc provide was available. if pg_version > PgMajorVersion::PG14 { // Version 17 brought with it a builtin locale provider which only provides // C and C.UTF-8. While being safer for collation purposes since it is // guaranteed to be consistent throughout a major release, it is also more // performant. let locale_provider = if pg_version >= PgMajorVersion::PG17 { "builtin" } else { "libc" }; initdb_command.args(["--locale-provider", locale_provider]); } let initdb_proc = initdb_command.spawn().map_err(Error::Spawn)?; // Ideally we'd select here with the cancellation token, but the problem is that // we can't safely terminate initdb: it launches processes of its own, and killing // initdb doesn't kill them. After we return from this function, we want the target // directory to be able to be cleaned up. // See https://github.com/neondatabase/neon/issues/6385 let initdb_output = initdb_proc .wait_with_output() .await .map_err(Error::WaitOutput)?; if !initdb_output.status.success() { return Err(Error::Failed { status: initdb_output.status, stderr: initdb_output.stderr, }); } Ok(()) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/build.rs
libs/postgres_ffi/build.rs
extern crate bindgen; use std::env; use std::path::PathBuf; use std::process::Command; use anyhow::{Context, anyhow}; use bindgen::callbacks::{DeriveInfo, ParseCallbacks}; #[derive(Debug)] struct PostgresFfiCallbacks; impl ParseCallbacks for PostgresFfiCallbacks { fn include_file(&self, filename: &str) { // This does the equivalent of passing bindgen::CargoCallbacks // to the builder .parse_callbacks() method. let cargo_callbacks = bindgen::CargoCallbacks::new(); cargo_callbacks.include_file(filename) } // Add any custom #[derive] attributes to the data structures that bindgen // creates. fn add_derives(&self, derive_info: &DeriveInfo) -> Vec<String> { // This is the list of data structures that we want to serialize/deserialize. let serde_list = [ "XLogRecord", "XLogPageHeaderData", "XLogLongPageHeaderData", "CheckPoint", "FullTransactionId", "ControlFileData", ]; if serde_list.contains(&derive_info.name) { vec![ "Default".into(), // Default allows us to easily fill the padding fields with 0. 
"Serialize".into(), "Deserialize".into(), ] } else { vec![] } } } fn main() -> anyhow::Result<()> { // Tell cargo to invalidate the built crate whenever the wrapper changes println!("cargo:rerun-if-changed=bindgen_deps.h"); // Finding the location of C headers for the Postgres server: // - if POSTGRES_INSTALL_DIR is set look into it, otherwise look into `<project_root>/pg_install` // - if there's a `bin/pg_config` file use it for getting include server, otherwise use `<project_root>/pg_install/{PG_MAJORVERSION}/include/postgresql/server` let pg_install_dir = if let Some(postgres_install_dir) = env::var_os("POSTGRES_INSTALL_DIR") { postgres_install_dir.into() } else { PathBuf::from("pg_install") }; for pg_version in &["v14", "v15", "v16", "v17"] { let mut pg_install_dir_versioned = pg_install_dir.join(pg_version); if pg_install_dir_versioned.is_relative() { let cwd = env::current_dir().context("Failed to get current_dir")?; pg_install_dir_versioned = cwd.join("..").join("..").join(pg_install_dir_versioned); } let pg_config_bin = pg_install_dir_versioned.join("bin").join("pg_config"); let inc_server_path: String = if pg_config_bin.exists() { let output = Command::new(pg_config_bin) .arg("--includedir-server") .output() .context("failed to execute `pg_config --includedir-server`")?; if !output.status.success() { panic!("`pg_config --includedir-server` failed") } String::from_utf8(output.stdout) .context("pg_config output is not UTF-8")? .trim_end() .into() } else { let server_path = pg_install_dir_versioned .join("include") .join("postgresql") .join("server") .into_os_string(); server_path .into_string() .map_err(|s| anyhow!("Bad postgres server path {s:?}"))? }; // The bindgen::Builder is the main entry point // to bindgen, and lets you build up options for // the resulting bindings. 
let bindings = bindgen::Builder::default() // // All the needed PostgreSQL headers are included from 'bindgen_deps.h' // .header("bindgen_deps.h") // // Tell cargo to invalidate the built crate whenever any of the // included header files changed. // .parse_callbacks(Box::new(PostgresFfiCallbacks)) // // These are the types and constants that we want to generate bindings for // .allowlist_type("BlockNumber") .allowlist_type("OffsetNumber") .allowlist_type("XLogRecPtr") .allowlist_type("XLogSegNo") .allowlist_type("TimeLineID") .allowlist_type("MultiXactId") .allowlist_type("MultiXactOffset") .allowlist_type("MultiXactStatus") .allowlist_type("ControlFileData") .allowlist_type("CheckPoint") .allowlist_type("FullTransactionId") .allowlist_type("XLogRecord") .allowlist_type("XLogPageHeaderData") .allowlist_type("XLogLongPageHeaderData") .allowlist_var("XLOG_PAGE_MAGIC") .allowlist_var("PG_MAJORVERSION_NUM") .allowlist_var("PG_CONTROL_FILE_SIZE") .allowlist_var("PG_CONTROLFILEDATA_OFFSETOF_CRC") .allowlist_type("PageHeaderData") .allowlist_type("DBState") .allowlist_type("RelMapFile") .allowlist_type("RepOriginId") // Because structs are used for serialization, tell bindgen to emit // explicit padding fields. .explicit_padding(true) // .clang_arg(format!("-I{inc_server_path}")) // // Finish the builder and generate the bindings. // .generate() .context("Unable to generate bindings")?; // Write the bindings to the $OUT_DIR/bindings_$pg_version.rs file. let out_path: PathBuf = env::var("OUT_DIR") .context("Couldn't read OUT_DIR environment variable var")? .into(); let filename = format!("bindings_{pg_version}.rs"); bindings .write_to_file(out_path.join(filename)) .context("Couldn't write bindings")?; } Ok(()) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/wal_craft/src/lib.rs
libs/postgres_ffi/wal_craft/src/lib.rs
use std::ffi::OsStr; use std::path::{Path, PathBuf}; use std::process::Command; use std::time::{Duration, Instant}; use anyhow::{bail, ensure}; use camino_tempfile::{Utf8TempDir, tempdir}; use log::*; use postgres::Client; use postgres::types::PgLsn; use postgres_ffi::{ PgMajorVersion, WAL_SEGMENT_SIZE, XLOG_BLCKSZ, XLOG_SIZE_OF_XLOG_LONG_PHD, XLOG_SIZE_OF_XLOG_RECORD, XLOG_SIZE_OF_XLOG_SHORT_PHD, }; macro_rules! xlog_utils_test { ($version:ident) => { #[path = "."] mod $version { #[allow(unused_imports)] pub use postgres_ffi::$version::wal_craft_test_export::*; #[allow(clippy::duplicate_mod)] #[cfg(test)] mod xlog_utils_test; } }; } postgres_ffi::for_all_postgres_versions! { xlog_utils_test } pub struct Conf { pub pg_version: PgMajorVersion, pub pg_distrib_dir: PathBuf, pub datadir: PathBuf, } pub struct PostgresServer { process: std::process::Child, _unix_socket_dir: Utf8TempDir, client_config: postgres::Config, } pub static REQUIRED_POSTGRES_CONFIG: [&str; 4] = [ "wal_keep_size=50MB", // Ensure old WAL is not removed "shared_preload_libraries=neon", // can only be loaded at startup // Disable background processes as much as possible "wal_writer_delay=10s", "autovacuum=off", ]; impl Conf { pub fn pg_distrib_dir(&self) -> anyhow::Result<PathBuf> { let path = self.pg_distrib_dir.clone(); Ok(path.join(self.pg_version.v_str())) } fn pg_bin_dir(&self) -> anyhow::Result<PathBuf> { Ok(self.pg_distrib_dir()?.join("bin")) } fn pg_lib_dir(&self) -> anyhow::Result<PathBuf> { Ok(self.pg_distrib_dir()?.join("lib")) } pub fn wal_dir(&self) -> PathBuf { self.datadir.join("pg_wal") } fn new_pg_command(&self, command: impl AsRef<Path>) -> anyhow::Result<Command> { let path = self.pg_bin_dir()?.join(command); ensure!(path.exists(), "Command {:?} does not exist", path); let mut cmd = Command::new(path); cmd.env_clear() .env("LD_LIBRARY_PATH", self.pg_lib_dir()?) .env("DYLD_LIBRARY_PATH", self.pg_lib_dir()?) 
.env( "ASAN_OPTIONS", std::env::var("ASAN_OPTIONS").unwrap_or_default(), ) .env( "UBSAN_OPTIONS", std::env::var("UBSAN_OPTIONS").unwrap_or_default(), ); Ok(cmd) } pub fn initdb(&self) -> anyhow::Result<()> { if let Some(parent) = self.datadir.parent() { info!("Pre-creating parent directory {:?}", parent); // Tests may be run concurrently and there may be a race to create `test_output/`. // std::fs::create_dir_all is guaranteed to have no races with another thread creating directories. std::fs::create_dir_all(parent)?; } info!( "Running initdb in {:?} with user \"postgres\"", self.datadir ); let output = self .new_pg_command("initdb")? .arg("--pgdata") .arg(&self.datadir) .args(["--username", "postgres", "--no-instructions", "--no-sync"]) .output()?; debug!("initdb output: {:?}", output); ensure!( output.status.success(), "initdb failed, stdout and stderr follow:\n{}{}", String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr), ); Ok(()) } pub fn start_server(&self) -> anyhow::Result<PostgresServer> { info!("Starting Postgres server in {:?}", self.datadir); let unix_socket_dir = tempdir()?; // We need a directory with a short name for Unix socket (up to 108 symbols) let unix_socket_dir_path = unix_socket_dir.path().to_owned(); let server_process = self .new_pg_command("postgres")? 
.args(["-c", "listen_addresses="]) .arg("-k") .arg(&unix_socket_dir_path) .arg("-D") .arg(&self.datadir) .args(REQUIRED_POSTGRES_CONFIG.iter().flat_map(|cfg| ["-c", cfg])) .spawn()?; let server = PostgresServer { process: server_process, _unix_socket_dir: unix_socket_dir, client_config: { let mut c = postgres::Config::new(); c.host_path(&unix_socket_dir_path); c.user("postgres"); c.connect_timeout(Duration::from_millis(10000)); c }, }; Ok(server) } pub fn pg_waldump( &self, first_segment_name: &OsStr, last_segment_name: &OsStr, ) -> anyhow::Result<std::process::Output> { let first_segment_file = self.datadir.join(first_segment_name); let last_segment_file = self.datadir.join(last_segment_name); info!( "Running pg_waldump for {} .. {}", first_segment_file.display(), last_segment_file.display() ); let output = self .new_pg_command("pg_waldump")? .args([&first_segment_file, &last_segment_file]) .output()?; debug!("waldump output: {:?}", output); Ok(output) } } impl PostgresServer { pub fn connect_with_timeout(&self) -> anyhow::Result<Client> { let retry_until = Instant::now() + *self.client_config.get_connect_timeout().unwrap(); while Instant::now() < retry_until { if let Ok(client) = self.client_config.connect(postgres::NoTls) { return Ok(client); } std::thread::sleep(Duration::from_millis(100)); } bail!("Connection timed out"); } pub fn kill(mut self) { self.process.kill().unwrap(); self.process.wait().unwrap(); } } impl Drop for PostgresServer { fn drop(&mut self) { match self.process.try_wait() { Ok(Some(_)) => return, Ok(None) => { warn!("Server was not terminated, will be killed"); } Err(e) => { error!("Unable to get status of the server: {}, will be killed", e); } } let _ = self.process.kill(); } } pub trait PostgresClientExt: postgres::GenericClient { fn pg_current_wal_insert_lsn(&mut self) -> anyhow::Result<PgLsn> { Ok(self .query_one("SELECT pg_current_wal_insert_lsn()", &[])? 
.get(0)) } fn pg_current_wal_flush_lsn(&mut self) -> anyhow::Result<PgLsn> { Ok(self .query_one("SELECT pg_current_wal_flush_lsn()", &[])? .get(0)) } } impl<C: postgres::GenericClient> PostgresClientExt for C {} pub fn ensure_server_config(client: &mut impl postgres::GenericClient) -> anyhow::Result<()> { client.execute("create extension if not exists neon_test_utils", &[])?; let wal_keep_size: String = client.query_one("SHOW wal_keep_size", &[])?.get(0); ensure!(wal_keep_size == "50MB"); let wal_writer_delay: String = client.query_one("SHOW wal_writer_delay", &[])?.get(0); ensure!(wal_writer_delay == "10s"); let autovacuum: String = client.query_one("SHOW autovacuum", &[])?.get(0); ensure!(autovacuum == "off"); let wal_segment_size = client.query_one( "select cast(setting as bigint) as setting, unit \ from pg_settings where name = 'wal_segment_size'", &[], )?; ensure!( wal_segment_size.get::<_, String>("unit") == "B", "Unexpected wal_segment_size unit" ); ensure!( wal_segment_size.get::<_, i64>("setting") == WAL_SEGMENT_SIZE as i64, "Unexpected wal_segment_size in bytes" ); Ok(()) } pub trait Crafter { const NAME: &'static str; /// Generates WAL using the client `client`. Returns a vector of some valid /// "interesting" intermediate LSNs which one may start reading from. /// test_end_of_wal uses this to check various starting points. /// /// Note that postgres is generally keen about writing some WAL. While we /// try to disable it (autovacuum, big wal_writer_delay, etc) it is always /// possible, e.g. xl_running_xacts are dumped each 15s. So checks about /// stable WAL end would be flaky unless postgres is shut down. For this /// reason returning potential end of WAL here is pointless. Most of the /// time this doesn't happen though, so it is reasonable to create needed /// WAL structure and immediately kill postgres like test_end_of_wal does. 
fn craft(client: &mut impl postgres::GenericClient) -> anyhow::Result<Vec<PgLsn>>; } /// Wraps some WAL craft function, providing current LSN to it before the /// insertion and flushing WAL afterwards. Also pushes initial LSN to the /// result. fn craft_internal<C: postgres::GenericClient>( client: &mut C, f: impl Fn(&mut C, PgLsn) -> anyhow::Result<Vec<PgLsn>>, ) -> anyhow::Result<Vec<PgLsn>> { ensure_server_config(client)?; let initial_lsn = client.pg_current_wal_insert_lsn()?; info!("LSN initial = {}", initial_lsn); let mut intermediate_lsns = f(client, initial_lsn)?; if !intermediate_lsns.starts_with(&[initial_lsn]) { intermediate_lsns.insert(0, initial_lsn); } // Some records may be not flushed, e.g. non-transactional logical messages. Flush now. // // If the previous WAL record ended exactly at page boundary, pg_current_wal_insert_lsn // returns the position just after the page header on the next page. That's where the next // record will be inserted. But the page header hasn't actually been written to the WAL // yet, and if you try to flush it, you get a "request to flush past end of generated WAL" // error. Because of that, if the insert location is just after a page header, back off to // previous page boundary. 
let mut lsn = u64::from(client.pg_current_wal_insert_lsn()?); if lsn % WAL_SEGMENT_SIZE as u64 == XLOG_SIZE_OF_XLOG_LONG_PHD as u64 { lsn -= XLOG_SIZE_OF_XLOG_LONG_PHD as u64; } else if lsn % XLOG_BLCKSZ as u64 == XLOG_SIZE_OF_XLOG_SHORT_PHD as u64 { lsn -= XLOG_SIZE_OF_XLOG_SHORT_PHD as u64; } client.execute("select neon_xlogflush($1)", &[&PgLsn::from(lsn)])?; Ok(intermediate_lsns) } pub struct Simple; impl Crafter for Simple { const NAME: &'static str = "simple"; fn craft(client: &mut impl postgres::GenericClient) -> anyhow::Result<Vec<PgLsn>> { craft_internal(client, |client, _| { client.execute("CREATE table t(x int)", &[])?; Ok(Vec::new()) }) } } pub struct LastWalRecordXlogSwitch; impl Crafter for LastWalRecordXlogSwitch { const NAME: &'static str = "last_wal_record_xlog_switch"; fn craft(client: &mut impl postgres::GenericClient) -> anyhow::Result<Vec<PgLsn>> { // Do not use craft_internal because here we end up with flush_lsn exactly on // the segment boundary and insert_lsn after the initial page header, which is unusual. ensure_server_config(client)?; client.execute("CREATE table t(x int)", &[])?; let before_xlog_switch = client.pg_current_wal_insert_lsn()?; // pg_switch_wal returns end of last record of the switched segment, // i.e. end of SWITCH itself. let xlog_switch_record_end: PgLsn = client.query_one("SELECT pg_switch_wal()", &[])?.get(0); let before_xlog_switch_u64 = u64::from(before_xlog_switch); let next_segment = PgLsn::from( before_xlog_switch_u64 - (before_xlog_switch_u64 % WAL_SEGMENT_SIZE as u64) + WAL_SEGMENT_SIZE as u64, ); ensure!( xlog_switch_record_end <= next_segment, "XLOG_SWITCH record ended after the expected segment boundary: {} > {}", xlog_switch_record_end, next_segment ); Ok(vec![before_xlog_switch, xlog_switch_record_end]) } } pub struct LastWalRecordXlogSwitchEndsOnPageBoundary; /// Craft xlog SWITCH record ending at page boundary. 
impl Crafter for LastWalRecordXlogSwitchEndsOnPageBoundary { const NAME: &'static str = "last_wal_record_xlog_switch_ends_on_page_boundary"; fn craft(client: &mut impl postgres::GenericClient) -> anyhow::Result<Vec<PgLsn>> { // Do not use generate_internal because here we end up with flush_lsn exactly on // the segment boundary and insert_lsn after the initial page header, which is unusual. ensure_server_config(client)?; client.execute("CREATE table t(x int)", &[])?; // Add padding so the XLOG_SWITCH record ends exactly on XLOG_BLCKSZ boundary. We // will use carefully-sized logical messages to advance WAL insert location such // that there is just enough space on the page for the XLOG_SWITCH record. loop { // We start with measuring how much WAL it takes for one logical message, // considering all alignments and headers. let before_lsn = client.pg_current_wal_insert_lsn()?; client.execute( "SELECT pg_logical_emit_message(false, 'swch', REPEAT('a', 10))", &[], )?; let after_lsn = client.pg_current_wal_insert_lsn()?; // Did the record cross a page boundary? If it did, start over. Crossing a // page boundary adds to the apparent size of the record because of the page // header, which throws off the calculation. if u64::from(before_lsn) / XLOG_BLCKSZ as u64 != u64::from(after_lsn) / XLOG_BLCKSZ as u64 { continue; } // base_size is the size of a logical message without the payload let base_size = u64::from(after_lsn) - u64::from(before_lsn) - 10; // Is there enough space on the page for another logical message and an // XLOG_SWITCH? If not, start over. let page_remain = XLOG_BLCKSZ as u64 - u64::from(after_lsn) % XLOG_BLCKSZ as u64; if page_remain < base_size + XLOG_SIZE_OF_XLOG_RECORD as u64 { continue; } // We will write another logical message, such that after the logical message // record, there will be space for exactly one XLOG_SWITCH. How large should // the logical message's payload be? 
An XLOG_SWITCH record has no data => its // size is exactly XLOG_SIZE_OF_XLOG_RECORD. let repeats = page_remain - base_size - XLOG_SIZE_OF_XLOG_RECORD as u64; client.execute( "SELECT pg_logical_emit_message(false, 'swch', REPEAT('a', $1))", &[&(repeats as i32)], )?; info!( "current_wal_insert_lsn={}, XLOG_SIZE_OF_XLOG_RECORD={}", client.pg_current_wal_insert_lsn()?, XLOG_SIZE_OF_XLOG_RECORD ); // Emit the XLOG_SWITCH let before_xlog_switch = client.pg_current_wal_insert_lsn()?; let xlog_switch_record_end: PgLsn = client.query_one("SELECT pg_switch_wal()", &[])?.get(0); if u64::from(xlog_switch_record_end) as usize % XLOG_BLCKSZ != XLOG_SIZE_OF_XLOG_SHORT_PHD { warn!( "XLOG_SWITCH message ended not on page boundary: {}, offset = {}, repeating", xlog_switch_record_end, u64::from(xlog_switch_record_end) as usize % XLOG_BLCKSZ ); continue; } return Ok(vec![before_xlog_switch, xlog_switch_record_end]); } } } /// Write ~16MB logical message; it should cross WAL segment. fn craft_seg_size_logical_message( client: &mut impl postgres::GenericClient, transactional: bool, ) -> anyhow::Result<Vec<PgLsn>> { craft_internal(client, |client, initial_lsn| { ensure!( initial_lsn < PgLsn::from(0x0200_0000 - 1024 * 1024), "Initial LSN is too far in the future" ); let message_lsn: PgLsn = client .query_one( "select pg_logical_emit_message($1, 'big-16mb-msg', \ concat(repeat('abcd', 16 * 256 * 1024), 'end')) as message_lsn", &[&transactional], )? 
.get("message_lsn"); ensure!( message_lsn > PgLsn::from(0x0200_0000 + 4 * 8192), "Logical message did not cross the segment boundary" ); ensure!( message_lsn < PgLsn::from(0x0400_0000), "Logical message crossed two segments" ); Ok(vec![message_lsn]) }) } pub struct WalRecordCrossingSegmentFollowedBySmallOne; impl Crafter for WalRecordCrossingSegmentFollowedBySmallOne { const NAME: &'static str = "wal_record_crossing_segment_followed_by_small_one"; fn craft(client: &mut impl postgres::GenericClient) -> anyhow::Result<Vec<PgLsn>> { // Transactional message crossing WAL segment will be followed by small // commit record. craft_seg_size_logical_message(client, true) } } pub struct LastWalRecordCrossingSegment; impl Crafter for LastWalRecordCrossingSegment { const NAME: &'static str = "last_wal_record_crossing_segment"; fn craft(client: &mut impl postgres::GenericClient) -> anyhow::Result<Vec<PgLsn>> { craft_seg_size_logical_message(client, false) } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs
libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs
//! Tests for postgres_ffi xlog_utils module. Put it here to break cyclic dependency. use super::*; use crate::{error, info}; use regex::Regex; use std::cmp::min; use std::ffi::OsStr; use std::fs::{self, File}; use std::io::Write; use std::{env, str::FromStr}; use utils::const_assert; use utils::lsn::Lsn; fn init_logging() { let _ = env_logger::Builder::from_env(env_logger::Env::default().default_filter_or(format!( "crate=info,postgres_ffi::{PG_MAJORVERSION}::xlog_utils=trace" ))) .is_test(true) .try_init(); } /// Test that find_end_of_wal returns the same results as pg_dump on various /// WALs created by Crafter. fn test_end_of_wal<C: crate::Crafter>(test_name: &str) { use crate::*; let pg_version = MY_PGVERSION; // Craft some WAL let top_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) .join("..") .join("..") .join(".."); let cfg = Conf { pg_version, pg_distrib_dir: top_path.join("pg_install"), datadir: top_path.join(format!("test_output/{test_name}-{PG_MAJORVERSION}")), }; if cfg.datadir.exists() { fs::remove_dir_all(&cfg.datadir).unwrap(); } cfg.initdb().unwrap(); let srv = cfg.start_server().unwrap(); let intermediate_lsns = C::craft(&mut srv.connect_with_timeout().unwrap()).unwrap(); let intermediate_lsns: Vec<Lsn> = intermediate_lsns .iter() .map(|&lsn| u64::from(lsn).into()) .collect(); // Kill postgres. Note that it might have inserted to WAL something after // 'craft' did its job. srv.kill(); // Check find_end_of_wal on the initial WAL let last_segment = cfg .wal_dir() .read_dir() .unwrap() .map(|f| f.unwrap().file_name()) .filter(|fname| IsXLogFileName(fname)) .max() .unwrap(); let expected_end_of_wal = find_pg_waldump_end_of_wal(&cfg, &last_segment); for start_lsn in intermediate_lsns .iter() .chain(std::iter::once(&expected_end_of_wal)) { // Erase all WAL before `start_lsn` to ensure it's not used by `find_end_of_wal`. // We assume that `start_lsn` is non-decreasing. 
info!( "Checking with start_lsn={}, erasing WAL before it", start_lsn ); for file in fs::read_dir(cfg.wal_dir()).unwrap().flatten() { let fname = file.file_name(); if !IsXLogFileName(&fname) { continue; } let (segno, _) = XLogFromFileName(&fname, WAL_SEGMENT_SIZE).unwrap(); let seg_start_lsn = XLogSegNoOffsetToRecPtr(segno, 0, WAL_SEGMENT_SIZE); if seg_start_lsn > u64::from(*start_lsn) { continue; } let mut f = File::options().write(true).open(file.path()).unwrap(); static ZEROS: [u8; WAL_SEGMENT_SIZE] = [0u8; WAL_SEGMENT_SIZE]; f.write_all( &ZEROS[0..min( WAL_SEGMENT_SIZE, (u64::from(*start_lsn) - seg_start_lsn) as usize, )], ) .unwrap(); } check_end_of_wal(&cfg, &last_segment, *start_lsn, expected_end_of_wal); } } fn find_pg_waldump_end_of_wal(cfg: &crate::Conf, last_segment: &OsStr) -> Lsn { // Get the actual end of WAL by pg_waldump let waldump_output = cfg .pg_waldump(OsStr::new("000000010000000000000001"), last_segment) .unwrap() .stderr; let waldump_output = std::str::from_utf8(&waldump_output).unwrap(); let caps = match Regex::new(r"invalid record length at (.+):") .unwrap() .captures(waldump_output) { Some(caps) => caps, None => { error!("Unable to parse pg_waldump's stderr:\n{}", waldump_output); panic!(); } }; let waldump_wal_end = Lsn::from_str(caps.get(1).unwrap().as_str()).unwrap(); info!("waldump erred on {}", waldump_wal_end); waldump_wal_end } fn check_end_of_wal( cfg: &crate::Conf, last_segment: &OsStr, start_lsn: Lsn, expected_end_of_wal: Lsn, ) { // Check end_of_wal on non-partial WAL segment (we treat it as fully populated) // let wal_end = find_end_of_wal(&cfg.wal_dir(), WAL_SEGMENT_SIZE, start_lsn).unwrap(); // info!( // "find_end_of_wal returned wal_end={} with non-partial WAL segment", // wal_end // ); // assert_eq!(wal_end, expected_end_of_wal_non_partial); // Rename file to partial to actually find last valid lsn, then rename it back. 
fs::rename( cfg.wal_dir().join(last_segment), cfg.wal_dir() .join(format!("{}.partial", last_segment.to_str().unwrap())), ) .unwrap(); let wal_end = find_end_of_wal(&cfg.wal_dir(), WAL_SEGMENT_SIZE, start_lsn).unwrap(); info!( "find_end_of_wal returned wal_end={} with partial WAL segment", wal_end ); assert_eq!(wal_end, expected_end_of_wal); fs::rename( cfg.wal_dir() .join(format!("{}.partial", last_segment.to_str().unwrap())), cfg.wal_dir().join(last_segment), ) .unwrap(); } const_assert!(WAL_SEGMENT_SIZE == 16 * 1024 * 1024); #[test] pub fn test_find_end_of_wal_simple() { init_logging(); test_end_of_wal::<crate::Simple>("test_find_end_of_wal_simple"); } #[test] pub fn test_find_end_of_wal_crossing_segment_followed_by_small_one() { init_logging(); test_end_of_wal::<crate::WalRecordCrossingSegmentFollowedBySmallOne>( "test_find_end_of_wal_crossing_segment_followed_by_small_one", ); } #[test] pub fn test_find_end_of_wal_last_crossing_segment() { init_logging(); test_end_of_wal::<crate::LastWalRecordCrossingSegment>( "test_find_end_of_wal_last_crossing_segment", ); } /// Check the math in update_next_xid /// /// NOTE: These checks are sensitive to the value of XID_CHECKPOINT_INTERVAL, /// currently 1024. #[test] pub fn test_update_next_xid() { let checkpoint_buf = [0u8; size_of::<CheckPoint>()]; let mut checkpoint = CheckPoint::decode(&checkpoint_buf).unwrap(); checkpoint.nextXid = FullTransactionId { value: 10 }; assert_eq!(checkpoint.nextXid.value, 10); // The input XID gets rounded up to the next XID_CHECKPOINT_INTERVAL // boundary checkpoint.update_next_xid(100); assert_eq!(checkpoint.nextXid.value, 1024); // No change checkpoint.update_next_xid(500); assert_eq!(checkpoint.nextXid.value, 1024); checkpoint.update_next_xid(1023); assert_eq!(checkpoint.nextXid.value, 1024); // The function returns the *next* XID, given the highest XID seen so // far. So when we pass 1024, the nextXid gets bumped up to the next // XID_CHECKPOINT_INTERVAL boundary. 
checkpoint.update_next_xid(1024); assert_eq!(checkpoint.nextXid.value, 2048); } #[test] pub fn test_update_next_multixid() { let checkpoint_buf = [0u8; size_of::<CheckPoint>()]; let mut checkpoint = CheckPoint::decode(&checkpoint_buf).unwrap(); // simple case checkpoint.nextMulti = 20; checkpoint.nextMultiOffset = 20; checkpoint.update_next_multixid(1000, 2000); assert_eq!(checkpoint.nextMulti, 1000); assert_eq!(checkpoint.nextMultiOffset, 2000); // No change checkpoint.update_next_multixid(500, 900); assert_eq!(checkpoint.nextMulti, 1000); assert_eq!(checkpoint.nextMultiOffset, 2000); // Close to wraparound, but not wrapped around yet checkpoint.nextMulti = 0xffff0000; checkpoint.nextMultiOffset = 0xfffe0000; checkpoint.update_next_multixid(0xffff00ff, 0xfffe00ff); assert_eq!(checkpoint.nextMulti, 0xffff00ff); assert_eq!(checkpoint.nextMultiOffset, 0xfffe00ff); // Wraparound checkpoint.update_next_multixid(1, 900); assert_eq!(checkpoint.nextMulti, 1); assert_eq!(checkpoint.nextMultiOffset, 900); // Wraparound nextMulti to 0. // // It's a bit surprising that nextMulti can be 0, because that's a special value // (InvalidMultiXactId). However, that's how Postgres does it at multi-xid wraparound: // nextMulti wraps around to 0, but then when the next multi-xid is assigned, it skips // the 0 and the next multi-xid actually assigned is 1. 
checkpoint.nextMulti = 0xffff0000; checkpoint.nextMultiOffset = 0xfffe0000; checkpoint.update_next_multixid(0, 0xfffe00ff); assert_eq!(checkpoint.nextMulti, 0); assert_eq!(checkpoint.nextMultiOffset, 0xfffe00ff); // Wraparound nextMultiOffset to 0 checkpoint.update_next_multixid(0, 0); assert_eq!(checkpoint.nextMulti, 0); assert_eq!(checkpoint.nextMultiOffset, 0); } #[test] pub fn test_encode_logical_message() { let expected = [ 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 170, 34, 166, 227, 255, 38, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 112, 114, 101, 102, 105, 120, 0, 109, 101, 115, 115, 97, 103, 101, ]; let actual = encode_logical_message("prefix", "message"); assert_eq!(expected, actual[..]); }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/wal_craft/src/bin/wal_craft.rs
libs/postgres_ffi/wal_craft/src/bin/wal_craft.rs
use std::path::PathBuf; use std::str::FromStr; use anyhow::*; use clap::{Arg, ArgMatches, Command, value_parser}; use postgres::Client; use postgres_ffi::PgMajorVersion; use wal_craft::*; fn main() -> Result<()> { env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("wal_craft=info")) .init(); let arg_matches = cli().get_matches(); let wal_craft = |arg_matches: &ArgMatches, client: &mut Client| { let intermediate_lsns = match arg_matches .get_one::<String>("type") .map(|s| s.as_str()) .context("'type' is required")? { Simple::NAME => Simple::craft(client)?, LastWalRecordXlogSwitch::NAME => LastWalRecordXlogSwitch::craft(client)?, LastWalRecordXlogSwitchEndsOnPageBoundary::NAME => { LastWalRecordXlogSwitchEndsOnPageBoundary::craft(client)? } WalRecordCrossingSegmentFollowedBySmallOne::NAME => { WalRecordCrossingSegmentFollowedBySmallOne::craft(client)? } LastWalRecordCrossingSegment::NAME => LastWalRecordCrossingSegment::craft(client)?, a => panic!("Unknown --type argument: {a}"), }; let end_of_wal_lsn = client.pg_current_wal_insert_lsn()?; for lsn in intermediate_lsns { println!("intermediate_lsn = {lsn}"); } println!("end_of_wal = {end_of_wal_lsn}"); Ok(()) }; match arg_matches.subcommand() { None => panic!("No subcommand provided"), Some(("print-postgres-config", _)) => { for cfg in REQUIRED_POSTGRES_CONFIG.iter() { println!("{cfg}"); } Ok(()) } Some(("with-initdb", arg_matches)) => { let cfg = Conf { pg_version: *arg_matches .get_one::<PgMajorVersion>("pg-version") .context("'pg-version' is required")?, pg_distrib_dir: arg_matches .get_one::<PathBuf>("pg-distrib-dir") .context("'pg-distrib-dir' is required")? .to_owned(), datadir: arg_matches .get_one::<PathBuf>("datadir") .context("'datadir' is required")? 
.to_owned(), }; cfg.initdb()?; let srv = cfg.start_server()?; wal_craft(arg_matches, &mut srv.connect_with_timeout()?)?; srv.kill(); Ok(()) } Some(("in-existing", arg_matches)) => wal_craft( arg_matches, &mut postgres::Config::from_str( arg_matches .get_one::<String>("connection") .context("'connection' is required")?, ) .context( "'connection' argument value could not be parsed as a postgres connection string", )? .connect(postgres::NoTls)?, ), Some(_) => panic!("Unknown subcommand"), } } fn cli() -> Command { let type_arg = &Arg::new("type") .help("Type of WAL to craft") .value_parser([ Simple::NAME, LastWalRecordXlogSwitch::NAME, LastWalRecordXlogSwitchEndsOnPageBoundary::NAME, WalRecordCrossingSegmentFollowedBySmallOne::NAME, LastWalRecordCrossingSegment::NAME, ]) .required(true); Command::new("Postgres WAL crafter") .about("Crafts Postgres databases with specific WAL properties") .subcommand( Command::new("print-postgres-config") .about("Print the configuration required for PostgreSQL server before running this script") ) .subcommand( Command::new("with-initdb") .about("Craft WAL in a new data directory first initialized with initdb") .arg(type_arg) .arg( Arg::new("datadir") .help("Data directory for the Postgres server") .value_parser(value_parser!(PathBuf)) .required(true) ) .arg( Arg::new("pg-distrib-dir") .long("pg-distrib-dir") .value_parser(value_parser!(PathBuf)) .help("Directory with Postgres distributions (bin and lib directories, e.g. pg_install containing subpath `v14/bin/postgresql`)") .default_value("/usr/local") ) .arg( Arg::new("pg-version") .long("pg-version") .help("Postgres version to use for the initial tenant") .value_parser(value_parser!(u32)) .required(true) ) ) .subcommand( Command::new("in-existing") .about("Craft WAL at an existing recently created Postgres database. 
Note that server may append new WAL entries on shutdown.") .arg(type_arg) .arg( Arg::new("connection") .help("Connection string to the Postgres database to populate") .required(true) ) ) } #[test] fn verify_cli() { cli().debug_assert(); }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/src/wal_craft_test_export.rs
libs/postgres_ffi/src/wal_craft_test_export.rs
//! This module is for WAL craft to test with postgres_ffi. Should not import any thing in normal usage. pub use super::PG_MAJORVERSION; pub use super::xlog_utils::*; pub use super::bindings::*; pub use crate::WAL_SEGMENT_SIZE;
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/src/pg_constants_v14.rs
libs/postgres_ffi/src/pg_constants_v14.rs
use crate::PgMajorVersion; pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG14; pub const XLOG_DBASE_CREATE: u8 = 0x00; pub const XLOG_DBASE_DROP: u8 = 0x10; pub const BKPIMAGE_IS_COMPRESSED: u8 = 0x02; /* page image is compressed */ pub const BKPIMAGE_APPLY: u8 = 0x04; /* page image should be restored during replay */ pub const SIZEOF_RELMAPFILE: usize = 512; /* sizeof(RelMapFile) in relmapper.c */ // List of subdirectories inside pgdata. // Copied from src/bin/initdb/initdb.c pub const PGDATA_SUBDIRS: [&str; 22] = [ "global", "pg_wal/archive_status", "pg_commit_ts", "pg_dynshmem", "pg_notify", "pg_serial", "pg_snapshots", "pg_subtrans", "pg_twophase", "pg_multixact", "pg_multixact/members", "pg_multixact/offsets", "base", "base/1", "pg_replslot", "pg_tblspc", "pg_stat", "pg_stat_tmp", "pg_xact", "pg_logical", "pg_logical/snapshots", "pg_logical/mappings", ]; pub fn bkpimg_is_compressed(bimg_info: u8) -> bool { (bimg_info & BKPIMAGE_IS_COMPRESSED) != 0 }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/src/nonrelfile_utils.rs
libs/postgres_ffi/src/nonrelfile_utils.rs
//! //! Common utilities for dealing with PostgreSQL non-relation files. //! use crate::pg_constants; use crate::transaction_id_precedes; use bytes::BytesMut; use super::bindings::MultiXactId; pub fn transaction_id_set_status(xid: u32, status: u8, page: &mut BytesMut) { tracing::trace!( "handle_apply_request for RM_XACT_ID-{} (1-commit, 2-abort, 3-sub_commit)", status ); let byteno: usize = ((xid % pg_constants::CLOG_XACTS_PER_PAGE) / pg_constants::CLOG_XACTS_PER_BYTE) as usize; let bshift: u8 = ((xid % pg_constants::CLOG_XACTS_PER_BYTE) * pg_constants::CLOG_BITS_PER_XACT as u32) as u8; page[byteno] = (page[byteno] & !(pg_constants::CLOG_XACT_BITMASK << bshift)) | (status << bshift); } pub fn transaction_id_get_status(xid: u32, page: &[u8]) -> u8 { let byteno: usize = ((xid % pg_constants::CLOG_XACTS_PER_PAGE) / pg_constants::CLOG_XACTS_PER_BYTE) as usize; let bshift: u8 = ((xid % pg_constants::CLOG_XACTS_PER_BYTE) * pg_constants::CLOG_BITS_PER_XACT as u32) as u8; (page[byteno] >> bshift) & pg_constants::CLOG_XACT_BITMASK } // See CLOGPagePrecedes in clog.c pub const fn clogpage_precedes(page1: u32, page2: u32) -> bool { let mut xid1 = page1 * pg_constants::CLOG_XACTS_PER_PAGE; xid1 += pg_constants::FIRST_NORMAL_TRANSACTION_ID + 1; let mut xid2 = page2 * pg_constants::CLOG_XACTS_PER_PAGE; xid2 += pg_constants::FIRST_NORMAL_TRANSACTION_ID + 1; transaction_id_precedes(xid1, xid2) && transaction_id_precedes(xid1, xid2 + pg_constants::CLOG_XACTS_PER_PAGE - 1) } // See SlruMayDeleteSegment() in slru.c pub fn slru_may_delete_clogsegment(segpage: u32, cutoff_page: u32) -> bool { let seg_last_page = segpage + pg_constants::SLRU_PAGES_PER_SEGMENT - 1; assert_eq!(segpage % pg_constants::SLRU_PAGES_PER_SEGMENT, 0); clogpage_precedes(segpage, cutoff_page) && clogpage_precedes(seg_last_page, cutoff_page) } // Multixact utils pub fn mx_offset_to_flags_offset(xid: MultiXactId) -> usize { ((xid / pg_constants::MULTIXACT_MEMBERS_PER_MEMBERGROUP as u32) % 
pg_constants::MULTIXACT_MEMBERGROUPS_PER_PAGE as u32 * pg_constants::MULTIXACT_MEMBERGROUP_SIZE as u32) as usize } pub fn mx_offset_to_flags_bitshift(xid: MultiXactId) -> u16 { (xid as u16) % pg_constants::MULTIXACT_MEMBERS_PER_MEMBERGROUP * pg_constants::MXACT_MEMBER_BITS_PER_XACT } /* Location (byte offset within page) of TransactionId of given member */ pub fn mx_offset_to_member_offset(xid: MultiXactId) -> usize { mx_offset_to_flags_offset(xid) + (pg_constants::MULTIXACT_FLAGBYTES_PER_GROUP + (xid as u16 % pg_constants::MULTIXACT_MEMBERS_PER_MEMBERGROUP) * 4) as usize } fn mx_offset_to_member_page(xid: u32) -> u32 { xid / pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32 } pub fn mx_offset_to_member_segment(xid: u32) -> i32 { (mx_offset_to_member_page(xid) / pg_constants::SLRU_PAGES_PER_SEGMENT) as i32 } #[cfg(test)] mod tests { use super::*; #[test] fn test_multixid_calc() { // Check that the mx_offset_* functions produce the same values as the // corresponding PostgreSQL C macros (MXOffsetTo*). These test values // were generated by calling the PostgreSQL macros with a little C // program. 
assert_eq!(mx_offset_to_member_segment(0), 0); assert_eq!(mx_offset_to_member_page(0), 0); assert_eq!(mx_offset_to_flags_offset(0), 0); assert_eq!(mx_offset_to_flags_bitshift(0), 0); assert_eq!(mx_offset_to_member_offset(0), 4); assert_eq!(mx_offset_to_member_segment(1), 0); assert_eq!(mx_offset_to_member_page(1), 0); assert_eq!(mx_offset_to_flags_offset(1), 0); assert_eq!(mx_offset_to_flags_bitshift(1), 8); assert_eq!(mx_offset_to_member_offset(1), 8); assert_eq!(mx_offset_to_member_segment(123456789), 2358); assert_eq!(mx_offset_to_member_page(123456789), 75462); assert_eq!(mx_offset_to_flags_offset(123456789), 4780); assert_eq!(mx_offset_to_flags_bitshift(123456789), 8); assert_eq!(mx_offset_to_member_offset(123456789), 4788); assert_eq!(mx_offset_to_member_segment(u32::MAX - 1), 82040); assert_eq!(mx_offset_to_member_page(u32::MAX - 1), 2625285); assert_eq!(mx_offset_to_flags_offset(u32::MAX - 1), 5160); assert_eq!(mx_offset_to_flags_bitshift(u32::MAX - 1), 16); assert_eq!(mx_offset_to_member_offset(u32::MAX - 1), 5172); assert_eq!(mx_offset_to_member_segment(u32::MAX), 82040); assert_eq!(mx_offset_to_member_page(u32::MAX), 2625285); assert_eq!(mx_offset_to_flags_offset(u32::MAX), 5160); assert_eq!(mx_offset_to_flags_bitshift(u32::MAX), 24); assert_eq!(mx_offset_to_member_offset(u32::MAX), 5176); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/src/lib.rs
libs/postgres_ffi/src/lib.rs
#![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] // bindgen creates some unsafe code with no doc comments. #![allow(clippy::missing_safety_doc)] // noted at 1.63 that in many cases there's u32 -> u32 transmutes in bindgen code. #![allow(clippy::useless_transmute)] // modules included with the postgres_ffi macro depend on the types of the specific version's // types, and trigger a too eager lint. #![allow(clippy::duplicate_mod)] #![deny(clippy::undocumented_unsafe_blocks)] use bytes::Bytes; use utils::bin_ser::SerializeError; use utils::lsn::Lsn; pub use postgres_versioninfo::PgMajorVersion; macro_rules! postgres_ffi { ($version:ident) => { #[path = "."] pub mod $version { pub mod bindings { // bindgen generates bindings for a lot of stuff we don't need #![allow(dead_code)] #![allow(unsafe_op_in_unsafe_fn)] #![allow(clippy::undocumented_unsafe_blocks)] #![allow(clippy::ptr_offset_with_cast)] use serde::{Deserialize, Serialize}; include!(concat!( env!("OUT_DIR"), "/bindings_", stringify!($version), ".rs" )); include!(concat!("pg_constants_", stringify!($version), ".rs")); } pub mod controlfile_utils; pub mod nonrelfile_utils; pub mod wal_craft_test_export; pub mod wal_generator; pub mod waldecoder_handler; pub mod xlog_utils; pub const PG_MAJORVERSION: &str = stringify!($version); // Re-export some symbols from bindings pub use bindings::{CheckPoint, ControlFileData, DBState_DB_SHUTDOWNED, XLogRecord}; pub const ZERO_CHECKPOINT: bytes::Bytes = bytes::Bytes::from_static(&[0u8; xlog_utils::SIZEOF_CHECKPOINT]); } }; } #[macro_export] macro_rules! for_all_postgres_versions { ($macro:tt) => { $macro!(v14); $macro!(v15); $macro!(v16); $macro!(v17); }; } for_all_postgres_versions! { postgres_ffi } /// dispatch_pgversion /// /// Run a code block in a context where the postgres_ffi bindings for a /// specific (supported) PostgreSQL version are `use`-ed in scope under the pgv /// identifier. 
/// If the provided pg_version is not supported, we panic!(), unless the /// optional third argument was provided (in which case that code will provide /// the default handling instead). /// /// Use like /// /// dispatch_pgversion!(my_pgversion, { pgv::constants::XLOG_DBASE_CREATE }) /// dispatch_pgversion!(my_pgversion, pgv::constants::XLOG_DBASE_CREATE) /// /// Other uses are for macro-internal purposes only and strictly unsupported. /// #[macro_export] macro_rules! dispatch_pgversion { ($version:expr, $code:expr) => { dispatch_pgversion!($version, $code, panic!("Unknown PostgreSQL version {}", $version)) }; ($version:expr, $code:expr, $invalid_pgver_handling:expr) => { dispatch_pgversion!( $version => $code, default = $invalid_pgver_handling, pgversions = [ $crate::PgMajorVersion::PG14 => v14, $crate::PgMajorVersion::PG15 => v15, $crate::PgMajorVersion::PG16 => v16, $crate::PgMajorVersion::PG17 => v17, ] ) }; ($pgversion:expr => $code:expr, default = $default:expr, pgversions = [$($sv:pat => $vsv:ident),+ $(,)?]) => { match ($pgversion.clone().into()) { $($sv => { use $crate::$vsv as pgv; $code },)+ #[allow(unreachable_patterns)] _ => { $default } } }; } #[macro_export] macro_rules! enum_pgversion_dispatch { ($name:expr, $typ:ident, $bind:ident, $code:block) => { enum_pgversion_dispatch!( name = $name, bind = $bind, typ = $typ, code = $code, pgversions = [ V14 : v14, V15 : v15, V16 : v16, V17 : v17, ] ) }; (name = $name:expr, bind = $bind:ident, typ = $typ:ident, code = $code:block, pgversions = [$($variant:ident : $md:ident),+ $(,)?]) => { match $name { $( self::$typ::$variant($bind) => { use $crate::$md as pgv; $code } ),+, } }; } #[macro_export] macro_rules! 
enum_pgversion { {$name:ident, pgv :: $t:ident} => { enum_pgversion!{ name = $name, typ = $t, pgversions = [ V14 : v14, V15 : v15, V16 : v16, V17 : v17, ] } }; {$name:ident, pgv :: $p:ident :: $t:ident} => { enum_pgversion!{ name = $name, path = $p, typ = $t, pgversions = [ V14 : v14, V15 : v15, V16 : v16, V17 : v17, ] } }; {name = $name:ident, typ = $t:ident, pgversions = [$($variant:ident : $md:ident),+ $(,)?]} => { pub enum $name { $($variant ( $crate::$md::$t )),+ } impl self::$name { pub fn pg_version(&self) -> PgMajorVersion { enum_pgversion_dispatch!(self, $name, _ign, { pgv::bindings::MY_PGVERSION }) } } $( impl Into<self::$name> for $crate::$md::$t { fn into(self) -> self::$name { self::$name::$variant (self) } } )+ }; {name = $name:ident, path = $p:ident, $(typ = $t:ident,)? pgversions = [$($variant:ident : $md:ident),+ $(,)?]} => { pub enum $name { $($variant $(($crate::$md::$p::$t))?),+ } impl $name { pub fn pg_version(&self) -> PgMajorVersion { enum_pgversion_dispatch!(self, $name, _ign, { pgv::bindings::MY_PGVERSION }) } } $( impl Into<$name> for $crate::$md::$p::$t { fn into(self) -> $name { $name::$variant (self) } } )+ }; } pub mod pg_constants; pub mod relfile_utils; pub mod walrecord; // Export some widely used datatypes that are unlikely to change across Postgres versions pub use v14::bindings::{ BlockNumber, CheckPoint, ControlFileData, MultiXactId, OffsetNumber, Oid, PageHeaderData, RepOriginId, TimeLineID, TransactionId, XLogRecPtr, XLogRecord, XLogSegNo, uint32, uint64, }; // Likewise for these, although the assumption that these don't change is a little more iffy. pub use v14::bindings::{MultiXactOffset, MultiXactStatus}; pub use v14::xlog_utils::{ XLOG_SIZE_OF_XLOG_LONG_PHD, XLOG_SIZE_OF_XLOG_RECORD, XLOG_SIZE_OF_XLOG_SHORT_PHD, }; // from pg_config.h. These can be changed with configure options --with-blocksize=BLOCKSIZE and // --with-segsize=SEGSIZE, but assume the defaults for now. 
pub const BLCKSZ: u16 = 8192; pub const RELSEG_SIZE: u32 = 1024 * 1024 * 1024 / (BLCKSZ as u32); pub const XLOG_BLCKSZ: usize = 8192; pub const WAL_SEGMENT_SIZE: usize = 16 * 1024 * 1024; pub const MAX_SEND_SIZE: usize = XLOG_BLCKSZ * 16; // Export some version independent functions that are used outside of this mod pub use v14::bindings::DBState_DB_SHUTDOWNED; pub use v14::xlog_utils::{ XLogFileName, encode_logical_message, get_current_timestamp, to_pg_timestamp, try_from_pg_timestamp, }; pub fn bkpimage_is_compressed(bimg_info: u8, version: PgMajorVersion) -> bool { dispatch_pgversion!(version, pgv::bindings::bkpimg_is_compressed(bimg_info)) } pub fn generate_wal_segment( segno: u64, system_id: u64, pg_version: PgMajorVersion, lsn: Lsn, ) -> Result<Bytes, SerializeError> { assert_eq!(segno, lsn.segment_number(WAL_SEGMENT_SIZE)); dispatch_pgversion!( pg_version, pgv::xlog_utils::generate_wal_segment(segno, system_id, lsn) ) } pub fn generate_pg_control( pg_control_bytes: &[u8], checkpoint_bytes: &[u8], lsn: Lsn, pg_version: PgMajorVersion, ) -> anyhow::Result<(Bytes, u64, bool)> { dispatch_pgversion!( pg_version, pgv::xlog_utils::generate_pg_control(pg_control_bytes, checkpoint_bytes, lsn), anyhow::bail!("Unknown version {}", pg_version) ) } // PG timeline is always 1, changing it doesn't have any useful meaning in Neon. // // NOTE: this is not to be confused with Neon timelines; different concept! // // It's a shaky assumption, that it's always 1. We might import a // PostgreSQL data directory that has gone through timeline bumps, // for example. FIXME later. pub const PG_TLI: u32 = 1; // See TransactionIdIsNormal in transam.h pub const fn transaction_id_is_normal(id: TransactionId) -> bool { id > pg_constants::FIRST_NORMAL_TRANSACTION_ID } // See TransactionIdPrecedes in transam.c pub const fn transaction_id_precedes(id1: TransactionId, id2: TransactionId) -> bool { /* * If either ID is a permanent XID then we can just do unsigned * comparison. 
If both are normal, do a modulo-2^32 comparison. */ if !(transaction_id_is_normal(id1)) || !transaction_id_is_normal(id2) { return id1 < id2; } let diff = id1.wrapping_sub(id2) as i32; diff < 0 } // Check if page is not yet initialized (port of Postgres PageIsInit() macro) pub fn page_is_new(pg: &[u8]) -> bool { pg[14] == 0 && pg[15] == 0 // pg_upper == 0 } // ExtractLSN from page header pub fn page_get_lsn(pg: &[u8]) -> Lsn { Lsn( ((u32::from_le_bytes(pg[0..4].try_into().unwrap()) as u64) << 32) | u32::from_le_bytes(pg[4..8].try_into().unwrap()) as u64, ) } pub fn page_set_lsn(pg: &mut [u8], lsn: Lsn) { pg[0..4].copy_from_slice(&((lsn.0 >> 32) as u32).to_le_bytes()); pg[4..8].copy_from_slice(&(lsn.0 as u32).to_le_bytes()); } // This is port of function with the same name from freespace.c. // The only difference is that it does not have "level" parameter because XLogRecordPageWithFreeSpace // always call it with level=FSM_BOTTOM_LEVEL pub fn fsm_logical_to_physical(addr: BlockNumber) -> BlockNumber { let mut leafno = addr; const FSM_TREE_DEPTH: u32 = if pg_constants::SLOTS_PER_FSM_PAGE >= 1626 { 3 } else { 4 }; /* Count upper level nodes required to address the leaf page */ let mut pages: BlockNumber = 0; for _l in 0..FSM_TREE_DEPTH { pages += leafno + 1; leafno /= pg_constants::SLOTS_PER_FSM_PAGE; } /* Turn the page count into 0-based block number */ pages - 1 } pub mod waldecoder { use std::num::NonZeroU32; use crate::PgMajorVersion; use bytes::{Buf, Bytes, BytesMut}; use thiserror::Error; use utils::lsn::Lsn; pub enum State { WaitingForRecord, ReassemblingRecord { recordbuf: BytesMut, contlen: NonZeroU32, }, SkippingEverything { skip_until_lsn: Lsn, }, } pub struct WalStreamDecoder { pub lsn: Lsn, pub pg_version: PgMajorVersion, pub inputbuf: BytesMut, pub state: State, } #[derive(Error, Debug, Clone)] #[error("{msg} at {lsn}")] pub struct WalDecodeError { pub msg: String, pub lsn: Lsn, } impl WalStreamDecoder { pub fn new(lsn: Lsn, pg_version: PgMajorVersion) 
-> WalStreamDecoder { WalStreamDecoder { lsn, pg_version, inputbuf: BytesMut::new(), state: State::WaitingForRecord, } } // The latest LSN position fed to the decoder. pub fn available(&self) -> Lsn { self.lsn + self.inputbuf.remaining() as u64 } /// Returns the LSN up to which the WAL decoder has processed. /// /// If [`Self::poll_decode`] returned a record, then this will return /// the end LSN of said record. pub fn lsn(&self) -> Lsn { self.lsn } pub fn feed_bytes(&mut self, buf: &[u8]) { self.inputbuf.extend_from_slice(buf); } pub fn poll_decode(&mut self) -> Result<Option<(Lsn, Bytes)>, WalDecodeError> { dispatch_pgversion!( self.pg_version, { use pgv::waldecoder_handler::WalStreamDecoderHandler; self.poll_decode_internal() }, Err(WalDecodeError { msg: format!("Unknown version {}", self.pg_version), lsn: self.lsn, }) ) } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/src/pg_constants.rs
libs/postgres_ffi/src/pg_constants.rs
//! //! Misc constants, copied from PostgreSQL headers. //! //! Only place version-independent constants here. //! //! TODO: These probably should be auto-generated using bindgen, //! rather than copied by hand. Although on the other hand, it's nice //! to have them all here in one place, and have the ability to add //! comments on them. //! use crate::{BLCKSZ, PageHeaderData}; // Note: There are a few more widely-used constants in the postgres_ffi_types::constants crate. // From storage_xlog.h pub const XLOG_SMGR_CREATE: u8 = 0x10; pub const XLOG_SMGR_TRUNCATE: u8 = 0x20; pub const SMGR_TRUNCATE_HEAP: u32 = 0x0001; pub const SMGR_TRUNCATE_VM: u32 = 0x0002; pub const SMGR_TRUNCATE_FSM: u32 = 0x0004; // // From bufpage.h // // Assumes 8 byte alignment const SIZEOF_PAGE_HEADER_DATA: usize = size_of::<PageHeaderData>(); pub const MAXALIGN_SIZE_OF_PAGE_HEADER_DATA: usize = (SIZEOF_PAGE_HEADER_DATA + 7) & !7; // // constants from clog.h // pub const CLOG_XACTS_PER_BYTE: u32 = 4; pub const CLOG_XACTS_PER_PAGE: u32 = BLCKSZ as u32 * CLOG_XACTS_PER_BYTE; pub const CLOG_BITS_PER_XACT: u8 = 2; pub const CLOG_XACT_BITMASK: u8 = (1 << CLOG_BITS_PER_XACT) - 1; pub const TRANSACTION_STATUS_COMMITTED: u8 = 0x01; pub const TRANSACTION_STATUS_ABORTED: u8 = 0x02; pub const TRANSACTION_STATUS_SUB_COMMITTED: u8 = 0x03; pub const CLOG_ZEROPAGE: u8 = 0x00; pub const CLOG_TRUNCATE: u8 = 0x10; // // Constants from visibilitymap.h, visibilitymapdefs.h and visibilitymap.c // pub const SIZE_OF_PAGE_HEADER: u16 = 24; pub const BITS_PER_BYTE: u16 = 8; pub const HEAPBLOCKS_PER_PAGE: u32 = (BLCKSZ - SIZE_OF_PAGE_HEADER) as u32 * 8 / BITS_PER_HEAPBLOCK as u32; pub const HEAPBLOCKS_PER_BYTE: u16 = BITS_PER_BYTE / BITS_PER_HEAPBLOCK; pub const fn HEAPBLK_TO_MAPBLOCK(x: u32) -> u32 { x / HEAPBLOCKS_PER_PAGE } pub const fn HEAPBLK_TO_MAPBYTE(x: u32) -> u32 { (x % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE as u32 } pub const fn HEAPBLK_TO_OFFSET(x: u32) -> u32 { (x % HEAPBLOCKS_PER_BYTE as u32) * 
BITS_PER_HEAPBLOCK as u32 } pub const BITS_PER_HEAPBLOCK: u16 = 2; pub const VISIBILITYMAP_ALL_VISIBLE: u8 = 0x01; pub const VISIBILITYMAP_ALL_FROZEN: u8 = 0x02; pub const VISIBILITYMAP_VALID_BITS: u8 = 0x03; // From xact.h pub const XLOG_XACT_COMMIT: u8 = 0x00; pub const XLOG_XACT_PREPARE: u8 = 0x10; pub const XLOG_XACT_ABORT: u8 = 0x20; pub const XLOG_XACT_COMMIT_PREPARED: u8 = 0x30; pub const XLOG_XACT_ABORT_PREPARED: u8 = 0x40; // From standbydefs.h pub const XLOG_RUNNING_XACTS: u8 = 0x10; // From srlu.h pub const SLRU_PAGES_PER_SEGMENT: u32 = 32; pub const SLRU_SEG_SIZE: usize = BLCKSZ as usize * SLRU_PAGES_PER_SEGMENT as usize; /* mask for filtering opcodes out of xl_info */ pub const XLOG_XACT_OPMASK: u8 = 0x70; pub const XLOG_HEAP_OPMASK: u8 = 0x70; /* does this record have a 'xinfo' field or not */ pub const XLOG_XACT_HAS_INFO: u8 = 0x80; /* * The following flags, stored in xinfo, determine which information is * contained in commit/abort records. */ pub const XACT_XINFO_HAS_DBINFO: u32 = 1u32 << 0; pub const XACT_XINFO_HAS_SUBXACTS: u32 = 1u32 << 1; pub const XACT_XINFO_HAS_RELFILENODES: u32 = 1u32 << 2; pub const XACT_XINFO_HAS_INVALS: u32 = 1u32 << 3; pub const XACT_XINFO_HAS_TWOPHASE: u32 = 1u32 << 4; pub const XACT_XINFO_HAS_ORIGIN: u32 = 1u32 << 5; // pub const XACT_XINFO_HAS_AE_LOCKS: u32 = 1u32 << 6; // pub const XACT_XINFO_HAS_GID: u32 = 1u32 << 7; // From pg_control.h and rmgrlist.h pub const XLOG_NEXTOID: u8 = 0x30; pub const XLOG_SWITCH: u8 = 0x40; pub const XLOG_FPI_FOR_HINT: u8 = 0xA0; pub const XLOG_FPI: u8 = 0xB0; // From multixact.h pub const FIRST_MULTIXACT_ID: u32 = 1; pub const MAX_MULTIXACT_ID: u32 = 0xFFFFFFFF; pub const MAX_MULTIXACT_OFFSET: u32 = 0xFFFFFFFF; pub const XLOG_MULTIXACT_ZERO_OFF_PAGE: u8 = 0x00; pub const XLOG_MULTIXACT_ZERO_MEM_PAGE: u8 = 0x10; pub const XLOG_MULTIXACT_CREATE_ID: u8 = 0x20; pub const XLOG_MULTIXACT_TRUNCATE_ID: u8 = 0x30; pub const MULTIXACT_OFFSETS_PER_PAGE: u16 = BLCKSZ / 4; pub const 
MXACT_MEMBER_BITS_PER_XACT: u16 = 8; pub const MXACT_MEMBER_FLAGS_PER_BYTE: u16 = 1; pub const MULTIXACT_FLAGBYTES_PER_GROUP: u16 = 4; pub const MULTIXACT_MEMBERS_PER_MEMBERGROUP: u16 = MULTIXACT_FLAGBYTES_PER_GROUP * MXACT_MEMBER_FLAGS_PER_BYTE; /* size in bytes of a complete group */ pub const MULTIXACT_MEMBERGROUP_SIZE: u16 = 4 * MULTIXACT_MEMBERS_PER_MEMBERGROUP + MULTIXACT_FLAGBYTES_PER_GROUP; pub const MULTIXACT_MEMBERGROUPS_PER_PAGE: u16 = BLCKSZ / MULTIXACT_MEMBERGROUP_SIZE; pub const MULTIXACT_MEMBERS_PER_PAGE: u16 = MULTIXACT_MEMBERGROUPS_PER_PAGE * MULTIXACT_MEMBERS_PER_MEMBERGROUP; // From heapam_xlog.h pub const XLOG_HEAP_INSERT: u8 = 0x00; pub const XLOG_HEAP_DELETE: u8 = 0x10; pub const XLOG_HEAP_UPDATE: u8 = 0x20; pub const XLOG_HEAP_HOT_UPDATE: u8 = 0x40; pub const XLOG_HEAP_LOCK: u8 = 0x60; pub const XLOG_HEAP_INIT_PAGE: u8 = 0x80; pub const XLOG_HEAP2_VISIBLE: u8 = 0x40; pub const XLOG_HEAP2_MULTI_INSERT: u8 = 0x50; pub const XLOG_HEAP2_LOCK_UPDATED: u8 = 0x60; pub const XLH_LOCK_ALL_FROZEN_CLEARED: u8 = 0x01; pub const XLH_INSERT_ALL_FROZEN_SET: u8 = (1 << 5) as u8; pub const XLH_INSERT_ALL_VISIBLE_CLEARED: u8 = (1 << 0) as u8; pub const XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED: u8 = (1 << 0) as u8; pub const XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED: u8 = (1 << 1) as u8; pub const XLH_DELETE_ALL_VISIBLE_CLEARED: u8 = (1 << 0) as u8; // From heapam_xlog.h pub const XLOG_HEAP2_REWRITE: u8 = 0x00; // From replication/message.h pub const XLOG_LOGICAL_MESSAGE: u8 = 0x00; // From rmgrlist.h pub const RM_XLOG_ID: u8 = 0; pub const RM_XACT_ID: u8 = 1; pub const RM_SMGR_ID: u8 = 2; pub const RM_CLOG_ID: u8 = 3; pub const RM_DBASE_ID: u8 = 4; pub const RM_TBLSPC_ID: u8 = 5; pub const RM_MULTIXACT_ID: u8 = 6; pub const RM_RELMAP_ID: u8 = 7; pub const RM_STANDBY_ID: u8 = 8; pub const RM_HEAP2_ID: u8 = 9; pub const RM_HEAP_ID: u8 = 10; pub const RM_REPLORIGIN_ID: u8 = 19; pub const RM_LOGICALMSG_ID: u8 = 21; // from neon_rmgr.h pub const RM_NEON_ID: u8 = 134; pub const 
XLOG_NEON_HEAP_INIT_PAGE: u8 = 0x80; pub const XLOG_NEON_HEAP_INSERT: u8 = 0x00; pub const XLOG_NEON_HEAP_DELETE: u8 = 0x10; pub const XLOG_NEON_HEAP_UPDATE: u8 = 0x20; pub const XLOG_NEON_HEAP_HOT_UPDATE: u8 = 0x30; pub const XLOG_NEON_HEAP_LOCK: u8 = 0x40; pub const XLOG_NEON_HEAP_MULTI_INSERT: u8 = 0x50; pub const XLOG_NEON_HEAP_VISIBLE: u8 = 0x40; // from xlogreader.h pub const XLR_INFO_MASK: u8 = 0x0F; pub const XLR_RMGR_INFO_MASK: u8 = 0xF0; pub const XLOG_TBLSPC_CREATE: u8 = 0x00; pub const XLOG_TBLSPC_DROP: u8 = 0x10; // // from xlogrecord.h // pub const XLR_MAX_BLOCK_ID: u8 = 32; pub const XLR_BLOCK_ID_DATA_SHORT: u8 = 255; pub const XLR_BLOCK_ID_DATA_LONG: u8 = 254; pub const XLR_BLOCK_ID_ORIGIN: u8 = 253; pub const XLR_BLOCK_ID_TOPLEVEL_XID: u8 = 252; pub const BKPBLOCK_FORK_MASK: u8 = 0x0F; pub const _BKPBLOCK_FLAG_MASK: u8 = 0xF0; pub const BKPBLOCK_HAS_IMAGE: u8 = 0x10; /* block data is an XLogRecordBlockImage */ pub const BKPBLOCK_HAS_DATA: u8 = 0x20; pub const BKPBLOCK_WILL_INIT: u8 = 0x40; /* redo will re-init the page */ pub const BKPBLOCK_SAME_REL: u8 = 0x80; /* RelFileNode omitted, same as previous */ /* Information stored in bimg_info */ pub const BKPIMAGE_HAS_HOLE: u8 = 0x01; /* page image has "hole" */ /* From transam.h */ pub const FIRST_NORMAL_TRANSACTION_ID: u32 = 3; pub const INVALID_TRANSACTION_ID: u32 = 0; /* pg_control.h */ pub const XLOG_CHECKPOINT_SHUTDOWN: u8 = 0x00; pub const XLOG_CHECKPOINT_ONLINE: u8 = 0x10; pub const XLOG_PARAMETER_CHANGE: u8 = 0x60; pub const XLOG_END_OF_RECOVERY: u8 = 0x90; /* From xlog.h */ pub const XLOG_REPLORIGIN_SET: u8 = 0x00; pub const XLOG_REPLORIGIN_DROP: u8 = 0x10; /* xlog_internal.h */ pub const XLP_FIRST_IS_CONTRECORD: u16 = 0x0001; pub const XLP_LONG_HEADER: u16 = 0x0002; /* From replication/slot.h */ pub const REPL_SLOT_ON_DISK_OFFSETOF_RESTART_LSN: usize = 4*4 /* offset of `slotdata` in ReplicationSlotOnDisk */ + 64 /* NameData */ + 4*4; /* From fsm_internals.h */ const FSM_NODES_PER_PAGE: usize 
= BLCKSZ as usize - SIZEOF_PAGE_HEADER_DATA - 4; const FSM_NON_LEAF_NODES_PER_PAGE: usize = BLCKSZ as usize / 2 - 1; const FSM_LEAF_NODES_PER_PAGE: usize = FSM_NODES_PER_PAGE - FSM_NON_LEAF_NODES_PER_PAGE; pub const SLOTS_PER_FSM_PAGE: u32 = FSM_LEAF_NODES_PER_PAGE as u32; /* From visibilitymap.c */ pub const VM_MAPSIZE: usize = BLCKSZ as usize - MAXALIGN_SIZE_OF_PAGE_HEADER_DATA; pub const VM_BITS_PER_HEAPBLOCK: usize = 2; pub const VM_HEAPBLOCKS_PER_BYTE: usize = 8 / VM_BITS_PER_HEAPBLOCK; pub const VM_HEAPBLOCKS_PER_PAGE: usize = VM_MAPSIZE * VM_HEAPBLOCKS_PER_BYTE; /* From origin.c */ pub const REPLICATION_STATE_MAGIC: u32 = 0x1257DADE; // Don't include postgresql.conf as it is inconvenient on node start: // we need postgresql.conf before basebackup to synchronize safekeepers // so no point in overwriting it during backup restore. Rest of the files // here are not needed before backup so it is okay to edit them after. pub const PGDATA_SPECIAL_FILES: [&str; 3] = ["pg_hba.conf", "pg_ident.conf", "postgresql.auto.conf"]; pub static PG_HBA: &str = include_str!("../samples/pg_hba.conf");
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/src/pg_constants_v16.rs
libs/postgres_ffi/src/pg_constants_v16.rs
use crate::PgMajorVersion; pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG16; pub const XACT_XINFO_HAS_DROPPED_STATS: u32 = 1u32 << 8; pub const XLOG_DBASE_CREATE_FILE_COPY: u8 = 0x00; pub const XLOG_DBASE_CREATE_WAL_LOG: u8 = 0x10; pub const XLOG_DBASE_DROP: u8 = 0x20; pub const BKPIMAGE_APPLY: u8 = 0x02; /* page image should be restored during replay */ pub const BKPIMAGE_COMPRESS_PGLZ: u8 = 0x04; /* page image is compressed */ pub const BKPIMAGE_COMPRESS_LZ4: u8 = 0x08; /* page image is compressed */ pub const BKPIMAGE_COMPRESS_ZSTD: u8 = 0x10; /* page image is compressed */ pub const SIZEOF_RELMAPFILE: usize = 524; /* sizeof(RelMapFile) in relmapper.c */ pub use super::super::v14::bindings::PGDATA_SUBDIRS; pub fn bkpimg_is_compressed(bimg_info: u8) -> bool { const ANY_COMPRESS_FLAG: u8 = BKPIMAGE_COMPRESS_PGLZ | BKPIMAGE_COMPRESS_LZ4 | BKPIMAGE_COMPRESS_ZSTD; (bimg_info & ANY_COMPRESS_FLAG) != 0 }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/src/pg_constants_v15.rs
libs/postgres_ffi/src/pg_constants_v15.rs
use crate::PgMajorVersion; pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG15; pub const XACT_XINFO_HAS_DROPPED_STATS: u32 = 1u32 << 8; pub const XLOG_DBASE_CREATE_FILE_COPY: u8 = 0x00; pub const XLOG_DBASE_CREATE_WAL_LOG: u8 = 0x10; pub const XLOG_DBASE_DROP: u8 = 0x20; pub const BKPIMAGE_APPLY: u8 = 0x02; /* page image should be restored during replay */ pub const BKPIMAGE_COMPRESS_PGLZ: u8 = 0x04; /* page image is compressed */ pub const BKPIMAGE_COMPRESS_LZ4: u8 = 0x08; /* page image is compressed */ pub const BKPIMAGE_COMPRESS_ZSTD: u8 = 0x10; /* page image is compressed */ pub const SIZEOF_RELMAPFILE: usize = 512; /* sizeof(RelMapFile) in relmapper.c */ pub use super::super::v14::bindings::PGDATA_SUBDIRS; pub fn bkpimg_is_compressed(bimg_info: u8) -> bool { const ANY_COMPRESS_FLAG: u8 = BKPIMAGE_COMPRESS_PGLZ | BKPIMAGE_COMPRESS_LZ4 | BKPIMAGE_COMPRESS_ZSTD; (bimg_info & ANY_COMPRESS_FLAG) != 0 }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/src/pg_constants_v17.rs
libs/postgres_ffi/src/pg_constants_v17.rs
use crate::PgMajorVersion; pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG17; pub const XACT_XINFO_HAS_DROPPED_STATS: u32 = 1u32 << 8; pub const XLOG_DBASE_CREATE_FILE_COPY: u8 = 0x00; pub const XLOG_DBASE_CREATE_WAL_LOG: u8 = 0x10; pub const XLOG_DBASE_DROP: u8 = 0x20; pub const BKPIMAGE_APPLY: u8 = 0x02; /* page image should be restored during replay */ pub const BKPIMAGE_COMPRESS_PGLZ: u8 = 0x04; /* page image is compressed */ pub const BKPIMAGE_COMPRESS_LZ4: u8 = 0x08; /* page image is compressed */ pub const BKPIMAGE_COMPRESS_ZSTD: u8 = 0x10; /* page image is compressed */ pub const SIZEOF_RELMAPFILE: usize = 524; /* sizeof(RelMapFile) in relmapper.c */ // List of subdirectories inside pgdata. // Copied from src/bin/initdb/initdb.c pub const PGDATA_SUBDIRS: [&str; 23] = [ "global", "pg_wal/archive_status", "pg_wal/summaries", "pg_commit_ts", "pg_dynshmem", "pg_notify", "pg_serial", "pg_snapshots", "pg_subtrans", "pg_twophase", "pg_multixact", "pg_multixact/members", "pg_multixact/offsets", "base", "base/1", "pg_replslot", "pg_tblspc", "pg_stat", "pg_stat_tmp", "pg_xact", "pg_logical", "pg_logical/snapshots", "pg_logical/mappings", ]; pub fn bkpimg_is_compressed(bimg_info: u8) -> bool { const ANY_COMPRESS_FLAG: u8 = BKPIMAGE_COMPRESS_PGLZ | BKPIMAGE_COMPRESS_LZ4 | BKPIMAGE_COMPRESS_ZSTD; (bimg_info & ANY_COMPRESS_FLAG) != 0 } pub const XLOG_HEAP2_PRUNE_ON_ACCESS: u8 = 0x10; pub const XLOG_HEAP2_PRUNE_VACUUM_SCAN: u8 = 0x20; pub const XLOG_HEAP2_PRUNE_VACUUM_CLEANUP: u8 = 0x30; pub const XLOG_OVERWRITE_CONTRECORD: u8 = 0xD0; pub const XLOG_CHECKPOINT_REDO: u8 = 0xE0;
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/src/xlog_utils.rs
libs/postgres_ffi/src/xlog_utils.rs
// // This file contains common utilities for dealing with PostgreSQL WAL files and // LSNs. // // Many of these functions have been copied from PostgreSQL, and rewritten in // Rust. That's why they don't follow the usual Rust naming conventions, they // have been named the same as the corresponding PostgreSQL functions instead. // use super::super::waldecoder::WalStreamDecoder; use super::bindings::{ CheckPoint, ControlFileData, DBState_DB_SHUTDOWNED, FullTransactionId, TimeLineID, XLogLongPageHeaderData, XLogPageHeaderData, XLogRecPtr, XLogRecord, XLogSegNo, XLOG_PAGE_MAGIC, MY_PGVERSION }; use postgres_ffi_types::TimestampTz; use super::wal_generator::LogicalMessageGenerator; use crate::pg_constants; use crate::PG_TLI; use crate::{uint32, uint64, Oid}; use crate::{WAL_SEGMENT_SIZE, XLOG_BLCKSZ}; use bytes::BytesMut; use bytes::{Buf, Bytes}; use serde::Serialize; use std::ffi::{CString, OsStr}; use std::fs::File; use std::io::prelude::*; use std::io::ErrorKind; use std::io::SeekFrom; use std::path::Path; use std::time::SystemTime; use utils::bin_ser::DeserializeError; use utils::bin_ser::SerializeError; use utils::lsn::Lsn; pub const XLOG_FNAME_LEN: usize = 24; pub const XLP_BKP_REMOVABLE: u16 = 0x0004; pub const XLP_FIRST_IS_CONTRECORD: u16 = 0x0001; pub const XLP_REM_LEN_OFFS: usize = 2 + 2 + 4 + 8; pub const XLOG_RECORD_CRC_OFFS: usize = 4 + 4 + 8 + 1 + 1 + 2; pub const XLOG_SIZE_OF_XLOG_SHORT_PHD: usize = size_of::<XLogPageHeaderData>(); pub const XLOG_SIZE_OF_XLOG_LONG_PHD: usize = size_of::<XLogLongPageHeaderData>(); pub const XLOG_SIZE_OF_XLOG_RECORD: usize = size_of::<XLogRecord>(); #[allow(clippy::identity_op)] pub const SIZE_OF_XLOG_RECORD_DATA_HEADER_SHORT: usize = 1 * 2; /// Interval of checkpointing metadata file. We should store metadata file to enforce /// predicate that checkpoint.nextXid is larger than any XID in WAL. 
/// But flushing checkpoint file for each transaction seems to be too expensive, /// so XID_CHECKPOINT_INTERVAL is used to forward align nextXid and so perform /// metadata checkpoint only once per XID_CHECKPOINT_INTERVAL transactions. /// XID_CHECKPOINT_INTERVAL should not be larger than BLCKSZ*CLOG_XACTS_PER_BYTE /// in order to let CLOG_TRUNCATE mechanism correctly extend CLOG. const XID_CHECKPOINT_INTERVAL: u32 = 1024; pub fn XLogSegmentsPerXLogId(wal_segsz_bytes: usize) -> XLogSegNo { (0x100000000u64 / wal_segsz_bytes as u64) as XLogSegNo } pub fn XLogSegNoOffsetToRecPtr( segno: XLogSegNo, offset: u32, wal_segsz_bytes: usize, ) -> XLogRecPtr { segno * (wal_segsz_bytes as u64) + (offset as u64) } pub fn XLogFileName(tli: TimeLineID, logSegNo: XLogSegNo, wal_segsz_bytes: usize) -> String { format!( "{:>08X}{:>08X}{:>08X}", tli, logSegNo / XLogSegmentsPerXLogId(wal_segsz_bytes), logSegNo % XLogSegmentsPerXLogId(wal_segsz_bytes) ) } pub fn XLogFromFileName( fname: &OsStr, wal_seg_size: usize, ) -> anyhow::Result<(XLogSegNo, TimeLineID)> { if let Some(fname_str) = fname.to_str() { let tli = u32::from_str_radix(&fname_str[0..8], 16)?; let log = u32::from_str_radix(&fname_str[8..16], 16)? as XLogSegNo; let seg = u32::from_str_radix(&fname_str[16..24], 16)? 
as XLogSegNo; Ok((log * XLogSegmentsPerXLogId(wal_seg_size) + seg, tli)) } else { anyhow::bail!("non-ut8 filename: {:?}", fname); } } pub fn IsXLogFileName(fname: &OsStr) -> bool { if let Some(fname) = fname.to_str() { fname.len() == XLOG_FNAME_LEN && fname.chars().all(|c| c.is_ascii_hexdigit()) } else { false } } pub fn IsPartialXLogFileName(fname: &OsStr) -> bool { if let Some(fname) = fname.to_str() { fname.ends_with(".partial") && IsXLogFileName(OsStr::new(&fname[0..fname.len() - 8])) } else { false } } /// If LSN points to the beginning of the page, then shift it to first record, /// otherwise align on 8-bytes boundary (required for WAL records) pub fn normalize_lsn(lsn: Lsn, seg_sz: usize) -> Lsn { if lsn.0 % XLOG_BLCKSZ as u64 == 0 { let hdr_size = if lsn.0 % seg_sz as u64 == 0 { XLOG_SIZE_OF_XLOG_LONG_PHD } else { XLOG_SIZE_OF_XLOG_SHORT_PHD }; lsn + hdr_size as u64 } else { lsn.align() } } /// Generate a pg_control file, for a basebackup for starting up Postgres at the given LSN /// /// 'pg_control_bytes' and 'checkpoint_bytes' are the contents of those keys persisted in /// the pageserver. They use the same format as the PostgreSQL control file and the /// checkpoint record, but see walingest.rs for how exactly they are kept up to date. /// 'lsn' is the LSN at which we're starting up. /// /// Returns: /// - pg_control file contents /// - system_identifier, extracted from the persisted information /// - true, if we're starting up from a "clean shutdown", i.e. 
if there was a shutdown /// checkpoint at the given LSN pub fn generate_pg_control( pg_control_bytes: &[u8], checkpoint_bytes: &[u8], lsn: Lsn, ) -> anyhow::Result<(Bytes, u64, bool)> { let mut pg_control = ControlFileData::decode(pg_control_bytes)?; let mut checkpoint = CheckPoint::decode(checkpoint_bytes)?; // Generate new pg_control needed for bootstrap // // NB: In the checkpoint struct that we persist in the pageserver, we have a different // convention for the 'redo' field than in PostgreSQL: On a shutdown checkpoint, // 'redo' points the *end* of the checkpoint WAL record. On PostgreSQL, it points to // the beginning. Furthermore, on an online checkpoint, 'redo' is set to 0. // // We didn't always have this convention however, and old persisted records will have // old REDO values that point to some old LSN. // // The upshot is that if 'redo' is equal to the "current" LSN, there was a shutdown // checkpoint record at that point in WAL, with no new WAL records after it. That case // can be treated as starting from a clean shutdown. All other cases are treated as // non-clean shutdown. In Neon, we don't do WAL replay at startup in either case, so // that distinction doesn't matter very much. As of this writing, it only affects // whether the persisted pg_stats information can be used or not. // // In the Checkpoint struct in the returned pg_control file, the redo pointer is // always set to the LSN we're starting at, to hint that no WAL replay is required. // (There's some neon-specific code in Postgres startup to make that work, though. // Just setting the redo pointer is not sufficient.) let was_shutdown = Lsn(checkpoint.redo) == lsn; checkpoint.redo = normalize_lsn(lsn, WAL_SEGMENT_SIZE).0; // We use DBState_DB_SHUTDOWNED even if it was not a clean shutdown. The // neon-specific code at postgres startup ignores the state stored in the control // file, similar to archive recovery in standalone PostgreSQL. 
Similarly, the // checkPoint pointer is ignored, so just set it to 0. pg_control.checkPoint = 0; pg_control.checkPointCopy = checkpoint; pg_control.state = DBState_DB_SHUTDOWNED; Ok((pg_control.encode(), pg_control.system_identifier, was_shutdown)) } pub fn get_current_timestamp() -> TimestampTz { to_pg_timestamp(SystemTime::now()) } // Module to reduce the scope of the constants mod timestamp_conversions { use std::time::Duration; use anyhow::Context; use super::*; const UNIX_EPOCH_JDATE: u64 = 2440588; // == date2j(1970, 1, 1) const POSTGRES_EPOCH_JDATE: u64 = 2451545; // == date2j(2000, 1, 1) const SECS_PER_DAY: u64 = 86400; const USECS_PER_SEC: u64 = 1000000; const SECS_DIFF_UNIX_TO_POSTGRES_EPOCH: u64 = (POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY; pub fn to_pg_timestamp(time: SystemTime) -> TimestampTz { match time.duration_since(SystemTime::UNIX_EPOCH) { Ok(n) => { ((n.as_secs() - SECS_DIFF_UNIX_TO_POSTGRES_EPOCH) * USECS_PER_SEC + n.subsec_micros() as u64) as i64 } Err(_) => panic!("SystemTime before UNIX EPOCH!"), } } pub fn try_from_pg_timestamp(time: TimestampTz) -> anyhow::Result<SystemTime> { let time: u64 = time .try_into() .context("timestamp before millenium (postgres epoch)")?; let since_unix_epoch = time + SECS_DIFF_UNIX_TO_POSTGRES_EPOCH * USECS_PER_SEC; SystemTime::UNIX_EPOCH .checked_add(Duration::from_micros(since_unix_epoch)) .context("SystemTime overflow") } } pub use timestamp_conversions::{to_pg_timestamp, try_from_pg_timestamp}; // Returns (aligned) end_lsn of the last record in data_dir with WAL segments. // start_lsn must point to some previously known record boundary (beginning of // the next record). If no valid record after is found, start_lsn is returned // back. pub fn find_end_of_wal( data_dir: &Path, wal_seg_size: usize, start_lsn: Lsn, // start reading WAL at this point; must point at record start_lsn. 
) -> anyhow::Result<Lsn> { let mut result = start_lsn; let mut curr_lsn = start_lsn; let mut buf = [0u8; XLOG_BLCKSZ]; let pg_version = MY_PGVERSION; tracing::debug!("find_end_of_wal PG_VERSION: {}", pg_version); let mut decoder = WalStreamDecoder::new(start_lsn, pg_version); // loop over segments loop { let segno = curr_lsn.segment_number(wal_seg_size); let seg_file_name = XLogFileName(PG_TLI, segno, wal_seg_size); let seg_file_path = data_dir.join(seg_file_name); match open_wal_segment(&seg_file_path)? { None => { // no more segments tracing::debug!( "find_end_of_wal reached end at {:?}, segment {:?} doesn't exist", result, seg_file_path ); return Ok(result); } Some(mut segment) => { let seg_offs = curr_lsn.segment_offset(wal_seg_size); segment.seek(SeekFrom::Start(seg_offs as u64))?; // loop inside segment while curr_lsn.segment_number(wal_seg_size) == segno { let bytes_read = segment.read(&mut buf)?; if bytes_read == 0 { tracing::debug!( "find_end_of_wal reached end at {:?}, EOF in segment {:?} at offset {}", result, seg_file_path, curr_lsn.segment_offset(wal_seg_size) ); return Ok(result); } curr_lsn += bytes_read as u64; decoder.feed_bytes(&buf[0..bytes_read]); // advance result past all completely read records loop { match decoder.poll_decode() { Ok(Some(record)) => result = record.0, Err(e) => { tracing::debug!( "find_end_of_wal reached end at {:?}, decode error: {:?}", result, e ); return Ok(result); } Ok(None) => break, // need more data } } } } } } } // Open .partial or full WAL segment file, if present. 
fn open_wal_segment(seg_file_path: &Path) -> anyhow::Result<Option<File>> { let mut partial_path = seg_file_path.to_owned(); partial_path.set_extension("partial"); match File::open(partial_path) { Ok(file) => Ok(Some(file)), Err(e) => match e.kind() { ErrorKind::NotFound => { // .partial not found, try full match File::open(seg_file_path) { Ok(file) => Ok(Some(file)), Err(e) => match e.kind() { ErrorKind::NotFound => Ok(None), _ => Err(e.into()), }, } } _ => Err(e.into()), }, } } impl XLogRecord { pub fn from_slice(buf: &[u8]) -> Result<XLogRecord, DeserializeError> { use utils::bin_ser::LeSer; XLogRecord::des(buf) } pub fn from_bytes<B: Buf>(buf: &mut B) -> Result<XLogRecord, DeserializeError> { use utils::bin_ser::LeSer; XLogRecord::des_from(&mut buf.reader()) } pub fn encode(&self) -> Result<Bytes, SerializeError> { use utils::bin_ser::LeSer; Ok(self.ser()?.into()) } // Is this record an XLOG_SWITCH record? They need some special processing, pub fn is_xlog_switch_record(&self) -> bool { self.xl_info == pg_constants::XLOG_SWITCH && self.xl_rmid == pg_constants::RM_XLOG_ID } } impl XLogPageHeaderData { pub fn from_bytes<B: Buf>(buf: &mut B) -> Result<XLogPageHeaderData, DeserializeError> { use utils::bin_ser::LeSer; XLogPageHeaderData::des_from(&mut buf.reader()) } pub fn encode(&self) -> Result<Bytes, SerializeError> { use utils::bin_ser::LeSer; self.ser().map(|b| b.into()) } } impl XLogLongPageHeaderData { pub fn from_bytes<B: Buf>(buf: &mut B) -> Result<XLogLongPageHeaderData, DeserializeError> { use utils::bin_ser::LeSer; XLogLongPageHeaderData::des_from(&mut buf.reader()) } pub fn encode(&self) -> Result<Bytes, SerializeError> { use utils::bin_ser::LeSer; self.ser().map(|b| b.into()) } } pub const SIZEOF_CHECKPOINT: usize = size_of::<CheckPoint>(); impl CheckPoint { pub fn encode(&self) -> Result<Bytes, SerializeError> { use utils::bin_ser::LeSer; Ok(self.ser()?.into()) } pub fn decode(buf: &[u8]) -> Result<CheckPoint, DeserializeError> { use 
utils::bin_ser::LeSer; CheckPoint::des(buf) } /// Update next XID based on provided new_xid and stored epoch. /// Next XID should be greater than new_xid. This handles 32-bit /// XID wraparound correctly. /// /// Returns 'true' if the XID was updated. pub fn update_next_xid(&mut self, xid: u32) -> bool { // nextXid should be greater than any XID in WAL, so increment provided XID and check for wraparround. let mut new_xid = std::cmp::max( xid.wrapping_add(1), pg_constants::FIRST_NORMAL_TRANSACTION_ID, ); // To reduce number of metadata checkpoints, we forward align XID on XID_CHECKPOINT_INTERVAL. // XID_CHECKPOINT_INTERVAL should not be larger than BLCKSZ*CLOG_XACTS_PER_BYTE new_xid = new_xid.wrapping_add(XID_CHECKPOINT_INTERVAL - 1) & !(XID_CHECKPOINT_INTERVAL - 1); let full_xid = self.nextXid.value; let old_xid = full_xid as u32; if new_xid.wrapping_sub(old_xid) as i32 > 0 { let mut epoch = full_xid >> 32; if new_xid < old_xid { // wrap-around epoch += 1; } let nextXid = (epoch << 32) | new_xid as u64; if nextXid != self.nextXid.value { self.nextXid = FullTransactionId { value: nextXid }; return true; } } false } /// Advance next multi-XID/offset to those given in arguments. /// /// It's important that this handles wraparound correctly. This should match the /// MultiXactAdvanceNextMXact() logic in PostgreSQL's xlog_redo() function. /// /// Returns 'true' if the Checkpoint was updated. pub fn update_next_multixid(&mut self, multi_xid: u32, multi_offset: u32) -> bool { let mut modified = false; if multi_xid.wrapping_sub(self.nextMulti) as i32 > 0 { self.nextMulti = multi_xid; modified = true; } if multi_offset.wrapping_sub(self.nextMultiOffset) as i32 > 0 { self.nextMultiOffset = multi_offset; modified = true; } modified } } /// Generate new, empty WAL segment, with correct block headers at the first /// page of the segment and the page that contains the given LSN. /// We need this segment to start compute node. 
pub fn generate_wal_segment(segno: u64, system_id: u64, lsn: Lsn) -> Result<Bytes, SerializeError> { let mut seg_buf = BytesMut::with_capacity(WAL_SEGMENT_SIZE); let pageaddr = XLogSegNoOffsetToRecPtr(segno, 0, WAL_SEGMENT_SIZE); let page_off = lsn.block_offset(); let seg_off = lsn.segment_offset(WAL_SEGMENT_SIZE); let first_page_only = seg_off < XLOG_BLCKSZ; // If first records starts in the middle of the page, pretend in page header // there is a fake record which ends where first real record starts. This // makes pg_waldump etc happy. let (shdr_rem_len, infoflags) = if first_page_only && seg_off > 0 { assert!(seg_off >= XLOG_SIZE_OF_XLOG_LONG_PHD); // xlp_rem_len doesn't include page header, hence the subtraction. ( seg_off - XLOG_SIZE_OF_XLOG_LONG_PHD, pg_constants::XLP_FIRST_IS_CONTRECORD, ) } else { (0, 0) }; let hdr = XLogLongPageHeaderData { std: { XLogPageHeaderData { xlp_magic: XLOG_PAGE_MAGIC as u16, xlp_info: pg_constants::XLP_LONG_HEADER | infoflags, xlp_tli: PG_TLI, xlp_pageaddr: pageaddr, xlp_rem_len: shdr_rem_len as u32, ..Default::default() // Put 0 in padding fields. } }, xlp_sysid: system_id, xlp_seg_size: WAL_SEGMENT_SIZE as u32, xlp_xlog_blcksz: XLOG_BLCKSZ as u32, }; let hdr_bytes = hdr.encode()?; seg_buf.extend_from_slice(&hdr_bytes); //zero out the rest of the file seg_buf.resize(WAL_SEGMENT_SIZE, 0); if !first_page_only { let block_offset = lsn.page_offset_in_segment(WAL_SEGMENT_SIZE) as usize; // see comments above about XLP_FIRST_IS_CONTRECORD and xlp_rem_len. let (xlp_rem_len, xlp_info) = if page_off > 0 { assert!(page_off >= XLOG_SIZE_OF_XLOG_SHORT_PHD as u64); ( (page_off - XLOG_SIZE_OF_XLOG_SHORT_PHD as u64) as u32, pg_constants::XLP_FIRST_IS_CONTRECORD, ) } else { (0, 0) }; let header = XLogPageHeaderData { xlp_magic: XLOG_PAGE_MAGIC as u16, xlp_info, xlp_tli: PG_TLI, xlp_pageaddr: lsn.page_lsn().0, xlp_rem_len, ..Default::default() // Put 0 in padding fields. 
}; let hdr_bytes = header.encode()?; debug_assert!(seg_buf.len() > block_offset + hdr_bytes.len()); debug_assert_ne!(block_offset, 0); seg_buf[block_offset..block_offset + hdr_bytes.len()].copy_from_slice(&hdr_bytes[..]); } Ok(seg_buf.freeze()) } #[repr(C)] #[derive(Serialize)] pub struct XlLogicalMessage { pub db_id: Oid, pub transactional: uint32, // bool, takes 4 bytes due to alignment in C structures pub prefix_size: uint64, pub message_size: uint64, } impl XlLogicalMessage { pub fn encode(&self) -> Bytes { use utils::bin_ser::LeSer; self.ser().unwrap().into() } } /// Create new WAL record for non-transactional logical message. /// Used for creating artificial WAL for tests, as LogicalMessage /// record is basically no-op. pub fn encode_logical_message(prefix: &str, message: &str) -> Bytes { // This function can take untrusted input, so discard any NUL bytes in the prefix string. let prefix = CString::new(prefix.replace('\0', "")).expect("no NULs"); let message = message.as_bytes(); LogicalMessageGenerator::new(&prefix, message) .next() .unwrap() .encode(Lsn(0)) } #[cfg(test)] mod tests { use super::*; #[test] fn test_ts_conversion() { let now = SystemTime::now(); let round_trip = try_from_pg_timestamp(to_pg_timestamp(now)).unwrap(); let now_since = now.duration_since(SystemTime::UNIX_EPOCH).unwrap(); let round_trip_since = round_trip.duration_since(SystemTime::UNIX_EPOCH).unwrap(); assert_eq!(now_since.as_micros(), round_trip_since.as_micros()); let now_pg = get_current_timestamp(); let round_trip_pg = to_pg_timestamp(try_from_pg_timestamp(now_pg).unwrap()); assert_eq!(now_pg, round_trip_pg); } // If you need to craft WAL and write tests for this module, put it at wal_craft crate. }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/src/walrecord.rs
libs/postgres_ffi/src/walrecord.rs
//! This module houses types used in decoding of PG WAL //! records. //! //! TODO: Generate separate types for each supported PG version use bytes::{Buf, Bytes}; use postgres_ffi_types::TimestampTz; use serde::{Deserialize, Serialize}; use utils::bin_ser::DeserializeError; use utils::lsn::Lsn; use crate::{ BLCKSZ, BlockNumber, MultiXactId, MultiXactOffset, MultiXactStatus, Oid, PgMajorVersion, RepOriginId, TransactionId, XLOG_SIZE_OF_XLOG_RECORD, XLogRecord, pg_constants, }; #[repr(C)] #[derive(Clone, Debug, Serialize, Deserialize)] pub struct XlMultiXactCreate { pub mid: MultiXactId, /* new MultiXact's ID */ pub moff: MultiXactOffset, /* its starting offset in members file */ pub nmembers: u32, /* number of member XIDs */ pub members: Vec<MultiXactMember>, } impl XlMultiXactCreate { pub fn decode(buf: &mut Bytes) -> XlMultiXactCreate { let mid = buf.get_u32_le(); let moff = buf.get_u32_le(); let nmembers = buf.get_u32_le(); let mut members = Vec::new(); for _ in 0..nmembers { members.push(MultiXactMember::decode(buf)); } XlMultiXactCreate { mid, moff, nmembers, members, } } } #[repr(C)] #[derive(Clone, Debug, Serialize, Deserialize)] pub struct XlMultiXactTruncate { pub oldest_multi_db: Oid, /* to-be-truncated range of multixact offsets */ pub start_trunc_off: MultiXactId, /* just for completeness' sake */ pub end_trunc_off: MultiXactId, /* to-be-truncated range of multixact members */ pub start_trunc_memb: MultiXactOffset, pub end_trunc_memb: MultiXactOffset, } impl XlMultiXactTruncate { pub fn decode(buf: &mut Bytes) -> XlMultiXactTruncate { XlMultiXactTruncate { oldest_multi_db: buf.get_u32_le(), start_trunc_off: buf.get_u32_le(), end_trunc_off: buf.get_u32_le(), start_trunc_memb: buf.get_u32_le(), end_trunc_memb: buf.get_u32_le(), } } } #[repr(C)] #[derive(Clone, Debug, Serialize, Deserialize)] pub struct XlRelmapUpdate { pub dbid: Oid, /* database ID, or 0 for shared map */ pub tsid: Oid, /* database's tablespace, or pg_global */ pub nbytes: i32, /* size of 
relmap data */ } impl XlRelmapUpdate { pub fn decode(buf: &mut Bytes) -> XlRelmapUpdate { XlRelmapUpdate { dbid: buf.get_u32_le(), tsid: buf.get_u32_le(), nbytes: buf.get_i32_le(), } } } #[repr(C)] #[derive(Clone, Debug, Serialize, Deserialize)] pub struct XlReploriginDrop { pub node_id: RepOriginId, } impl XlReploriginDrop { pub fn decode(buf: &mut Bytes) -> XlReploriginDrop { XlReploriginDrop { node_id: buf.get_u16_le(), } } } #[repr(C)] #[derive(Clone, Debug, Serialize, Deserialize)] pub struct XlReploriginSet { pub remote_lsn: Lsn, pub node_id: RepOriginId, } impl XlReploriginSet { pub fn decode(buf: &mut Bytes) -> XlReploriginSet { XlReploriginSet { remote_lsn: Lsn(buf.get_u64_le()), node_id: buf.get_u16_le(), } } } #[repr(C)] #[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub struct RelFileNode { pub spcnode: Oid, /* tablespace */ pub dbnode: Oid, /* database */ pub relnode: Oid, /* relation */ } #[repr(C)] #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct MultiXactMember { pub xid: TransactionId, pub status: MultiXactStatus, } impl MultiXactMember { pub fn decode(buf: &mut Bytes) -> MultiXactMember { MultiXactMember { xid: buf.get_u32_le(), status: buf.get_u32_le(), } } } /// DecodedBkpBlock represents per-page data contained in a WAL record. #[derive(Default)] pub struct DecodedBkpBlock { /* Is this block ref in use? */ //in_use: bool, /* Identify the block this refers to */ pub rnode_spcnode: u32, pub rnode_dbnode: u32, pub rnode_relnode: u32, // Note that we have a few special forknum values for non-rel files. 
pub forknum: u8, pub blkno: u32, /* copy of the fork_flags field from the XLogRecordBlockHeader */ pub flags: u8, /* Information on full-page image, if any */ pub has_image: bool, /* has image, even for consistency checking */ pub apply_image: bool, /* has image that should be restored */ pub will_init: bool, /* record doesn't need previous page version to apply */ //char *bkp_image; pub hole_offset: u16, pub hole_length: u16, pub bimg_offset: u32, pub bimg_len: u16, pub bimg_info: u8, /* Buffer holding the rmgr-specific data associated with this block */ has_data: bool, data_len: u16, } impl DecodedBkpBlock { pub fn new() -> DecodedBkpBlock { Default::default() } } #[derive(Default)] pub struct DecodedWALRecord { pub xl_xid: TransactionId, pub xl_info: u8, pub xl_rmid: u8, pub record: Bytes, // raw XLogRecord pub blocks: Vec<DecodedBkpBlock>, pub main_data_offset: usize, pub origin_id: u16, } impl DecodedWALRecord { /// Check if this WAL record represents a legacy "copy" database creation, which populates new relations /// by reading other existing relations' data blocks. This is more complex to apply than new-style database /// creations which simply include all the desired blocks in the WAL, so we need a helper function to detect this case. 
pub fn is_dbase_create_copy(&self, pg_version: PgMajorVersion) -> bool { if self.xl_rmid == pg_constants::RM_DBASE_ID { let info = self.xl_info & pg_constants::XLR_RMGR_INFO_MASK; match pg_version { PgMajorVersion::PG14 => { // Postgres 14 database creations are always the legacy kind info == crate::v14::bindings::XLOG_DBASE_CREATE } PgMajorVersion::PG15 => info == crate::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY, PgMajorVersion::PG16 => info == crate::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY, PgMajorVersion::PG17 => info == crate::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY, } } else { false } } } /// Main routine to decode a WAL record and figure out which blocks are modified // // See xlogrecord.h for details // The overall layout of an XLOG record is: // Fixed-size header (XLogRecord struct) // XLogRecordBlockHeader struct // If pg_constants::BKPBLOCK_HAS_IMAGE, an XLogRecordBlockImageHeader struct follows // If pg_constants::BKPIMAGE_HAS_HOLE and pg_constants::BKPIMAGE_IS_COMPRESSED, an // XLogRecordBlockCompressHeader struct follows. // If pg_constants::BKPBLOCK_SAME_REL is not set, a RelFileNode follows // BlockNumber follows // XLogRecordBlockHeader struct // ... // XLogRecordDataHeader[Short|Long] struct // block data // block data // ... // main data // // // For performance reasons, the caller provides the DecodedWALRecord struct and the function just fills it in. // It would be more natural for this function to return a DecodedWALRecord as return value, // but reusing the caller-supplied struct avoids an allocation. // This code is in the hot path for digesting incoming WAL, and is very performance sensitive. // pub fn decode_wal_record( record: Bytes, decoded: &mut DecodedWALRecord, pg_version: PgMajorVersion, ) -> anyhow::Result<()> { let mut rnode_spcnode: u32 = 0; let mut rnode_dbnode: u32 = 0; let mut rnode_relnode: u32 = 0; let mut got_rnode = false; let mut origin_id: u16 = 0; let mut buf = record.clone(); // 1. 
Parse XLogRecord struct // FIXME: assume little-endian here let xlogrec = XLogRecord::from_bytes(&mut buf)?; tracing::trace!( "decode_wal_record xl_rmid = {} xl_info = {}", xlogrec.xl_rmid, xlogrec.xl_info ); let remaining: usize = xlogrec.xl_tot_len as usize - XLOG_SIZE_OF_XLOG_RECORD; if buf.remaining() != remaining { //TODO error } let mut max_block_id = 0; let mut blocks_total_len: u32 = 0; let mut main_data_len = 0; let mut datatotal: u32 = 0; decoded.blocks.clear(); // 2. Decode the headers. // XLogRecordBlockHeaders if any, // XLogRecordDataHeader[Short|Long] while buf.remaining() > datatotal as usize { let block_id = buf.get_u8(); match block_id { pg_constants::XLR_BLOCK_ID_DATA_SHORT => { /* XLogRecordDataHeaderShort */ main_data_len = buf.get_u8() as u32; datatotal += main_data_len; } pg_constants::XLR_BLOCK_ID_DATA_LONG => { /* XLogRecordDataHeaderLong */ main_data_len = buf.get_u32_le(); datatotal += main_data_len; } pg_constants::XLR_BLOCK_ID_ORIGIN => { // RepOriginId is uint16 origin_id = buf.get_u16_le(); } pg_constants::XLR_BLOCK_ID_TOPLEVEL_XID => { // TransactionId is uint32 buf.advance(4); } 0..=pg_constants::XLR_MAX_BLOCK_ID => { /* XLogRecordBlockHeader */ let mut blk = DecodedBkpBlock::new(); if block_id <= max_block_id { // TODO //report_invalid_record(state, // "out-of-order block_id %u at %X/%X", // block_id, // (uint32) (state->ReadRecPtr >> 32), // (uint32) state->ReadRecPtr); // goto err; } max_block_id = block_id; let fork_flags: u8 = buf.get_u8(); blk.forknum = fork_flags & pg_constants::BKPBLOCK_FORK_MASK; blk.flags = fork_flags; blk.has_image = (fork_flags & pg_constants::BKPBLOCK_HAS_IMAGE) != 0; blk.has_data = (fork_flags & pg_constants::BKPBLOCK_HAS_DATA) != 0; blk.will_init = (fork_flags & pg_constants::BKPBLOCK_WILL_INIT) != 0; blk.data_len = buf.get_u16_le(); /* TODO cross-check that the HAS_DATA flag is set iff data_length > 0 */ datatotal += blk.data_len as u32; blocks_total_len += blk.data_len as u32; if blk.has_image { 
blk.bimg_len = buf.get_u16_le(); blk.hole_offset = buf.get_u16_le(); blk.bimg_info = buf.get_u8(); blk.apply_image = dispatch_pgversion!( pg_version, (blk.bimg_info & pgv::bindings::BKPIMAGE_APPLY) != 0 ); let blk_img_is_compressed = crate::bkpimage_is_compressed(blk.bimg_info, pg_version); if blk_img_is_compressed { tracing::debug!("compressed block image , pg_version = {}", pg_version); } if blk_img_is_compressed { if blk.bimg_info & pg_constants::BKPIMAGE_HAS_HOLE != 0 { blk.hole_length = buf.get_u16_le(); } else { blk.hole_length = 0; } } else { blk.hole_length = BLCKSZ - blk.bimg_len; } datatotal += blk.bimg_len as u32; blocks_total_len += blk.bimg_len as u32; /* * cross-check that hole_offset > 0, hole_length > 0 and * bimg_len < BLCKSZ if the HAS_HOLE flag is set. */ if blk.bimg_info & pg_constants::BKPIMAGE_HAS_HOLE != 0 && (blk.hole_offset == 0 || blk.hole_length == 0 || blk.bimg_len == BLCKSZ) { // TODO /* report_invalid_record(state, "pg_constants::BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X", (unsigned int) blk->hole_offset, (unsigned int) blk->hole_length, (unsigned int) blk->bimg_len, (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr); goto err; */ } /* * cross-check that hole_offset == 0 and hole_length == 0 if * the HAS_HOLE flag is not set. */ if blk.bimg_info & pg_constants::BKPIMAGE_HAS_HOLE == 0 && (blk.hole_offset != 0 || blk.hole_length != 0) { // TODO /* report_invalid_record(state, "pg_constants::BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X", (unsigned int) blk->hole_offset, (unsigned int) blk->hole_length, (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr); goto err; */ } /* * cross-check that bimg_len < BLCKSZ if the IS_COMPRESSED * flag is set. 
*/ if !blk_img_is_compressed && blk.bimg_len == BLCKSZ { // TODO /* report_invalid_record(state, "pg_constants::BKPIMAGE_IS_COMPRESSED set, but block image length %u at %X/%X", (unsigned int) blk->bimg_len, (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr); goto err; */ } /* * cross-check that bimg_len = BLCKSZ if neither HAS_HOLE nor * IS_COMPRESSED flag is set. */ if blk.bimg_info & pg_constants::BKPIMAGE_HAS_HOLE == 0 && !blk_img_is_compressed && blk.bimg_len != BLCKSZ { // TODO /* report_invalid_record(state, "neither pg_constants::BKPIMAGE_HAS_HOLE nor pg_constants::BKPIMAGE_IS_COMPRESSED set, but block image length is %u at %X/%X", (unsigned int) blk->data_len, (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr); goto err; */ } } if fork_flags & pg_constants::BKPBLOCK_SAME_REL == 0 { rnode_spcnode = buf.get_u32_le(); rnode_dbnode = buf.get_u32_le(); rnode_relnode = buf.get_u32_le(); got_rnode = true; } else if !got_rnode { // TODO /* report_invalid_record(state, "pg_constants::BKPBLOCK_SAME_REL set but no previous rel at %X/%X", (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr); goto err; */ } blk.rnode_spcnode = rnode_spcnode; blk.rnode_dbnode = rnode_dbnode; blk.rnode_relnode = rnode_relnode; blk.blkno = buf.get_u32_le(); tracing::trace!( "this record affects {}/{}/{} blk {}", rnode_spcnode, rnode_dbnode, rnode_relnode, blk.blkno ); decoded.blocks.push(blk); } _ => { // TODO: invalid block_id } } } // 3. Decode blocks. let mut ptr = record.len() - buf.remaining(); for blk in decoded.blocks.iter_mut() { if blk.has_image { blk.bimg_offset = ptr as u32; ptr += blk.bimg_len as usize; } if blk.has_data { ptr += blk.data_len as usize; } } // We don't need them, so just skip blocks_total_len bytes buf.advance(blocks_total_len as usize); assert_eq!(ptr, record.len() - buf.remaining()); let main_data_offset = (xlogrec.xl_tot_len - main_data_len) as usize; // 4. 
Decode main_data if main_data_len > 0 { assert_eq!(buf.remaining(), main_data_len as usize); } decoded.xl_xid = xlogrec.xl_xid; decoded.xl_info = xlogrec.xl_info; decoded.xl_rmid = xlogrec.xl_rmid; decoded.record = record; decoded.origin_id = origin_id; decoded.main_data_offset = main_data_offset; Ok(()) } pub mod v14 { use bytes::{Buf, Bytes}; use crate::{OffsetNumber, TransactionId}; #[repr(C)] #[derive(Debug)] pub struct XlHeapInsert { pub offnum: OffsetNumber, pub flags: u8, } impl XlHeapInsert { pub fn decode(buf: &mut Bytes) -> XlHeapInsert { XlHeapInsert { offnum: buf.get_u16_le(), flags: buf.get_u8(), } } } #[repr(C)] #[derive(Debug)] pub struct XlHeapMultiInsert { pub flags: u8, pub _padding: u8, pub ntuples: u16, } impl XlHeapMultiInsert { pub fn decode(buf: &mut Bytes) -> XlHeapMultiInsert { XlHeapMultiInsert { flags: buf.get_u8(), _padding: buf.get_u8(), ntuples: buf.get_u16_le(), } } } #[repr(C)] #[derive(Debug)] pub struct XlHeapDelete { pub xmax: TransactionId, pub offnum: OffsetNumber, pub _padding: u16, pub t_cid: u32, pub infobits_set: u8, pub flags: u8, } impl XlHeapDelete { pub fn decode(buf: &mut Bytes) -> XlHeapDelete { XlHeapDelete { xmax: buf.get_u32_le(), offnum: buf.get_u16_le(), _padding: buf.get_u16_le(), t_cid: buf.get_u32_le(), infobits_set: buf.get_u8(), flags: buf.get_u8(), } } } #[repr(C)] #[derive(Debug)] pub struct XlHeapUpdate { pub old_xmax: TransactionId, pub old_offnum: OffsetNumber, pub old_infobits_set: u8, pub flags: u8, pub t_cid: u32, pub new_xmax: TransactionId, pub new_offnum: OffsetNumber, } impl XlHeapUpdate { pub fn decode(buf: &mut Bytes) -> XlHeapUpdate { XlHeapUpdate { old_xmax: buf.get_u32_le(), old_offnum: buf.get_u16_le(), old_infobits_set: buf.get_u8(), flags: buf.get_u8(), t_cid: buf.get_u32_le(), new_xmax: buf.get_u32_le(), new_offnum: buf.get_u16_le(), } } } #[repr(C)] #[derive(Debug)] pub struct XlHeapLock { pub locking_xid: TransactionId, pub offnum: OffsetNumber, pub _padding: u16, pub t_cid: u32, pub 
infobits_set: u8, pub flags: u8, } impl XlHeapLock { pub fn decode(buf: &mut Bytes) -> XlHeapLock { XlHeapLock { locking_xid: buf.get_u32_le(), offnum: buf.get_u16_le(), _padding: buf.get_u16_le(), t_cid: buf.get_u32_le(), infobits_set: buf.get_u8(), flags: buf.get_u8(), } } } #[repr(C)] #[derive(Debug)] pub struct XlHeapLockUpdated { pub xmax: TransactionId, pub offnum: OffsetNumber, pub infobits_set: u8, pub flags: u8, } impl XlHeapLockUpdated { pub fn decode(buf: &mut Bytes) -> XlHeapLockUpdated { XlHeapLockUpdated { xmax: buf.get_u32_le(), offnum: buf.get_u16_le(), infobits_set: buf.get_u8(), flags: buf.get_u8(), } } } #[repr(C)] #[derive(Debug)] pub struct XlParameterChange { pub max_connections: i32, pub max_worker_processes: i32, pub max_wal_senders: i32, pub max_prepared_xacts: i32, pub max_locks_per_xact: i32, pub wal_level: i32, pub wal_log_hints: bool, pub track_commit_timestamp: bool, pub _padding: [u8; 2], } impl XlParameterChange { pub fn decode(buf: &mut Bytes) -> XlParameterChange { XlParameterChange { max_connections: buf.get_i32_le(), max_worker_processes: buf.get_i32_le(), max_wal_senders: buf.get_i32_le(), max_prepared_xacts: buf.get_i32_le(), max_locks_per_xact: buf.get_i32_le(), wal_level: buf.get_i32_le(), wal_log_hints: buf.get_u8() != 0, track_commit_timestamp: buf.get_u8() != 0, _padding: [buf.get_u8(), buf.get_u8()], } } } } pub mod v15 { pub use super::v14::{ XlHeapDelete, XlHeapInsert, XlHeapLock, XlHeapLockUpdated, XlHeapMultiInsert, XlHeapUpdate, XlParameterChange, }; } pub mod v16 { use bytes::{Buf, Bytes}; pub use super::v14::{XlHeapInsert, XlHeapLockUpdated, XlHeapMultiInsert, XlParameterChange}; use crate::{OffsetNumber, TransactionId}; pub struct XlHeapDelete { pub xmax: TransactionId, pub offnum: OffsetNumber, pub infobits_set: u8, pub flags: u8, } impl XlHeapDelete { pub fn decode(buf: &mut Bytes) -> XlHeapDelete { XlHeapDelete { xmax: buf.get_u32_le(), offnum: buf.get_u16_le(), infobits_set: buf.get_u8(), flags: buf.get_u8(), 
} } } #[repr(C)] #[derive(Debug)] pub struct XlHeapUpdate { pub old_xmax: TransactionId, pub old_offnum: OffsetNumber, pub old_infobits_set: u8, pub flags: u8, pub new_xmax: TransactionId, pub new_offnum: OffsetNumber, } impl XlHeapUpdate { pub fn decode(buf: &mut Bytes) -> XlHeapUpdate { XlHeapUpdate { old_xmax: buf.get_u32_le(), old_offnum: buf.get_u16_le(), old_infobits_set: buf.get_u8(), flags: buf.get_u8(), new_xmax: buf.get_u32_le(), new_offnum: buf.get_u16_le(), } } } #[repr(C)] #[derive(Debug)] pub struct XlHeapLock { pub locking_xid: TransactionId, pub offnum: OffsetNumber, pub infobits_set: u8, pub flags: u8, } impl XlHeapLock { pub fn decode(buf: &mut Bytes) -> XlHeapLock { XlHeapLock { locking_xid: buf.get_u32_le(), offnum: buf.get_u16_le(), infobits_set: buf.get_u8(), flags: buf.get_u8(), } } } /* Since PG16, we have the Neon RMGR (RM_NEON_ID) to manage Neon-flavored WAL. */ pub mod rm_neon { use bytes::{Buf, Bytes}; use crate::{OffsetNumber, TransactionId}; #[repr(C)] #[derive(Debug)] pub struct XlNeonHeapInsert { pub offnum: OffsetNumber, pub flags: u8, } impl XlNeonHeapInsert { pub fn decode(buf: &mut Bytes) -> XlNeonHeapInsert { XlNeonHeapInsert { offnum: buf.get_u16_le(), flags: buf.get_u8(), } } } #[repr(C)] #[derive(Debug)] pub struct XlNeonHeapMultiInsert { pub flags: u8, pub _padding: u8, pub ntuples: u16, pub t_cid: u32, } impl XlNeonHeapMultiInsert { pub fn decode(buf: &mut Bytes) -> XlNeonHeapMultiInsert { XlNeonHeapMultiInsert { flags: buf.get_u8(), _padding: buf.get_u8(), ntuples: buf.get_u16_le(), t_cid: buf.get_u32_le(), } } } #[repr(C)] #[derive(Debug)] pub struct XlNeonHeapDelete { pub xmax: TransactionId, pub offnum: OffsetNumber, pub infobits_set: u8, pub flags: u8, pub t_cid: u32, } impl XlNeonHeapDelete { pub fn decode(buf: &mut Bytes) -> XlNeonHeapDelete { XlNeonHeapDelete { xmax: buf.get_u32_le(), offnum: buf.get_u16_le(), infobits_set: buf.get_u8(), flags: buf.get_u8(), t_cid: buf.get_u32_le(), } } } #[repr(C)] #[derive(Debug)] 
pub struct XlNeonHeapUpdate { pub old_xmax: TransactionId, pub old_offnum: OffsetNumber, pub old_infobits_set: u8, pub flags: u8, pub t_cid: u32, pub new_xmax: TransactionId, pub new_offnum: OffsetNumber, } impl XlNeonHeapUpdate { pub fn decode(buf: &mut Bytes) -> XlNeonHeapUpdate { XlNeonHeapUpdate { old_xmax: buf.get_u32_le(), old_offnum: buf.get_u16_le(), old_infobits_set: buf.get_u8(), flags: buf.get_u8(), t_cid: buf.get_u32(), new_xmax: buf.get_u32_le(), new_offnum: buf.get_u16_le(), } } } #[repr(C)] #[derive(Debug)] pub struct XlNeonHeapLock { pub locking_xid: TransactionId, pub t_cid: u32, pub offnum: OffsetNumber, pub infobits_set: u8, pub flags: u8, } impl XlNeonHeapLock { pub fn decode(buf: &mut Bytes) -> XlNeonHeapLock { XlNeonHeapLock { locking_xid: buf.get_u32_le(), t_cid: buf.get_u32_le(), offnum: buf.get_u16_le(), infobits_set: buf.get_u8(), flags: buf.get_u8(), } } } } } pub mod v17 { use bytes::{Buf, Bytes}; pub use super::v14::XlHeapLockUpdated; pub use super::v16::{ XlHeapDelete, XlHeapInsert, XlHeapLock, XlHeapMultiInsert, XlHeapUpdate, XlParameterChange, rm_neon, }; pub use crate::TimeLineID; pub use postgres_ffi_types::TimestampTz; #[repr(C)] #[derive(Debug)] pub struct XlEndOfRecovery { pub end_time: TimestampTz, pub this_time_line_id: TimeLineID, pub prev_time_line_id: TimeLineID, pub wal_level: i32, } impl XlEndOfRecovery { pub fn decode(buf: &mut Bytes) -> XlEndOfRecovery { XlEndOfRecovery { end_time: buf.get_i64_le(), this_time_line_id: buf.get_u32_le(), prev_time_line_id: buf.get_u32_le(), wal_level: buf.get_i32_le(), } } } } #[repr(C)] #[derive(Debug)] pub struct XlSmgrCreate { pub rnode: RelFileNode, // FIXME: This is ForkNumber in storage_xlog.h. That's an enum. Does it have // well-defined size? 
pub forknum: u8, } impl XlSmgrCreate { pub fn decode(buf: &mut Bytes) -> XlSmgrCreate { XlSmgrCreate { rnode: RelFileNode { spcnode: buf.get_u32_le(), /* tablespace */ dbnode: buf.get_u32_le(), /* database */ relnode: buf.get_u32_le(), /* relation */ }, forknum: buf.get_u32_le() as u8, } } } #[repr(C)] #[derive(Clone, Debug, Serialize, Deserialize)] pub struct XlSmgrTruncate { pub blkno: BlockNumber, pub rnode: RelFileNode, pub flags: u32, } impl XlSmgrTruncate { pub fn decode(buf: &mut Bytes) -> XlSmgrTruncate { XlSmgrTruncate { blkno: buf.get_u32_le(), rnode: RelFileNode { spcnode: buf.get_u32_le(), /* tablespace */ dbnode: buf.get_u32_le(), /* database */ relnode: buf.get_u32_le(), /* relation */ }, flags: buf.get_u32_le(), } } } #[repr(C)] #[derive(Debug)] pub struct XlCreateDatabase { pub db_id: Oid, pub tablespace_id: Oid, pub src_db_id: Oid, pub src_tablespace_id: Oid, } impl XlCreateDatabase { pub fn decode(buf: &mut Bytes) -> XlCreateDatabase { XlCreateDatabase { db_id: buf.get_u32_le(), tablespace_id: buf.get_u32_le(), src_db_id: buf.get_u32_le(), src_tablespace_id: buf.get_u32_le(), } } } #[repr(C)] #[derive(Debug)] pub struct XlDropDatabase { pub db_id: Oid, pub n_tablespaces: Oid, /* number of tablespace IDs */ pub tablespace_ids: Vec<Oid>, } impl XlDropDatabase { pub fn decode(buf: &mut Bytes) -> XlDropDatabase { let mut rec = XlDropDatabase { db_id: buf.get_u32_le(), n_tablespaces: buf.get_u32_le(), tablespace_ids: Vec::<Oid>::new(), }; for _i in 0..rec.n_tablespaces { let id = buf.get_u32_le(); rec.tablespace_ids.push(id); } rec } } /// /// Note: Parsing some fields is missing, because they're not needed. /// /// This is similar to the xl_xact_parsed_commit and /// xl_xact_parsed_abort structs in PostgreSQL, but we use the same /// struct for commits and aborts. 
/// #[derive(Clone, Debug, Serialize, Deserialize)] pub struct XlXactParsedRecord { pub xid: TransactionId, pub info: u8, pub xact_time: TimestampTz, pub xinfo: u32, pub db_id: Oid, /* MyDatabaseId */ pub ts_id: Oid, /* MyDatabaseTableSpace */ pub subxacts: Vec<TransactionId>, pub xnodes: Vec<RelFileNode>, pub origin_lsn: Lsn, } impl XlXactParsedRecord { /// Decode a XLOG_XACT_COMMIT/ABORT/COMMIT_PREPARED/ABORT_PREPARED /// record. This should agree with the ParseCommitRecord and ParseAbortRecord /// functions in PostgreSQL (in src/backend/access/rmgr/xactdesc.c) pub fn decode(buf: &mut Bytes, mut xid: TransactionId, xl_info: u8) -> XlXactParsedRecord { let info = xl_info & pg_constants::XLOG_XACT_OPMASK; // The record starts with time of commit/abort let xact_time = buf.get_i64_le(); let xinfo = if xl_info & pg_constants::XLOG_XACT_HAS_INFO != 0 { buf.get_u32_le() } else { 0 }; let db_id; let ts_id; if xinfo & pg_constants::XACT_XINFO_HAS_DBINFO != 0 { db_id = buf.get_u32_le(); ts_id = buf.get_u32_le(); } else { db_id = 0; ts_id = 0; } let mut subxacts = Vec::<TransactionId>::new(); if xinfo & pg_constants::XACT_XINFO_HAS_SUBXACTS != 0 { let nsubxacts = buf.get_i32_le(); for _i in 0..nsubxacts { let subxact = buf.get_u32_le(); subxacts.push(subxact); } } let mut xnodes = Vec::<RelFileNode>::new(); if xinfo & pg_constants::XACT_XINFO_HAS_RELFILENODES != 0 { let nrels = buf.get_i32_le(); for _i in 0..nrels { let spcnode = buf.get_u32_le(); let dbnode = buf.get_u32_le();
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/src/waldecoder_handler.rs
libs/postgres_ffi/src/waldecoder_handler.rs
//!
//! Basic WAL stream decoding.
//!
//! This understands the WAL page and record format, enough to figure out where the WAL record
//! boundaries are, and to reassemble WAL records that cross page boundaries.
//!
//! This functionality is needed by both the pageserver and the safekeepers. The pageserver needs
//! to look deeper into the WAL records to also understand which blocks they modify, the code
//! for that is in pageserver/src/walrecord.rs
//!
use super::super::waldecoder::{State, WalDecodeError, WalStreamDecoder};
use super::bindings::{XLogLongPageHeaderData, XLogPageHeaderData, XLogRecord, XLOG_PAGE_MAGIC};
use super::xlog_utils::*;
use crate::WAL_SEGMENT_SIZE;
use bytes::{Buf, BufMut, Bytes, BytesMut};
use crc32c::*;
use std::cmp::min;
use std::num::NonZeroU32;
use utils::lsn::Lsn;

/// Version-specific half of the WAL stream decoder: page-header validation,
/// the record-reassembly state machine, and record finalization (CRC check +
/// next-LSN computation).
pub trait WalStreamDecoderHandler {
    fn validate_page_header(&self, hdr: &XLogPageHeaderData) -> Result<(), WalDecodeError>;
    fn poll_decode_internal(&mut self) -> Result<Option<(Lsn, Bytes)>, WalDecodeError>;
    fn complete_record(&mut self, recordbuf: Bytes) -> Result<(Lsn, Bytes), WalDecodeError>;
}

//
// This is a trick to support several postgres versions simultaneously.
//
// Page decoding code depends on postgres bindings, so it is compiled for each version.
// Thus WalStreamDecoder implements several WalStreamDecoderHandler traits.
// WalStreamDecoder poll_decode() method dispatches to the right handler based on the postgres version.
// Other methods are internal and are not dispatched.
//
// It is similar to having several impl blocks for the same struct,
// but the impls here are in different modules, so need to use a trait.
//
impl WalStreamDecoderHandler for WalStreamDecoder {
    /// Sanity-checks a page header against the decoder's current position and
    /// state: magic number, expected page address, and contrecord consistency
    /// (XLP_FIRST_IS_CONTRECORD / xlp_rem_len must agree with whether we are
    /// mid-record). Panics if called in the SkippingEverything state.
    fn validate_page_header(&self, hdr: &XLogPageHeaderData) -> Result<(), WalDecodeError> {
        // Inner closure returns plain String errors; they are wrapped into
        // WalDecodeError (with the current LSN attached) once, at the bottom.
        let validate_impl = || {
            if hdr.xlp_magic != XLOG_PAGE_MAGIC as u16 {
                return Err(format!(
                    "invalid xlog page header: xlp_magic={}, expected {}",
                    hdr.xlp_magic, XLOG_PAGE_MAGIC
                ));
            }
            if hdr.xlp_pageaddr != self.lsn.0 {
                return Err(format!(
                    "invalid xlog page header: xlp_pageaddr={}, expected {}",
                    hdr.xlp_pageaddr, self.lsn
                ));
            }
            match self.state {
                State::WaitingForRecord => {
                    // Not mid-record: the page must not claim a continuation.
                    if hdr.xlp_info & XLP_FIRST_IS_CONTRECORD != 0 {
                        return Err(
                            "invalid xlog page header: unexpected XLP_FIRST_IS_CONTRECORD".into(),
                        );
                    }
                    if hdr.xlp_rem_len != 0 {
                        return Err(format!(
                            "invalid xlog page header: xlp_rem_len={}, but it's not a contrecord",
                            hdr.xlp_rem_len
                        ));
                    }
                }
                State::ReassemblingRecord { contlen, .. } => {
                    // Mid-record: the page must carry exactly the remaining length.
                    if hdr.xlp_info & XLP_FIRST_IS_CONTRECORD == 0 {
                        return Err(
                            "invalid xlog page header: XLP_FIRST_IS_CONTRECORD expected, not found"
                                .into(),
                        );
                    }
                    if hdr.xlp_rem_len != contlen.get() {
                        return Err(format!(
                            "invalid xlog page header: xlp_rem_len={}, expected {}",
                            hdr.xlp_rem_len,
                            contlen.get()
                        ));
                    }
                }
                State::SkippingEverything { .. } => {
                    panic!("Should not be validating page header in the SkippingEverything state");
                }
            };
            Ok(())
        };
        validate_impl().map_err(|msg| WalDecodeError { msg, lsn: self.lsn })
    }

    /// Attempt to decode another WAL record from the input that has been fed to the
    /// decoder so far.
    ///
    /// Returns one of the following:
    ///     Ok((Lsn, Bytes)): a tuple containing the LSN of next record, and the record itself
    ///     Ok(None): there is not enough data in the input buffer. Feed more by calling the `feed_bytes` function
    ///     Err(WalDecodeError): an error occurred while decoding, meaning the input was invalid.
    ///
    fn poll_decode_internal(&mut self) -> Result<Option<(Lsn, Bytes)>, WalDecodeError> {
        // Run state machine that validates page headers, and reassembles records
        // that cross page boundaries.
        loop {
            // parse and verify page boundaries as we go
            // However, we may have to skip some page headers if we're processing the XLOG_SWITCH record or skipping padding for whatever reason.
            match self.state {
                State::WaitingForRecord | State::ReassemblingRecord { .. } => {
                    if self.lsn.segment_offset(WAL_SEGMENT_SIZE) == 0 {
                        // parse long header (first page of a segment)
                        if self.inputbuf.remaining() < XLOG_SIZE_OF_XLOG_LONG_PHD {
                            return Ok(None);
                        }
                        let hdr = XLogLongPageHeaderData::from_bytes(&mut self.inputbuf).map_err(
                            |e| WalDecodeError {
                                msg: format!("long header deserialization failed {e}"),
                                lsn: self.lsn,
                            },
                        )?;
                        self.validate_page_header(&hdr.std)?;
                        self.lsn += XLOG_SIZE_OF_XLOG_LONG_PHD as u64;
                    } else if self.lsn.block_offset() == 0 {
                        // at a page boundary within a segment: short header
                        if self.inputbuf.remaining() < XLOG_SIZE_OF_XLOG_SHORT_PHD {
                            return Ok(None);
                        }
                        let hdr = XLogPageHeaderData::from_bytes(&mut self.inputbuf).map_err(|e| {
                            WalDecodeError {
                                msg: format!("header deserialization failed {e}"),
                                lsn: self.lsn,
                            }
                        })?;
                        self.validate_page_header(&hdr)?;
                        self.lsn += XLOG_SIZE_OF_XLOG_SHORT_PHD as u64;
                    }
                }
                // Skipped bytes are not validated, headers included.
                State::SkippingEverything { .. } => {}
            }
            // now read page contents
            match &mut self.state {
                State::WaitingForRecord => {
                    // need to have at least the xl_tot_len field
                    if self.inputbuf.remaining() < 4 {
                        return Ok(None);
                    }
                    // peek xl_tot_len at the beginning of the record.
                    // FIXME: assumes little-endian
                    let xl_tot_len = (&self.inputbuf[0..4]).get_u32_le();
                    if (xl_tot_len as usize) < XLOG_SIZE_OF_XLOG_RECORD {
                        return Err(WalDecodeError {
                            msg: format!("invalid xl_tot_len {xl_tot_len}"),
                            lsn: self.lsn,
                        });
                    }
                    // Fast path for the common case that the whole record fits on the page.
                    let pageleft = self.lsn.remaining_in_block() as u32;
                    if self.inputbuf.remaining() >= xl_tot_len as usize && xl_tot_len <= pageleft {
                        self.lsn += xl_tot_len as u64;
                        let recordbuf = self.inputbuf.copy_to_bytes(xl_tot_len as usize);
                        return Ok(Some(self.complete_record(recordbuf)?));
                    } else {
                        // Need to assemble the record from pieces. Remember the size of the
                        // record, and loop back. On next iterations, we will reach the branch
                        // below, and copy the part of the record that was on this or next page(s)
                        // to 'recordbuf'. Subsequent iterations will skip page headers, and
                        // append the continuations from the next pages to 'recordbuf'.
                        self.state = State::ReassemblingRecord {
                            recordbuf: BytesMut::with_capacity(xl_tot_len as usize),
                            contlen: NonZeroU32::new(xl_tot_len).unwrap(),
                        }
                    }
                }
                State::ReassemblingRecord { recordbuf, contlen } => {
                    // we're continuing a record, possibly from previous page.
                    let pageleft = self.lsn.remaining_in_block() as u32;

                    // read the rest of the record, or as much as fits on this page.
                    let n = min(contlen.get(), pageleft) as usize;
                    if self.inputbuf.remaining() < n {
                        return Ok(None);
                    }
                    recordbuf.put(self.inputbuf.split_to(n));
                    self.lsn += n as u64;
                    *contlen = match NonZeroU32::new(contlen.get() - n as u32) {
                        Some(x) => x,
                        None => {
                            // The record is now complete.
                            let recordbuf = std::mem::replace(recordbuf, BytesMut::new()).freeze();
                            return Ok(Some(self.complete_record(recordbuf)?));
                        }
                    }
                }
                State::SkippingEverything { skip_until_lsn } => {
                    // Discard input (alignment padding, or the rest of a segment
                    // after XLOG_SWITCH) up to skip_until_lsn, then resume decoding.
                    assert!(*skip_until_lsn >= self.lsn);
                    let n = skip_until_lsn.0 - self.lsn.0;
                    if self.inputbuf.remaining() < n as usize {
                        return Ok(None);
                    }
                    self.inputbuf.advance(n as usize);
                    self.lsn += n;
                    self.state = State::WaitingForRecord;
                }
            }
        }
    }

    /// Finalizes a fully reassembled record: parses the XLogRecord header,
    /// verifies the CRC32C checksum, and transitions to SkippingEverything so
    /// the next poll skips to the next record's start LSN (segment boundary
    /// for XLOG_SWITCH, 8-byte alignment otherwise).
    fn complete_record(&mut self, recordbuf: Bytes) -> Result<(Lsn, Bytes), WalDecodeError> {
        // We now have a record in the 'recordbuf' local variable.
        let xlogrec =
            XLogRecord::from_slice(&recordbuf[0..XLOG_SIZE_OF_XLOG_RECORD]).map_err(|e| {
                WalDecodeError {
                    msg: format!("xlog record deserialization failed {e}"),
                    lsn: self.lsn,
                }
            })?;

        // CRC covers everything after the xl_crc field first, then the header
        // bytes up to it -- the same order PostgreSQL uses when stamping records.
        let mut crc = 0;
        crc = crc32c_append(crc, &recordbuf[XLOG_RECORD_CRC_OFFS + 4..]);
        crc = crc32c_append(crc, &recordbuf[0..XLOG_RECORD_CRC_OFFS]);
        if crc != xlogrec.xl_crc {
            return Err(WalDecodeError {
                msg: "WAL record crc mismatch".into(),
                lsn: self.lsn,
            });
        }

        // XLOG_SWITCH records are special. If we see one, we need to skip
        // to the next WAL segment.
        let next_lsn = if xlogrec.is_xlog_switch_record() {
            tracing::trace!("saw xlog switch record at {}", self.lsn);
            self.lsn + self.lsn.calc_padding(WAL_SEGMENT_SIZE as u64)
        } else {
            // Pad to an 8-byte boundary
            self.lsn.align()
        };
        self.state = State::SkippingEverything {
            skip_until_lsn: next_lsn,
        };

        // We should return LSN of the next record, not the last byte of this record or
        // the byte immediately after. Note that this handles both XLOG_SWITCH and usual
        // records, the former "spans" until the next WAL segment (see test_xlog_switch).
        Ok((next_lsn, recordbuf))
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/src/wal_generator.rs
libs/postgres_ffi/src/wal_generator.rs
use std::ffi::{CStr, CString}; use bytes::{Bytes, BytesMut}; use crc32c::crc32c_append; use utils::lsn::Lsn; use super::bindings::{RmgrId, XLogLongPageHeaderData, XLogPageHeaderData, XLOG_PAGE_MAGIC}; use super::xlog_utils::{ XlLogicalMessage, XLOG_RECORD_CRC_OFFS, XLOG_SIZE_OF_XLOG_RECORD, XLP_BKP_REMOVABLE, XLP_FIRST_IS_CONTRECORD, }; use super::XLogRecord; use crate::pg_constants::{ RM_LOGICALMSG_ID, XLOG_LOGICAL_MESSAGE, XLP_LONG_HEADER, XLR_BLOCK_ID_DATA_LONG, XLR_BLOCK_ID_DATA_SHORT, }; use crate::{WAL_SEGMENT_SIZE, XLOG_BLCKSZ}; /// A WAL record payload. Will be prefixed by an XLogRecord header when encoded. pub struct Record { pub rmid: RmgrId, pub info: u8, pub data: Bytes, } impl Record { /// Encodes the WAL record including an XLogRecord header. prev_lsn is the start position of /// the previous record in the WAL -- this is ignored by the Safekeeper, but not Postgres. pub fn encode(&self, prev_lsn: Lsn) -> Bytes { // Prefix data with block ID and length. let data_header = Bytes::from(match self.data.len() { 0 => vec![], 1..=255 => vec![XLR_BLOCK_ID_DATA_SHORT, self.data.len() as u8], 256.. => { let len_bytes = (self.data.len() as u32).to_le_bytes(); [&[XLR_BLOCK_ID_DATA_LONG], len_bytes.as_slice()].concat() } }); // Construct the WAL record header. let mut header = XLogRecord { xl_tot_len: (XLOG_SIZE_OF_XLOG_RECORD + data_header.len() + self.data.len()) as u32, xl_xid: 0, xl_prev: prev_lsn.into(), xl_info: self.info, xl_rmid: self.rmid, __bindgen_padding_0: [0; 2], xl_crc: 0, // see below }; // Compute the CRC checksum for the data, and the header up to the CRC field. let mut crc = 0; crc = crc32c_append(crc, &data_header); crc = crc32c_append(crc, &self.data); crc = crc32c_append(crc, &header.encode().unwrap()[0..XLOG_RECORD_CRC_OFFS]); header.xl_crc = crc; // Encode the final header and record. let header = header.encode().unwrap(); [header, data_header, self.data.clone()].concat().into() } } /// Generates WAL record payloads. 
/// /// TODO: currently only provides LogicalMessageGenerator for trivial noop messages. Add a generator /// that creates a table and inserts rows. pub trait RecordGenerator: Iterator<Item = Record> {} impl<I: Iterator<Item = Record>> RecordGenerator for I {} /// Generates binary WAL for use in tests and benchmarks. The provided record generator constructs /// the WAL records. It is used as an iterator which yields encoded bytes for a single WAL record, /// including internal page headers if it spans pages. Concatenating the bytes will yield a /// complete, well-formed WAL, which can be chunked at segment boundaries if desired. Not optimized /// for performance. /// /// The WAL format is version-dependant (see e.g. `XLOG_PAGE_MAGIC`), so make sure to import this /// for the appropriate Postgres version (e.g. `postgres_ffi::v17::wal_generator::WalGenerator`). /// /// A WAL is split into 16 MB segments. Each segment is split into 8 KB pages, with headers. /// Records are arbitrary length, 8-byte aligned, and may span pages. The layout is e.g.: /// /// | Segment 1 | Segment 2 | Segment 3 | /// | Page 1 | Page 2 | Page 3 | Page 4 | Page 5 | Page 6 | Page 7 | Page 8 | Page 9 | /// | R1 | R2 |R3| R4 | R5 | R6 | R7 | R8 | #[derive(Default)] pub struct WalGenerator<R: RecordGenerator> { /// Generates record payloads for the WAL. pub record_generator: R, /// Current LSN to append the next record at. /// /// Callers can modify this (and prev_lsn) to restart generation at a different LSN, but should /// ensure that the LSN is on a valid record boundary (i.e. we can't start appending in the /// middle on an existing record or header, or beyond the end of the existing WAL). pub lsn: Lsn, /// The starting LSN of the previous record. Used in WAL record headers. The Safekeeper doesn't /// care about this, unlike Postgres, but we include it for completeness. pub prev_lsn: Lsn, } impl<R: RecordGenerator> WalGenerator<R> { // Hardcode the sys and timeline ID. 
We can make them configurable if we care about them. const SYS_ID: u64 = 0; const TIMELINE_ID: u32 = 1; /// Creates a new WAL generator with the given record generator. pub fn new(record_generator: R, start_lsn: Lsn) -> WalGenerator<R> { Self { record_generator, lsn: start_lsn, prev_lsn: start_lsn, } } /// Appends a record with an arbitrary payload at the current LSN, then increments the LSN. /// Returns the WAL bytes for the record, including page headers and padding, and the start LSN. fn append_record(&mut self, record: Record) -> (Lsn, Bytes) { let record = record.encode(self.prev_lsn); let record = Self::insert_pages(record, self.lsn); let record = Self::pad_record(record, self.lsn); let lsn = self.lsn; self.prev_lsn = self.lsn; self.lsn += record.len() as u64; (lsn, record) } /// Inserts page headers on 8KB page boundaries. Takes the current LSN position where the record /// is to be appended. fn insert_pages(record: Bytes, mut lsn: Lsn) -> Bytes { // Fast path: record fits in current page, and the page already has a header. if lsn.remaining_in_block() as usize >= record.len() && lsn.block_offset() > 0 { return record; } let mut pages = BytesMut::new(); let mut remaining = record.clone(); // Bytes::clone() is cheap while !remaining.is_empty() { // At new page boundary, inject page header. if lsn.block_offset() == 0 { let mut page_header = XLogPageHeaderData { xlp_magic: XLOG_PAGE_MAGIC as u16, xlp_info: XLP_BKP_REMOVABLE, xlp_tli: Self::TIMELINE_ID, xlp_pageaddr: lsn.0, xlp_rem_len: 0, __bindgen_padding_0: [0; 4], }; // If the record was split across page boundaries, mark as continuation. if remaining.len() < record.len() { page_header.xlp_rem_len = remaining.len() as u32; page_header.xlp_info |= XLP_FIRST_IS_CONTRECORD; } // At start of segment, use a long page header. 
let page_header = if lsn.segment_offset(WAL_SEGMENT_SIZE) == 0 { page_header.xlp_info |= XLP_LONG_HEADER; XLogLongPageHeaderData { std: page_header, xlp_sysid: Self::SYS_ID, xlp_seg_size: WAL_SEGMENT_SIZE as u32, xlp_xlog_blcksz: XLOG_BLCKSZ as u32, } .encode() .unwrap() } else { page_header.encode().unwrap() }; pages.extend_from_slice(&page_header); lsn += page_header.len() as u64; } // Append the record up to the next page boundary, if any. let page_free = lsn.remaining_in_block() as usize; let chunk = remaining.split_to(std::cmp::min(page_free, remaining.len())); pages.extend_from_slice(&chunk); lsn += chunk.len() as u64; } pages.freeze() } /// Records must be 8-byte aligned. Take an encoded record (including any injected page /// boundaries), starting at the given LSN, and add any necessary padding at the end. fn pad_record(record: Bytes, mut lsn: Lsn) -> Bytes { lsn += record.len() as u64; let padding = lsn.calc_padding(8u64) as usize; if padding == 0 { return record; } [record, Bytes::from(vec![0; padding])].concat().into() } } /// Generates WAL records as an iterator. impl<R: RecordGenerator> Iterator for WalGenerator<R> { type Item = (Lsn, Bytes); fn next(&mut self) -> Option<Self::Item> { let record = self.record_generator.next()?; Some(self.append_record(record)) } } /// Generates logical message records (effectively noops) with a fixed message. pub struct LogicalMessageGenerator { prefix: CString, message: Vec<u8>, } impl LogicalMessageGenerator { const DB_ID: u32 = 0; // hardcoded for now const RM_ID: RmgrId = RM_LOGICALMSG_ID; const INFO: u8 = XLOG_LOGICAL_MESSAGE; /// Creates a new LogicalMessageGenerator. pub fn new(prefix: &CStr, message: &[u8]) -> Self { Self { prefix: prefix.to_owned(), message: message.to_owned(), } } /// Encodes a logical message. 
fn encode(prefix: &CStr, message: &[u8]) -> Bytes { let prefix = prefix.to_bytes_with_nul(); let header = XlLogicalMessage { db_id: Self::DB_ID, transactional: 0, prefix_size: prefix.len() as u64, message_size: message.len() as u64, }; [&header.encode(), prefix, message].concat().into() } /// Computes how large a value must be to get a record of the given size. Convenience method to /// construct records of pre-determined size. Panics if the record size is too small. pub fn make_value_size(record_size: usize, prefix: &CStr) -> usize { let xlog_header_size = XLOG_SIZE_OF_XLOG_RECORD; let lm_header_size = size_of::<XlLogicalMessage>(); let prefix_size = prefix.to_bytes_with_nul().len(); let data_header_size = match record_size - xlog_header_size - 2 { 0..=255 => 2, 256..=258 => panic!("impossible record_size {record_size}"), 259.. => 5, }; record_size .checked_sub(xlog_header_size + lm_header_size + prefix_size + data_header_size) .expect("record_size too small") } } impl Iterator for LogicalMessageGenerator { type Item = Record; fn next(&mut self) -> Option<Self::Item> { Some(Record { rmid: Self::RM_ID, info: Self::INFO, data: Self::encode(&self.prefix, &self.message), }) } } impl WalGenerator<LogicalMessageGenerator> { /// Convenience method for appending a WAL record with an arbitrary logical message at the /// current WAL LSN position. Returns the start LSN and resulting WAL bytes. pub fn append_logical_message(&mut self, prefix: &CStr, message: &[u8]) -> (Lsn, Bytes) { let record = Record { rmid: LogicalMessageGenerator::RM_ID, info: LogicalMessageGenerator::INFO, data: LogicalMessageGenerator::encode(prefix, message), }; self.append_record(record) } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/src/controlfile_utils.rs
libs/postgres_ffi/src/controlfile_utils.rs
//! //! Utilities for reading and writing the PostgreSQL control file. //! //! The PostgreSQL control file is one the first things that the PostgreSQL //! server reads when it starts up. It indicates whether the server was shut //! down cleanly, or if it crashed or was restored from online backup so that //! WAL recovery needs to be performed. It also contains a copy of the latest //! checkpoint record and its location in the WAL. //! //! The control file also contains fields for detecting whether the //! data directory is compatible with a postgres binary. That includes //! a version number, configuration options that can be set at //! compilation time like the block size, and the platform's alignment //! and endianness information. (The PostgreSQL on-disk file format is //! not portable across platforms.) //! //! The control file is stored in the PostgreSQL data directory, as //! `global/pg_control`. The data stored in it is designed to be smaller than //! 512 bytes, on the assumption that it can be updated atomically. The actual //! file is larger, 8192 bytes, but the rest of it is just filled with zeros. //! //! See src/include/catalog/pg_control.h in the PostgreSQL sources for more //! information. You can use PostgreSQL's pg_controldata utility to view its //! contents. //! use super::bindings::{ControlFileData, PG_CONTROL_FILE_SIZE}; use anyhow::{bail, Result}; use bytes::{Bytes, BytesMut}; /// Equivalent to sizeof(ControlFileData) in C const SIZEOF_CONTROLDATA: usize = size_of::<ControlFileData>(); impl ControlFileData { /// Compute the offset of the `crc` field within the `ControlFileData` struct. /// Equivalent to offsetof(ControlFileData, crc) in C. const fn pg_control_crc_offset() -> usize { std::mem::offset_of!(ControlFileData, crc) } /// /// Interpret a slice of bytes as a Postgres control file. /// pub fn decode(buf: &[u8]) -> Result<ControlFileData> { use utils::bin_ser::LeSer; // Check that the slice has the expected size. 
The control file is // padded with zeros up to a 512 byte sector size, so accept a // larger size too, so that the caller can just the whole file // contents without knowing the exact size of the struct. if buf.len() < SIZEOF_CONTROLDATA { bail!("control file is too short"); } // Compute the expected CRC of the content. let OFFSETOF_CRC = Self::pg_control_crc_offset(); let expectedcrc = crc32c::crc32c(&buf[0..OFFSETOF_CRC]); // Use serde to deserialize the input as a ControlFileData struct. let controlfile = ControlFileData::des_prefix(buf)?; // Check the CRC if expectedcrc != controlfile.crc { bail!( "invalid CRC in control file: expected {:08X}, was {:08X}", expectedcrc, controlfile.crc ); } Ok(controlfile) } /// /// Convert a struct representing a Postgres control file into raw bytes. /// /// The CRC is recomputed to match the contents of the fields. pub fn encode(&self) -> Bytes { use utils::bin_ser::LeSer; // Serialize into a new buffer. let b = self.ser().unwrap(); // Recompute the CRC let OFFSETOF_CRC = Self::pg_control_crc_offset(); let newcrc = crc32c::crc32c(&b[0..OFFSETOF_CRC]); let mut buf = BytesMut::with_capacity(PG_CONTROL_FILE_SIZE as usize); buf.extend_from_slice(&b[0..OFFSETOF_CRC]); buf.extend_from_slice(&newcrc.to_ne_bytes()); // Fill the rest of the control file with zeros. buf.resize(PG_CONTROL_FILE_SIZE as usize, 0); buf.into() } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/src/relfile_utils.rs
libs/postgres_ffi/src/relfile_utils.rs
//! //! Common utilities for dealing with PostgreSQL relation files. //! use once_cell::sync::OnceCell; use regex::Regex; use postgres_ffi_types::forknum::*; /// Parse a filename of a relation file. Returns (relfilenode, forknum, segno) tuple. /// /// Formats: /// /// ```text /// <oid> /// <oid>_<fork name> /// <oid>.<segment number> /// <oid>_<fork name>.<segment number> /// ``` /// /// See functions relpath() and _mdfd_segpath() in PostgreSQL sources. /// pub fn parse_relfilename(fname: &str) -> Result<(u32, u8, u32), FilePathError> { static RELFILE_RE: OnceCell<Regex> = OnceCell::new(); RELFILE_RE.get_or_init(|| { Regex::new(r"^(?P<relnode>\d+)(_(?P<forkname>[a-z]+))?(\.(?P<segno>\d+))?$").unwrap() }); let caps = RELFILE_RE .get() .unwrap() .captures(fname) .ok_or(FilePathError::InvalidFileName)?; let relnode_str = caps.name("relnode").unwrap().as_str(); let relnode = relnode_str .parse::<u32>() .map_err(|_e| FilePathError::InvalidFileName)?; let forkname = caps.name("forkname").map(|f| f.as_str()); let forknum = forkname_to_number(forkname)?; let segno_match = caps.name("segno"); let segno = if segno_match.is_none() { 0 } else { segno_match .unwrap() .as_str() .parse::<u32>() .map_err(|_e| FilePathError::InvalidFileName)? 
}; Ok((relnode, forknum, segno)) } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_valid_relfilenames() { assert_eq!(parse_relfilename("1234"), Ok((1234, 0, 0))); assert_eq!(parse_relfilename("1234_fsm"), Ok((1234, 1, 0))); assert_eq!(parse_relfilename("1234_vm"), Ok((1234, 2, 0))); assert_eq!(parse_relfilename("1234_init"), Ok((1234, 3, 0))); assert_eq!(parse_relfilename("1234.12"), Ok((1234, 0, 12))); assert_eq!(parse_relfilename("1234_fsm.12"), Ok((1234, 1, 12))); assert_eq!(parse_relfilename("1234_vm.12"), Ok((1234, 2, 12))); assert_eq!(parse_relfilename("1234_init.12"), Ok((1234, 3, 12))); // relfilenode is unsigned, so it can go up to 2^32-1 assert_eq!(parse_relfilename("3147483648"), Ok((3147483648, 0, 0))); } #[test] fn test_parse_invalid_relfilenames() { assert_eq!( parse_relfilename("foo"), Err(FilePathError::InvalidFileName) ); assert_eq!( parse_relfilename("1.2.3"), Err(FilePathError::InvalidFileName) ); assert_eq!( parse_relfilename("1234_invalid"), Err(FilePathError::InvalidForkName) ); assert_eq!( parse_relfilename("1234_"), Err(FilePathError::InvalidFileName) ); // too large for u32 assert_eq!( parse_relfilename("12345678901"), Err(FilePathError::InvalidFileName) ); assert_eq!( parse_relfilename("-1234"), Err(FilePathError::InvalidFileName) ); } #[test] fn test_parse_weird_relfilenames() { // we accept 0 for the relfilenode, but PostgreSQL should never do that. assert_eq!(parse_relfilename("0"), Ok((0, 0, 0))); // PostgreSQL has a limit of 2^32-2 blocks in a table. With 8k block size and // 1 GB segments, the max segment number is 32767. But we accept larger values // currently. assert_eq!(parse_relfilename("1.123456"), Ok((1, 0, 123456))); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/postgres_ffi/benches/waldecoder.rs
libs/postgres_ffi/benches/waldecoder.rs
use std::ffi::CStr; use criterion::{Bencher, Criterion, criterion_group, criterion_main}; use postgres_ffi::v17::wal_generator::LogicalMessageGenerator; use postgres_ffi::v17::waldecoder_handler::WalStreamDecoderHandler; use postgres_ffi::waldecoder::WalStreamDecoder; use postgres_versioninfo::PgMajorVersion; use pprof::criterion::{Output, PProfProfiler}; use utils::lsn::Lsn; const KB: usize = 1024; // Register benchmarks with Criterion. criterion_group!( name = benches; config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); targets = bench_complete_record, ); criterion_main!(benches); /// Benchmarks WalStreamDecoder::complete_record() for a logical message of varying size. fn bench_complete_record(c: &mut Criterion) { let mut g = c.benchmark_group("complete_record"); for size in [64, KB, 8 * KB, 128 * KB] { // Kind of weird to change the group throughput per benchmark, but it's the only way // to vary it per benchmark. It works. g.throughput(criterion::Throughput::Bytes(size as u64)); g.bench_function(format!("size={size}"), |b| run_bench(b, size).unwrap()); } fn run_bench(b: &mut Bencher, size: usize) -> anyhow::Result<()> { const PREFIX: &CStr = c""; let value_size = LogicalMessageGenerator::make_value_size(size, PREFIX); let value = vec![1; value_size]; let mut decoder = WalStreamDecoder::new(Lsn(0), PgMajorVersion::PG17); let msg = LogicalMessageGenerator::new(PREFIX, &value) .next() .unwrap() .encode(Lsn(0)); assert_eq!(msg.len(), size); b.iter(|| { let msg = msg.clone(); // Bytes::clone() is cheap decoder.complete_record(msg).unwrap(); }); Ok(()) } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/serde_percent.rs
libs/utils/src/serde_percent.rs
//! A serde::Deserialize type for percentages. //! //! See [`Percent`] for details. use serde::{Deserialize, Serialize}; /// If the value is not an integer between 0 and 100, /// deserialization fails with a descriptive error. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[serde(transparent)] pub struct Percent(#[serde(deserialize_with = "deserialize_pct_0_to_100")] u8); impl Percent { pub const fn new(pct: u8) -> Option<Self> { if pct <= 100 { Some(Percent(pct)) } else { None } } pub fn get(&self) -> u8 { self.0 } } fn deserialize_pct_0_to_100<'de, D>(deserializer: D) -> Result<u8, D::Error> where D: serde::de::Deserializer<'de>, { let v: u8 = serde::de::Deserialize::deserialize(deserializer)?; if v > 100 { return Err(serde::de::Error::custom( "must be an integer between 0 and 100", )); } Ok(v) } #[cfg(test)] mod tests { use super::Percent; #[derive(serde::Deserialize, serde::Serialize, Debug, PartialEq, Eq)] struct Foo { bar: Percent, } #[test] fn basics() { let input = r#"{ "bar": 50 }"#; let foo: Foo = serde_json::from_str(input).unwrap(); assert_eq!(foo.bar.get(), 50); } #[test] fn null_handling() { let input = r#"{ "bar": null }"#; let res: Result<Foo, _> = serde_json::from_str(input); assert!(res.is_err()); } #[test] fn zero() { let input = r#"{ "bar": 0 }"#; let foo: Foo = serde_json::from_str(input).unwrap(); assert_eq!(foo.bar.get(), 0); } #[test] fn out_of_range_above() { let input = r#"{ "bar": 101 }"#; let res: Result<Foo, _> = serde_json::from_str(input); assert!(res.is_err()); } #[test] fn out_of_range_below() { let input = r#"{ "bar": -1 }"#; let res: Result<Foo, _> = serde_json::from_str(input); assert!(res.is_err()); } #[test] fn float() { let input = r#"{ "bar": 50.5 }"#; let res: Result<Foo, _> = serde_json::from_str(input); assert!(res.is_err()); } #[test] fn string() { let input = r#"{ "bar": "50 %" }"#; let res: Result<Foo, _> = serde_json::from_str(input); assert!(res.is_err()); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/backoff.rs
libs/utils/src/backoff.rs
use std::fmt::{Debug, Display}; use std::time::Duration; use futures::Future; use tokio_util::sync::CancellationToken; pub const DEFAULT_BASE_BACKOFF_SECONDS: f64 = 0.1; pub const DEFAULT_MAX_BACKOFF_SECONDS: f64 = 3.0; pub async fn exponential_backoff( n: u32, base_increment: f64, max_seconds: f64, cancel: &CancellationToken, ) { let backoff_duration_seconds = exponential_backoff_duration_seconds(n, base_increment, max_seconds); if backoff_duration_seconds > 0.0 { tracing::info!( "Backoff: waiting {backoff_duration_seconds} seconds before processing with the task", ); drop( tokio::time::timeout( std::time::Duration::from_secs_f64(backoff_duration_seconds), cancel.cancelled(), ) .await, ) } } pub fn exponential_backoff_duration(n: u32, base_increment: f64, max_seconds: f64) -> Duration { let seconds = exponential_backoff_duration_seconds(n, base_increment, max_seconds); Duration::from_secs_f64(seconds) } pub fn exponential_backoff_duration_seconds(n: u32, base_increment: f64, max_seconds: f64) -> f64 { if n == 0 { 0.0 } else { (1.0 + base_increment).powf(f64::from(n)).min(max_seconds) } } /// Retries passed operation until one of the following conditions are met: /// - encountered error is considered as permanent (non-retryable) /// - retries have been exhausted /// - cancellation token has been cancelled /// /// `is_permanent` closure should be used to provide distinction between permanent/non-permanent /// errors. When attempts cross `warn_threshold` function starts to emit log warnings. /// `description` argument is added to log messages. Its value should identify the `op` is doing /// `cancel` cancels new attempts and the backoff sleep. /// /// If attempts fail, they are being logged with `{:#}` which works for anyhow, but does not work /// for any other error type. Final failed attempt is logged with `{:?}`. /// /// Returns `None` if cancellation was noticed during backoff or the terminal result. 
pub async fn retry<T, O, F, E>( mut op: O, is_permanent: impl Fn(&E) -> bool, warn_threshold: u32, max_retries: u32, description: &str, cancel: &CancellationToken, ) -> Option<Result<T, E>> where // Not std::error::Error because anyhow::Error doesnt implement it. // For context see https://github.com/dtolnay/anyhow/issues/63 E: Display + Debug + 'static, O: FnMut() -> F, F: Future<Output = Result<T, E>>, { let mut attempts = 0; loop { if cancel.is_cancelled() { return None; } let result = op().await; match &result { Ok(_) => { if attempts > 0 { tracing::info!("{description} succeeded after {attempts} retries"); } return Some(result); } // These are "permanent" errors that should not be retried. Err(e) if is_permanent(e) => { return Some(result); } // Assume that any other failure might be transient, and the operation might // succeed if we just keep trying. Err(err) if attempts < warn_threshold => { tracing::info!("{description} failed, will retry (attempt {attempts}): {err:#}"); } Err(err) if attempts < max_retries => { tracing::warn!("{description} failed, will retry (attempt {attempts}): {err:#}"); } Err(err) => { // Operation failed `max_attempts` times. Time to give up. 
tracing::warn!( "{description} still failed after {attempts} retries, giving up: {err:?}" ); return Some(result); } } // sleep and retry exponential_backoff( attempts, DEFAULT_BASE_BACKOFF_SECONDS, DEFAULT_MAX_BACKOFF_SECONDS, cancel, ) .await; attempts += 1; } } #[cfg(test)] mod tests { use std::io; use tokio::sync::Mutex; use super::*; #[test] fn backoff_defaults_produce_growing_backoff_sequence() { let mut current_backoff_value = None; for i in 0..10_000 { let new_backoff_value = exponential_backoff_duration_seconds( i, DEFAULT_BASE_BACKOFF_SECONDS, DEFAULT_MAX_BACKOFF_SECONDS, ); if let Some(old_backoff_value) = current_backoff_value.replace(new_backoff_value) { assert!( old_backoff_value <= new_backoff_value, "{i}th backoff value {new_backoff_value} is smaller than the previous one {old_backoff_value}" ) } } assert_eq!( current_backoff_value.expect("Should have produced backoff values to compare"), DEFAULT_MAX_BACKOFF_SECONDS, "Given big enough of retries, backoff should reach its allowed max value" ); } #[tokio::test(start_paused = true)] async fn retry_always_error() { let count = Mutex::new(0); retry( || async { *count.lock().await += 1; Result::<(), io::Error>::Err(io::Error::from(io::ErrorKind::Other)) }, |_e| false, 1, 1, "work", &CancellationToken::new(), ) .await .expect("not cancelled") .expect_err("it can only fail"); assert_eq!(*count.lock().await, 2); } #[tokio::test(start_paused = true)] async fn retry_ok_after_err() { let count = Mutex::new(0); retry( || async { let mut locked = count.lock().await; if *locked > 1 { Ok(()) } else { *locked += 1; Err(io::Error::from(io::ErrorKind::Other)) } }, |_e| false, 2, 2, "work", &CancellationToken::new(), ) .await .expect("not cancelled") .expect("success on second try"); } #[tokio::test(start_paused = true)] async fn dont_retry_permanent_errors() { let count = Mutex::new(0); let _ = retry( || async { let mut locked = count.lock().await; if *locked > 1 { Ok(()) } else { *locked += 1; 
Err(io::Error::from(io::ErrorKind::Other)) } }, |_e| true, 2, 2, "work", &CancellationToken::new(), ) .await .expect("was not cancellation") .expect_err("it was permanent error"); assert_eq!(*count.lock().await, 1); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/pid_file.rs
libs/utils/src/pid_file.rs
//! Abstraction to create & read pidfiles. //! //! A pidfile is a file in the filesystem that stores a process's PID. //! Its purpose is to implement a singleton behavior where only //! one process of some "kind" is supposed to be running at a given time. //! The "kind" is identified by the pidfile. //! //! During process startup, the process that is supposed to be a singleton //! must [claim][`claim_for_current_process`] the pidfile first. //! If that is unsuccessful, the process must not act as the singleton, i.e., //! it must not access any of the resources that only the singleton may access. //! //! A common need is to signal a running singleton process, e.g., to make //! it shut down and exit. //! For that, we have to [`read`] the pidfile. The result of the `read` operation //! tells us if there is any singleton process, and if so, what PID it has. //! We can then proceed to signal it, although some caveats still apply. //! Read the function-level documentation of [`read`] for that. //! //! ## Never Remove Pidfiles //! //! It would be natural to assume that the process who claimed the pidfile //! should remove it upon exit to avoid leaving a stale pidfile in place. //! However, we already have a reliable way to detect staleness of the pidfile, //! i.e., the `flock` that [claiming][`claim_for_current_process`] puts on it. //! //! And further, removing pidfiles would introduce a **catastrophic race condition** //! where two processes are running that are supposed to be singletons. //! Suppose we were to remove our pidfile during process shutdown. //! Here is how the race plays out: //! - Suppose we have a service called `myservice` with pidfile `myservice.pidfile`. //! - Process `A` starts to shut down. //! - Process `B` is just starting up //! - It `open("myservice.pid", O_WRONLY|O_CREAT)` the file //! - It blocks on `flock` //! - Process `A` removes the pidfile as the last step of its shutdown procedure //! - `unlink("myservice.pid") //! 
- Process `A` exits //! - This releases its `flock` and unblocks `B` //! - Process `B` still has the file descriptor for `myservice.pid` open //! - Process `B` writes its PID into `myservice.pid`. //! - But the `myservice.pid` file has been unlinked, so, there is `myservice.pid` //! in the directory. //! - Process `C` starts //! - It `open("myservice.pid", O_WRONLY|O_CREAT)` which creates a new file (new inode) //! - It `flock`s the file, which, since it's a different file, does not block //! - It writes its PID into the file //! //! At this point, `B` and `C` are running, which is hazardous. //! Morale of the story: don't unlink pidfiles, ever. use std::ops::Deref; use anyhow::Context; use camino::Utf8Path; use nix::unistd::Pid; use crate::lock_file::{self, LockFileRead}; /// Keeps a claim on a pidfile alive until it is dropped. /// Returned by [`claim_for_current_process`]. #[must_use] pub struct PidFileGuard(lock_file::LockFileGuard); impl Deref for PidFileGuard { type Target = lock_file::LockFileGuard; fn deref(&self) -> &Self::Target { &self.0 } } /// Try to claim `path` as a pidfile for the current process. /// /// If another process has already claimed the pidfile, and it is still running, /// this function returns ane error. /// Otherwise, the function `flock`s the file and updates its contents to the /// current process's PID. /// If the update fails, the flock is released and an error returned. /// On success, the function returns a [`PidFileGuard`] to keep the flock alive. /// /// ### Maintaining A Claim /// /// It is the caller's responsibility to maintain the claim. /// The claim ends as soon as the returned guard object is dropped. /// To maintain the claim for the remaining lifetime of the current process, /// use [`std::mem::forget`] or similar. 
pub fn claim_for_current_process(path: &Utf8Path) -> anyhow::Result<PidFileGuard> { let unwritten_lock_file = lock_file::create_exclusive(path).context("lock file")?; // if any of the next steps fail, we drop the file descriptor and thereby release the lock let guard = unwritten_lock_file .write_content(Pid::this().to_string()) .context("write pid to lock file")?; Ok(PidFileGuard(guard)) } /// Returned by [`read`]. pub enum PidFileRead { /// No file exists at the given path. NotExist, /// The given pidfile is currently not claimed by any process. /// To determine this, the [`read`] operation acquired /// an exclusive flock on the file. The lock is still held and responsibility /// to release it is returned through the guard object. /// Before releasing it, other [`claim_for_current_process`] or [`read`] calls /// will fail. /// /// ### Caveats /// /// Do not unlink the pidfile from the filesystem. See module-comment for why. NotHeldByAnyProcess(PidFileGuard), /// The given pidfile is still claimed by another process whose PID is given /// as part of this variant. /// /// ### Caveats /// /// 1. The other process might exit at any time, turning the given PID stale. /// 2. There is a small window in which `claim_for_current_process` has already /// locked the file but not yet updates its contents. [`read`] will return /// this variant here, but with the old file contents, i.e., a stale PID. /// /// The kernel is free to recycle PID once it has been `wait(2)`ed upon by /// its creator. Thus, acting upon a stale PID, e.g., by issuing a `kill` /// system call on it, bears the risk of killing an unrelated process. /// This is an inherent limitation of using pidfiles. /// The only race-free solution is to have a supervisor-process with a lifetime /// that exceeds that of all of its child-processes (e.g., `runit`, `supervisord`). 
LockedByOtherProcess(Pid), } /// Try to read the file at the given path as a pidfile that was previously created /// through [`claim_for_current_process`]. /// /// On success, this function returns a [`PidFileRead`]. /// Check its docs for a description of the meaning of its different variants. pub fn read(pidfile: &Utf8Path) -> anyhow::Result<PidFileRead> { let res = lock_file::read_and_hold_lock_file(pidfile).context("read and hold pid file")?; let ret = match res { LockFileRead::NotExist => PidFileRead::NotExist, LockFileRead::NotHeldByAnyProcess(guard, _) => { PidFileRead::NotHeldByAnyProcess(PidFileGuard(guard)) } LockFileRead::LockedByOtherProcess { not_locked_file: _not_locked_file, content, } => { // XXX the read races with the write in claim_pid_file_for_pid(). // But pids are smaller than a page, so the kernel page cache will lock for us. // The only problem is that we might get the old contents here. // Can only fix that by implementing some scheme that downgrades the // exclusive lock to shared lock in claim_pid_file_for_pid(). PidFileRead::LockedByOtherProcess(parse_pidfile_content(&content)?) } }; Ok(ret) } fn parse_pidfile_content(content: &str) -> anyhow::Result<Pid> { let pid: i32 = content .parse() .map_err(|_| anyhow::anyhow!("parse pidfile content to PID"))?; if pid < 1 { anyhow::bail!("bad value in pidfile '{pid}'"); } Ok(Pid::from_raw(pid)) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/measured_stream.rs
libs/utils/src/measured_stream.rs
use std::io::Read; use std::pin::Pin; use std::{io, task}; use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; pin_project! { /// This stream tracks all writes and calls user provided /// callback when the underlying stream is flushed. pub struct MeasuredStream<S, R, W> { #[pin] stream: S, write_count: usize, inc_read_count: R, inc_write_count: W, } } impl<S, R, W> MeasuredStream<S, R, W> { pub fn new(stream: S, inc_read_count: R, inc_write_count: W) -> Self { Self { stream, write_count: 0, inc_read_count, inc_write_count, } } } impl<S: AsyncRead + Unpin, R: FnMut(usize), W> AsyncRead for MeasuredStream<S, R, W> { fn poll_read( self: Pin<&mut Self>, context: &mut task::Context<'_>, buf: &mut ReadBuf<'_>, ) -> task::Poll<io::Result<()>> { let this = self.project(); let filled = buf.filled().len(); this.stream.poll_read(context, buf).map_ok(|()| { let cnt = buf.filled().len() - filled; // Increment the read count. (this.inc_read_count)(cnt); }) } } impl<S: AsyncWrite + Unpin, R, W: FnMut(usize)> AsyncWrite for MeasuredStream<S, R, W> { fn poll_write( self: Pin<&mut Self>, context: &mut task::Context<'_>, buf: &[u8], ) -> task::Poll<io::Result<usize>> { let this = self.project(); this.stream.poll_write(context, buf).map_ok(|cnt| { // Increment the write count. *this.write_count += cnt; cnt }) } fn poll_flush( self: Pin<&mut Self>, context: &mut task::Context<'_>, ) -> task::Poll<io::Result<()>> { let this = self.project(); this.stream.poll_flush(context).map_ok(|()| { // Call the user provided callback and reset the write count. (this.inc_write_count)(*this.write_count); *this.write_count = 0; }) } fn poll_shutdown( self: Pin<&mut Self>, context: &mut task::Context<'_>, ) -> task::Poll<io::Result<()>> { self.project().stream.poll_shutdown(context) } } /// Wrapper for a reader that counts bytes read. 
/// /// Similar to MeasuredStream but it's one way and it's sync pub struct MeasuredReader<R: Read> { inner: R, byte_count: usize, } impl<R: Read> MeasuredReader<R> { pub fn new(reader: R) -> Self { Self { inner: reader, byte_count: 0, } } pub fn get_byte_count(&self) -> usize { self.byte_count } } impl<R: Read> Read for MeasuredReader<R> { fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { let result = self.inner.read(buf); if let Ok(n_bytes) = result { self.byte_count += n_bytes } result } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/rate_limit.rs
libs/utils/src/rate_limit.rs
//! A helper to rate limit operations. use std::time::{Duration, Instant}; pub struct RateLimit { last: Option<Instant>, interval: Duration, dropped: u64, } pub struct RateLimitStats(u64); impl std::fmt::Display for RateLimitStats { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{} dropped calls", self.0) } } impl RateLimit { pub const fn new(interval: Duration) -> Self { Self { last: None, interval, dropped: 0, } } /// Call `f` if the rate limit allows. /// Don't call it otherwise. pub fn call<F: FnOnce()>(&mut self, f: F) { self.call2(|_| f()) } pub fn call2<F: FnOnce(RateLimitStats)>(&mut self, f: F) { let now = Instant::now(); match self.last { Some(last) if now - last <= self.interval => { // ratelimit self.dropped += 1; } _ => { self.last = Some(now); f(RateLimitStats(self.dropped)); self.dropped = 0; } } } } #[cfg(test)] mod tests { use std::sync::atomic::AtomicUsize; #[test] fn basics() { use std::sync::atomic::Ordering::Relaxed; use std::time::Duration; use super::RateLimit; let called = AtomicUsize::new(0); let mut f = RateLimit::new(Duration::from_millis(100)); let cl = || { called.fetch_add(1, Relaxed); }; f.call(cl); assert_eq!(called.load(Relaxed), 1); f.call(cl); assert_eq!(called.load(Relaxed), 1); f.call(cl); assert_eq!(called.load(Relaxed), 1); std::thread::sleep(Duration::from_millis(100)); f.call(cl); assert_eq!(called.load(Relaxed), 2); f.call(cl); assert_eq!(called.load(Relaxed), 2); std::thread::sleep(Duration::from_millis(100)); f.call(cl); assert_eq!(called.load(Relaxed), 3); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/lib.rs
libs/utils/src/lib.rs
//! `utils` is intended to be a place to put code that is shared //! between other crates in this repository. #![deny(clippy::undocumented_unsafe_blocks)] pub mod backoff; /// `Lsn` type implements common tasks on Log Sequence Numbers pub mod lsn; /// SeqWait allows waiting for a future sequence number to arrive pub mod seqwait; /// A simple Read-Copy-Update implementation. pub mod simple_rcu; /// append only ordered map implemented with a Vec pub mod vec_map; pub mod bin_ser; // helper functions for creating and fsyncing pub mod crashsafe; // common authentication routines pub mod auth; // utility functions and helper traits for unified unique id generation/serialization etc. pub mod id; // utility functions to obtain reachable IP addresses in PS/SK nodes. pub mod ip_address; pub mod shard; mod hex; pub use hex::Hex; // definition of the Generation type for pageserver attachment APIs pub mod generation; // common log initialisation routine pub mod logging; pub mod lock_file; pub mod pid_file; // Utility for binding TcpListeners with proper socket options. pub mod tcp_listener; // Default signal handling pub mod sentry_init; pub mod signals; pub mod fs_ext; pub mod measured_stream; pub mod serde_percent; pub mod serde_regex; pub mod serde_system_time; pub mod pageserver_feedback; pub mod postgres_client; pub mod tracing_span_assert; pub mod leaky_bucket; pub mod rate_limit; /// Simple once-barrier and a guard which keeps barrier awaiting. pub mod completion; /// Reporting utilities pub mod error; /// async timeout helper pub mod timeout; pub mod span; pub mod sync; pub mod failpoint_support; pub mod yielding_loop; pub mod zstd; pub mod env; pub mod poison; pub mod toml_edit_ext; pub mod circuit_breaker; pub mod try_rcu; pub mod guard_arc_swap; pub mod elapsed_accum; #[cfg(target_os = "linux")] pub mod linux_socket_ioctl; pub mod metrics_collector; // Re-export used in macro. Avoids adding git-version as dep in target crates. 
#[doc(hidden)] pub use git_version; /// This is a shortcut to embed git sha into binaries and avoid copying the same build script to all packages /// /// we have several cases: /// * building locally from git repo /// * building in CI from git repo /// * building in docker (either in CI or locally) /// /// One thing to note is that .git is not available in docker (and it is bad to include it there). /// When building locally, the `git_version` is used to query .git. When building on CI and docker, /// we don't build the actual PR branch commits, but always a "phantom" would be merge commit to /// the target branch -- the actual PR commit from which we build from is supplied as GIT_VERSION /// environment variable. /// /// We ended up with this compromise between phantom would be merge commits vs. pull request branch /// heads due to old logs becoming more reliable (github could gc the phantom merge commit /// anytime) in #4641. /// /// To avoid running buildscript every recompilation, we use rerun-if-env-changed option. /// So the build script will be run only when GIT_VERSION envvar has changed. /// /// Why not to use buildscript to get git commit sha directly without procmacro from different crate? /// Caching and workspaces complicates that. In case `utils` is not /// recompiled due to caching then version may become outdated. /// git_version crate handles that case by introducing a dependency on .git internals via include_bytes! macro, /// so if we changed the index state git_version will pick that up and rerun the macro. /// /// Note that with git_version prefix is `git:` and in case of git version from env its `git-env:`. /// /// ############################################################################################# /// TODO this macro is not the way the library is intended to be used, see <https://github.com/neondatabase/neon/issues/1565> for details. 
/// We used `cachepot` to reduce our current CI build times: <https://github.com/neondatabase/cloud/pull/1033#issuecomment-1100935036> /// Yet, it seems to ignore the GIT_VERSION env variable, passed to Docker build, even with build.rs that contains /// `println!("cargo:rerun-if-env-changed=GIT_VERSION");` code for cachepot cache invalidation. /// The problem needs further investigation and regular `const` declaration instead of a macro. #[macro_export] macro_rules! project_git_version { ($const_identifier:ident) => { // this should try GIT_VERSION first only then git_version::git_version! const $const_identifier: &::core::primitive::str = { const __COMMIT_FROM_GIT: &::core::primitive::str = $crate::git_version::git_version! { prefix = "", fallback = "unknown", args = ["--abbrev=40", "--always", "--dirty=-modified"] // always use full sha }; const __ARG: &[&::core::primitive::str; 2] = &match ::core::option_env!("GIT_VERSION") { ::core::option::Option::Some(x) => ["git-env:", x], ::core::option::Option::None => ["git:", __COMMIT_FROM_GIT], }; $crate::__const_format::concatcp!(__ARG[0], __ARG[1]) }; }; } /// This is a shortcut to embed build tag into binaries and avoid copying the same build script to all packages #[macro_export] macro_rules! project_build_tag { ($const_identifier:ident) => { const $const_identifier: &::core::primitive::str = { const __ARG: &[&::core::primitive::str; 2] = &match ::core::option_env!("BUILD_TAG") { ::core::option::Option::Some(x) => ["build_tag-env:", x], ::core::option::Option::None => ["build_tag:", ""], }; $crate::__const_format::concatcp!(__ARG[0], __ARG[1]) }; }; } /// Re-export for `project_git_version` macro #[doc(hidden)] pub use const_format as __const_format; /// Same as `assert!`, but evaluated during compilation and gets optimized out in runtime. #[macro_export] macro_rules! const_assert { ($($args:tt)*) => { const _: () = assert!($($args)*); }; }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/failpoint_support.rs
libs/utils/src/failpoint_support.rs
//! Failpoint support code shared between pageserver and safekeepers. use tokio_util::sync::CancellationToken; /// Declare a failpoint that can use to `pause` failpoint action. /// We don't want to block the executor thread, hence, spawn_blocking + await. /// /// Optionally pass a cancellation token, and this failpoint will drop out of /// its pause when the cancellation token fires. This is useful for testing /// cases where we would like to block something, but test its clean shutdown behavior. /// The macro evaluates to a Result in that case, where Ok(()) is the case /// where the failpoint was not paused, and Err() is the case where cancellation /// token fired while evaluating the failpoint. /// /// Remember to unpause the failpoint in the test; until that happens, one of the /// limited number of spawn_blocking thread pool threads is leaked. #[macro_export] macro_rules! pausable_failpoint { ($name:literal) => {{ if cfg!(feature = "testing") { let cancel = ::tokio_util::sync::CancellationToken::new(); let _ = $crate::pausable_failpoint!($name, &cancel); } }}; ($name:literal, $cancel:expr) => {{ if cfg!(feature = "testing") { let failpoint_fut = ::tokio::task::spawn_blocking({ let current = ::tracing::Span::current(); move || { let _entered = current.entered(); ::tracing::info!("at failpoint {}", $name); ::fail::fail_point!($name); } }); let cancel_fut = async move { $cancel.cancelled().await; }; ::tokio::select! { res = failpoint_fut => { res.expect("spawn_blocking"); // continue with execution Ok(()) }, _ = cancel_fut => { Err(()) } } } else { Ok(()) } }}; } pub use pausable_failpoint; /// use with fail::cfg("$name", "return(2000)") /// /// The effect is similar to a "sleep(2000)" action, i.e. we sleep for the /// specified time (in milliseconds). The main difference is that we use async /// tokio sleep function. Another difference is that we print lines to the log, /// which can be useful in tests to check that the failpoint was hit. 
/// /// Optionally pass a cancellation token, and this failpoint will drop out of /// its sleep when the cancellation token fires. This is useful for testing /// cases where we would like to block something, but test its clean shutdown behavior. #[macro_export] macro_rules! __failpoint_sleep_millis_async { ($name:literal) => {{ // If the failpoint is used with a "return" action, set should_sleep to the // returned value (as string). Otherwise it's set to None. let should_sleep = (|| { ::fail::fail_point!($name, |x| x); ::std::option::Option::None })(); // Sleep if the action was a returned value if let ::std::option::Option::Some(duration_str) = should_sleep { $crate::failpoint_support::failpoint_sleep_helper($name, duration_str).await } }}; ($name:literal, $cancel:expr) => {{ // If the failpoint is used with a "return" action, set should_sleep to the // returned value (as string). Otherwise it's set to None. let should_sleep = (|| { ::fail::fail_point!($name, |x| x); ::std::option::Option::None })(); // Sleep if the action was a returned value if let ::std::option::Option::Some(duration_str) = should_sleep { $crate::failpoint_support::failpoint_sleep_cancellable_helper( $name, duration_str, $cancel, ) .await } }}; } pub use __failpoint_sleep_millis_async as sleep_millis_async; // Helper function used by the macro. (A function has nicer scoping so we // don't need to decorate everything with "::") #[doc(hidden)] pub async fn failpoint_sleep_helper(name: &'static str, duration_str: String) { let millis = duration_str.parse::<u64>().unwrap(); let d = std::time::Duration::from_millis(millis); tracing::info!("failpoint {:?}: sleeping for {:?}", name, d); tokio::time::sleep(d).await; tracing::info!("failpoint {:?}: sleep done", name); } // Helper function used by the macro. 
(A function has nicer scoping so we // don't need to decorate everything with "::") #[doc(hidden)] pub async fn failpoint_sleep_cancellable_helper( name: &'static str, duration_str: String, cancel: &CancellationToken, ) { let millis = duration_str.parse::<u64>().unwrap(); let d = std::time::Duration::from_millis(millis); tracing::info!("failpoint {:?}: sleeping for {:?}", name, d); tokio::time::timeout(d, cancel.cancelled()).await.ok(); tracing::info!("failpoint {:?}: sleep done", name); } /// Initialize the configured failpoints /// /// You must call this function before any concurrent threads do operations. pub fn init() -> fail::FailScenario<'static> { // The failpoints lib provides support for parsing the `FAILPOINTS` env var. // We want non-default behavior for `exit`, though, so, we handle it separately. // // Format for FAILPOINTS is "name=actions" separated by ";". let actions = std::env::var("FAILPOINTS"); if actions.is_ok() { // SAFETY: this function should before any threads start and access env vars concurrently unsafe { std::env::remove_var("FAILPOINTS"); } } else { // let the library handle non-utf8, or nothing for not present } let scenario = fail::FailScenario::setup(); if let Ok(val) = actions { val.split(';') .enumerate() .map(|(i, s)| s.split_once('=').ok_or((i, s))) .for_each(|res| { let (name, actions) = match res { Ok(t) => t, Err((i, s)) => { panic!( "startup failpoints: missing action on the {}th failpoint; try `{s}=return`", i + 1, ); } }; if let Err(e) = apply_failpoint(name, actions) { panic!("startup failpoints: failed to apply failpoint {name}={actions}: {e}"); } }); } scenario } pub fn apply_failpoint(name: &str, actions: &str) -> Result<(), String> { if actions == "exit" { fail::cfg_callback(name, exit_failpoint) } else { fail::cfg(name, actions) } } #[inline(never)] fn exit_failpoint() { tracing::info!("Exit requested by failpoint"); std::process::exit(1); }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/sentry_init.rs
libs/utils/src/sentry_init.rs
use std::borrow::Cow; use std::env; use sentry::ClientInitGuard; pub use sentry::release_name; use tracing::{error, info}; #[must_use] pub fn init_sentry( release_name: Option<Cow<'static, str>>, extra_options: &[(&str, &str)], ) -> Option<ClientInitGuard> { let Ok(dsn) = env::var("SENTRY_DSN") else { info!("not initializing Sentry, no SENTRY_DSN given"); return None; }; let environment = env::var("SENTRY_ENVIRONMENT").unwrap_or_else(|_| "development".into()); let guard = sentry::init(( dsn, sentry::ClientOptions { release: release_name.clone(), environment: Some(environment.clone().into()), ..Default::default() }, )); sentry::configure_scope(|scope| { for &(key, value) in extra_options { scope.set_extra(key, value.into()); } }); if let Some(dsn) = guard.dsn() { info!( "initialized Sentry for project {}, environment {}, release {} (using API {})", dsn.project_id(), environment, release_name.unwrap_or(Cow::Borrowed("None")), dsn.envelope_api_url(), ); } else { // This should panic during sentry::init(), but we may as well cover it. error!("failed to initialize Sentry, invalid DSN"); } Some(guard) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/timeout.rs
libs/utils/src/timeout.rs
use std::time::Duration; use tokio_util::sync::CancellationToken; #[derive(thiserror::Error, Debug)] pub enum TimeoutCancellableError { #[error("Timed out")] Timeout, #[error("Cancelled")] Cancelled, } /// Wrap [`tokio::time::timeout`] with a CancellationToken. /// /// This wrapper is appropriate for any long running operation in a task /// that ought to respect a CancellationToken (which means most tasks). /// /// The only time you should use a bare tokio::timeout is when the future `F` /// itself respects a CancellationToken: otherwise, always use this wrapper /// with your CancellationToken to ensure that your task does not hold up /// graceful shutdown. pub async fn timeout_cancellable<F>( duration: Duration, cancel: &CancellationToken, future: F, ) -> Result<F::Output, TimeoutCancellableError> where F: std::future::Future, { tokio::select!( r = tokio::time::timeout(duration, future) => { r.map_err(|_| TimeoutCancellableError::Timeout) }, _ = cancel.cancelled() => { Err(TimeoutCancellableError::Cancelled) } ) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/lock_file.rs
libs/utils/src/lock_file.rs
//! A module to create and read lock files. //! //! File locking is done using [`nix::fcntl::Flock`] exclusive locks. //! The only consumer of this module is currently //! [`pid_file`](crate::pid_file). See the module-level comment //! there for potential pitfalls with lock files that are used //! to store PIDs (pidfiles). use std::fs; use std::io::{Read, Write}; use std::ops::Deref; use anyhow::Context; use camino::{Utf8Path, Utf8PathBuf}; use nix::errno::Errno::EAGAIN; use nix::fcntl::{Flock, FlockArg}; use crate::crashsafe; /// A handle to an open and flocked, but not-yet-written lock file. /// Returned by [`create_exclusive`]. #[must_use] pub struct UnwrittenLockFile { path: Utf8PathBuf, file: Flock<fs::File>, } /// Returned by [`UnwrittenLockFile::write_content`]. #[must_use] pub struct LockFileGuard(Flock<fs::File>); impl Deref for LockFileGuard { type Target = fs::File; fn deref(&self) -> &Self::Target { &self.0 } } impl UnwrittenLockFile { /// Replace the content of this lock file with the byte representation of `contents`. pub fn write_content(mut self, contents: String) -> anyhow::Result<LockFileGuard> { self.file .set_len(0) .context("Failed to truncate lockfile")?; self.file .write_all(contents.as_bytes()) .with_context(|| format!("Failed to write '{contents}' contents into lockfile"))?; crashsafe::fsync_file_and_parent(&self.path).context("fsync lockfile")?; Ok(LockFileGuard(self.file)) } } /// Creates and opens a lock file in the path, grabs an exclusive flock on it, and returns /// a handle that allows overwriting the locked file's content. /// /// The exclusive lock is released when dropping the returned handle. /// /// It is not an error if the file already exists. /// It is an error if the file is already locked. 
pub fn create_exclusive(lock_file_path: &Utf8Path) -> anyhow::Result<UnwrittenLockFile> { let lock_file = fs::OpenOptions::new() .create(true) // O_CREAT .truncate(true) .write(true) .open(lock_file_path) .context("open lock file")?; let res = Flock::lock(lock_file, FlockArg::LockExclusiveNonblock); match res { Ok(lock_file) => Ok(UnwrittenLockFile { path: lock_file_path.to_owned(), file: lock_file, }), Err((_, EAGAIN)) => anyhow::bail!("file is already locked"), Err((_, e)) => Err(e).context("flock error"), } } /// Returned by [`read_and_hold_lock_file`]. /// Check out the [`pid_file`](crate::pid_file) module for what the variants mean /// and potential caveats if the lock files that are used to store PIDs. pub enum LockFileRead { /// No file exists at the given path. NotExist, /// No other process held the lock file, so we grabbed an flock /// on it and read its contents. /// Release the flock by dropping the [`LockFileGuard`]. NotHeldByAnyProcess(LockFileGuard, String), /// The file exists but another process was holding an flock on it. LockedByOtherProcess { not_locked_file: fs::File, content: String, }, } /// Open & try to lock the lock file at the given `path`, returning a [handle][`LockFileRead`] to /// inspect its content. /// /// It is not an `Err(...)` if the file does not exist or is already locked. /// Check the [`LockFileRead`] variants for details. pub fn read_and_hold_lock_file(path: &Utf8Path) -> anyhow::Result<LockFileRead> { let res = fs::OpenOptions::new().read(true).open(path); let lock_file = match res { Ok(f) => f, Err(e) => match e.kind() { std::io::ErrorKind::NotFound => return Ok(LockFileRead::NotExist), _ => return Err(e).context("open lock file"), }, }; let res = Flock::lock(lock_file, FlockArg::LockExclusiveNonblock); // We need the content regardless of lock success / failure. // But, read it after flock so that, if it succeeded, the content is consistent. 
match res { Ok(mut locked_file) => { let mut content = String::new(); locked_file .read_to_string(&mut content) .context("read lock file")?; Ok(LockFileRead::NotHeldByAnyProcess( LockFileGuard(locked_file), content, )) } Err((mut not_locked_file, EAGAIN)) => { let mut content = String::new(); not_locked_file .read_to_string(&mut content) .context("read lock file")?; Ok(LockFileRead::LockedByOtherProcess { not_locked_file, content, }) } Err((_, e)) => Err(e).context("flock error"), } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/id.rs
libs/utils/src/id.rs
use std::fmt;
use std::num::ParseIntError;
use std::str::FromStr;

use anyhow::Context;
use hex::FromHex;
use rand::Rng;
use serde::de::Visitor;
use serde::{Deserialize, Serialize};
use thiserror::Error;

#[derive(Error, Debug)]
pub enum IdError {
    #[error("invalid id length {0}")]
    SliceParseError(usize),
}

/// Neon ID is a 128-bit random ID.
/// Used to represent various identifiers. Provides handy utility methods and impls.
///
/// NOTE: It (de)serializes as an array of hex bytes, so the string representation would look
/// like `[173,80,132,115,129,226,72,254,170,201,135,108,199,26,228,24]`.
#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
struct Id([u8; 16]);

impl Serialize for Id {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        if serializer.is_human_readable() {
            // Human-readable formats (e.g. JSON) get the hex string form (Display impl).
            serializer.collect_str(self)
        } else {
            // Binary formats get the raw 16-byte array.
            self.0.serialize(serializer)
        }
    }
}

impl<'de> Deserialize<'de> for Id {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // One visitor handles both encodings; the flag only affects the
        // `expecting` message, the `visit_*` methods cover both cases.
        struct IdVisitor {
            is_human_readable_deserializer: bool,
        }

        impl<'de> Visitor<'de> for IdVisitor {
            type Value = Id;

            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                if self.is_human_readable_deserializer {
                    formatter.write_str("value in form of hex string")
                } else {
                    formatter.write_str("value in form of integer array([u8; 16])")
                }
            }

            fn visit_seq<A>(self, seq: A) -> Result<Self::Value, A::Error>
            where
                A: serde::de::SeqAccess<'de>,
            {
                let s = serde::de::value::SeqAccessDeserializer::new(seq);
                let id: [u8; 16] = Deserialize::deserialize(s)?;
                Ok(Id::from(id))
            }

            fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                Id::from_str(v).map_err(E::custom)
            }
        }

        if deserializer.is_human_readable() {
            deserializer.deserialize_str(IdVisitor {
                is_human_readable_deserializer: true,
            })
        } else {
            // Fixed-size tuple matches the binary Serialize impl above.
            deserializer.deserialize_tuple(
                16,
                IdVisitor {
                    is_human_readable_deserializer: false,
                },
            )
        }
    }
}

impl Id {
    /// Build an `Id` from a byte slice; errors unless it is exactly 16 bytes.
    pub fn from_slice(src: &[u8]) -> Result<Id, IdError> {
        if src.len() != 16 {
            return Err(IdError::SliceParseError(src.len()));
        }
        let mut id_array = [0u8; 16];
        id_array.copy_from_slice(src);
        Ok(id_array.into())
    }

    pub fn as_arr(&self) -> [u8; 16] {
        self.0
    }

    /// Generate a new random 128-bit ID.
    pub fn generate() -> Self {
        let mut tli_buf = [0u8; 16];
        rand::rng().fill(&mut tli_buf);
        Id::from(tli_buf)
    }

    /// Lowercase hex encoding of the 16 bytes (32 chars), without allocating
    /// an intermediate per-byte string.
    fn hex_encode(&self) -> String {
        static HEX: &[u8] = b"0123456789abcdef";

        let mut buf = vec![0u8; self.0.len() * 2];
        for (&b, chunk) in self.0.as_ref().iter().zip(buf.chunks_exact_mut(2)) {
            chunk[0] = HEX[((b >> 4) & 0xf) as usize];
            chunk[1] = HEX[(b & 0xf) as usize];
        }

        // SAFETY: vec constructed out of `HEX`, it can only be ascii
        unsafe { String::from_utf8_unchecked(buf) }
    }
}

impl FromStr for Id {
    type Err = hex::FromHexError;

    fn from_str(s: &str) -> Result<Id, Self::Err> {
        Self::from_hex(s)
    }
}

// this is needed for pretty serialization and deserialization of Id's using serde integration with hex crate
impl FromHex for Id {
    type Error = hex::FromHexError;

    fn from_hex<T: AsRef<[u8]>>(hex: T) -> Result<Self, Self::Error> {
        let mut buf: [u8; 16] = [0u8; 16];
        hex::decode_to_slice(hex, &mut buf)?;
        Ok(Id(buf))
    }
}

impl AsRef<[u8]> for Id {
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}

impl From<[u8; 16]> for Id {
    fn from(b: [u8; 16]) -> Self {
        Id(b)
    }
}

impl From<Id> for u128 {
    // NOTE: little-endian interpretation of the bytes.
    fn from(id: Id) -> Self {
        u128::from_le_bytes(id.0)
    }
}

impl fmt::Display for Id {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.hex_encode())
    }
}

impl fmt::Debug for Id {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.hex_encode())
    }
}

// Generates the boilerplate impls (constructors, FromStr/FromHex, conversions,
// Display/Debug delegating to the inner `Id`) for a newtype wrapper over `Id`.
macro_rules! id_newtype {
    ($t:ident) => {
        impl $t {
            pub fn from_slice(src: &[u8]) -> Result<$t, IdError> {
                Ok($t(Id::from_slice(src)?))
            }

            pub fn as_arr(&self) -> [u8; 16] {
                self.0.as_arr()
            }

            pub fn generate() -> Self {
                $t(Id::generate())
            }

            pub const fn from_array(b: [u8; 16]) -> Self {
                $t(Id(b))
            }
        }

        impl FromStr for $t {
            type Err = hex::FromHexError;

            fn from_str(s: &str) -> Result<$t, Self::Err> {
                let value = Id::from_str(s)?;
                Ok($t(value))
            }
        }

        impl From<[u8; 16]> for $t {
            fn from(b: [u8; 16]) -> Self {
                $t(Id::from(b))
            }
        }

        impl FromHex for $t {
            type Error = hex::FromHexError;

            fn from_hex<T: AsRef<[u8]>>(hex: T) -> Result<Self, Self::Error> {
                Ok($t(Id::from_hex(hex)?))
            }
        }

        impl AsRef<[u8]> for $t {
            fn as_ref(&self) -> &[u8] {
                &self.0.0
            }
        }

        impl From<$t> for u128 {
            fn from(id: $t) -> Self {
                u128::from(id.0)
            }
        }

        impl fmt::Display for $t {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                self.0.fmt(f)
            }
        }

        impl fmt::Debug for $t {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                self.0.fmt(f)
            }
        }
    };
}

/// Neon timeline ID.
///
/// They are different from PostgreSQL timeline
/// IDs, but serve a similar purpose: they differentiate
/// between different "histories" of the same cluster. However,
/// PostgreSQL timeline IDs are a bit cumbersome, because they are only
/// 32-bits wide, and they must be in ascending order in any given
/// timeline history. Those limitations mean that we cannot generate a
/// new PostgreSQL timeline ID by just generating a random number. And
/// that in turn is problematic for the "pull/push" workflow, where you
/// have a local copy of a Neon repository, and you periodically sync
/// the local changes with a remote server. When you work "detached"
/// from the remote server, you cannot create a PostgreSQL timeline ID
/// that's guaranteed to be different from all existing timelines in
/// the remote server. For example, if two people are having a clone of
/// the repository on their laptops, and they both create a new branch
/// with different name. What timeline ID would they assign to their
/// branches? If they pick the same one, and later try to push the
/// branches to the same remote server, they will get mixed up.
///
/// To avoid those issues, Neon has its own concept of timelines that
/// is separate from PostgreSQL timelines, and doesn't have those
/// limitations. A Neon timeline is identified by a 128-bit ID, which
/// is usually printed out as a hex string.
///
/// NOTE: It (de)serializes as an array of hex bytes, so the string representation would look
/// like `[173,80,132,115,129,226,72,254,170,201,135,108,199,26,228,24]`.
/// See [`Id`] for alternative ways to serialize it.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd, Serialize, Deserialize)]
pub struct TimelineId(Id);

id_newtype!(TimelineId);

impl TryFrom<Option<&str>> for TimelineId {
    type Error = anyhow::Error;

    // NOTE: `None` becomes "" via unwrap_or_default, which then fails to parse.
    fn try_from(value: Option<&str>) -> Result<Self, Self::Error> {
        value
            .unwrap_or_default()
            .parse::<TimelineId>()
            .with_context(|| format!("Could not parse timeline id from {value:?}"))
    }
}

/// Neon Tenant Id represents identifier of a particular tenant.
/// Is used for distinguishing requests and data belonging to different users.
///
/// NOTE: It (de)serializes as an array of hex bytes, so the string representation would look
/// like `[173,80,132,115,129,226,72,254,170,201,135,108,199,26,228,24]`.
/// See [`Id`] for alternative ways to serialize it.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord)]
pub struct TenantId(Id);

id_newtype!(TenantId);

/// If needed, reuse small string from proxy/src/types.rs
pub type EndpointId = String;

// A pair uniquely identifying Neon instance.
#[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct TenantTimelineId {
    pub tenant_id: TenantId,
    pub timeline_id: TimelineId,
}

impl TenantTimelineId {
    pub fn new(tenant_id: TenantId, timeline_id: TimelineId) -> Self {
        TenantTimelineId {
            tenant_id,
            timeline_id,
        }
    }

    pub fn generate() -> Self {
        Self::new(TenantId::generate(), TimelineId::generate())
    }

    /// All-zero tenant and timeline IDs.
    pub fn empty() -> Self {
        Self::new(TenantId::from([0u8; 16]), TimelineId::from([0u8; 16]))
    }
}

impl fmt::Display for TenantTimelineId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}/{}", self.tenant_id, self.timeline_id)
    }
}

impl FromStr for TenantTimelineId {
    type Err = anyhow::Error;

    // Parses the "<tenant_id>/<timeline_id>" format produced by Display.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut parts = s.split('/');
        let tenant_id = parts
            .next()
            .ok_or_else(|| anyhow::anyhow!("TenantTimelineId must contain tenant_id"))?
            .parse()?;
        let timeline_id = parts
            .next()
            .ok_or_else(|| anyhow::anyhow!("TenantTimelineId must contain timeline_id"))?
            .parse()?;
        if parts.next().is_some() {
            anyhow::bail!("TenantTimelineId must contain only tenant_id and timeline_id");
        }
        Ok(TenantTimelineId::new(tenant_id, timeline_id))
    }
}

// Unique ID of a storage node (safekeeper or pageserver). Supposed to be issued
// by the console.
#[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd, Hash, Debug, Serialize, Deserialize)]
#[serde(transparent)]
pub struct NodeId(pub u64);

impl fmt::Display for NodeId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl FromStr for NodeId {
    type Err = ParseIntError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(NodeId(u64::from_str(s)?))
    }
}

#[cfg(test)]
mod tests {
    use serde_assert::{Deserializer, Serializer, Token, Tokens};

    use super::*;
    use crate::bin_ser::BeSer;

    #[test]
    fn test_id_serde_non_human_readable() {
        let original_id = Id([
            173, 80, 132, 115, 129, 226, 72, 254, 170, 201, 135, 108, 199, 26, 228, 24,
        ]);
        let expected_tokens = Tokens(vec![
            Token::Tuple { len: 16 },
            Token::U8(173),
            Token::U8(80),
            Token::U8(132),
            Token::U8(115),
            Token::U8(129),
            Token::U8(226),
            Token::U8(72),
            Token::U8(254),
            Token::U8(170),
            Token::U8(201),
            Token::U8(135),
            Token::U8(108),
            Token::U8(199),
            Token::U8(26),
            Token::U8(228),
            Token::U8(24),
            Token::TupleEnd,
        ]);

        let serializer = Serializer::builder().is_human_readable(false).build();
        let serialized_tokens = original_id.serialize(&serializer).unwrap();
        assert_eq!(serialized_tokens, expected_tokens);

        let mut deserializer = Deserializer::builder()
            .is_human_readable(false)
            .tokens(serialized_tokens)
            .build();
        let deserialized_id = Id::deserialize(&mut deserializer).unwrap();
        assert_eq!(deserialized_id, original_id);
    }

    #[test]
    fn test_id_serde_human_readable() {
        let original_id = Id([
            173, 80, 132, 115, 129, 226, 72, 254, 170, 201, 135, 108, 199, 26, 228, 24,
        ]);
        let expected_tokens = Tokens(vec![Token::Str(String::from(
            "ad50847381e248feaac9876cc71ae418",
        ))]);

        let serializer = Serializer::builder().is_human_readable(true).build();
        let serialized_tokens = original_id.serialize(&serializer).unwrap();
        assert_eq!(serialized_tokens, expected_tokens);

        let mut deserializer = Deserializer::builder()
            .is_human_readable(true)
            .tokens(Tokens(vec![Token::Str(String::from(
                "ad50847381e248feaac9876cc71ae418",
            ))]))
            .build();
        assert_eq!(Id::deserialize(&mut deserializer).unwrap(), original_id);
    }

    // Round-trips a type through the project's bincode-style BeSer encoding and
    // asserts the wire bytes equal the raw ID bytes.
    macro_rules! roundtrip_type {
        ($type:ty, $expected_bytes:expr) => {{
            let expected_bytes: [u8; 16] = $expected_bytes;
            let original_id = <$type>::from(expected_bytes);

            let ser_bytes = original_id.ser().unwrap();
            assert_eq!(ser_bytes, expected_bytes);

            let des_id = <$type>::des(&ser_bytes).unwrap();
            assert_eq!(des_id, original_id);
        }};
    }

    #[test]
    fn test_id_bincode_serde() {
        let expected_bytes = [
            173, 80, 132, 115, 129, 226, 72, 254, 170, 201, 135, 108, 199, 26, 228, 24,
        ];
        roundtrip_type!(Id, expected_bytes);
    }

    #[test]
    fn test_tenant_id_bincode_serde() {
        let expected_bytes = [
            173, 80, 132, 115, 129, 226, 72, 254, 170, 201, 135, 108, 199, 26, 228, 24,
        ];
        roundtrip_type!(TenantId, expected_bytes);
    }

    #[test]
    fn test_timeline_id_bincode_serde() {
        let expected_bytes = [
            173, 80, 132, 115, 129, 226, 72, 254, 170, 201, 135, 108, 199, 26, 228, 24,
        ];
        roundtrip_type!(TimelineId, expected_bytes);
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/completion.rs
libs/utils/src/completion.rs
use tokio_util::task::TaskTracker; use tokio_util::task::task_tracker::TaskTrackerToken; /// While a reference is kept around, the associated [`Barrier::wait`] will wait. /// /// Can be cloned, moved and kept around in futures as "guard objects". #[derive(Clone)] pub struct Completion { token: TaskTrackerToken, } impl std::fmt::Debug for Completion { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Completion") .field("siblings", &self.token.task_tracker().len()) .finish() } } impl Completion { /// Returns true if this completion is associated with the given barrier. pub fn blocks(&self, barrier: &Barrier) -> bool { TaskTracker::ptr_eq(self.token.task_tracker(), &barrier.0) } pub fn barrier(&self) -> Barrier { Barrier(self.token.task_tracker().clone()) } } /// Barrier will wait until all clones of [`Completion`] have been dropped. #[derive(Clone)] pub struct Barrier(TaskTracker); impl std::fmt::Debug for Barrier { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Barrier") .field("remaining", &self.0.len()) .finish() } } impl Default for Barrier { fn default() -> Self { let (_, rx) = channel(); rx } } impl Barrier { pub async fn wait(self) { self.0.wait().await; } pub async fn maybe_wait(barrier: Option<Barrier>) { if let Some(b) = barrier { b.wait().await } } /// Return true if a call to wait() would complete immediately pub fn is_ready(&self) -> bool { futures::future::FutureExt::now_or_never(self.0.wait()).is_some() } } impl PartialEq for Barrier { fn eq(&self, other: &Self) -> bool { TaskTracker::ptr_eq(&self.0, &other.0) } } impl Eq for Barrier {} /// Create new Guard and Barrier pair. pub fn channel() -> (Completion, Barrier) { let tracker = TaskTracker::new(); // otherwise wait never exits tracker.close(); let token = tracker.token(); (Completion { token }, Barrier(tracker)) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/sync.rs
libs/utils/src/sync.rs
pub mod heavier_once_cell; pub mod duplex; pub mod gate; pub mod spsc_fold;
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/poison.rs
libs/utils/src/poison.rs
//! Protect a piece of state from reuse after it is left in an inconsistent state. //! //! # Example //! //! ``` //! # tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap().block_on(async { //! use utils::poison::Poison; //! use std::time::Duration; //! //! struct State { //! clean: bool, //! } //! let state = tokio::sync::Mutex::new(Poison::new("mystate", State { clean: true })); //! //! let mut mutex_guard = state.lock().await; //! let mut poison_guard = mutex_guard.check_and_arm()?; //! let state = poison_guard.data_mut(); //! state.clean = false; //! // If we get cancelled at this await point, subsequent check_and_arm() calls will fail. //! tokio::time::sleep(Duration::from_secs(10)).await; //! state.clean = true; //! poison_guard.disarm(); //! # Ok::<(), utils::poison::Error>(()) //! # }); //! ``` use tracing::warn; pub struct Poison<T> { what: &'static str, state: State, data: T, } #[derive(Clone, Copy)] enum State { Clean, Armed, Poisoned { at: chrono::DateTime<chrono::Utc> }, } impl<T> Poison<T> { /// We log `what` `warning!` level if the [`Guard`] gets dropped without being [`Guard::disarm`]ed. pub fn new(what: &'static str, data: T) -> Self { Self { what, state: State::Clean, data, } } /// Check for poisoning and return a [`Guard`] that provides access to the wrapped state. pub fn check_and_arm(&mut self) -> Result<Guard<T>, Error> { match self.state { State::Clean => { self.state = State::Armed; Ok(Guard(self)) } State::Armed => unreachable!("transient state"), State::Poisoned { at } => Err(Error::Poisoned { what: self.what, at, }), } } } /// Armed pointer to a [`Poison`]. /// /// Use [`Self::data`] and [`Self::data_mut`] to access the wrapped state. /// Once modifications are done, use [`Self::disarm`]. /// If [`Guard`] gets dropped instead of calling [`Self::disarm`], the state is poisoned /// and subsequent calls to [`Poison::check_and_arm`] will fail with an error. 
pub struct Guard<'a, T>(&'a mut Poison<T>); impl<T> Guard<'_, T> { pub fn data(&self) -> &T { &self.0.data } pub fn data_mut(&mut self) -> &mut T { &mut self.0.data } pub fn disarm(self) { match self.0.state { State::Clean => unreachable!("we set it to Armed in check_and_arm()"), State::Armed => { self.0.state = State::Clean; } State::Poisoned { at } => { unreachable!("we fail check_and_arm() if it's in that state: {at}") } } } } impl<T> Drop for Guard<'_, T> { fn drop(&mut self) { match self.0.state { State::Clean => { // set by disarm() } State::Armed => { // still armed => poison it let at = chrono::Utc::now(); self.0.state = State::Poisoned { at }; warn!(at=?at, "poisoning {}", self.0.what); } State::Poisoned { at } => { unreachable!("we fail check_and_arm() if it's in that state: {at}") } } } } #[derive(thiserror::Error, Debug)] pub enum Error { #[error("poisoned at {at}: {what}")] Poisoned { what: &'static str, at: chrono::DateTime<chrono::Utc>, }, }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/yielding_loop.rs
libs/utils/src/yielding_loop.rs
use tokio_util::sync::CancellationToken; #[derive(thiserror::Error, Debug)] pub enum YieldingLoopError { #[error("Cancelled")] Cancelled, } /// Helper for long synchronous loops, e.g. over all tenants in the system. /// /// Periodically yields to avoid blocking the executor, and after resuming /// checks the provided cancellation token to drop out promptly on shutdown. #[inline(always)] pub async fn yielding_loop<I, T, F>( interval: usize, cancel: &CancellationToken, iter: I, mut visitor: F, ) -> Result<(), YieldingLoopError> where I: Iterator<Item = T>, F: FnMut(T), { for (i, item) in iter.enumerate() { visitor(item); if (i + 1) % interval == 0 { tokio::task::yield_now().await; if cancel.is_cancelled() { return Err(YieldingLoopError::Cancelled); } } } Ok(()) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/generation.rs
libs/utils/src/generation.rs
use std::fmt::Debug;

use serde::{Deserialize, Serialize};

/// Tenant generations are used to provide split-brain safety and allow
/// multiple pageservers to attach the same tenant concurrently.
///
/// See docs/rfcs/025-generation-numbers.md for detail on how generation
/// numbers are used.
#[derive(Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
pub enum Generation {
    // The None Generation is used in the metadata of layers written before generations were
    // introduced. A running Tenant always has a valid generation, but the layer metadata may
    // include None generations.
    //
    // NOTE: variant order matters — derived Ord makes None compare less than
    // any Valid(_), which the `generation_gt` test below relies on.
    None,
    Valid(u32),
}

/// The Generation type represents a number associated with a Tenant, which
/// increments every time the tenant is attached to a new pageserver, or
/// an attached pageserver restarts.
///
/// It is included as a suffix in S3 keys, as a protection against split-brain
/// scenarios where pageservers might otherwise issue conflicting writes to
/// remote storage
impl Generation {
    pub const MAX: Self = Self::Valid(u32::MAX);

    /// Create a new Generation that represents a legacy key format with
    /// no generation suffix
    pub fn none() -> Self {
        Self::None
    }

    pub const fn new(v: u32) -> Self {
        Self::Valid(v)
    }

    pub fn is_none(&self) -> bool {
        matches!(self, Self::None)
    }

    #[track_caller]
    pub fn get_suffix(&self) -> impl std::fmt::Display {
        match self {
            Self::Valid(v) => GenerationFileSuffix(Some(*v)),
            Self::None => GenerationFileSuffix(None),
        }
    }

    /// `suffix` is the part after "-" in a key
    ///
    /// Returns None if parsing was unsuccessful
    pub fn parse_suffix(suffix: &str) -> Option<Generation> {
        // Hex, matching the `-{:08x}` format emitted by get_suffix().
        u32::from_str_radix(suffix, 16).map(Generation::new).ok()
    }

    #[track_caller]
    pub fn previous(&self) -> Generation {
        match self {
            Self::Valid(n) => {
                if *n == 0 {
                    // Since a tenant may be upgraded from a pre-generations state, interpret the "previous" generation
                    // to 0 as being "no generation".
                    Self::None
                } else {
                    Self::Valid(n - 1)
                }
            }
            Self::None => Self::None,
        }
    }

    #[track_caller]
    pub fn next(&self) -> Generation {
        match self {
            // NOTE: will panic on overflow in debug builds if n == u32::MAX.
            Self::Valid(n) => Self::Valid(*n + 1),
            Self::None => Self::Valid(1),
        }
    }

    // NOTE: inherent method, not the `Into` trait: extracts the valid
    // generation number, or None for the legacy no-generation value.
    pub fn into(self) -> Option<u32> {
        if let Self::Valid(v) = self {
            Some(v)
        } else {
            None
        }
    }
}

// Displays as "-{:08x}" for a valid generation, and as the empty string for
// the legacy no-generation case.
struct GenerationFileSuffix(Option<u32>);

impl std::fmt::Display for GenerationFileSuffix {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if let Some(g) = self.0 {
            write!(f, "-{g:08x}")
        } else {
            Ok(())
        }
    }
}

impl Serialize for Generation {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        if let Self::Valid(v) = self {
            v.serialize(serializer)
        } else {
            // We should never be asked to serialize a None. Structures
            // that include an optional generation should convert None to an
            // Option<Generation>::None
            Err(serde::ser::Error::custom(format!(
                "Tried to serialize invalid generation ({self:?})"
            )))
        }
    }
}

impl<'de> Deserialize<'de> for Generation {
    // Deliberately asymmetric with Serialize: a wire value is always a plain
    // u32, so deserialization always yields a Valid generation.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        Ok(Self::Valid(u32::deserialize(deserializer)?))
    }
}

// We intentionally do not implement Display for Generation, to reduce the
// risk of a bug where the generation is used in a format!() string directly
// instead of using get_suffix().
impl Debug for Generation {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Valid(v) => {
                write!(f, "{v:08x}")
            }
            Self::None => {
                write!(f, "<none>")
            }
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn generation_gt() {
        // Important that a None generation compares less than a valid one, during upgrades from
        // pre-generation systems.
        assert!(Generation::none() < Generation::new(0));
        assert!(Generation::none() < Generation::new(1));
    }

    #[test]
    fn suffix_is_stable() {
        use std::fmt::Write as _;

        // the suffix must remain stable through-out the pageserver remote storage evolution and
        // not be changed accidentially without thinking about migration
        let examples = [
            (line!(), Generation::None, ""),
            (line!(), Generation::Valid(0), "-00000000"),
            (line!(), Generation::Valid(u32::MAX), "-ffffffff"),
        ];

        let mut s = String::new();
        for (line, gen_, expected) in examples {
            s.clear();
            write!(s, "{}", &gen_.get_suffix()).expect("string grows");
            assert_eq!(s, expected, "example on {line}");
        }
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/shard.rs
libs/utils/src/shard.rs
//! See `pageserver_api::shard` for description on sharding. use std::ops::RangeInclusive; use std::str::FromStr; use hex::FromHex; use serde::{Deserialize, Serialize}; use crate::id::TenantId; #[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Copy, Serialize, Deserialize, Debug, Hash)] pub struct ShardNumber(pub u8); #[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Copy, Serialize, Deserialize, Debug, Hash)] pub struct ShardCount(pub u8); /// Combination of ShardNumber and ShardCount. /// /// For use within the context of a particular tenant, when we need to know which shard we're /// dealing with, but do not need to know the full ShardIdentity (because we won't be doing /// any page->shard mapping), and do not need to know the fully qualified TenantShardId. #[derive(Eq, PartialEq, PartialOrd, Ord, Clone, Copy, Hash)] pub struct ShardIndex { pub shard_number: ShardNumber, pub shard_count: ShardCount, } /// Stripe size as number of pages. /// /// NB: don't implement Default, so callers don't lazily use it by mistake. See DEFAULT_STRIPE_SIZE. #[derive(Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Debug)] pub struct ShardStripeSize(pub u32); /// Formatting helper, for generating the `shard_id` label in traces. pub struct ShardSlug<'a>(&'a TenantShardId); /// TenantShardId globally identifies a particular shard in a particular tenant. /// /// These are written as `<TenantId>-<ShardSlug>`, for example: /// # The second shard in a two-shard tenant /// 072f1291a5310026820b2fe4b2968934-0102 /// /// If the `ShardCount` is _unsharded_, the `TenantShardId` is written without /// a shard suffix and is equivalent to the encoding of a `TenantId`: this enables /// an unsharded [`TenantShardId`] to be used interchangably with a [`TenantId`]. 
/// /// The human-readable encoding of an unsharded TenantShardId, such as used in API URLs, /// is both forward and backward compatible with TenantId: a legacy TenantId can be /// decoded as a TenantShardId, and when re-encoded it will be parseable /// as a TenantId. #[derive(Eq, PartialEq, PartialOrd, Ord, Clone, Copy, Hash)] pub struct TenantShardId { pub tenant_id: TenantId, pub shard_number: ShardNumber, pub shard_count: ShardCount, } impl ShardCount { pub const MAX: Self = Self(u8::MAX); pub const MIN: Self = Self(0); pub fn unsharded() -> Self { ShardCount(0) } /// The internal value of a ShardCount may be zero, which means "1 shard, but use /// legacy format for TenantShardId that excludes the shard suffix", also known /// as [`TenantShardId::unsharded`]. /// /// This method returns the actual number of shards, i.e. if our internal value is /// zero, we return 1 (unsharded tenants have 1 shard). pub fn count(&self) -> u8 { if self.0 > 0 { self.0 } else { 1 } } /// The literal internal value: this is **not** the number of shards in the /// tenant, as we have a special zero value for legacy unsharded tenants. Use /// [`Self::count`] if you want to know the cardinality of shards. pub fn literal(&self) -> u8 { self.0 } /// Whether the `ShardCount` is for an unsharded tenant, so uses one shard but /// uses the legacy format for `TenantShardId`. See also the documentation for /// [`Self::count`]. pub fn is_unsharded(&self) -> bool { self.0 == 0 } /// `v` may be zero, or the number of shards in the tenant. `v` is what /// [`Self::literal`] would return. pub const fn new(val: u8) -> Self { Self(val) } } impl ShardNumber { pub const MAX: Self = Self(u8::MAX); } impl TenantShardId { pub fn unsharded(tenant_id: TenantId) -> Self { Self { tenant_id, shard_number: ShardNumber(0), shard_count: ShardCount(0), } } /// The range of all TenantShardId that belong to a particular TenantId. 
This is useful when /// you have a BTreeMap of TenantShardId, and are querying by TenantId. pub fn tenant_range(tenant_id: TenantId) -> RangeInclusive<Self> { RangeInclusive::new( Self { tenant_id, shard_number: ShardNumber(0), shard_count: ShardCount(0), }, Self { tenant_id, shard_number: ShardNumber::MAX, shard_count: ShardCount::MAX, }, ) } pub fn range(&self) -> RangeInclusive<Self> { RangeInclusive::new(*self, *self) } pub fn shard_slug(&self) -> impl std::fmt::Display + '_ { ShardSlug(self) } /// Convenience for code that has special behavior on the 0th shard. pub fn is_shard_zero(&self) -> bool { self.shard_number == ShardNumber(0) } /// The "unsharded" value is distinct from simply having a single shard: it represents /// a tenant which is not shard-aware at all, and whose storage paths will not include /// a shard suffix. pub fn is_unsharded(&self) -> bool { self.shard_number == ShardNumber(0) && self.shard_count.is_unsharded() } /// Convenience for dropping the tenant_id and just getting the ShardIndex: this /// is useful when logging from code that is already in a span that includes tenant ID, to /// keep messages reasonably terse. pub fn to_index(&self) -> ShardIndex { ShardIndex { shard_number: self.shard_number, shard_count: self.shard_count, } } /// Calculate the children of this TenantShardId when splitting the overall tenant into /// the given number of shards. pub fn split(&self, new_shard_count: ShardCount) -> Vec<TenantShardId> { let effective_old_shard_count = std::cmp::max(self.shard_count.0, 1); let mut child_shards = Vec::new(); for shard_number in 0..ShardNumber(new_shard_count.0).0 { // Key mapping is based on a round robin mapping of key hash modulo shard count, // so our child shards are the ones which the same keys would map to. 
if shard_number % effective_old_shard_count == self.shard_number.0 { child_shards.push(TenantShardId { tenant_id: self.tenant_id, shard_number: ShardNumber(shard_number), shard_count: new_shard_count, }) } } child_shards } } impl std::fmt::Display for ShardNumber { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.0.fmt(f) } } impl std::fmt::Display for ShardCount { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.0.fmt(f) } } impl std::fmt::Display for ShardStripeSize { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.0.fmt(f) } } impl std::fmt::Display for ShardSlug<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "{:02x}{:02x}", self.0.shard_number.0, self.0.shard_count.0 ) } } impl std::fmt::Display for TenantShardId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { if self.shard_count != ShardCount(0) { write!(f, "{}-{}", self.tenant_id, self.shard_slug()) } else { // Legacy case (shard_count == 0) -- format as just the tenant id. Note that this // is distinct from the normal single shard case (shard count == 1). 
self.tenant_id.fmt(f) } } } impl std::fmt::Debug for TenantShardId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Debug is the same as Display: the compact hex representation write!(f, "{self}") } } impl std::str::FromStr for TenantShardId { type Err = hex::FromHexError; fn from_str(s: &str) -> Result<Self, Self::Err> { // Expect format: 16 byte TenantId, '-', 1 byte shard number, 1 byte shard count if s.len() == 32 { // Legacy case: no shard specified Ok(Self { tenant_id: TenantId::from_str(s)?, shard_number: ShardNumber(0), shard_count: ShardCount(0), }) } else if s.len() == 37 { let bytes = s.as_bytes(); let tenant_id = TenantId::from_hex(&bytes[0..32])?; let mut shard_parts: [u8; 2] = [0u8; 2]; hex::decode_to_slice(&bytes[33..37], &mut shard_parts)?; Ok(Self { tenant_id, shard_number: ShardNumber(shard_parts[0]), shard_count: ShardCount(shard_parts[1]), }) } else { Err(hex::FromHexError::InvalidStringLength) } } } impl From<[u8; 18]> for TenantShardId { fn from(b: [u8; 18]) -> Self { let tenant_id_bytes: [u8; 16] = b[0..16].try_into().unwrap(); Self { tenant_id: TenantId::from(tenant_id_bytes), shard_number: ShardNumber(b[16]), shard_count: ShardCount(b[17]), } } } impl ShardIndex { pub fn new(number: ShardNumber, count: ShardCount) -> Self { Self { shard_number: number, shard_count: count, } } pub fn unsharded() -> Self { Self { shard_number: ShardNumber(0), shard_count: ShardCount(0), } } /// The "unsharded" value is distinct from simply having a single shard: it represents /// a tenant which is not shard-aware at all, and whose storage paths will not include /// a shard suffix. pub fn is_unsharded(&self) -> bool { self.shard_number == ShardNumber(0) && self.shard_count == ShardCount(0) } /// For use in constructing remote storage paths: concatenate this with a TenantId /// to get a fully qualified TenantShardId. 
/// /// Backward compat: this function returns an empty string if Self::is_unsharded, such /// that the legacy pre-sharding remote key format is preserved. pub fn get_suffix(&self) -> String { if self.is_unsharded() { "".to_string() } else { format!("-{:02x}{:02x}", self.shard_number.0, self.shard_count.0) } } } impl std::fmt::Display for ShardIndex { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:02x}{:02x}", self.shard_number.0, self.shard_count.0) } } impl std::fmt::Debug for ShardIndex { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Debug is the same as Display: the compact hex representation write!(f, "{self}") } } impl std::str::FromStr for ShardIndex { type Err = hex::FromHexError; fn from_str(s: &str) -> Result<Self, Self::Err> { // Expect format: 1 byte shard number, 1 byte shard count if s.len() == 4 { let bytes = s.as_bytes(); let mut shard_parts: [u8; 2] = [0u8; 2]; hex::decode_to_slice(bytes, &mut shard_parts)?; Ok(Self { shard_number: ShardNumber(shard_parts[0]), shard_count: ShardCount(shard_parts[1]), }) } else { Err(hex::FromHexError::InvalidStringLength) } } } impl From<[u8; 2]> for ShardIndex { fn from(b: [u8; 2]) -> Self { Self { shard_number: ShardNumber(b[0]), shard_count: ShardCount(b[1]), } } } impl Serialize for TenantShardId { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { if serializer.is_human_readable() { serializer.collect_str(self) } else { // Note: while human encoding of [`TenantShardId`] is backward and forward // compatible, this binary encoding is not. 
let mut packed: [u8; 18] = [0; 18]; packed[0..16].clone_from_slice(&self.tenant_id.as_arr()); packed[16] = self.shard_number.0; packed[17] = self.shard_count.0; packed.serialize(serializer) } } } impl<'de> Deserialize<'de> for TenantShardId { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { struct IdVisitor { is_human_readable_deserializer: bool, } impl<'de> serde::de::Visitor<'de> for IdVisitor { type Value = TenantShardId; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { if self.is_human_readable_deserializer { formatter.write_str("value in form of hex string") } else { formatter.write_str("value in form of integer array([u8; 18])") } } fn visit_seq<A>(self, seq: A) -> Result<Self::Value, A::Error> where A: serde::de::SeqAccess<'de>, { let s = serde::de::value::SeqAccessDeserializer::new(seq); let id: [u8; 18] = Deserialize::deserialize(s)?; Ok(TenantShardId::from(id)) } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error, { TenantShardId::from_str(v).map_err(E::custom) } } if deserializer.is_human_readable() { deserializer.deserialize_str(IdVisitor { is_human_readable_deserializer: true, }) } else { deserializer.deserialize_tuple( 18, IdVisitor { is_human_readable_deserializer: false, }, ) } } } impl Serialize for ShardIndex { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { if serializer.is_human_readable() { serializer.collect_str(self) } else { // Binary encoding is not used in index_part.json, but is included in anticipation of // switching various structures (e.g. inter-process communication, remote metadata) to more // compact binary encodings in future. 
let mut packed: [u8; 2] = [0; 2]; packed[0] = self.shard_number.0; packed[1] = self.shard_count.0; packed.serialize(serializer) } } } impl<'de> Deserialize<'de> for ShardIndex { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { struct IdVisitor { is_human_readable_deserializer: bool, } impl<'de> serde::de::Visitor<'de> for IdVisitor { type Value = ShardIndex; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { if self.is_human_readable_deserializer { formatter.write_str("value in form of hex string") } else { formatter.write_str("value in form of integer array([u8; 2])") } } fn visit_seq<A>(self, seq: A) -> Result<Self::Value, A::Error> where A: serde::de::SeqAccess<'de>, { let s = serde::de::value::SeqAccessDeserializer::new(seq); let id: [u8; 2] = Deserialize::deserialize(s)?; Ok(ShardIndex::from(id)) } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error, { ShardIndex::from_str(v).map_err(E::custom) } } if deserializer.is_human_readable() { deserializer.deserialize_str(IdVisitor { is_human_readable_deserializer: true, }) } else { deserializer.deserialize_tuple( 2, IdVisitor { is_human_readable_deserializer: false, }, ) } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/serde_system_time.rs
libs/utils/src/serde_system_time.rs
//! A `serde::{Deserialize,Serialize}` type for SystemTime with RFC3339 format and millisecond precision. #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, serde::Serialize, serde::Deserialize)] #[serde(transparent)] pub struct SystemTime( #[serde( deserialize_with = "deser_rfc3339_millis", serialize_with = "ser_rfc3339_millis" )] pub std::time::SystemTime, ); fn ser_rfc3339_millis<S: serde::ser::Serializer>( ts: &std::time::SystemTime, serializer: S, ) -> Result<S::Ok, S::Error> { serializer.collect_str(&humantime::format_rfc3339_millis(*ts)) } fn deser_rfc3339_millis<'de, D>(deserializer: D) -> Result<std::time::SystemTime, D::Error> where D: serde::de::Deserializer<'de>, { let s: String = serde::de::Deserialize::deserialize(deserializer)?; humantime::parse_rfc3339(&s).map_err(serde::de::Error::custom) } #[cfg(test)] mod tests { use super::*; /// Helper function to make a SystemTime have millisecond precision by truncating additional nanoseconds. fn to_millisecond_precision(time: SystemTime) -> SystemTime { match time.0.duration_since(std::time::SystemTime::UNIX_EPOCH) { Ok(duration) => { let total_millis = duration.as_secs() * 1_000 + u64::from(duration.subsec_millis()); SystemTime( std::time::SystemTime::UNIX_EPOCH + std::time::Duration::from_millis(total_millis), ) } Err(_) => time, } } #[test] fn test_serialize_deserialize() { let input = SystemTime(std::time::SystemTime::now()); let expected_serialized = format!("\"{}\"", humantime::format_rfc3339_millis(input.0)); let serialized = serde_json::to_string(&input).unwrap(); assert_eq!(expected_serialized, serialized); let deserialized: SystemTime = serde_json::from_str(&expected_serialized).unwrap(); assert_eq!(to_millisecond_precision(input), deserialized); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/simple_rcu.rs
libs/utils/src/simple_rcu.rs
//! //! RCU stands for Read-Copy-Update. It's a synchronization mechanism somewhat //! similar to a lock, but it allows readers to "hold on" to an old value of RCU //! without blocking writers, and allows writing a new value without blocking //! readers. When you update the value, the new value is immediately visible //! to new readers, but the update waits until all existing readers have //! finished, so that on return, no one sees the old value anymore. //! //! This implementation isn't wait-free; it uses an RwLock that is held for a //! short duration when the value is read or updated. //! //! # Examples //! //! Read a value and do things with it while holding the guard: //! //! ``` //! # let rcu = utils::simple_rcu::Rcu::new(1); //! { //! let read = rcu.read(); //! println!("the current value is {}", *read); //! // exiting the scope drops the read-guard, and allows concurrent writers //! // to finish. //! } //! ``` //! //! Increment the value by one, and wait for old readers to finish: //! //! ``` //! # async fn dox() { //! # let rcu = utils::simple_rcu::Rcu::new(1); //! let write_guard = rcu.lock_for_write(); //! //! // NB: holding `write_guard` blocks new readers and writers. Keep this section short! //! let new_value = *write_guard + 1; //! //! let waitlist = write_guard.store_and_unlock(new_value); // consumes `write_guard` //! //! // Concurrent reads and writes are now possible again. Wait for all the readers //! // that still observe the old value to finish. //! waitlist.wait().await; //! # } //! ``` //! #![warn(missing_docs)] use std::ops::Deref; use std::sync::{Arc, RwLock, RwLockWriteGuard, Weak}; use tokio::sync::watch; /// Rcu allows multiple readers to read and hold onto a value without blocking /// (for very long). /// /// Storing to the Rcu updates the value, making new readers immediately see /// the new value, but it also waits for all current readers to finish. 
pub struct Rcu<V> { inner: RwLock<RcuInner<V>>, } struct RcuInner<V> { current_cell: Arc<RcuCell<V>>, old_cells: Vec<Weak<RcuCell<V>>>, } /// /// RcuCell holds one value. It can be the latest one, or an old one. /// struct RcuCell<V> { value: V, /// A dummy channel. We never send anything to this channel. The point is /// that when the RcuCell is dropped, any subscribed Receivers will be notified /// that the channel is closed. Updaters can use this to wait out until the /// RcuCell has been dropped, i.e. until the old value is no longer in use. /// /// We never send anything to this, we just need to hold onto it so that the /// Receivers will be notified when it's dropped. watch: watch::Sender<()>, } impl<V> RcuCell<V> { fn new(value: V) -> Self { let (watch_sender, _) = watch::channel(()); RcuCell { value, watch: watch_sender, } } } impl<V> Rcu<V> { /// Create a new `Rcu`, initialized to `starting_val` pub fn new(starting_val: V) -> Self { let inner = RcuInner { current_cell: Arc::new(RcuCell::new(starting_val)), old_cells: Vec::new(), }; Self { inner: RwLock::new(inner), } } /// /// Read current value. Any store() calls will block until the returned /// guard object is dropped. /// pub fn read(&self) -> RcuReadGuard<V> { let current_cell = Arc::clone(&self.inner.read().unwrap().current_cell); RcuReadGuard { cell: current_cell } } /// /// Lock the current value for updating. Returns a guard object that can be /// used to read the current value, and to store a new value. /// /// Note: holding the write-guard blocks concurrent readers, so you should /// finish the update and drop the guard quickly! Multiple writers can be /// waiting on the RcuWriteGuard::store step at the same time, however. 
/// pub fn lock_for_write(&self) -> RcuWriteGuard<'_, V> { let inner = self.inner.write().unwrap(); RcuWriteGuard { inner } } } /// /// Read guard returned by `read` /// pub struct RcuReadGuard<V> { cell: Arc<RcuCell<V>>, } impl<V> Deref for RcuReadGuard<V> { type Target = V; fn deref(&self) -> &V { &self.cell.value } } /// /// Write guard returned by `write` /// /// NB: Holding this guard blocks all concurrent `read` and `write` calls, so it should only be /// held for a short duration! /// /// Calling [`Self::store_and_unlock`] consumes the guard, making new reads and new writes possible /// again. /// pub struct RcuWriteGuard<'a, V> { inner: RwLockWriteGuard<'a, RcuInner<V>>, } impl<V> Deref for RcuWriteGuard<'_, V> { type Target = V; fn deref(&self) -> &V { &self.inner.current_cell.value } } impl<V> RcuWriteGuard<'_, V> { /// /// Store a new value. The new value will be written to the Rcu immediately, /// and will be immediately seen by any `read` calls that start afterwards. /// /// Returns a list of readers that can see old values. You can call `wait()` /// on it to wait for them to finish. /// pub fn store_and_unlock(mut self, new_val: V) -> RcuWaitList { let new_cell = Arc::new(RcuCell::new(new_val)); let mut watches = Vec::new(); { let old = std::mem::replace(&mut self.inner.current_cell, new_cell); self.inner.old_cells.push(Arc::downgrade(&old)); // cleanup old cells that no longer have any readers, and collect // the watches for any that do. self.inner.old_cells.retain(|weak| { if let Some(cell) = weak.upgrade() { watches.push(cell.watch.subscribe()); true } else { false } }); } RcuWaitList(watches) } } /// /// List of readers who can still see old values. /// pub struct RcuWaitList(Vec<watch::Receiver<()>>); impl RcuWaitList { /// /// Wait for old readers to finish. /// pub async fn wait(mut self) { // after all the old_cells are no longer in use, we're done for w in self.0.iter_mut() { // This will block until the Receiver is closed. 
That happens when // the RcuCell is dropped. #[allow(clippy::single_match)] match w.changed().await { Ok(_) => panic!("changed() unexpectedly succeeded on dummy channel"), Err(_) => { // closed, which means that the cell has been dropped, and // its value is no longer in use } } } } } #[cfg(test)] mod tests { use std::sync::Mutex; use std::time::Duration; use super::*; #[tokio::test] async fn two_writers() { let rcu = Rcu::new(1); let read1 = rcu.read(); assert_eq!(*read1, 1); let write2 = rcu.lock_for_write(); assert_eq!(*write2, 1); let wait2 = write2.store_and_unlock(2); let read2 = rcu.read(); assert_eq!(*read2, 2); let write3 = rcu.lock_for_write(); assert_eq!(*write3, 2); let wait3 = write3.store_and_unlock(3); // new reader can see the new value, and old readers continue to see the old values. let read3 = rcu.read(); assert_eq!(*read3, 3); assert_eq!(*read2, 2); assert_eq!(*read1, 1); let log = Arc::new(Mutex::new(Vec::new())); // Wait for the old readers to finish in separate tasks. let log_clone = Arc::clone(&log); let task2 = tokio::spawn(async move { wait2.wait().await; log_clone.lock().unwrap().push("wait2 done"); }); let log_clone = Arc::clone(&log); let task3 = tokio::spawn(async move { wait3.wait().await; log_clone.lock().unwrap().push("wait3 done"); }); // without this sleep the test can pass on accident if the writer is slow tokio::time::sleep(Duration::from_millis(100)).await; // Release first reader. This allows first write to finish, but calling // wait() on the 'task3' would still block. log.lock().unwrap().push("dropping read1"); drop(read1); task2.await.unwrap(); assert!(!task3.is_finished()); tokio::time::sleep(Duration::from_millis(100)).await; // Release second reader, and finish second writer. log.lock().unwrap().push("dropping read2"); drop(read2); task3.await.unwrap(); assert_eq!( log.lock().unwrap().as_slice(), &[ "dropping read1", "wait2 done", "dropping read2", "wait3 done" ] ); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/crashsafe.rs
libs/utils/src/crashsafe.rs
use std::borrow::Cow; use std::fs::{self, File}; use std::io::{self, Write}; use std::os::fd::AsFd; use camino::{Utf8Path, Utf8PathBuf}; /// Similar to [`std::fs::create_dir`], except we fsync the /// created directory and its parent. pub fn create_dir(path: impl AsRef<Utf8Path>) -> io::Result<()> { let path = path.as_ref(); fs::create_dir(path)?; fsync_file_and_parent(path)?; Ok(()) } /// Similar to [`std::fs::create_dir_all`], except we fsync all /// newly created directories and the pre-existing parent. pub fn create_dir_all(path: impl AsRef<Utf8Path>) -> io::Result<()> { let mut path = path.as_ref(); let mut dirs_to_create = Vec::new(); // Figure out which directories we need to create. loop { match path.metadata() { Ok(metadata) if metadata.is_dir() => break, Ok(_) => { return Err(io::Error::new( io::ErrorKind::AlreadyExists, format!("non-directory found in path: {path}"), )); } Err(ref e) if e.kind() == io::ErrorKind::NotFound => {} Err(e) => return Err(e), } dirs_to_create.push(path); match path.parent() { Some(parent) => path = parent, None => { return Err(io::Error::new( io::ErrorKind::InvalidInput, format!("can't find parent of path '{path}'"), )); } } } // Create directories from parent to child. for &path in dirs_to_create.iter().rev() { fs::create_dir(path)?; } // Fsync the created directories from child to parent. for &path in dirs_to_create.iter() { fsync(path)?; } // If we created any new directories, fsync the parent. if !dirs_to_create.is_empty() { fsync(path)?; } Ok(()) } /// Adds a suffix to the file(directory) name, either appending the suffix to the end of its extension, /// or if there's no extension, creates one and puts a suffix there. 
pub fn path_with_suffix_extension( original_path: impl AsRef<Utf8Path>, suffix: &str, ) -> Utf8PathBuf { let new_extension = match original_path.as_ref().extension() { Some(extension) => Cow::Owned(format!("{extension}.{suffix}")), None => Cow::Borrowed(suffix), }; original_path.as_ref().with_extension(new_extension) } pub fn fsync_file_and_parent(file_path: &Utf8Path) -> io::Result<()> { let parent = file_path .parent() .ok_or_else(|| io::Error::other(format!("File {file_path:?} has no parent")))?; fsync(file_path)?; fsync(parent)?; Ok(()) } pub fn fsync(path: &Utf8Path) -> io::Result<()> { File::open(path) .map_err(|e| io::Error::new(e.kind(), format!("Failed to open the file {path:?}: {e}"))) .and_then(|file| { file.sync_all().map_err(|e| { io::Error::new( e.kind(), format!("Failed to sync file {path:?} data and metadata: {e}"), ) }) }) .map_err(|e| io::Error::new(e.kind(), format!("Failed to fsync file {path:?}: {e}"))) } pub async fn fsync_async(path: impl AsRef<Utf8Path>) -> Result<(), std::io::Error> { tokio::fs::File::open(path.as_ref()).await?.sync_all().await } pub async fn fsync_async_opt( path: impl AsRef<Utf8Path>, do_fsync: bool, ) -> Result<(), std::io::Error> { if do_fsync { fsync_async(path.as_ref()).await?; } Ok(()) } /// Like postgres' durable_rename, renames a file and issues fsyncs to make it durable. After /// returning, both the file and rename are guaranteed to be persisted. Both paths must be on the /// same file system. /// /// Unlike postgres, it only fsyncs 1) the file to make contents durable, and 2) the directory to /// make the rename durable. This sequence ensures the target file will never be incomplete. /// /// Postgres also: /// /// * Fsyncs the target file, if it exists, before the rename, to ensure either the new or existing /// file survives a crash. Current callers don't need this as it should already be fsynced if /// durability is needed. /// /// * Fsyncs the file after the rename. 
This can be required with certain OSes or file systems (e.g. /// NFS), but not on Linux with most common file systems like ext4 (which we currently use). /// /// An audit of 8 other databases found that none fsynced the file after a rename: /// <https://github.com/neondatabase/neon/pull/9686#discussion_r1837180535> /// /// eBPF probes confirmed that this is sufficient with ext4, XFS, and ZFS, but possibly not Btrfs: /// <https://github.com/neondatabase/neon/pull/9686#discussion_r1837926218> /// /// virtual_file.rs has similar code, but it doesn't use vfs. /// /// Useful links: <https://lwn.net/Articles/457667/> /// <https://www.postgresql.org/message-id/flat/56583BDD.9060302%402ndquadrant.com> /// <https://thunk.org/tytso/blog/2009/03/15/dont-fear-the-fsync/> pub async fn durable_rename( old_path: impl AsRef<Utf8Path>, new_path: impl AsRef<Utf8Path>, do_fsync: bool, ) -> io::Result<()> { // first fsync the file fsync_async_opt(old_path.as_ref(), do_fsync).await?; // Time to do the real deal. tokio::fs::rename(old_path.as_ref(), new_path.as_ref()).await?; // Now fsync the parent let parent = match new_path.as_ref().parent() { Some(p) => p, None => Utf8Path::new("./"), // assume current dir if there is no parent }; fsync_async_opt(parent, do_fsync).await?; Ok(()) } /// Writes a file to the specified `final_path` in a crash safe fasion, using [`std::fs`]. /// /// The file is first written to the specified `tmp_path`, and in a second /// step, the `tmp_path` is renamed to the `final_path`. Intermediary fsync /// and atomic rename guarantee that, if we crash at any point, there will never /// be a partially written file at `final_path` (but maybe at `tmp_path`). /// /// Callers are responsible for serializing calls of this function for a given `final_path`. /// If they don't, there may be an error due to conflicting `tmp_path`, or there will /// be no error and the content of `final_path` will be the "winner" caller's `content`. 
/// I.e., the atomticity guarantees still hold. pub fn overwrite( final_path: &Utf8Path, tmp_path: &Utf8Path, content: &[u8], ) -> std::io::Result<()> { let Some(final_path_parent) = final_path.parent() else { return Err(std::io::Error::from_raw_os_error( nix::errno::Errno::EINVAL as i32, )); }; std::fs::remove_file(tmp_path).or_else(crate::fs_ext::ignore_not_found)?; let mut file = std::fs::OpenOptions::new() .write(true) // Use `create_new` so that, if we race with ourselves or something else, // we bail out instead of causing damage. .create_new(true) .open(tmp_path)?; file.write_all(content)?; file.sync_all()?; drop(file); // don't keep the fd open for longer than we have to std::fs::rename(tmp_path, final_path)?; let final_parent_dirfd = std::fs::OpenOptions::new() .read(true) .open(final_path_parent)?; final_parent_dirfd.sync_all()?; Ok(()) } /// Syncs the filesystem for the given file descriptor. #[cfg_attr(target_os = "macos", allow(unused_variables))] pub fn syncfs(fd: impl AsFd) -> anyhow::Result<()> { // Linux guarantees durability for syncfs. // POSIX doesn't have syncfs, and further does not actually guarantee durability of sync(). #[cfg(target_os = "linux")] { use anyhow::Context; nix::unistd::syncfs(fd).context("syncfs")?; } #[cfg(target_os = "macos")] { // macOS is not a production platform for Neon, don't even bother. 
} #[cfg(not(any(target_os = "linux", target_os = "macos")))] { compile_error!("Unsupported OS"); } Ok(()) } #[cfg(test)] mod tests { use super::*; #[test] fn test_create_dir_fsyncd() { let dir = camino_tempfile::tempdir().unwrap(); let existing_dir_path = dir.path(); let err = create_dir(existing_dir_path).unwrap_err(); assert_eq!(err.kind(), io::ErrorKind::AlreadyExists); let child_dir = existing_dir_path.join("child"); create_dir(child_dir).unwrap(); let nested_child_dir = existing_dir_path.join("child1").join("child2"); let err = create_dir(nested_child_dir).unwrap_err(); assert_eq!(err.kind(), io::ErrorKind::NotFound); } #[test] fn test_create_dir_all_fsyncd() { let dir = camino_tempfile::tempdir().unwrap(); let existing_dir_path = dir.path(); create_dir_all(existing_dir_path).unwrap(); let child_dir = existing_dir_path.join("child"); assert!(!child_dir.exists()); create_dir_all(&child_dir).unwrap(); assert!(child_dir.exists()); let nested_child_dir = existing_dir_path.join("child1").join("child2"); assert!(!nested_child_dir.exists()); create_dir_all(&nested_child_dir).unwrap(); assert!(nested_child_dir.exists()); let file_path = existing_dir_path.join("file"); std::fs::write(&file_path, b"").unwrap(); let err = create_dir_all(&file_path).unwrap_err(); assert_eq!(err.kind(), io::ErrorKind::AlreadyExists); let invalid_dir_path = file_path.join("folder"); create_dir_all(invalid_dir_path).unwrap_err(); } #[test] fn test_path_with_suffix_extension() { let p = Utf8PathBuf::from("/foo/bar"); assert_eq!( &path_with_suffix_extension(p, "temp").to_string(), "/foo/bar.temp" ); let p = Utf8PathBuf::from("/foo/bar"); assert_eq!( &path_with_suffix_extension(p, "temp.temp").to_string(), "/foo/bar.temp.temp" ); let p = Utf8PathBuf::from("/foo/bar.baz"); assert_eq!( &path_with_suffix_extension(p, "temp.temp").to_string(), "/foo/bar.baz.temp.temp" ); let p = Utf8PathBuf::from("/foo/bar.baz"); assert_eq!( &path_with_suffix_extension(p, ".temp").to_string(), "/foo/bar.baz..temp" 
); let p = Utf8PathBuf::from("/foo/bar/dir/"); assert_eq!( &path_with_suffix_extension(p, ".temp").to_string(), "/foo/bar/dir..temp" ); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/hex.rs
libs/utils/src/hex.rs
/// Useful type for asserting that expected bytes match reporting the bytes more readable /// array-syntax compatible hex bytes. /// /// # Usage /// /// ``` /// use utils::Hex; /// /// let actual = serialize_something(); /// let expected = [0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64]; /// /// // the type implements PartialEq and on mismatch, both sides are printed in 16 wide multiline /// // output suffixed with an array style length for easier comparisons. /// assert_eq!(Hex(&actual), Hex(&expected)); /// /// // with `let expected = [0x68];` the error would had been: /// // assertion `left == right` failed /// // left: [0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64; 11] /// // right: [0x68; 1] /// # fn serialize_something() -> Vec<u8> { "hello world".as_bytes().to_vec() } /// ``` pub struct Hex<S>(pub S); impl<S: AsRef<[u8]>> std::fmt::Debug for Hex<S> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "[")?; let chunks = self.0.as_ref().chunks(16); for (i, c) in chunks.enumerate() { if i > 0 && !c.is_empty() { writeln!(f, ", ")?; } for (j, b) in c.iter().enumerate() { if j > 0 { write!(f, ", ")?; } write!(f, "0x{b:02x}")?; } } write!(f, "; {}]", self.0.as_ref().len()) } } impl<R: AsRef<[u8]>, L: AsRef<[u8]>> PartialEq<Hex<R>> for Hex<L> { fn eq(&self, other: &Hex<R>) -> bool { let left = self.0.as_ref(); let right = other.0.as_ref(); left == right } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/tcp_listener.rs
libs/utils/src/tcp_listener.rs
use std::io; use std::net::{TcpListener, ToSocketAddrs}; use nix::sys::socket::setsockopt; use nix::sys::socket::sockopt::ReuseAddr; /// Bind a [`TcpListener`] to addr with `SO_REUSEADDR` set to true. pub fn bind<A: ToSocketAddrs>(addr: A) -> io::Result<TcpListener> { let listener = TcpListener::bind(addr)?; setsockopt(&listener, ReuseAddr, &true)?; Ok(listener) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/env.rs
libs/utils/src/env.rs
//! Wrapper around `std::env::var` for parsing environment variables. use std::fmt::Display; use std::str::FromStr; /// For types `V` that implement [`FromStr`]. pub fn var<V, E>(varname: &str) -> Option<V> where V: FromStr<Err = E>, E: Display, { match std::env::var(varname) { Ok(s) => Some( s.parse() .map_err(|e| { format!("failed to parse env var {varname} using FromStr::parse: {e:#}") }) .unwrap(), ), Err(std::env::VarError::NotPresent) => None, Err(std::env::VarError::NotUnicode(_)) => { panic!("env var {varname} is not unicode") } } } /// For types `V` that implement [`serde::de::DeserializeOwned`]. pub fn var_serde_json_string<V>(varname: &str) -> Option<V> where V: serde::de::DeserializeOwned, { match std::env::var(varname) { Ok(s) => Some({ let value = serde_json::Value::String(s); serde_json::from_value(value) .map_err(|e| { format!("failed to parse env var {varname} as a serde_json json string: {e:#}") }) .unwrap() }), Err(std::env::VarError::NotPresent) => None, Err(std::env::VarError::NotUnicode(_)) => { panic!("env var {varname} is not unicode") } } } /* BEGIN_HADRON */ pub enum DeploymentMode { Local, Dev, Staging, Prod, } pub fn get_deployment_mode() -> Option<DeploymentMode> { match std::env::var("DEPLOYMENT_MODE") { Ok(env) => match env.as_str() { "development" => Some(DeploymentMode::Dev), "staging" => Some(DeploymentMode::Staging), "production" => Some(DeploymentMode::Prod), _ => { tracing::error!("Unexpected DEPLOYMENT_MODE: {}", env); None } }, Err(_) => { // tracing::error!("DEPLOYMENT_MODE not set"); None } } } pub fn is_dev_or_staging() -> bool { matches!( get_deployment_mode(), Some(DeploymentMode::Dev) | Some(DeploymentMode::Staging) ) } pub enum TestingMode { Chaos, Stress, } pub fn get_test_mode() -> Option<TestingMode> { match std::env::var("HADRON_TEST_MODE") { Ok(env) => match env.as_str() { "chaos" => Some(TestingMode::Chaos), "stress" => Some(TestingMode::Stress), _ => { tracing::error!("Unexpected HADRON_TEST_MODE: {}", env); None 
} }, Err(_) => { tracing::error!("HADRON_TEST_MODE not set"); None } } } pub fn is_chaos_testing() -> bool { matches!(get_test_mode(), Some(TestingMode::Chaos)) } /* END_HADRON */
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/zstd.rs
libs/utils/src/zstd.rs
use std::io::SeekFrom; use anyhow::{Context, Result}; use async_compression::Level; use async_compression::tokio::bufread::ZstdDecoder; use async_compression::tokio::write::ZstdEncoder; use async_compression::zstd::CParameter; use camino::Utf8Path; use nix::NixPath; use tokio::fs::{File, OpenOptions}; use tokio::io::{AsyncBufRead, AsyncSeekExt, AsyncWriteExt}; use tokio_tar::{Archive, Builder, HeaderMode}; use walkdir::WalkDir; /// Creates a Zstandard tarball. pub async fn create_zst_tarball(path: &Utf8Path, tarball: &Utf8Path) -> Result<(File, u64)> { let file = OpenOptions::new() .create(true) .truncate(true) .read(true) .write(true) .open(&tarball) .await .with_context(|| format!("tempfile creation {tarball}"))?; let mut paths = Vec::new(); for entry in WalkDir::new(path) { let entry = entry?; let metadata = entry.metadata().expect("error getting dir entry metadata"); // Also allow directories so that we also get empty directories if !(metadata.is_file() || metadata.is_dir()) { continue; } let path = entry.into_path(); paths.push(path); } // Do a sort to get a more consistent listing paths.sort_unstable(); let zstd = ZstdEncoder::with_quality_and_params( file, Level::Default, &[CParameter::enable_long_distance_matching(true)], ); let mut builder = Builder::new(zstd); // Use reproducible header mode builder.mode(HeaderMode::Deterministic); for p in paths { let rel_path = p.strip_prefix(path)?; if rel_path.is_empty() { // The top directory should not be compressed, // the tar crate doesn't like that continue; } builder.append_path_with_name(&p, rel_path).await?; } let mut zstd = builder.into_inner().await?; zstd.shutdown().await?; let mut compressed = zstd.into_inner(); let compressed_len = compressed.metadata().await?.len(); compressed.seek(SeekFrom::Start(0)).await?; Ok((compressed, compressed_len)) } /// Creates a Zstandard tarball. 
pub async fn extract_zst_tarball( path: &Utf8Path, tarball: impl AsyncBufRead + Unpin, ) -> Result<()> { let decoder = Box::pin(ZstdDecoder::new(tarball)); let mut archive = Archive::new(decoder); archive.unpack(path).await?; Ok(()) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/try_rcu.rs
libs/utils/src/try_rcu.rs
//! Try RCU extension lifted from <https://github.com/vorner/arc-swap/issues/94#issuecomment-1987154023> pub trait ArcSwapExt<T> { /// [`ArcSwap::rcu`](arc_swap::ArcSwap::rcu), but with Result that short-circuits on error. fn try_rcu<R, F, E>(&self, f: F) -> Result<T, E> where F: FnMut(&T) -> Result<R, E>, R: Into<T>; } impl<T, S> ArcSwapExt<T> for arc_swap::ArcSwapAny<T, S> where T: arc_swap::RefCnt, S: arc_swap::strategy::CaS<T>, { fn try_rcu<R, F, E>(&self, mut f: F) -> Result<T, E> where F: FnMut(&T) -> Result<R, E>, R: Into<T>, { fn ptr_eq<Base, A, B>(a: A, b: B) -> bool where A: arc_swap::AsRaw<Base>, B: arc_swap::AsRaw<Base>, { let a = a.as_raw(); let b = b.as_raw(); std::ptr::eq(a, b) } let mut cur = self.load(); loop { let new = f(&cur)?.into(); let prev = self.compare_and_swap(&*cur, new); let swapped = ptr_eq(&*cur, &*prev); if swapped { return Ok(arc_swap::Guard::into_inner(prev)); } else { cur = prev; } } } } #[cfg(test)] mod tests { use std::sync::Arc; use arc_swap::ArcSwap; use super::*; #[test] fn test_try_rcu_success() { let swap = ArcSwap::from(Arc::new(42)); let result = swap.try_rcu(|value| -> Result<_, String> { Ok(**value + 1) }); assert!(result.is_ok()); assert_eq!(**swap.load(), 43); } #[test] fn test_try_rcu_error() { let swap = ArcSwap::from(Arc::new(42)); let result = swap.try_rcu(|value| -> Result<i32, _> { if **value == 42 { Err("err") } else { Ok(**value + 1) } }); assert!(result.is_err()); assert_eq!(result.unwrap_err(), "err"); assert_eq!(**swap.load(), 42); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/elapsed_accum.rs
libs/utils/src/elapsed_accum.rs
use std::time::{Duration, Instant}; #[derive(Default)] pub struct ElapsedAccum { accum: Duration, } impl ElapsedAccum { pub fn get(&self) -> Duration { self.accum } pub fn guard(&mut self) -> impl Drop + '_ { let start = Instant::now(); scopeguard::guard(start, |last_wait_at| { self.accum += Instant::now() - last_wait_at; }) } pub async fn measure<Fut, O>(&mut self, fut: Fut) -> O where Fut: Future<Output = O>, { let _guard = self.guard(); fut.await } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/error.rs
libs/utils/src/error.rs
/// Create a reporter for an error that renders like [`anyhow::Error`] does with the
/// alternate (`{:#}`) Display setting: the message followed by each source, separated
/// by ": ".
///
/// It can be used with `anyhow::Error` as well.
///
/// Why would one use this instead of converting to `anyhow::Error` on the spot? Because
/// anyhow::Error would also capture a stacktrace on the spot, which you would later discard after
/// formatting.
///
/// ## Usage
///
/// ```rust
/// #[derive(Debug, thiserror::Error)]
/// enum MyCoolError {
///     #[error("should never happen")]
///     Bad(#[source] std::io::Error),
/// }
///
/// # fn failing_call() -> Result<(), MyCoolError> { Err(MyCoolError::Bad(std::io::ErrorKind::PermissionDenied.into())) }
///
/// # fn main() {
/// use utils::error::report_compact_sources;
///
/// if let Err(e) = failing_call() {
///     let e = report_compact_sources(&e);
///     assert_eq!(format!("{e}"), "should never happen: permission denied");
/// }
/// # }
/// ```
///
/// ## TODO
///
/// When we are able to describe return position impl trait in traits, this should of course be an
/// extension trait. Until then avoid boxing with this more ackward interface.
pub fn report_compact_sources<E: std::error::Error>(e: &E) -> impl std::fmt::Display + '_ {
    // `E` stays generic so that, for error types whose `Error::source` is the
    // defaulted `None` implementation, rustc has a chance to compile the chain
    // walk below down to nothing.
    struct CompactReport<'a, E>(&'a E);

    impl<E: std::error::Error> std::fmt::Display for CompactReport<'_, E> {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            // Top-level message first, then every cause in the source chain,
            // each prefixed with ": " — same shape as anyhow's "{:#}".
            write!(f, "{}", self.0)?;
            let mut cause = self.0.source();
            while let Some(src) = cause {
                write!(f, ": {src}")?;
                cause = src.source();
            }
            Ok(())
        }
    }

    CompactReport(e)
}

#[cfg(test)]
mod tests {
    use super::report_compact_sources;

    #[test]
    fn report_compact_sources_examples() {
        use std::fmt::Write;

        #[derive(Debug, thiserror::Error)]
        enum EvictionError {
            #[error("cannot evict a remote layer")]
            CannotEvictRemoteLayer,
            #[error("stat failed")]
            StatFailed(#[source] std::io::Error),
            #[error("layer was no longer part of LayerMap")]
            LayerNotFound(#[source] anyhow::Error),
        }

        let examples = [
            (
                line!(),
                EvictionError::CannotEvictRemoteLayer,
                "cannot evict a remote layer",
            ),
            (
                line!(),
                EvictionError::StatFailed(std::io::ErrorKind::PermissionDenied.into()),
                "stat failed: permission denied",
            ),
            (
                line!(),
                EvictionError::LayerNotFound(anyhow::anyhow!("foobar")),
                "layer was no longer part of LayerMap: foobar",
            ),
        ];

        let mut rendered = String::new();
        for (line, example, expected) in examples {
            rendered.clear();
            write!(rendered, "{}", report_compact_sources(&example)).expect("string grows");
            assert_eq!(rendered, expected, "example on line {line}");
        }
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/serde_regex.rs
libs/utils/src/serde_regex.rs
//! A `serde::{Deserialize,Serialize}` type for regexes. use std::ops::Deref; #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] #[serde(transparent)] pub struct Regex( #[serde( deserialize_with = "deserialize_regex", serialize_with = "serialize_regex" )] regex::Regex, ); fn deserialize_regex<'de, D>(deserializer: D) -> Result<regex::Regex, D::Error> where D: serde::de::Deserializer<'de>, { let s: String = serde::de::Deserialize::deserialize(deserializer)?; let re = regex::Regex::new(&s).map_err(serde::de::Error::custom)?; Ok(re) } fn serialize_regex<S>(re: &regex::Regex, serializer: S) -> Result<S::Ok, S::Error> where S: serde::ser::Serializer, { serializer.collect_str(re.as_str()) } impl Deref for Regex { type Target = regex::Regex; fn deref(&self) -> &regex::Regex { &self.0 } } impl PartialEq for Regex { fn eq(&self, other: &Regex) -> bool { // comparing the automatons would be quite complicated self.as_str() == other.as_str() } } impl Eq for Regex {} #[cfg(test)] mod tests { #[test] fn roundtrip() { let input = r#""foo.*bar""#; let re: super::Regex = serde_json::from_str(input).unwrap(); assert!(re.is_match("foo123bar")); assert!(!re.is_match("foo")); let output = serde_json::to_string(&re).unwrap(); assert_eq!(output, input); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/span.rs
libs/utils/src/span.rs
//! Tracing span helpers. /// Records the given fields in the current span, as a single call. The fields must already have /// been declared for the span (typically with empty values). #[macro_export] macro_rules! span_record { ($($tokens:tt)*) => {$crate::span_record_in!(::tracing::Span::current(), $($tokens)*)}; } /// Records the given fields in the given span, as a single call. The fields must already have been /// declared for the span (typically with empty values). #[macro_export] macro_rules! span_record_in { ($span:expr, $($tokens:tt)*) => { if let Some(meta) = $span.metadata() { $span.record_all(&tracing::valueset!(meta.fields(), $($tokens)*)); } }; }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/auth.rs
libs/utils/src/auth.rs
// For details about authentication see docs/authentication.md

use std::borrow::Cow;
use std::fmt::Display;
use std::fs;
use std::sync::Arc;

use anyhow::Result;
use arc_swap::ArcSwap;
use camino::Utf8Path;
use jsonwebtoken::{
    Algorithm, DecodingKey, EncodingKey, Header, TokenData, Validation, decode, encode,
};
use pem::Pem;
use serde::{Deserialize, Deserializer, Serialize, de::DeserializeOwned};
use uuid::Uuid;

use crate::id::TenantId;

/// Algorithm to use. We require EdDSA.
const STORAGE_TOKEN_ALGORITHM: Algorithm = Algorithm::EdDSA;

/// Authorization scope carried in a JWT's `scope` claim.
///
/// Serialized in lowercase (see the `rename_all` attribute); two variants carry
/// explicit snake_case renames.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Scope {
    /// Provides access to all data for a specific tenant (specified in `struct Claims` below)
    // TODO: join these two?
    Tenant,
    /// Provides access to all data for a specific tenant, but based on endpoint ID. This token scope
    /// is only used by compute to fetch the spec for a specific endpoint. The spec contains a Tenant-scoped
    /// token authorizing access to all data of a tenant, so the spec-fetch API requires a TenantEndpoint
    /// scope token to ensure that untrusted compute nodes can't fetch spec for arbitrary endpoints.
    TenantEndpoint,
    /// Provides blanket access to all tenants on the pageserver plus pageserver-wide APIs.
    /// Should only be used e.g. for status check/tenant creation/list.
    PageServerApi,
    /// Provides blanket access to all data on the safekeeper plus safekeeper-wide APIs.
    /// Should only be used e.g. for status check.
    /// Currently also used for connection from any pageserver to any safekeeper.
    SafekeeperData,
    /// The scope used by pageservers in upcalls to storage controller and cloud control plane
    #[serde(rename = "generations_api")]
    GenerationsApi,
    /// Allows access to control plane management API and all storage controller endpoints.
    Admin,
    /// Allows access to control plane & storage controller endpoints used in infrastructure automation (e.g. node registration)
    Infra,
    /// Allows access to storage controller APIs used by the scrubber, to interrogate the state
    /// of a tenant & post scrub results.
    Scrubber,
    /// This scope is used for communication with other storage controller instances.
    /// At the time of writing, this is only used for the step down request.
    #[serde(rename = "controller_peer")]
    ControllerPeer,
}

/// Deserializes an optional UUID string, treating both a missing value and an
/// empty string as `None`; any other string must parse as a UUID.
fn deserialize_empty_string_as_none_uuid<'de, D>(deserializer: D) -> Result<Option<Uuid>, D::Error>
where
    D: Deserializer<'de>,
{
    let opt = Option::<String>::deserialize(deserializer)?;
    match opt.as_deref() {
        Some("") => Ok(None),
        Some(s) => Uuid::parse_str(s)
            .map(Some)
            .map_err(serde::de::Error::custom),
        None => Ok(None),
    }
}

/// JWT payload. See docs/authentication.md for the format
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Claims {
    // Optional tenant the token is scoped to; absent for blanket scopes.
    #[serde(default)]
    pub tenant_id: Option<TenantId>,
    #[serde(
        default,
        skip_serializing_if = "Option::is_none",
        // Neon control plane includes this field as empty in the claims.
        // Consider it None in those cases.
        deserialize_with = "deserialize_empty_string_as_none_uuid"
    )]
    pub endpoint_id: Option<Uuid>,
    pub scope: Scope,
}

impl Claims {
    /// Builds claims with no endpoint ID.
    pub fn new(tenant_id: Option<TenantId>, scope: Scope) -> Self {
        Self {
            tenant_id,
            scope,
            endpoint_id: None,
        }
    }
}

/// A [`JwtAuth`] whose key set can be replaced at runtime without blocking
/// concurrent `decode` calls (readers keep the `ArcSwap` snapshot they loaded).
pub struct SwappableJwtAuth(ArcSwap<JwtAuth>);

impl SwappableJwtAuth {
    pub fn new(jwt_auth: JwtAuth) -> Self {
        SwappableJwtAuth(ArcSwap::new(Arc::new(jwt_auth)))
    }
    /// Replaces the current key set; in-flight decodes keep the old one.
    pub fn swap(&self, jwt_auth: JwtAuth) {
        self.0.swap(Arc::new(jwt_auth));
    }
    /// Decodes with whatever key set is current at call time.
    pub fn decode<D: DeserializeOwned>(
        &self,
        token: &str,
    ) -> std::result::Result<TokenData<D>, AuthError> {
        self.0.load().decode(token)
    }
}

impl std::fmt::Debug for SwappableJwtAuth {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Swappable({:?})", self.0.load())
    }
}

/// Authentication failure, carried as a display string (borrowed for static
/// messages, owned for jsonwebtoken error text).
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct AuthError(pub Cow<'static, str>);

impl Display for AuthError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

/// JWT validator holding one or more Ed25519 public keys; a token is accepted
/// if any key verifies it.
pub struct JwtAuth {
    decoding_keys: Vec<DecodingKey>,
    validation: Validation,
}

impl JwtAuth {
    /// Builds a validator restricted to EdDSA, with no required claims.
    pub fn new(decoding_keys: Vec<DecodingKey>) -> Self {
        let mut validation = Validation::default();
        validation.algorithms = vec![STORAGE_TOKEN_ALGORITHM];
        // The default 'required_spec_claims' is 'exp'. But we don't want to require
        // expiration.
        validation.required_spec_claims = [].into();
        Self {
            decoding_keys,
            validation,
        }
    }

    /// Loads Ed25519 PEM public key(s) from `key_path`: every regular file in
    /// the directory (non-recursively) if it is a directory, or the file itself.
    ///
    /// Fails if the path is neither, if any key fails to parse, or if the
    /// resulting key set is empty (which would reject every gated request).
    pub fn from_key_path(key_path: &Utf8Path) -> Result<Self> {
        let metadata = key_path.metadata()?;
        let decoding_keys = if metadata.is_dir() {
            let mut keys = Vec::new();
            for entry in fs::read_dir(key_path)? {
                let path = entry?.path();
                if !path.is_file() {
                    // Ignore directories (don't recurse)
                    continue;
                }
                let public_key = fs::read(path)?;
                keys.push(DecodingKey::from_ed_pem(&public_key)?);
            }
            keys
        } else if metadata.is_file() {
            let public_key = fs::read(key_path)?;
            vec![DecodingKey::from_ed_pem(&public_key)?]
        } else {
            anyhow::bail!("path is neither a directory or a file")
        };
        if decoding_keys.is_empty() {
            anyhow::bail!(
                "Configured for JWT auth with zero decoding keys. All JWT gated requests would be rejected."
            );
        }
        Ok(Self::new(decoding_keys))
    }

    /// Builds a validator from a single Ed25519 PEM public key held in memory.
    pub fn from_key(key: String) -> Result<Self> {
        Ok(Self::new(vec![DecodingKey::from_ed_pem(key.as_bytes())?]))
    }

    /// Attempt to decode the token with the internal decoding keys.
    ///
    /// The function tries the stored decoding keys in succession,
    /// and returns the first yielding a successful result.
    /// If there is no working decoding key, it returns the last error.
    pub fn decode<D: DeserializeOwned>(
        &self,
        token: &str,
    ) -> std::result::Result<TokenData<D>, AuthError> {
        let mut res = None;
        for decoding_key in &self.decoding_keys {
            res = Some(decode(token, decoding_key, &self.validation));
            if let Some(Ok(res)) = res {
                return Ok(res);
            }
        }
        if let Some(res) = res {
            // All keys failed: surface the last error, stringified.
            res.map_err(|e| AuthError(Cow::Owned(e.to_string())))
        } else {
            // Loop never ran, i.e. `decoding_keys` was empty.
            Err(AuthError(Cow::Borrowed("no JWT decoding keys configured")))
        }
    }
}

impl std::fmt::Debug for JwtAuth {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Intentionally omits the decoding keys.
        f.debug_struct("JwtAuth")
            .field("validation", &self.validation)
            .finish()
    }
}

// This function is used only for testing purposes in CLI, e.g. to generate tokens during init.
/// Signs `claims` with the Ed25519 private key in `pem`, returning the encoded JWT.
pub fn encode_from_key_file<S: Serialize>(claims: &S, pem: &Pem) -> Result<String> {
    let key = EncodingKey::from_ed_der(pem.contents());
    Ok(encode(&Header::new(STORAGE_TOKEN_ALGORITHM), claims, &key)?)
}

#[cfg(test)]
mod tests {
    use std::str::FromStr;

    use super::*;

    // Generated with:
    //
    // openssl genpkey -algorithm ed25519 -out ed25519-priv.pem
    // openssl pkey -in ed25519-priv.pem -pubout -out ed25519-pub.pem
    const TEST_PUB_KEY_ED25519: &str = r#"
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEARYwaNBayR+eGI0iXB4s3QxE3Nl2g1iWbr6KtLWeVD/w=
-----END PUBLIC KEY-----
"#;

    const TEST_PRIV_KEY_ED25519: &str = r#"
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEID/Drmc1AA6U/znNRWpF3zEGegOATQxfkdWxitcOMsIH
-----END PRIVATE KEY-----
"#;

    #[test]
    fn test_decode() {
        let expected_claims = Claims {
            tenant_id: Some(TenantId::from_str("3d1f7595b468230304e0b73cecbcb081").unwrap()),
            scope: Scope::Tenant,
            endpoint_id: None,
        };

        // A test token containing the following payload, signed using TEST_PRIV_KEY_ED25519:
        //
        // ```
        // {
        //   "scope": "tenant",
        //   "tenant_id": "3d1f7595b468230304e0b73cecbcb081",
        //   "iss": "neon.controlplane",
        //   "iat": 1678442479
        // }
        // ```
        //
        let encoded_eddsa = "eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJzY29wZSI6InRlbmFudCIsInRlbmFudF9pZCI6IjNkMWY3NTk1YjQ2ODIzMDMwNGUwYjczY2VjYmNiMDgxIiwiaXNzIjoibmVvbi5jb250cm9scGxhbmUiLCJpYXQiOjE2Nzg0NDI0Nzl9.rNheBnluMJNgXzSTTJoTNIGy4P_qe0JUHl_nVEGuDCTgHOThPVr552EnmKccrCKquPeW3c2YUk0Y9Oh4KyASAw";

        // Check it can be validated with the public key
        let auth = JwtAuth::new(vec![
            DecodingKey::from_ed_pem(TEST_PUB_KEY_ED25519.as_bytes()).unwrap(),
        ]);
        let claims_from_token: Claims = auth.decode(encoded_eddsa).unwrap().claims;
        assert_eq!(claims_from_token, expected_claims);
    }

    #[test]
    fn test_encode() {
        let claims = Claims {
            tenant_id: Some(TenantId::from_str("3d1f7595b468230304e0b73cecbcb081").unwrap()),
            scope: Scope::Tenant,
            endpoint_id: None,
        };

        let pem = pem::parse(TEST_PRIV_KEY_ED25519).unwrap();
        let encoded = encode_from_key_file(&claims, &pem).unwrap();

        // decode it back
        let auth = JwtAuth::new(vec![
            DecodingKey::from_ed_pem(TEST_PUB_KEY_ED25519.as_bytes()).unwrap(),
        ]);
        let decoded: TokenData<Claims> = auth.decode(&encoded).unwrap();
        assert_eq!(decoded.claims, claims);
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/seqwait.rs
libs/utils/src/seqwait.rs
#![warn(missing_docs)]

use std::cmp::{Eq, Ordering};
use std::collections::BinaryHeap;
use std::mem;
use std::sync::Mutex;
use std::time::Duration;

use tokio::sync::watch::{self, channel};
use tokio::time::timeout;

/// An error happened while waiting for a number
#[derive(Debug, PartialEq, Eq, thiserror::Error)]
pub enum SeqWaitError {
    /// The wait timeout was reached
    #[error("seqwait timeout was reached")]
    Timeout,
    /// [`SeqWait::shutdown`] was called
    #[error("SeqWait::shutdown was called")]
    Shutdown,
}

/// Monotonically increasing value
///
/// It is handy to store some other fields under the same mutex in `SeqWait<S>`
/// (e.g. store prev_record_lsn). So we allow SeqWait to be parametrized with
/// any type that can expose counter. `V` is the type of exposed counter.
pub trait MonotonicCounter<V> {
    /// Bump counter value and check that it goes forward
    /// N.B.: new_val is an actual new value, not a difference.
    fn cnt_advance(&mut self, new_val: V);

    /// Get counter value
    fn cnt_value(&self) -> V;
}

/// Heap of waiters, lowest numbers pop first.
struct Waiters<V>
where
    V: Ord,
{
    heap: BinaryHeap<Waiter<V>>,
    /// Number of the first waiter in the heap, or None if there are no waiters.
    status_channel: watch::Sender<Option<V>>,
}

impl<V> Waiters<V>
where
    V: Ord + Copy,
{
    fn new() -> Self {
        Waiters {
            heap: BinaryHeap::new(),
            // Keep only the Sender; Receivers are created on demand via subscribe().
            status_channel: channel(None).0,
        }
    }

    /// `status_channel` contains the number of the first waiter in the heap.
    /// This function should be called whenever waiters heap changes.
    fn update_status(&self) {
        let first_waiter = self.heap.peek().map(|w| w.wake_num);
        // send_replace never fails; a send error (no receivers) is irrelevant here.
        let _ = self.status_channel.send_replace(first_waiter);
    }

    /// Add new waiter to the heap, return a channel that will be notified when the number arrives.
    fn add(&mut self, num: V) -> watch::Receiver<()> {
        let (tx, rx) = channel(());
        self.heap.push(Waiter {
            wake_num: num,
            wake_channel: tx,
        });
        self.update_status();
        rx
    }

    /// Pop all waiters <= num from the heap. Collect channels in a vector,
    /// so that caller can wake them up.
    fn pop_leq(&mut self, num: V) -> Vec<watch::Sender<()>> {
        let mut wake_these = Vec::new();
        // The heap is effectively a min-heap (see Ord for Waiter), so peek()
        // yields the smallest wake_num; stop at the first one above `num`.
        while let Some(n) = self.heap.peek() {
            if n.wake_num > num {
                break;
            }
            wake_these.push(self.heap.pop().unwrap().wake_channel);
        }
        if !wake_these.is_empty() {
            self.update_status();
        }
        wake_these
    }

    /// Used on shutdown to efficiently drop all waiters.
    fn take_all(&mut self) -> BinaryHeap<Waiter<V>> {
        let heap = mem::take(&mut self.heap);
        self.update_status();
        heap
    }
}

struct Waiter<T>
where
    T: Ord,
{
    wake_num: T,                     // wake me when this number arrives ...
    wake_channel: watch::Sender<()>, // ... by sending a message to this channel
}

// BinaryHeap is a max-heap, and we want a min-heap. Reverse the ordering here
// to get that.
impl<T: Ord> PartialOrd for Waiter<T> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl<T: Ord> Ord for Waiter<T> {
    fn cmp(&self, other: &Self) -> Ordering {
        // Note the swapped operands: this is the reversed (min-heap) order.
        other.wake_num.cmp(&self.wake_num)
    }
}

impl<T: Ord> PartialEq for Waiter<T> {
    fn eq(&self, other: &Self) -> bool {
        other.wake_num == self.wake_num
    }
}

impl<T: Ord> Eq for Waiter<T> {}

/// Internal components of a `SeqWait`
struct SeqWaitInt<S, V>
where
    S: MonotonicCounter<V>,
    V: Ord,
{
    waiters: Waiters<V>,
    current: S,
    shutdown: bool,
}

/// A tool for waiting on a sequence number
///
/// This provides a way to wait the arrival of a number.
/// As soon as the number arrives by another caller calling
/// [`advance`], then the waiter will be woken up.
///
/// This implementation takes a blocking Mutex on both [`wait_for`]
/// and [`advance`], meaning there may be unexpected executor blocking
/// due to thread scheduling unfairness. There are probably better
/// implementations, but we can probably live with this for now.
///
/// [`wait_for`]: SeqWait::wait_for
/// [`advance`]: SeqWait::advance
///
/// `S` means Storage, `V` is type of counter that this storage exposes.
///
pub struct SeqWait<S, V>
where
    S: MonotonicCounter<V>,
    V: Ord,
{
    internal: Mutex<SeqWaitInt<S, V>>,
}

impl<S, V> SeqWait<S, V>
where
    S: MonotonicCounter<V> + Copy,
    V: Ord + Copy,
{
    /// Create a new `SeqWait`, initialized to a particular number
    pub fn new(starting_num: S) -> Self {
        let internal = SeqWaitInt {
            waiters: Waiters::new(),
            current: starting_num,
            shutdown: false,
        };
        SeqWait {
            internal: Mutex::new(internal),
        }
    }

    /// Shut down a `SeqWait`, causing all waiters (present and
    /// future) to return an error.
    pub fn shutdown(&self) {
        let waiters = {
            // Prevent new waiters; wake all those that exist.
            // Wake everyone with an error.
            let mut internal = self.internal.lock().unwrap();

            // Block any future waiters from starting
            internal.shutdown = true;

            // Take all waiters to drop them later.
            internal.waiters.take_all()

            // Drop the lock as we exit this scope.
        };

        // When we drop the waiters list, each Receiver will
        // be woken with an error.
        // This drop doesn't need to be explicit; it's done
        // here to make it easier to read the code and understand
        // the order of events.
        drop(waiters);
    }

    /// Wait for a number to arrive
    ///
    /// This call won't complete until someone has called `advance`
    /// with a number greater than or equal to the one we're waiting for.
    ///
    /// This function is async cancellation-safe.
    pub async fn wait_for(&self, num: V) -> Result<(), SeqWaitError> {
        match self.queue_for_wait(num) {
            // Target already reached; no need to wait.
            Ok(None) => Ok(()),
            // `changed()` errs only when the Sender is dropped, i.e. shutdown.
            Ok(Some(mut rx)) => rx.changed().await.map_err(|_| SeqWaitError::Shutdown),
            Err(e) => Err(e),
        }
    }

    /// Wait for a number to arrive
    ///
    /// This call won't complete until someone has called `advance`
    /// with a number greater than or equal to the one we're waiting for.
    ///
    /// If that hasn't happened after the specified timeout duration,
    /// [`SeqWaitError::Timeout`] will be returned.
    ///
    /// This function is async cancellation-safe.
    pub async fn wait_for_timeout(
        &self,
        num: V,
        timeout_duration: Duration,
    ) -> Result<(), SeqWaitError> {
        match self.queue_for_wait(num) {
            Ok(None) => Ok(()),
            Ok(Some(mut rx)) => match timeout(timeout_duration, rx.changed()).await {
                Ok(Ok(())) => Ok(()),
                Ok(Err(_)) => Err(SeqWaitError::Shutdown),
                Err(_) => Err(SeqWaitError::Timeout),
            },
            Err(e) => Err(e),
        }
    }

    /// Check if [`Self::wait_for`] or [`Self::wait_for_timeout`] would wait if called with `num`.
    ///
    /// Returns `Ok(())` if `num` has already arrived, otherwise `Err(current_value)`.
    pub fn would_wait_for(&self, num: V) -> Result<(), V> {
        let internal = self.internal.lock().unwrap();
        let cnt = internal.current.cnt_value();
        drop(internal);

        if cnt >= num { Ok(()) } else { Err(cnt) }
    }

    /// Register and return a channel that will be notified when a number arrives,
    /// or None, if it has already arrived.
    fn queue_for_wait(&self, num: V) -> Result<Option<watch::Receiver<()>>, SeqWaitError> {
        let mut internal = self.internal.lock().unwrap();
        if internal.current.cnt_value() >= num {
            return Ok(None);
        }
        if internal.shutdown {
            return Err(SeqWaitError::Shutdown);
        }

        // Add waiter channel to the queue.
        let rx = internal.waiters.add(num);

        // Drop the lock as we exit this scope.
        Ok(Some(rx))
    }

    /// Announce a new number has arrived
    ///
    /// All waiters at this value or below will be woken.
    ///
    /// Returns the old number.
    pub fn advance(&self, num: V) -> V {
        let old_value;
        // Collect the channels under the lock, but send the wakeups only after
        // the lock is released.
        let wake_these = {
            let mut internal = self.internal.lock().unwrap();

            old_value = internal.current.cnt_value();
            if old_value >= num {
                return old_value;
            }
            internal.current.cnt_advance(num);

            // Pop all waiters <= num from the heap.
            internal.waiters.pop_leq(num)
        };

        for tx in wake_these {
            // This can fail if there are no receivers.
            // We don't care; discard the error.
            let _ = tx.send(());
        }
        old_value
    }

    /// Read the current value, without waiting.
    pub fn load(&self) -> S {
        self.internal.lock().unwrap().current
    }

    /// Get a Receiver for the current status.
    ///
    /// The current status is the number of the first waiter in the queue,
    /// or None if there are no waiters.
    ///
    /// This receiver will be notified whenever the status changes.
    /// It is useful for receiving notifications when the first waiter
    /// starts waiting for a number, or when there are no more waiters left.
    pub fn status_receiver(&self) -> watch::Receiver<Option<V>> {
        self.internal
            .lock()
            .unwrap()
            .waiters
            .status_channel
            .subscribe()
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use super::*;

    // Plain i32 serves as both the storage and the counter in tests.
    impl MonotonicCounter<i32> for i32 {
        fn cnt_advance(&mut self, val: i32) {
            assert!(*self <= val);
            *self = val;
        }
        fn cnt_value(&self) -> i32 {
            *self
        }
    }

    #[tokio::test]
    async fn seqwait() {
        let seq = Arc::new(SeqWait::new(0));
        let seq2 = Arc::clone(&seq);
        let seq3 = Arc::clone(&seq);
        let jh1 = tokio::task::spawn(async move {
            seq2.wait_for(42).await.expect("wait_for 42");
            let old = seq2.advance(100);
            assert_eq!(old, 99);
            seq2.wait_for_timeout(999, Duration::from_millis(100))
                .await
                .expect_err("no 999");
        });
        let jh2 = tokio::task::spawn(async move {
            seq3.wait_for(42).await.expect("wait_for 42");
            seq3.wait_for(0).await.expect("wait_for 0");
        });
        tokio::time::sleep(Duration::from_millis(200)).await;
        let old = seq.advance(99);
        assert_eq!(old, 0);
        seq.wait_for(100).await.expect("wait_for 100");

        // Calling advance with a smaller value is a no-op
        assert_eq!(seq.advance(98), 100);
        assert_eq!(seq.load(), 100);

        jh1.await.unwrap();
        jh2.await.unwrap();

        seq.shutdown();
    }

    #[tokio::test]
    async fn seqwait_timeout() {
        let seq = Arc::new(SeqWait::new(0));
        let seq2 = Arc::clone(&seq);
        let jh = tokio::task::spawn(async move {
            let timeout = Duration::from_millis(1);
            let res = seq2.wait_for_timeout(42, timeout).await;
            assert_eq!(res, Err(SeqWaitError::Timeout));
        });
        tokio::time::sleep(Duration::from_millis(200)).await;
        // This will attempt to wake, but nothing will happen
        // because the waiter already dropped its Receiver.
        let old = seq.advance(99);
        assert_eq!(old, 0);
        jh.await.unwrap();

        seq.shutdown();
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/guard_arc_swap.rs
libs/utils/src/guard_arc_swap.rs
//! A wrapper around `ArcSwap` that ensures there is only one writer at a time and writes //! don't block reads. use std::sync::Arc; use arc_swap::ArcSwap; use tokio::sync::TryLockError; pub struct GuardArcSwap<T> { inner: ArcSwap<T>, guard: tokio::sync::Mutex<()>, } pub struct Guard<'a, T> { _guard: tokio::sync::MutexGuard<'a, ()>, inner: &'a ArcSwap<T>, } impl<T> GuardArcSwap<T> { pub fn new(inner: T) -> Self { Self { inner: ArcSwap::new(Arc::new(inner)), guard: tokio::sync::Mutex::new(()), } } pub fn read(&self) -> Arc<T> { self.inner.load_full() } pub async fn write_guard(&self) -> Guard<'_, T> { Guard { _guard: self.guard.lock().await, inner: &self.inner, } } pub fn try_write_guard(&self) -> Result<Guard<'_, T>, TryLockError> { let guard = self.guard.try_lock()?; Ok(Guard { _guard: guard, inner: &self.inner, }) } } impl<T> Guard<'_, T> { pub fn read(&self) -> Arc<T> { self.inner.load_full() } pub fn write(&mut self, value: T) { self.inner.store(Arc::new(value)); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/lsn.rs
libs/utils/src/lsn.rs
#![warn(missing_docs)] use std::fmt; use std::ops::{Add, AddAssign}; use std::str::FromStr; use std::sync::atomic::{AtomicU64, Ordering}; use serde::de::Visitor; use serde::{Deserialize, Serialize}; use crate::seqwait::MonotonicCounter; /// Transaction log block size in bytes pub const XLOG_BLCKSZ: u32 = 8192; /// A Postgres LSN (Log Sequence Number), also known as an XLogRecPtr #[derive(Clone, Copy, Default, Eq, Ord, PartialEq, PartialOrd, Hash)] pub struct Lsn(pub u64); impl Serialize for Lsn { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { if serializer.is_human_readable() { serializer.collect_str(self) } else { self.0.serialize(serializer) } } } impl<'de> Deserialize<'de> for Lsn { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { struct LsnVisitor { is_human_readable_deserializer: bool, } impl Visitor<'_> for LsnVisitor { type Value = Lsn; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { if self.is_human_readable_deserializer { formatter.write_str( "value in form of hex string({upper_u32_hex}/{lower_u32_hex}) representing u64 integer", ) } else { formatter.write_str("value in form of integer(u64)") } } fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> where E: serde::de::Error, { Ok(Lsn(v)) } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error, { Lsn::from_str(v).map_err(|e| E::custom(e)) } } if deserializer.is_human_readable() { deserializer.deserialize_str(LsnVisitor { is_human_readable_deserializer: true, }) } else { deserializer.deserialize_u64(LsnVisitor { is_human_readable_deserializer: false, }) } } } /// Allows (de)serialization of an `Lsn` always as `u64`. 
/// /// ### Example /// /// ```rust /// # use serde::{Serialize, Deserialize}; /// use utils::lsn::Lsn; /// /// #[derive(PartialEq, Serialize, Deserialize, Debug)] /// struct Foo { /// #[serde(with = "utils::lsn::serde_as_u64")] /// always_u64: Lsn, /// } /// /// let orig = Foo { always_u64: Lsn(1234) }; /// /// let res = serde_json::to_string(&orig).unwrap(); /// assert_eq!(res, r#"{"always_u64":1234}"#); /// /// let foo = serde_json::from_str::<Foo>(&res).unwrap(); /// assert_eq!(foo, orig); /// ``` /// pub mod serde_as_u64 { use super::Lsn; /// Serializes the Lsn as u64 disregarding the human readability of the format. /// /// Meant to be used via `#[serde(with = "...")]` or `#[serde(serialize_with = "...")]`. pub fn serialize<S: serde::Serializer>(lsn: &Lsn, serializer: S) -> Result<S::Ok, S::Error> { use serde::Serialize; lsn.0.serialize(serializer) } /// Deserializes the Lsn as u64 disregarding the human readability of the format. /// /// Meant to be used via `#[serde(with = "...")]` or `#[serde(deserialize_with = "...")]`. pub fn deserialize<'de, D: serde::Deserializer<'de>>(deserializer: D) -> Result<Lsn, D::Error> { use serde::Deserialize; u64::deserialize(deserializer).map(Lsn) } } /// We tried to parse an LSN from a string, but failed #[derive(Debug, PartialEq, Eq, thiserror::Error)] #[error("LsnParseError")] pub struct LsnParseError; impl Lsn { /// Maximum possible value for an LSN pub const MAX: Lsn = Lsn(u64::MAX); /// Invalid value for InvalidXLogRecPtr, as defined in xlogdefs.h pub const INVALID: Lsn = Lsn(0); /// Subtract a number, returning None on overflow. pub fn checked_sub<T: Into<u64>>(self, other: T) -> Option<Lsn> { let other: u64 = other.into(); self.0.checked_sub(other).map(Lsn) } /// Subtract a number, saturating at numeric bounds instead of overflowing. pub fn saturating_sub<T: Into<u64>>(self, other: T) -> Lsn { Lsn(self.0.saturating_sub(other.into())) } /// Subtract a number, returning the difference as i128 to avoid overflow. 
pub fn widening_sub<T: Into<u64>>(self, other: T) -> i128 { let other: u64 = other.into(); i128::from(self.0) - i128::from(other) } /// Parse an LSN from a string in the form `0000000000000000` pub fn from_hex<S>(s: S) -> Result<Self, LsnParseError> where S: AsRef<str>, { let s: &str = s.as_ref(); let n = u64::from_str_radix(s, 16).or(Err(LsnParseError))?; Ok(Lsn(n)) } /// Compute the offset into a segment #[inline] pub fn segment_offset(self, seg_sz: usize) -> usize { (self.0 % seg_sz as u64) as usize } /// Compute LSN of the segment start. #[inline] pub fn segment_lsn(self, seg_sz: usize) -> Lsn { Lsn(self.0 - (self.0 % seg_sz as u64)) } /// Compute the segment number #[inline] pub fn segment_number(self, seg_sz: usize) -> u64 { self.0 / seg_sz as u64 } /// Compute the offset into a block #[inline] pub fn block_offset(self) -> u64 { const BLCKSZ: u64 = XLOG_BLCKSZ as u64; self.0 % BLCKSZ } /// Compute the block offset of the first byte of this Lsn within this /// segment #[inline] pub fn page_lsn(self) -> Lsn { Lsn(self.0 - self.block_offset()) } /// Compute the block offset of the first byte of this Lsn within this /// segment #[inline] pub fn page_offset_in_segment(self, seg_sz: usize) -> u64 { (self.0 - self.block_offset()) - self.segment_lsn(seg_sz).0 } /// Compute the bytes remaining in this block /// /// If the LSN is already at the block boundary, it will return `XLOG_BLCKSZ`. #[inline] pub fn remaining_in_block(self) -> u64 { const BLCKSZ: u64 = XLOG_BLCKSZ as u64; BLCKSZ - (self.0 % BLCKSZ) } /// Compute the bytes remaining to fill a chunk of some size /// /// If the LSN is already at the chunk boundary, it will return 0. pub fn calc_padding<T: Into<u64>>(self, sz: T) -> u64 { let sz: u64 = sz.into(); // By using wrapping_sub, we can subtract first and then mod second. // If it's done the other way around, then we would return a full // chunk size if we're already at the chunk boundary. // (Regular subtraction will panic on overflow in debug builds.) 
(sz.wrapping_sub(self.0)) % sz } /// Align LSN on 8-byte boundary (alignment of WAL records). pub fn align(&self) -> Lsn { Lsn((self.0 + 7) & !7) } /// Align LSN on 8-byte boundary (alignment of WAL records). pub fn is_aligned(&self) -> bool { *self == self.align() } /// Return if the LSN is valid /// mimics postgres XLogRecPtrIsInvalid macro pub fn is_valid(self) -> bool { self != Lsn::INVALID } } impl From<u64> for Lsn { fn from(n: u64) -> Self { Lsn(n) } } impl From<Lsn> for u64 { fn from(lsn: Lsn) -> u64 { lsn.0 } } impl FromStr for Lsn { type Err = LsnParseError; /// Parse an LSN from a string in the form `00000000/00000000` /// /// If the input string is missing the '/' character, then use `Lsn::from_hex` fn from_str(s: &str) -> Result<Self, Self::Err> { let mut splitter = s.trim().split('/'); if let (Some(left), Some(right), None) = (splitter.next(), splitter.next(), splitter.next()) { let left_num = u32::from_str_radix(left, 16).map_err(|_| LsnParseError)?; let right_num = u32::from_str_radix(right, 16).map_err(|_| LsnParseError)?; Ok(Lsn(((left_num as u64) << 32) | right_num as u64)) } else { Err(LsnParseError) } } } impl fmt::Display for Lsn { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:X}/{:X}", self.0 >> 32, self.0 & 0xffffffff) } } impl fmt::Debug for Lsn { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:X}/{:X}", self.0 >> 32, self.0 & 0xffffffff) } } impl Add<u64> for Lsn { type Output = Lsn; fn add(self, other: u64) -> Self::Output { // panic if the addition overflows. Lsn(self.0.checked_add(other).unwrap()) } } impl AddAssign<u64> for Lsn { fn add_assign(&mut self, other: u64) { // panic if the addition overflows. self.0 = self.0.checked_add(other).unwrap(); } } /// An [`Lsn`] that can be accessed atomically. pub struct AtomicLsn { inner: AtomicU64, } impl AtomicLsn { /// Creates a new atomic `Lsn`. 
pub fn new(val: u64) -> Self { AtomicLsn { inner: AtomicU64::new(val), } } /// Atomically retrieve the `Lsn` value from memory. pub fn load(&self) -> Lsn { Lsn(self.inner.load(Ordering::Acquire)) } /// Atomically store a new `Lsn` value to memory. pub fn store(&self, lsn: Lsn) { self.inner.store(lsn.0, Ordering::Release); } /// Adds to the current value, returning the previous value. /// /// This operation will panic on overflow. pub fn fetch_add(&self, val: u64) -> Lsn { let prev = self.inner.fetch_add(val, Ordering::AcqRel); assert!(prev.checked_add(val).is_some(), "AtomicLsn overflow"); Lsn(prev) } /// Atomically sets the Lsn to the max of old and new value, returning the old value. pub fn fetch_max(&self, lsn: Lsn) -> Lsn { let prev = self.inner.fetch_max(lsn.0, Ordering::AcqRel); Lsn(prev) } } impl From<Lsn> for AtomicLsn { fn from(lsn: Lsn) -> Self { Self::new(lsn.0) } } /// Pair of LSN's pointing to the end of the last valid record and previous one #[derive(Debug, Clone, Copy)] pub struct RecordLsn { /// LSN at the end of the current record pub last: Lsn, /// LSN at the end of the previous record pub prev: Lsn, } /// Expose `self.last` as counter to be able to use RecordLsn in SeqWait impl MonotonicCounter<Lsn> for RecordLsn { fn cnt_advance(&mut self, lsn: Lsn) { assert!(self.last <= lsn); let new_prev = self.last; self.last = lsn; self.prev = new_prev; } fn cnt_value(&self) -> Lsn { self.last } } /// Implements [`rand::distr::uniform::UniformSampler`] so we can sample [`Lsn`]s. /// /// This is used by the `pagebench` pageserver benchmarking tool. 
pub struct LsnSampler(<u64 as rand::distr::uniform::SampleUniform>::Sampler); impl rand::distr::uniform::SampleUniform for Lsn { type Sampler = LsnSampler; } impl rand::distr::uniform::UniformSampler for LsnSampler { type X = Lsn; fn new<B1, B2>(low: B1, high: B2) -> Result<Self, rand::distr::uniform::Error> where B1: rand::distr::uniform::SampleBorrow<Self::X> + Sized, B2: rand::distr::uniform::SampleBorrow<Self::X> + Sized, { <u64 as rand::distr::uniform::SampleUniform>::Sampler::new(low.borrow().0, high.borrow().0) .map(Self) } fn new_inclusive<B1, B2>(low: B1, high: B2) -> Result<Self, rand::distr::uniform::Error> where B1: rand::distr::uniform::SampleBorrow<Self::X> + Sized, B2: rand::distr::uniform::SampleBorrow<Self::X> + Sized, { <u64 as rand::distr::uniform::SampleUniform>::Sampler::new_inclusive( low.borrow().0, high.borrow().0, ) .map(Self) } fn sample<R: rand::prelude::Rng + ?Sized>(&self, rng: &mut R) -> Self::X { Lsn(self.0.sample(rng)) } } #[cfg(test)] mod tests { use serde_assert::{Deserializer, Serializer, Token, Tokens}; use super::*; use crate::bin_ser::BeSer; #[test] fn test_lsn_strings() { assert_eq!("12345678/AAAA5555".parse(), Ok(Lsn(0x12345678AAAA5555))); assert_eq!("aaaa/bbbb".parse(), Ok(Lsn(0x0000AAAA0000BBBB))); assert_eq!("1/A".parse(), Ok(Lsn(0x000000010000000A))); assert_eq!("0/0".parse(), Ok(Lsn(0))); "ABCDEFG/12345678".parse::<Lsn>().unwrap_err(); "123456789/AAAA5555".parse::<Lsn>().unwrap_err(); "12345678/AAAA55550".parse::<Lsn>().unwrap_err(); "-1/0".parse::<Lsn>().unwrap_err(); "1/-1".parse::<Lsn>().unwrap_err(); assert_eq!(format!("{}", Lsn(0x12345678AAAA5555)), "12345678/AAAA5555"); assert_eq!(format!("{}", Lsn(0x000000010000000A)), "1/A"); assert_eq!( Lsn::from_hex("12345678AAAA5555"), Ok(Lsn(0x12345678AAAA5555)) ); assert_eq!(Lsn::from_hex("0"), Ok(Lsn(0))); assert_eq!(Lsn::from_hex("F12345678AAAA5555"), Err(LsnParseError)); let expected_lsn = Lsn(0x3C490F8); assert_eq!(" 0/3C490F8".parse(), Ok(expected_lsn)); 
assert_eq!("0/3C490F8 ".parse(), Ok(expected_lsn)); assert_eq!(" 0/3C490F8 ".parse(), Ok(expected_lsn)); } #[test] fn test_lsn_math() { assert_eq!(Lsn(1234) + 11u64, Lsn(1245)); assert_eq!( { let mut lsn = Lsn(1234); lsn += 11u64; lsn }, Lsn(1245) ); assert_eq!(Lsn(1234).checked_sub(1233u64), Some(Lsn(1))); assert_eq!(Lsn(1234).checked_sub(1235u64), None); assert_eq!(Lsn(1235).widening_sub(1234u64), 1); assert_eq!(Lsn(1234).widening_sub(1235u64), -1); assert_eq!(Lsn(u64::MAX).widening_sub(0u64), i128::from(u64::MAX)); assert_eq!(Lsn(0).widening_sub(u64::MAX), -i128::from(u64::MAX)); let seg_sz: usize = 16 * 1024 * 1024; assert_eq!(Lsn(0x1000007).segment_offset(seg_sz), 7); assert_eq!(Lsn(0x1000007).segment_number(seg_sz), 1u64); assert_eq!(Lsn(0x4007).block_offset(), 7u64); assert_eq!(Lsn(0x4000).block_offset(), 0u64); assert_eq!(Lsn(0x4007).remaining_in_block(), 8185u64); assert_eq!(Lsn(0x4000).remaining_in_block(), 8192u64); assert_eq!(Lsn(0xffff01).calc_padding(seg_sz as u64), 255u64); assert_eq!(Lsn(0x2000000).calc_padding(seg_sz as u64), 0u64); assert_eq!(Lsn(0xffff01).calc_padding(8u32), 7u64); assert_eq!(Lsn(0xffff00).calc_padding(8u32), 0u64); } #[test] fn test_atomic_lsn() { let lsn = AtomicLsn::new(0); assert_eq!(lsn.fetch_add(1234), Lsn(0)); assert_eq!(lsn.load(), Lsn(1234)); lsn.store(Lsn(5678)); assert_eq!(lsn.load(), Lsn(5678)); assert_eq!(lsn.fetch_max(Lsn(6000)), Lsn(5678)); assert_eq!(lsn.fetch_max(Lsn(5000)), Lsn(6000)); } #[test] fn test_lsn_serde() { let original_lsn = Lsn(0x0123456789abcdef); let expected_readable_tokens = Tokens(vec![Token::U64(0x0123456789abcdef)]); let expected_non_readable_tokens = Tokens(vec![Token::Str(String::from("1234567/89ABCDEF"))]); // Testing human_readable ser/de let serializer = Serializer::builder().is_human_readable(false).build(); let readable_ser_tokens = original_lsn.serialize(&serializer).unwrap(); assert_eq!(readable_ser_tokens, expected_readable_tokens); let mut deserializer = Deserializer::builder() 
.is_human_readable(false) .tokens(readable_ser_tokens) .build(); let des_lsn = Lsn::deserialize(&mut deserializer).unwrap(); assert_eq!(des_lsn, original_lsn); // Testing NON human_readable ser/de let serializer = Serializer::builder().is_human_readable(true).build(); let non_readable_ser_tokens = original_lsn.serialize(&serializer).unwrap(); assert_eq!(non_readable_ser_tokens, expected_non_readable_tokens); let mut deserializer = Deserializer::builder() .is_human_readable(true) .tokens(non_readable_ser_tokens) .build(); let des_lsn = Lsn::deserialize(&mut deserializer).unwrap(); assert_eq!(des_lsn, original_lsn); // Testing mismatching ser/de let serializer = Serializer::builder().is_human_readable(false).build(); let non_readable_ser_tokens = original_lsn.serialize(&serializer).unwrap(); let mut deserializer = Deserializer::builder() .is_human_readable(true) .tokens(non_readable_ser_tokens) .build(); Lsn::deserialize(&mut deserializer).unwrap_err(); let serializer = Serializer::builder().is_human_readable(true).build(); let readable_ser_tokens = original_lsn.serialize(&serializer).unwrap(); let mut deserializer = Deserializer::builder() .is_human_readable(false) .tokens(readable_ser_tokens) .build(); Lsn::deserialize(&mut deserializer).unwrap_err(); } #[test] fn test_lsn_ensure_roundtrip() { let original_lsn = Lsn(0xaaaabbbb); let serializer = Serializer::builder().is_human_readable(false).build(); let ser_tokens = original_lsn.serialize(&serializer).unwrap(); let mut deserializer = Deserializer::builder() .is_human_readable(false) .tokens(ser_tokens) .build(); let des_lsn = Lsn::deserialize(&mut deserializer).unwrap(); assert_eq!(des_lsn, original_lsn); } #[test] fn test_lsn_bincode_serde() { let lsn = Lsn(0x0123456789abcdef); let expected_bytes = [0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]; let ser_bytes = lsn.ser().unwrap(); assert_eq!(ser_bytes, expected_bytes); let des_lsn = Lsn::des(&ser_bytes).unwrap(); assert_eq!(des_lsn, lsn); } #[test] fn 
test_lsn_bincode_ensure_roundtrip() { let original_lsn = Lsn(0x01_02_03_04_05_06_07_08); let expected_bytes = vec![0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]; let ser_bytes = original_lsn.ser().unwrap(); assert_eq!(ser_bytes, expected_bytes); let des_lsn = Lsn::des(&ser_bytes).unwrap(); assert_eq!(des_lsn, original_lsn); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/vec_map.rs
libs/utils/src/vec_map.rs
use std::alloc::Layout; use std::cmp::Ordering; use std::ops::RangeBounds; #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum VecMapOrdering { Greater, GreaterOrEqual, } /// Ordered map datastructure implemented in a Vec. /// /// Append only - can only add keys that are larger than the /// current max key. /// Ordering can be adjusted using [`VecMapOrdering`] /// during `VecMap` construction. #[derive(Clone, Debug)] pub struct VecMap<K, V> { data: Vec<(K, V)>, ordering: VecMapOrdering, } impl<K, V> Default for VecMap<K, V> { fn default() -> Self { VecMap { data: Default::default(), ordering: VecMapOrdering::Greater, } } } #[derive(thiserror::Error, Debug)] pub enum VecMapError { #[error("Key violates ordering constraint")] InvalidKey, #[error("Mismatched ordering constraints")] ExtendOrderingError, } impl<K: Ord, V> VecMap<K, V> { pub fn new(ordering: VecMapOrdering) -> Self { Self { data: Vec::new(), ordering, } } pub fn with_capacity(capacity: usize, ordering: VecMapOrdering) -> Self { Self { data: Vec::with_capacity(capacity), ordering, } } pub fn is_empty(&self) -> bool { self.data.is_empty() } pub fn as_slice(&self) -> &[(K, V)] { self.data.as_slice() } /// This function may panic if given a range where the lower bound is /// greater than the upper bound. pub fn slice_range<R: RangeBounds<K>>(&self, range: R) -> &[(K, V)] { use std::ops::Bound::*; let binary_search = |k: &K| self.data.binary_search_by_key(&k, extract_key); let start_idx = match range.start_bound() { Unbounded => 0, Included(k) => binary_search(k).unwrap_or_else(std::convert::identity), Excluded(k) => match binary_search(k) { Ok(idx) => idx + 1, Err(idx) => idx, }, }; let end_idx = match range.end_bound() { Unbounded => self.data.len(), Included(k) => match binary_search(k) { Ok(idx) => idx + 1, Err(idx) => idx, }, Excluded(k) => binary_search(k).unwrap_or_else(std::convert::identity), }; &self.data[start_idx..end_idx] } /// Add a key value pair to the map. 
/// If `key` is not respective of the `self` ordering the /// pair will not be added and `InvalidKey` error will be returned. pub fn append(&mut self, key: K, value: V) -> Result<usize, VecMapError> { self.validate_key_order(&key)?; let delta_size = self.instrument_vec_op(|vec| vec.push((key, value))); Ok(delta_size) } /// Update the maximum key value pair or add a new key value pair to the map. /// If `key` is not respective of the `self` ordering no updates or additions /// will occur and `InvalidKey` error will be returned. pub fn append_or_update_last( &mut self, key: K, mut value: V, ) -> Result<(Option<V>, usize), VecMapError> { if let Some((last_key, last_value)) = self.data.last_mut() { match key.cmp(last_key) { Ordering::Less => return Err(VecMapError::InvalidKey), Ordering::Equal => { std::mem::swap(last_value, &mut value); const DELTA_SIZE: usize = 0; return Ok((Some(value), DELTA_SIZE)); } Ordering::Greater => {} } } let delta_size = self.instrument_vec_op(|vec| vec.push((key, value))); Ok((None, delta_size)) } /// Move items from `other` to the end of `self`, leaving `other` empty. /// If the `other` ordering is different from `self` ordering /// `ExtendOrderingError` error will be returned. /// If any keys in `other` is not respective of the ordering defined in /// `self`, `InvalidKey` error will be returned and no mutation will occur. pub fn extend(&mut self, other: &mut Self) -> Result<usize, VecMapError> { if self.ordering != other.ordering { return Err(VecMapError::ExtendOrderingError); } let other_first_opt = other.data.last().map(extract_key); if let Some(other_first) = other_first_opt { self.validate_key_order(other_first)?; } let delta_size = self.instrument_vec_op(|vec| vec.append(&mut other.data)); Ok(delta_size) } /// Validate the current last key in `self` and key being /// inserted against the order defined in `self`. 
fn validate_key_order(&self, key: &K) -> Result<(), VecMapError> { if let Some(last_key) = self.data.last().map(extract_key) { match (&self.ordering, &key.cmp(last_key)) { (VecMapOrdering::Greater, Ordering::Less | Ordering::Equal) => { return Err(VecMapError::InvalidKey); } (VecMapOrdering::Greater, Ordering::Greater) => {} (VecMapOrdering::GreaterOrEqual, Ordering::Less) => { return Err(VecMapError::InvalidKey); } (VecMapOrdering::GreaterOrEqual, Ordering::Equal | Ordering::Greater) => {} } } Ok(()) } /// Instrument an operation on the underlying [`Vec`]. /// Will panic if the operation decreases capacity. /// Returns the increase in memory usage caused by the op. fn instrument_vec_op(&mut self, op: impl FnOnce(&mut Vec<(K, V)>)) -> usize { let old_cap = self.data.capacity(); op(&mut self.data); let new_cap = self.data.capacity(); match old_cap.cmp(&new_cap) { Ordering::Less => { let old_size = Layout::array::<(K, V)>(old_cap).unwrap().size(); let new_size = Layout::array::<(K, V)>(new_cap).unwrap().size(); new_size - old_size } Ordering::Equal => 0, Ordering::Greater => panic!("VecMap capacity shouldn't ever decrease"), } } /// Similar to `from_iter` defined in `FromIter` trait except /// that it accepts an [`VecMapOrdering`] pub fn from_iter<I: IntoIterator<Item = (K, V)>>(iter: I, ordering: VecMapOrdering) -> Self { let iter = iter.into_iter(); let initial_capacity = { match iter.size_hint() { (lower_bound, None) => lower_bound, (_, Some(upper_bound)) => upper_bound, } }; let mut vec_map = VecMap::with_capacity(initial_capacity, ordering); for (key, value) in iter { vec_map .append(key, value) .expect("The passed collection needs to be sorted!"); } vec_map } } impl<K: Ord, V> IntoIterator for VecMap<K, V> { type Item = (K, V); type IntoIter = std::vec::IntoIter<(K, V)>; fn into_iter(self) -> Self::IntoIter { self.data.into_iter() } } fn extract_key<K, V>(entry: &(K, V)) -> &K { &entry.0 } #[cfg(test)] mod tests { use std::collections::BTreeMap; use 
std::ops::Bound; use super::{VecMap, VecMapOrdering}; #[test] fn unbounded_range() { let mut vec = VecMap::default(); vec.append(0, ()).unwrap(); assert_eq!(vec.slice_range(0..0), &[]); } #[test] #[should_panic] fn invalid_ordering_range() { let mut vec = VecMap::default(); vec.append(0, ()).unwrap(); #[allow(clippy::reversed_empty_ranges)] vec.slice_range(1..0); } #[test] fn range_tests() { let mut vec = VecMap::default(); vec.append(0, ()).unwrap(); vec.append(2, ()).unwrap(); vec.append(4, ()).unwrap(); assert_eq!(vec.slice_range(0..0), &[]); assert_eq!(vec.slice_range(0..1), &[(0, ())]); assert_eq!(vec.slice_range(0..2), &[(0, ())]); assert_eq!(vec.slice_range(0..3), &[(0, ()), (2, ())]); assert_eq!(vec.slice_range(..0), &[]); assert_eq!(vec.slice_range(..1), &[(0, ())]); assert_eq!(vec.slice_range(..3), &[(0, ()), (2, ())]); assert_eq!(vec.slice_range(..3), &[(0, ()), (2, ())]); assert_eq!(vec.slice_range(0..=0), &[(0, ())]); assert_eq!(vec.slice_range(0..=1), &[(0, ())]); assert_eq!(vec.slice_range(0..=2), &[(0, ()), (2, ())]); assert_eq!(vec.slice_range(0..=3), &[(0, ()), (2, ())]); assert_eq!(vec.slice_range(..=0), &[(0, ())]); assert_eq!(vec.slice_range(..=1), &[(0, ())]); assert_eq!(vec.slice_range(..=2), &[(0, ()), (2, ())]); assert_eq!(vec.slice_range(..=3), &[(0, ()), (2, ())]); } struct BoundIter { min: i32, max: i32, next: Option<Bound<i32>>, } impl BoundIter { fn new(min: i32, max: i32) -> Self { Self { min, max, next: Some(Bound::Unbounded), } } } impl Iterator for BoundIter { type Item = Bound<i32>; fn next(&mut self) -> Option<Self::Item> { let cur = self.next?; self.next = match &cur { Bound::Unbounded => Some(Bound::Included(self.min)), Bound::Included(x) => { if *x >= self.max { Some(Bound::Excluded(self.min)) } else { Some(Bound::Included(x + 1)) } } Bound::Excluded(x) => { if *x >= self.max { None } else { Some(Bound::Excluded(x + 1)) } } }; Some(cur) } } #[test] fn range_exhaustive() { let map: BTreeMap<i32, ()> = (1..=7).step_by(2).map(|x| 
(x, ())).collect(); let mut vec = VecMap::default(); for &key in map.keys() { vec.append(key, ()).unwrap(); } const RANGE_MIN: i32 = 0; const RANGE_MAX: i32 = 8; for lower_bound in BoundIter::new(RANGE_MIN, RANGE_MAX) { let ub_min = match lower_bound { Bound::Unbounded => RANGE_MIN, Bound::Included(x) => x, Bound::Excluded(x) => x + 1, }; for upper_bound in BoundIter::new(ub_min, RANGE_MAX) { let map_range: Vec<(i32, ())> = map .range((lower_bound, upper_bound)) .map(|(&x, _)| (x, ())) .collect(); let vec_slice = vec.slice_range((lower_bound, upper_bound)); assert_eq!(map_range, vec_slice); } } } #[test] fn extend() { let mut left = VecMap::default(); left.append(0, ()).unwrap(); assert_eq!(left.as_slice(), &[(0, ())]); let mut empty = VecMap::default(); left.extend(&mut empty).unwrap(); assert_eq!(left.as_slice(), &[(0, ())]); assert_eq!(empty.as_slice(), &[]); let mut right = VecMap::default(); right.append(1, ()).unwrap(); left.extend(&mut right).unwrap(); assert_eq!(left.as_slice(), &[(0, ()), (1, ())]); assert_eq!(right.as_slice(), &[]); let mut zero_map = VecMap::default(); zero_map.append(0, ()).unwrap(); left.extend(&mut zero_map).unwrap_err(); assert_eq!(left.as_slice(), &[(0, ()), (1, ())]); assert_eq!(zero_map.as_slice(), &[(0, ())]); let mut one_map = VecMap::default(); one_map.append(1, ()).unwrap(); left.extend(&mut one_map).unwrap_err(); assert_eq!(left.as_slice(), &[(0, ()), (1, ())]); assert_eq!(one_map.as_slice(), &[(1, ())]); let mut map_greater_or_equal = VecMap::new(VecMapOrdering::GreaterOrEqual); map_greater_or_equal.append(2, ()).unwrap(); map_greater_or_equal.append(2, ()).unwrap(); left.extend(&mut map_greater_or_equal).unwrap_err(); assert_eq!(left.as_slice(), &[(0, ()), (1, ())]); assert_eq!(map_greater_or_equal.as_slice(), &[(2, ()), (2, ())]); } #[test] fn extend_with_ordering() { let mut left = VecMap::new(VecMapOrdering::GreaterOrEqual); left.append(0, ()).unwrap(); assert_eq!(left.as_slice(), &[(0, ())]); let mut greater_right = 
VecMap::new(VecMapOrdering::Greater); greater_right.append(0, ()).unwrap(); left.extend(&mut greater_right).unwrap_err(); assert_eq!(left.as_slice(), &[(0, ())]); let mut greater_or_equal_right = VecMap::new(VecMapOrdering::GreaterOrEqual); greater_or_equal_right.append(2, ()).unwrap(); greater_or_equal_right.append(2, ()).unwrap(); left.extend(&mut greater_or_equal_right).unwrap(); assert_eq!(left.as_slice(), &[(0, ()), (2, ()), (2, ())]); } #[test] fn vec_map_from_sorted() { let vec = vec![(1, ()), (2, ()), (3, ()), (6, ())]; let vec_map = VecMap::from_iter(vec, VecMapOrdering::Greater); assert_eq!(vec_map.as_slice(), &[(1, ()), (2, ()), (3, ()), (6, ())]); let vec = vec![(1, ()), (2, ()), (3, ()), (3, ()), (6, ()), (6, ())]; let vec_map = VecMap::from_iter(vec, VecMapOrdering::GreaterOrEqual); assert_eq!( vec_map.as_slice(), &[(1, ()), (2, ()), (3, ()), (3, ()), (6, ()), (6, ())] ); } #[test] #[should_panic] fn vec_map_from_unsorted_greater() { let vec = vec![(1, ()), (2, ()), (2, ()), (3, ()), (6, ())]; let _ = VecMap::from_iter(vec, VecMapOrdering::Greater); } #[test] #[should_panic] fn vec_map_from_unsorted_greater_or_equal() { let vec = vec![(1, ()), (2, ()), (3, ()), (6, ()), (5, ())]; let _ = VecMap::from_iter(vec, VecMapOrdering::GreaterOrEqual); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/pageserver_feedback.rs
libs/utils/src/pageserver_feedback.rs
use std::time::{Duration, SystemTime}; use bytes::{Buf, BufMut, Bytes, BytesMut}; use pq_proto::{PG_EPOCH, read_cstr}; use serde::{Deserialize, Serialize}; use tracing::{trace, warn}; use crate::lsn::Lsn; /// Feedback pageserver sends to safekeeper and safekeeper resends to compute. /// /// Serialized in custom flexible key/value format. In replication protocol, it /// is marked with NEON_STATUS_UPDATE_TAG_BYTE to differentiate from postgres /// Standby status update / Hot standby feedback messages. /// /// serde Serialize is used only for human readable dump to json (e.g. in /// safekeepers debug_dump). #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] pub struct PageserverFeedback { /// Last known size of the timeline. Used to enforce timeline size limit. pub current_timeline_size: u64, /// LSN last received and ingested by the pageserver. Controls backpressure. pub last_received_lsn: Lsn, /// LSN up to which data is persisted by the pageserver to its local disc. /// Controls backpressure. pub disk_consistent_lsn: Lsn, /// LSN up to which data is persisted by the pageserver on s3; safekeepers /// consider WAL before it can be removed. pub remote_consistent_lsn: Lsn, // Serialize with RFC3339 format. #[serde(with = "serde_systemtime")] pub replytime: SystemTime, /// Used to track feedbacks from different shards. Always zero for unsharded tenants. pub shard_number: u32, /// If true, the pageserver has detected corruption and the safekeeper and postgres /// should stop sending WAL. pub corruption_detected: bool, } impl PageserverFeedback { pub fn empty() -> PageserverFeedback { PageserverFeedback { current_timeline_size: 0, last_received_lsn: Lsn::INVALID, remote_consistent_lsn: Lsn::INVALID, disk_consistent_lsn: Lsn::INVALID, replytime: *PG_EPOCH, shard_number: 0, corruption_detected: false, } } // Serialize PageserverFeedback using custom format // to support protocol extensibility. 
// // Following layout is used: // char - number of key-value pairs that follow. // // key-value pairs: // null-terminated string - key, // uint32 - value length in bytes // value itself // // TODO: change serialized fields names once all computes migrate to rename. pub fn serialize(&self, buf: &mut BytesMut) { let buf_ptr = buf.len(); buf.put_u8(0); // # of keys, will be filled later let mut nkeys = 0; nkeys += 1; buf.put_slice(b"current_timeline_size\0"); buf.put_i32(8); buf.put_u64(self.current_timeline_size); nkeys += 1; buf.put_slice(b"ps_writelsn\0"); buf.put_i32(8); buf.put_u64(self.last_received_lsn.0); nkeys += 1; buf.put_slice(b"ps_flushlsn\0"); buf.put_i32(8); buf.put_u64(self.disk_consistent_lsn.0); nkeys += 1; buf.put_slice(b"ps_applylsn\0"); buf.put_i32(8); buf.put_u64(self.remote_consistent_lsn.0); let timestamp = self .replytime .duration_since(*PG_EPOCH) .expect("failed to serialize pg_replytime earlier than PG_EPOCH") .as_micros() as i64; nkeys += 1; buf.put_slice(b"ps_replytime\0"); buf.put_i32(8); buf.put_i64(timestamp); if self.shard_number > 0 { nkeys += 1; buf.put_slice(b"shard_number\0"); buf.put_i32(4); buf.put_u32(self.shard_number); } if self.corruption_detected { nkeys += 1; buf.put_slice(b"corruption_detected\0"); buf.put_i32(1); buf.put_u8(1); } buf[buf_ptr] = nkeys; } // Deserialize PageserverFeedback message // TODO: change serialized fields names once all computes migrate to rename. 
pub fn parse(mut buf: Bytes) -> PageserverFeedback { let mut rf = PageserverFeedback::empty(); let nfields = buf.get_u8(); for _ in 0..nfields { let key = read_cstr(&mut buf).unwrap(); match key.as_ref() { b"current_timeline_size" => { let len = buf.get_i32(); assert_eq!(len, 8); rf.current_timeline_size = buf.get_u64(); } b"ps_writelsn" => { let len = buf.get_i32(); assert_eq!(len, 8); rf.last_received_lsn = Lsn(buf.get_u64()); } b"ps_flushlsn" => { let len = buf.get_i32(); assert_eq!(len, 8); rf.disk_consistent_lsn = Lsn(buf.get_u64()); } b"ps_applylsn" => { let len = buf.get_i32(); assert_eq!(len, 8); rf.remote_consistent_lsn = Lsn(buf.get_u64()); } b"ps_replytime" => { let len = buf.get_i32(); assert_eq!(len, 8); let raw_time = buf.get_i64(); if raw_time > 0 { rf.replytime = *PG_EPOCH + Duration::from_micros(raw_time as u64); } else { rf.replytime = *PG_EPOCH - Duration::from_micros(-raw_time as u64); } } b"shard_number" => { let len = buf.get_i32(); assert_eq!(len, 4); rf.shard_number = buf.get_u32(); } b"corruption_detected" => { let len = buf.get_i32(); assert_eq!(len, 1); rf.corruption_detected = buf.get_u8() != 0; } _ => { let len = buf.get_i32(); warn!( "PageserverFeedback parse. unknown key {} of len {len}. Skip it.", String::from_utf8_lossy(key.as_ref()) ); buf.advance(len as usize); } } } trace!("PageserverFeedback parsed is {:?}", rf); rf } } mod serde_systemtime { use std::time::SystemTime; use chrono::{DateTime, Utc}; use serde::{Deserialize, Deserializer, Serializer}; pub fn serialize<S>(ts: &SystemTime, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let chrono_dt: DateTime<Utc> = (*ts).into(); serializer.serialize_str(&chrono_dt.to_rfc3339()) } pub fn deserialize<'de, D>(deserializer: D) -> Result<SystemTime, D::Error> where D: Deserializer<'de>, { let time: String = Deserialize::deserialize(deserializer)?; Ok(DateTime::parse_from_rfc3339(&time) .map_err(serde::de::Error::custom)? 
.into()) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_replication_feedback_serialization() { let mut rf = PageserverFeedback::empty(); // Fill rf with some values rf.current_timeline_size = 12345678; // Set rounded time to be able to compare it with deserialized value, // because it is rounded up to microseconds during serialization. rf.replytime = *PG_EPOCH + Duration::from_secs(100_000_000); let mut data = BytesMut::new(); rf.serialize(&mut data); let rf_parsed = PageserverFeedback::parse(data.freeze()); assert_eq!(rf, rf_parsed); } // Test that databricks-specific fields added to the PageserverFeedback message are serialized // and deserialized correctly, in addition to the existing fields from upstream. #[test] fn test_replication_feedback_databricks_fields() { let mut rf = PageserverFeedback::empty(); rf.current_timeline_size = 12345678; rf.last_received_lsn = Lsn(23456789); rf.disk_consistent_lsn = Lsn(34567890); rf.remote_consistent_lsn = Lsn(45678901); rf.replytime = *PG_EPOCH + Duration::from_secs(100_000_000); rf.shard_number = 1; rf.corruption_detected = true; let mut data = BytesMut::new(); rf.serialize(&mut data); let rf_parsed = PageserverFeedback::parse(data.freeze()); assert_eq!(rf, rf_parsed); } #[test] fn test_replication_feedback_unknown_key() { let mut rf = PageserverFeedback::empty(); // Fill rf with some values rf.current_timeline_size = 12345678; // Set rounded time to be able to compare it with deserialized value, // because it is rounded up to microseconds during serialization. rf.replytime = *PG_EPOCH + Duration::from_secs(100_000_000); let mut data = BytesMut::new(); rf.serialize(&mut data); // Add an extra field to the buffer and adjust number of keys data[0] += 1; data.put_slice(b"new_field_one\0"); data.put_i32(8); data.put_u64(42); // Parse serialized data and check that new field is not parsed let rf_parsed = PageserverFeedback::parse(data.freeze()); assert_eq!(rf, rf_parsed); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/bin_ser.rs
libs/utils/src/bin_ser.rs
//! Utilities for binary serialization/deserialization. //! //! The [`BeSer`] trait allows us to define data structures //! that can match data structures that are sent over the wire //! in big-endian form with no packing. //! //! The [`LeSer`] trait does the same thing, in little-endian form. //! //! Note: you will get a compile error if you try to `use` both traits //! in the same module or scope. This is intended to be a safety //! mechanism: mixing big-endian and little-endian encoding in the same file //! is error-prone. #![warn(missing_docs)] use std::io::{self, Read, Write}; use bincode::Options; use serde::Serialize; use serde::de::DeserializeOwned; use thiserror::Error; /// An error that occurred during a deserialize operation /// /// This could happen because the input data was too short, /// or because an invalid value was encountered. #[derive(Debug, Error)] pub enum DeserializeError { /// The deserializer isn't able to deserialize the supplied data. #[error("deserialize error")] BadInput, /// While deserializing from a `Read` source, an `io::Error` occurred. #[error("deserialize error: {0}")] Io(io::Error), } impl From<bincode::Error> for DeserializeError { fn from(e: bincode::Error) -> Self { match *e { bincode::ErrorKind::Io(io_err) => DeserializeError::Io(io_err), _ => DeserializeError::BadInput, } } } /// An error that occurred during a serialize operation /// /// This probably means our [`Write`] failed, e.g. we tried /// to write beyond the end of a buffer. #[derive(Debug, Error)] pub enum SerializeError { /// The serializer isn't able to serialize the supplied data. #[error("serialize error")] BadInput, /// While serializing into a `Write` sink, an `io::Error` occurred. 
#[error("serialize error: {0}")] Io(io::Error), } impl From<bincode::Error> for SerializeError { fn from(e: bincode::Error) -> Self { match *e { bincode::ErrorKind::Io(io_err) => SerializeError::Io(io_err), _ => SerializeError::BadInput, } } } /// A shortcut that configures big-endian binary serialization /// /// Properties: /// - Big endian /// - Fixed integer encoding (i.e. 1u32 is 00000001 not 01) /// /// Does not allow trailing bytes in deserialization. If this is desired, you /// may set [`Options::allow_trailing_bytes`] to explicitly accommodate this. pub fn be_coder() -> impl Options { bincode::DefaultOptions::new() .with_big_endian() .with_fixint_encoding() } /// A shortcut that configures little-ending binary serialization /// /// Properties: /// - Little endian /// - Fixed integer encoding (i.e. 1u32 is 00000001 not 01) /// /// Does not allow trailing bytes in deserialization. If this is desired, you /// may set [`Options::allow_trailing_bytes`] to explicitly accommodate this. pub fn le_coder() -> impl Options { bincode::DefaultOptions::new() .with_little_endian() .with_fixint_encoding() } /// Binary serialize/deserialize helper functions (Big Endian) /// pub trait BeSer { /// Serialize into a byte slice fn ser_into_slice(&self, mut b: &mut [u8]) -> Result<(), SerializeError> where Self: Serialize, { // &mut [u8] implements Write, but `ser_into` needs a mutable // reference to that. So we need the slightly awkward "mutable // reference to a mutable reference. self.ser_into(&mut b) } /// Serialize into a borrowed writer /// /// This is useful for most `Write` types except `&mut [u8]`, which /// can more easily use [`ser_into_slice`](Self::ser_into_slice). 
fn ser_into<W: Write>(&self, w: &mut W) -> Result<(), SerializeError> where Self: Serialize, { be_coder().serialize_into(w, &self).map_err(|e| e.into()) } /// Serialize into a new heap-allocated buffer fn ser(&self) -> Result<Vec<u8>, SerializeError> where Self: Serialize, { be_coder().serialize(&self).map_err(|e| e.into()) } /// Deserialize from the full contents of a byte slice /// /// See also: [`BeSer::des_prefix`] fn des(buf: &[u8]) -> Result<Self, DeserializeError> where Self: DeserializeOwned, { be_coder() .deserialize(buf) .or(Err(DeserializeError::BadInput)) } /// Deserialize from a prefix of the byte slice /// /// Uses as much of the byte slice as is necessary to deserialize the /// type, but does not guarantee that the entire slice is used. /// /// See also: [`BeSer::des`] fn des_prefix(buf: &[u8]) -> Result<Self, DeserializeError> where Self: DeserializeOwned, { be_coder() .allow_trailing_bytes() .deserialize(buf) .or(Err(DeserializeError::BadInput)) } /// Deserialize from a reader fn des_from<R: Read>(r: &mut R) -> Result<Self, DeserializeError> where Self: DeserializeOwned, { be_coder().deserialize_from(r).map_err(|e| e.into()) } /// Compute the serialized size of a data structure /// /// Note: it may be faster to serialize to a buffer and then measure the /// buffer length, than to call `serialized_size` and then `ser_into`. fn serialized_size(&self) -> Result<u64, SerializeError> where Self: Serialize, { be_coder().serialized_size(self).map_err(|e| e.into()) } } /// Binary serialize/deserialize helper functions (Little Endian) /// pub trait LeSer { /// Serialize into a byte slice fn ser_into_slice(&self, mut b: &mut [u8]) -> Result<(), SerializeError> where Self: Serialize, { // &mut [u8] implements Write, but `ser_into` needs a mutable // reference to that. So we need the slightly awkward "mutable // reference to a mutable reference. 
self.ser_into(&mut b) } /// Serialize into a borrowed writer /// /// This is useful for most `Write` types except `&mut [u8]`, which /// can more easily use [`ser_into_slice`](Self::ser_into_slice). fn ser_into<W: Write>(&self, w: &mut W) -> Result<(), SerializeError> where Self: Serialize, { le_coder().serialize_into(w, &self).map_err(|e| e.into()) } /// Serialize into a new heap-allocated buffer fn ser(&self) -> Result<Vec<u8>, SerializeError> where Self: Serialize, { le_coder().serialize(&self).map_err(|e| e.into()) } /// Deserialize from the full contents of a byte slice /// /// See also: [`LeSer::des_prefix`] fn des(buf: &[u8]) -> Result<Self, DeserializeError> where Self: DeserializeOwned, { le_coder() .deserialize(buf) .or(Err(DeserializeError::BadInput)) } /// Deserialize from a prefix of the byte slice /// /// Uses as much of the byte slice as is necessary to deserialize the /// type, but does not guarantee that the entire slice is used. /// /// See also: [`LeSer::des`] fn des_prefix(buf: &[u8]) -> Result<Self, DeserializeError> where Self: DeserializeOwned, { le_coder() .allow_trailing_bytes() .deserialize(buf) .or(Err(DeserializeError::BadInput)) } /// Deserialize from a reader fn des_from<R: Read>(r: &mut R) -> Result<Self, DeserializeError> where Self: DeserializeOwned, { le_coder().deserialize_from(r).map_err(|e| e.into()) } /// Compute the serialized size of a data structure /// /// Note: it may be faster to serialize to a buffer and then measure the /// buffer length, than to call `serialized_size` and then `ser_into`. fn serialized_size(&self) -> Result<u64, SerializeError> where Self: Serialize, { le_coder().serialized_size(self).map_err(|e| e.into()) } } // Because usage of `BeSer` or `LeSer` can be done with *either* a Serialize or // DeserializeOwned implementation, the blanket implementation has to be for every type. 
impl<T> BeSer for T {} impl<T> LeSer for T {} #[cfg(test)] mod tests { use std::io::Cursor; use serde::{Deserialize, Serialize}; use super::DeserializeError; #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct ShortStruct { a: u8, b: u32, } const SHORT1: ShortStruct = ShortStruct { a: 7, b: 65536 }; const SHORT1_ENC_BE: &[u8] = &[7, 0, 1, 0, 0]; const SHORT1_ENC_BE_TRAILING: &[u8] = &[7, 0, 1, 0, 0, 255, 255, 255]; const SHORT1_ENC_LE: &[u8] = &[7, 0, 0, 1, 0]; const SHORT1_ENC_LE_TRAILING: &[u8] = &[7, 0, 0, 1, 0, 255, 255, 255]; const SHORT2: ShortStruct = ShortStruct { a: 8, b: 0x07030000, }; const SHORT2_ENC_BE: &[u8] = &[8, 7, 3, 0, 0]; const SHORT2_ENC_BE_TRAILING: &[u8] = &[8, 7, 3, 0, 0, 0xff, 0xff, 0xff]; const SHORT2_ENC_LE: &[u8] = &[8, 0, 0, 3, 7]; const SHORT2_ENC_LE_TRAILING: &[u8] = &[8, 0, 0, 3, 7, 0xff, 0xff, 0xff]; #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] struct NewTypeStruct(u32); const NT1: NewTypeStruct = NewTypeStruct(414243); const NT1_INNER: u32 = 414243; #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct LongMsg { pub tag: u8, pub blockpos: u32, pub last_flush_position: u64, pub apply: u64, pub timestamp: i64, pub reply_requested: u8, } const LONG1: LongMsg = LongMsg { tag: 42, blockpos: 0x1000_2000, last_flush_position: 0x1234_2345_3456_4567, apply: 0x9876_5432_10FE_DCBA, timestamp: 0x7788_99AA_BBCC_DDFF, reply_requested: 1, }; #[test] fn be_short() { use super::BeSer; assert_eq!(SHORT1.serialized_size().unwrap(), 5); let encoded = SHORT1.ser().unwrap(); assert_eq!(encoded, SHORT1_ENC_BE); let decoded = ShortStruct::des(SHORT2_ENC_BE).unwrap(); assert_eq!(decoded, SHORT2); // with trailing data let decoded = ShortStruct::des_prefix(SHORT2_ENC_BE_TRAILING).unwrap(); assert_eq!(decoded, SHORT2); let err = ShortStruct::des(SHORT2_ENC_BE_TRAILING).unwrap_err(); assert!(matches!(err, DeserializeError::BadInput)); // serialize into a `Write` sink. 
let mut buf = Cursor::new(vec![0xFF; 8]); SHORT1.ser_into(&mut buf).unwrap(); assert_eq!(buf.into_inner(), SHORT1_ENC_BE_TRAILING); // deserialize from a `Write` sink. let mut buf = Cursor::new(SHORT2_ENC_BE); let decoded = ShortStruct::des_from(&mut buf).unwrap(); assert_eq!(decoded, SHORT2); // deserialize from a `Write` sink that terminates early. let mut buf = Cursor::new([0u8; 4]); let err = ShortStruct::des_from(&mut buf).unwrap_err(); assert!(matches!(err, DeserializeError::Io(_))); } #[test] fn le_short() { use super::LeSer; assert_eq!(SHORT1.serialized_size().unwrap(), 5); let encoded = SHORT1.ser().unwrap(); assert_eq!(encoded, SHORT1_ENC_LE); let decoded = ShortStruct::des(SHORT2_ENC_LE).unwrap(); assert_eq!(decoded, SHORT2); // with trailing data let decoded = ShortStruct::des_prefix(SHORT2_ENC_LE_TRAILING).unwrap(); assert_eq!(decoded, SHORT2); let err = ShortStruct::des(SHORT2_ENC_LE_TRAILING).unwrap_err(); assert!(matches!(err, DeserializeError::BadInput)); // serialize into a `Write` sink. let mut buf = Cursor::new(vec![0xFF; 8]); SHORT1.ser_into(&mut buf).unwrap(); assert_eq!(buf.into_inner(), SHORT1_ENC_LE_TRAILING); // deserialize from a `Write` sink. let mut buf = Cursor::new(SHORT2_ENC_LE); let decoded = ShortStruct::des_from(&mut buf).unwrap(); assert_eq!(decoded, SHORT2); // deserialize from a `Write` sink that terminates early. 
let mut buf = Cursor::new([0u8; 4]); let err = ShortStruct::des_from(&mut buf).unwrap_err(); assert!(matches!(err, DeserializeError::Io(_))); } #[test] fn be_long() { use super::BeSer; assert_eq!(LONG1.serialized_size().unwrap(), 30); let msg = LONG1; let encoded = msg.ser().unwrap(); let expected = hex_literal::hex!( "2A 1000 2000 1234 2345 3456 4567 9876 5432 10FE DCBA 7788 99AA BBCC DDFF 01" ); assert_eq!(encoded, expected); let msg2 = LongMsg::des(&encoded).unwrap(); assert_eq!(msg, msg2); } #[test] fn le_long() { use super::LeSer; assert_eq!(LONG1.serialized_size().unwrap(), 30); let msg = LONG1; let encoded = msg.ser().unwrap(); let expected = hex_literal::hex!( "2A 0020 0010 6745 5634 4523 3412 BADC FE10 3254 7698 FFDD CCBB AA99 8877 01" ); assert_eq!(encoded, expected); let msg2 = LongMsg::des(&encoded).unwrap(); assert_eq!(msg, msg2); } #[test] /// Ensure that newtype wrappers around u32 don't change the serialization format fn be_nt() { use super::BeSer; assert_eq!(NT1.serialized_size().unwrap(), 4); let msg = NT1; let encoded = msg.ser().unwrap(); let expected = hex_literal::hex!("0006 5223"); assert_eq!(encoded, expected); assert_eq!(encoded, NT1_INNER.ser().unwrap()); let msg2 = NewTypeStruct::des(&encoded).unwrap(); assert_eq!(msg, msg2); } #[test] /// Ensure that newtype wrappers around u32 don't change the serialization format fn le_nt() { use super::LeSer; assert_eq!(NT1.serialized_size().unwrap(), 4); let msg = NT1; let encoded = msg.ser().unwrap(); let expected = hex_literal::hex!("2352 0600"); assert_eq!(encoded, expected); assert_eq!(encoded, NT1_INNER.ser().unwrap()); let msg2 = NewTypeStruct::des(&encoded).unwrap(); assert_eq!(msg, msg2); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/ip_address.rs
libs/utils/src/ip_address.rs
use std::env::{VarError, var}; use std::error::Error; use std::net::IpAddr; use std::str::FromStr; /// Name of the environment variable containing the reachable IP address of the node. If set, the IP address contained in this /// environment variable is used as the reachable IP address of the pageserver or safekeeper node during node registration. /// In a Kubernetes environment, this environment variable should be set by Kubernetes to the Pod IP (specified in the Pod /// template). pub const HADRON_NODE_IP_ADDRESS: &str = "HADRON_NODE_IP_ADDRESS"; /// Read the reachable IP address of this page server from env var HADRON_NODE_IP_ADDRESS. /// In Kubernetes this environment variable is set to the Pod IP (specified in the Pod template). pub fn read_node_ip_addr_from_env() -> Result<Option<IpAddr>, Box<dyn Error>> { match var(HADRON_NODE_IP_ADDRESS) { Ok(v) => { if let Ok(addr) = IpAddr::from_str(&v) { Ok(Some(addr)) } else { Err(format!("Invalid IP address string: {v}. Cannot be parsed as either an IPv4 or an IPv6 address.").into()) } } Err(VarError::NotPresent) => Ok(None), Err(e) => Err(e.into()), } } #[cfg(test)] mod tests { use super::*; use std::env; use std::net::{Ipv4Addr, Ipv6Addr}; #[test] fn test_read_node_ip_addr_from_env() { // SAFETY: test code unsafe { // Test with a valid IPv4 address env::set_var(HADRON_NODE_IP_ADDRESS, "192.168.1.1"); let result = read_node_ip_addr_from_env().unwrap(); assert_eq!(result, Some(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)))); // Test with a valid IPv6 address env::set_var( HADRON_NODE_IP_ADDRESS, "2001:0db8:85a3:0000:0000:8a2e:0370:7334", ); } let result = read_node_ip_addr_from_env().unwrap(); assert_eq!( result, Some(IpAddr::V6( Ipv6Addr::from_str("2001:0db8:85a3:0000:0000:8a2e:0370:7334").unwrap() )) ); // Test with an invalid IP address // SAFETY: test code unsafe { env::set_var(HADRON_NODE_IP_ADDRESS, "invalid_ip"); } let result = read_node_ip_addr_from_env(); assert!(result.is_err()); // Test with no environment 
variable set // SAFETY: test code unsafe { env::remove_var(HADRON_NODE_IP_ADDRESS); } let result = read_node_ip_addr_from_env().unwrap(); assert_eq!(result, None); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/tracing_span_assert.rs
libs/utils/src/tracing_span_assert.rs
//! Assert that the current [`tracing::Span`] has a given set of fields. //! //! Can only produce meaningful positive results when tracing has been configured as in example. //! Absence of `tracing_error::ErrorLayer` is not detected yet. //! //! `#[cfg(test)]` code will get a pass when using the `check_fields_present` macro in case tracing //! is completly unconfigured. //! //! # Usage //! //! ```rust //! # fn main() { //! use tracing_subscriber::prelude::*; //! let registry = tracing_subscriber::registry() //! .with(tracing_error::ErrorLayer::default()); //! //! // Register the registry as the global subscriber. //! // In this example, we'll only use it as a thread-local subscriber. //! let _guard = tracing::subscriber::set_default(registry); //! //! // Then, in the main code: //! //! let span = tracing::info_span!("TestSpan", tenant_id = 1); //! let _guard = span.enter(); //! //! // ... down the call stack //! //! use utils::tracing_span_assert::{check_fields_present, ConstExtractor}; //! let extractor = ConstExtractor::new("tenant_id"); //! if let Err(missing) = check_fields_present!([&extractor]) { //! // if you copypaste this to a custom assert method, remember to add #[track_caller] //! // to get the "user" code location for the panic. //! panic!("Missing fields: {missing:?}"); //! } //! # } //! ``` //! //! Recommended reading: <https://docs.rs/tracing-subscriber/0.3.16/tracing_subscriber/layer/index.html#per-layer-filtering> //! 
#[derive(Debug)] pub enum ExtractionResult { Present, Absent, } pub trait Extractor: Send + Sync + std::fmt::Debug { fn id(&self) -> &str; fn extract(&self, fields: &tracing::field::FieldSet) -> ExtractionResult; } #[derive(Debug)] pub struct ConstExtractor { field_name: &'static str, } impl ConstExtractor { pub const fn new(field_name: &'static str) -> ConstExtractor { ConstExtractor { field_name } } } impl Extractor for ConstExtractor { fn id(&self) -> &str { self.field_name } fn extract(&self, fields: &tracing::field::FieldSet) -> ExtractionResult { if fields.iter().any(|f| f.name() == self.field_name) { ExtractionResult::Present } else { ExtractionResult::Absent } } } /// Checks that the given extractors are satisfied with the current span hierarchy. /// /// This should not be called directly, but used through [`check_fields_present`] which allows /// `Summary::Unconfigured` only when the calling crate is being `#[cfg(test)]` as a conservative default. #[doc(hidden)] pub fn check_fields_present0<const L: usize>( must_be_present: [&dyn Extractor; L], ) -> Result<Summary, Vec<&dyn Extractor>> { let mut missing = must_be_present.into_iter().collect::<Vec<_>>(); let trace = tracing_error::SpanTrace::capture(); trace.with_spans(|md, _formatted_fields| { // when trying to understand the inner workings of how does the matching work, note that // this closure might be called zero times if the span is disabled. normally it is called // once per span hierarchy level. missing.retain(|extractor| match extractor.extract(md.fields()) { ExtractionResult::Present => false, ExtractionResult::Absent => true, }); // continue walking up until we've found all missing !missing.is_empty() }); if missing.is_empty() { Ok(Summary::FoundEverything) } else if !tracing_subscriber_configured() { Ok(Summary::Unconfigured) } else { // we can still hit here if a tracing subscriber has been configured but the ErrorLayer is // missing, which can be annoying. 
for this case, we could probably use // SpanTrace::status(). // // another way to end up here is with RUST_LOG=pageserver=off while configuring the // logging, though I guess in that case the SpanTrace::status() == EMPTY would be valid. // this case is covered by test `not_found_if_tracing_error_subscriber_has_wrong_filter`. Err(missing) } } /// Checks that the given extractors are satisfied with the current span hierarchy. /// /// The macro is the preferred way of checking if fields exist while passing checks if a test does /// not have tracing configured. /// /// Why mangled name? Because #[macro_export] will expose it at utils::__check_fields_present. /// However we can game a module namespaced macro for `use` purposes by re-exporting the /// #[macro_export] exported name with an alias (below). #[doc(hidden)] #[macro_export] macro_rules! __check_fields_present { ($extractors:expr) => {{ { use $crate::tracing_span_assert::{check_fields_present0, Summary::*, Extractor}; match check_fields_present0($extractors) { Ok(FoundEverything) => Ok(()), Ok(Unconfigured) if cfg!(feature = "testing") => { // allow unconfigured in tests Ok(()) }, Ok(Unconfigured) => { panic!(r#"utils::tracing_span_assert: outside of #[cfg(feature = "testing")] expected tracing to be configured with tracing_error::ErrorLayer"#) }, Err(missing) => Err(missing) } } }} } pub use crate::__check_fields_present as check_fields_present; /// Explanation for why the check was deemed ok. /// /// Mainly useful for testing, or configuring per-crate behaviour as in with /// [`check_fields_present`]. #[derive(Debug)] pub enum Summary { /// All extractors were found. /// /// Should only happen when tracing is properly configured. FoundEverything, /// Tracing has not been configured at all. This is ok for tests running without tracing set /// up. 
Unconfigured, } fn tracing_subscriber_configured() -> bool { let mut noop_configured = false; tracing::dispatcher::get_default(|d| { // it is possible that this closure will not be invoked, but the current implementation // always invokes it noop_configured = d.is::<tracing::subscriber::NoSubscriber>(); }); !noop_configured } #[cfg(test)] mod tests { use std::collections::HashSet; use std::fmt::{self}; use std::hash::{Hash, Hasher}; use tracing_subscriber::prelude::*; use super::*; struct MemoryIdentity<'a>(&'a dyn Extractor); impl MemoryIdentity<'_> { fn as_ptr(&self) -> *const () { self.0 as *const _ as *const () } } impl PartialEq for MemoryIdentity<'_> { fn eq(&self, other: &Self) -> bool { self.as_ptr() == other.as_ptr() } } impl Eq for MemoryIdentity<'_> {} impl Hash for MemoryIdentity<'_> { fn hash<H: Hasher>(&self, state: &mut H) { self.as_ptr().hash(state); } } impl fmt::Debug for MemoryIdentity<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:p}: {}", self.as_ptr(), self.0.id()) } } struct Setup { _current_thread_subscriber_guard: tracing::subscriber::DefaultGuard, tenant_extractor: ConstExtractor, timeline_extractor: ConstExtractor, } fn setup_current_thread() -> Setup { let tenant_extractor = ConstExtractor::new("tenant_id"); let timeline_extractor = ConstExtractor::new("timeline_id"); let registry = tracing_subscriber::registry() .with(tracing_subscriber::fmt::layer()) .with(tracing_error::ErrorLayer::default()); let guard = tracing::subscriber::set_default(registry); Setup { _current_thread_subscriber_guard: guard, tenant_extractor, timeline_extractor, } } fn assert_missing(missing: Vec<&dyn Extractor>, expected: Vec<&dyn Extractor>) { let missing: HashSet<MemoryIdentity> = HashSet::from_iter(missing.into_iter().map(MemoryIdentity)); let expected: HashSet<MemoryIdentity> = HashSet::from_iter(expected.into_iter().map(MemoryIdentity)); assert_eq!(missing, expected); } #[test] fn positive_one_level() { let setup = 
setup_current_thread(); let span = tracing::info_span!("root", tenant_id = "tenant-1", timeline_id = "timeline-1"); let _guard = span.enter(); let res = check_fields_present0([&setup.tenant_extractor, &setup.timeline_extractor]); assert!(matches!(res, Ok(Summary::FoundEverything)), "{res:?}"); } #[test] fn negative_one_level() { let setup = setup_current_thread(); let span = tracing::info_span!("root", timeline_id = "timeline-1"); let _guard = span.enter(); let missing = check_fields_present0([&setup.tenant_extractor, &setup.timeline_extractor]) .unwrap_err(); assert_missing(missing, vec![&setup.tenant_extractor]); } #[test] fn positive_multiple_levels() { let setup = setup_current_thread(); let span = tracing::info_span!("root"); let _guard = span.enter(); let span = tracing::info_span!("child", tenant_id = "tenant-1"); let _guard = span.enter(); let span = tracing::info_span!("grandchild", timeline_id = "timeline-1"); let _guard = span.enter(); let res = check_fields_present0([&setup.tenant_extractor, &setup.timeline_extractor]); assert!(matches!(res, Ok(Summary::FoundEverything)), "{res:?}"); } #[test] fn negative_multiple_levels() { let setup = setup_current_thread(); let span = tracing::info_span!("root"); let _guard = span.enter(); let span = tracing::info_span!("child", timeline_id = "timeline-1"); let _guard = span.enter(); let missing = check_fields_present0([&setup.tenant_extractor]).unwrap_err(); assert_missing(missing, vec![&setup.tenant_extractor]); } #[test] fn positive_subset_one_level() { let setup = setup_current_thread(); let span = tracing::info_span!("root", tenant_id = "tenant-1", timeline_id = "timeline-1"); let _guard = span.enter(); let res = check_fields_present0([&setup.tenant_extractor]); assert!(matches!(res, Ok(Summary::FoundEverything)), "{res:?}"); } #[test] fn positive_subset_multiple_levels() { let setup = setup_current_thread(); let span = tracing::info_span!("root"); let _guard = span.enter(); let span = 
tracing::info_span!("child", tenant_id = "tenant-1"); let _guard = span.enter(); let span = tracing::info_span!("grandchild", timeline_id = "timeline-1"); let _guard = span.enter(); let res = check_fields_present0([&setup.tenant_extractor]); assert!(matches!(res, Ok(Summary::FoundEverything)), "{res:?}"); } #[test] fn negative_subset_one_level() { let setup = setup_current_thread(); let span = tracing::info_span!("root", timeline_id = "timeline-1"); let _guard = span.enter(); let missing = check_fields_present0([&setup.tenant_extractor]).unwrap_err(); assert_missing(missing, vec![&setup.tenant_extractor]); } #[test] fn negative_subset_multiple_levels() { let setup = setup_current_thread(); let span = tracing::info_span!("root"); let _guard = span.enter(); let span = tracing::info_span!("child", timeline_id = "timeline-1"); let _guard = span.enter(); let missing = check_fields_present0([&setup.tenant_extractor]).unwrap_err(); assert_missing(missing, vec![&setup.tenant_extractor]); } #[test] fn tracing_error_subscriber_not_set_up_straight_line() { // no setup let span = tracing::info_span!("foo", e = "some value"); let _guard = span.enter(); let extractor = ConstExtractor::new("e"); let res = check_fields_present0([&extractor]); assert!(matches!(res, Ok(Summary::Unconfigured)), "{res:?}"); // similarly for a not found key let extractor = ConstExtractor::new("foobar"); let res = check_fields_present0([&extractor]); assert!(matches!(res, Ok(Summary::Unconfigured)), "{res:?}"); } #[test] fn tracing_error_subscriber_not_set_up_with_instrument() { // no setup // demo a case where span entering is used to establish a parent child connection, but // when we re-enter the subspan SpanTrace::with_spans iterates over nothing. 
let span = tracing::info_span!("foo", e = "some value"); let _guard = span.enter(); let subspan = tracing::info_span!("bar", f = "foobar"); drop(_guard); // normally this would work, but without any tracing-subscriber configured, both // check_field_present find nothing let _guard = subspan.enter(); let extractors: [&dyn Extractor; 2] = [&ConstExtractor::new("e"), &ConstExtractor::new("f")]; let res = check_fields_present0(extractors); assert!(matches!(res, Ok(Summary::Unconfigured)), "{res:?}"); // similarly for a not found key let extractor = ConstExtractor::new("g"); let res = check_fields_present0([&extractor]); assert!(matches!(res, Ok(Summary::Unconfigured)), "{res:?}"); } #[test] fn tracing_subscriber_configured() { // this will fail if any utils::logging::init callers appear, but let's hope they do not // appear. assert!(!super::tracing_subscriber_configured()); let _g = setup_current_thread(); assert!(super::tracing_subscriber_configured()); } #[test] fn not_found_when_disabled_by_filter() { let r = tracing_subscriber::registry().with({ tracing_error::ErrorLayer::default().with_filter(tracing_subscriber::filter::filter_fn( |md| !(md.is_span() && *md.level() == tracing::Level::INFO), )) }); let _guard = tracing::subscriber::set_default(r); // this test is a rather tricky one, it has a number of possible outcomes depending on the // execution order when executed with other tests even if no test sets the global default // subscriber. let span = tracing::info_span!("foo", e = "some value"); let _guard = span.enter(); let extractors: [&dyn Extractor; 1] = [&ConstExtractor::new("e")]; if span.is_disabled() { // the tests are running single threaded, or we got lucky and no other tests subscriber // was got to register their per-CALLSITE::META interest between `set_default` and // creation of the span, thus the filter got to apply and registered interest of Never, // so the span was never created. 
// // as the span is disabled, no keys were recorded to it, leading check_fields_present0 // to find an error. let missing = check_fields_present0(extractors).unwrap_err(); assert_missing(missing, vec![extractors[0]]); } else { // when the span is enabled, it is because some other test is running at the same time, // and that tests registry has filters which are interested in our above span. // // because the span is now enabled, all keys will be found for it. the // tracing_error::SpanTrace does not consider layer filters during the span hierarchy // walk (SpanTrace::with_spans), nor is the SpanTrace::status a reliable indicator in // this test-induced issue. let res = check_fields_present0(extractors); assert!(matches!(res, Ok(Summary::FoundEverything)), "{res:?}"); } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/logging.rs
libs/utils/src/logging.rs
use std::future::Future; use std::pin::Pin; use std::str::FromStr; use std::time::Duration; use anyhow::Context; use metrics::{IntCounter, IntCounterVec}; use once_cell::sync::Lazy; use strum_macros::{EnumString, VariantNames}; use tokio::time::Instant; use tracing::{info, warn}; /// Logs a critical error, similarly to `tracing::error!`. This will: /// /// * Emit an ERROR log message with prefix "CRITICAL:" and a backtrace. /// * Trigger a pageable alert (via the metric below). /// * Increment libmetrics_tracing_event_count{level="critical"}, and indirectly level="error". /// * In debug builds, panic the process. /// /// When including errors in the message, please use {err:?} to include the error cause and original /// backtrace. #[macro_export] macro_rules! critical { ($($arg:tt)*) => {{ if cfg!(debug_assertions) { panic!($($arg)*); } // Increment both metrics $crate::logging::TRACING_EVENT_COUNT_METRIC.inc_critical(); let backtrace = std::backtrace::Backtrace::capture(); tracing::error!("CRITICAL: {}\n{backtrace}", format!($($arg)*)); }}; } #[macro_export] macro_rules! 
critical_timeline { ($tenant_shard_id:expr, $timeline_id:expr, $corruption_detected:expr, $($arg:tt)*) => {{ if cfg!(debug_assertions) { panic!($($arg)*); } // Increment both metrics $crate::logging::TRACING_EVENT_COUNT_METRIC.inc_critical(); $crate::logging::HADRON_CRITICAL_STORAGE_EVENT_COUNT_METRIC.inc(&$tenant_shard_id.to_string(), &$timeline_id.to_string()); if let Some(c) = $corruption_detected.as_ref() { c.store(true, std::sync::atomic::Ordering::Relaxed); } let backtrace = std::backtrace::Backtrace::capture(); tracing::error!("CRITICAL: [tenant_shard_id: {}, timeline_id: {}] {}\n{backtrace}", $tenant_shard_id, $timeline_id, format!($($arg)*)); }}; } #[derive(EnumString, strum_macros::Display, VariantNames, Eq, PartialEq, Debug, Clone, Copy)] #[strum(serialize_all = "snake_case")] pub enum LogFormat { Plain, Json, Test, } impl LogFormat { pub fn from_config(s: &str) -> anyhow::Result<LogFormat> { use strum::VariantNames; LogFormat::from_str(s).with_context(|| { format!( "Unrecognized log format. Please specify one of: {:?}", LogFormat::VARIANTS ) }) } } pub struct TracingEventCountMetric { /// CRITICAL is not a `tracing` log level. Instead, we increment it in the `critical!` macro, /// and also emit it as a regular error. These are thus double-counted, but that seems fine. 
critical: IntCounter, error: IntCounter, warn: IntCounter, info: IntCounter, debug: IntCounter, trace: IntCounter, } // Begin Hadron: Add a HadronCriticalStorageEventCountMetric metric that is sliced by tenant_id and timeline_id pub struct HadronCriticalStorageEventCountMetric { critical: IntCounterVec, } pub static HADRON_CRITICAL_STORAGE_EVENT_COUNT_METRIC: Lazy<HadronCriticalStorageEventCountMetric> = Lazy::new(|| { let vec = metrics::register_int_counter_vec!( "hadron_critical_storage_event_count", "Number of critical storage events, by tenant_id and timeline_id", &["tenant_shard_id", "timeline_id"] ) .expect("failed to define metric"); HadronCriticalStorageEventCountMetric::new(vec) }); impl HadronCriticalStorageEventCountMetric { fn new(vec: IntCounterVec) -> Self { Self { critical: vec } } // Allow public access from `critical!` macro. pub fn inc(&self, tenant_shard_id: &str, timeline_id: &str) { self.critical .with_label_values(&[tenant_shard_id, timeline_id]) .inc(); } } // End Hadron pub static TRACING_EVENT_COUNT_METRIC: Lazy<TracingEventCountMetric> = Lazy::new(|| { let vec = metrics::register_int_counter_vec!( "libmetrics_tracing_event_count", "Number of tracing events, by level", &["level"] ) .expect("failed to define metric"); TracingEventCountMetric::new(vec) }); impl TracingEventCountMetric { fn new(vec: IntCounterVec) -> Self { Self { critical: vec.with_label_values(&["critical"]), error: vec.with_label_values(&["error"]), warn: vec.with_label_values(&["warn"]), info: vec.with_label_values(&["info"]), debug: vec.with_label_values(&["debug"]), trace: vec.with_label_values(&["trace"]), } } // Allow public access from `critical!` macro. 
pub fn inc_critical(&self) { self.critical.inc(); } fn inc_for_level(&self, level: tracing::Level) { let counter = match level { tracing::Level::ERROR => &self.error, tracing::Level::WARN => &self.warn, tracing::Level::INFO => &self.info, tracing::Level::DEBUG => &self.debug, tracing::Level::TRACE => &self.trace, }; counter.inc(); } } struct TracingEventCountLayer(&'static TracingEventCountMetric); impl<S> tracing_subscriber::layer::Layer<S> for TracingEventCountLayer where S: tracing::Subscriber, { fn on_event( &self, event: &tracing::Event<'_>, _ctx: tracing_subscriber::layer::Context<'_, S>, ) { self.0.inc_for_level(*event.metadata().level()); } } /// Whether to add the `tracing_error` crate's `ErrorLayer` /// to the global tracing subscriber. /// pub enum TracingErrorLayerEnablement { /// Do not add the `ErrorLayer`. Disabled, /// Add the `ErrorLayer` with the filter specified by RUST_LOG, defaulting to `info` if `RUST_LOG` is unset. EnableWithRustLogFilter, } /// Where the logging should output to. #[derive(Clone, Copy)] pub enum Output { Stdout, Stderr, } pub fn init( log_format: LogFormat, tracing_error_layer_enablement: TracingErrorLayerEnablement, output: Output, ) -> anyhow::Result<()> { // We fall back to printing all spans at info-level or above if // the RUST_LOG environment variable is not set. let rust_log_env_filter = || { tracing_subscriber::EnvFilter::try_from_default_env() .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")) }; // NB: the order of the with() calls does not matter. 
// See https://docs.rs/tracing-subscriber/0.3.16/tracing_subscriber/layer/index.html#per-layer-filtering use tracing_subscriber::prelude::*; let r = tracing_subscriber::registry(); let r = r.with({ let log_layer = tracing_subscriber::fmt::layer() .with_target(false) .with_ansi(false) .with_writer(move || -> Box<dyn std::io::Write> { match output { Output::Stdout => Box::new(std::io::stdout()), Output::Stderr => Box::new(std::io::stderr()), } }); let log_layer = match log_format { LogFormat::Json => log_layer.json().boxed(), LogFormat::Plain => log_layer.boxed(), LogFormat::Test => log_layer.with_test_writer().boxed(), }; log_layer.with_filter(rust_log_env_filter()) }); let r = r.with( TracingEventCountLayer(&TRACING_EVENT_COUNT_METRIC).with_filter(rust_log_env_filter()), ); match tracing_error_layer_enablement { TracingErrorLayerEnablement::EnableWithRustLogFilter => r .with(tracing_error::ErrorLayer::default().with_filter(rust_log_env_filter())) .init(), TracingErrorLayerEnablement::Disabled => r.init(), } Ok(()) } /// Disable the default rust panic hook by using `set_hook`. /// /// For neon binaries, the assumption is that tracing is configured before with [`init`], after /// that sentry is configured (if needed). sentry will install it's own on top of this, always /// processing the panic before we log it. /// /// When the return value is dropped, the hook is reverted to std default hook (prints to stderr). /// If the assumptions about the initialization order are not held, use /// [`TracingPanicHookGuard::forget`] but keep in mind, if tracing is stopped, then panics will be /// lost. #[must_use] pub fn replace_panic_hook_with_tracing_panic_hook() -> TracingPanicHookGuard { std::panic::set_hook(Box::new(tracing_panic_hook)); TracingPanicHookGuard::new() } /// Drop guard which restores the std panic hook on drop. /// /// Tracing should not be used when it's not configured, but we cannot really latch on to any /// imaginary lifetime of tracing. 
pub struct TracingPanicHookGuard { act: bool, } impl TracingPanicHookGuard { fn new() -> Self { TracingPanicHookGuard { act: true } } /// Make this hook guard not do anything when dropped. pub fn forget(&mut self) { self.act = false; } } impl Drop for TracingPanicHookGuard { fn drop(&mut self) { if self.act { let _ = std::panic::take_hook(); } } } /// Named symbol for our panic hook, which logs the panic. fn tracing_panic_hook(info: &std::panic::PanicHookInfo) { // following rust 1.66.1 std implementation: // https://github.com/rust-lang/rust/blob/90743e7298aca107ddaa0c202a4d3604e29bfeb6/library/std/src/panicking.rs#L235-L288 let location = info.location(); let msg = match info.payload().downcast_ref::<&'static str>() { Some(s) => *s, None => match info.payload().downcast_ref::<String>() { Some(s) => &s[..], None => "Box<dyn Any>", }, }; let thread = std::thread::current(); let thread = thread.name().unwrap_or("<unnamed>"); let backtrace = std::backtrace::Backtrace::capture(); let _entered = if let Some(location) = location { tracing::error_span!("panic", %thread, location = %PrettyLocation(location)) } else { // very unlikely to hit here, but the guarantees of std could change tracing::error_span!("panic", %thread) } .entered(); if backtrace.status() == std::backtrace::BacktraceStatus::Captured { // this has an annoying extra '\n' in the end which anyhow doesn't do, but we cannot really // get rid of it as we cannot get in between of std::fmt::Formatter<'_>; we could format to // string, maybe even to a TLS one but tracing already does that. tracing::error!("{msg}\n\nStack backtrace:\n{backtrace}"); } else { tracing::error!("{msg}"); } // ensure that we log something on the panic if this hook is left after tracing has been // unconfigured. worst case when teardown is racing the panic is to log the panic twice. 
tracing::dispatcher::get_default(|d| { if let Some(_none) = d.downcast_ref::<tracing::subscriber::NoSubscriber>() { let location = location.map(PrettyLocation); log_panic_to_stderr(thread, msg, location, &backtrace); } }); } #[cold] fn log_panic_to_stderr( thread: &str, msg: &str, location: Option<PrettyLocation<'_, '_>>, backtrace: &std::backtrace::Backtrace, ) { eprintln!( "panic while tracing is unconfigured: thread '{thread}' panicked at '{msg}', {location:?}\nStack backtrace:\n{backtrace}" ); } struct PrettyLocation<'a, 'b>(&'a std::panic::Location<'b>); impl std::fmt::Display for PrettyLocation<'_, '_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}:{}:{}", self.0.file(), self.0.line(), self.0.column()) } } impl std::fmt::Debug for PrettyLocation<'_, '_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { <Self as std::fmt::Display>::fmt(self, f) } } /// When you will store a secret but want to make sure it won't /// be accidentally logged, wrap it in a SecretString, whose Debug /// implementation does not expose the contents. #[derive(Clone, Eq, PartialEq)] pub struct SecretString(String); impl SecretString { pub fn get_contents(&self) -> &str { self.0.as_str() } } impl From<String> for SecretString { fn from(s: String) -> Self { Self(s) } } impl FromStr for SecretString { type Err = std::convert::Infallible; fn from_str(s: &str) -> Result<Self, Self::Err> { Ok(Self(s.to_string())) } } impl std::fmt::Debug for SecretString { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "[SECRET]") } } /// Logs a periodic message if a future is slow to complete. /// /// This is performance-sensitive as it's used on the GetPage read path. /// /// TODO: consider upgrading this to a warning, but currently it fires too often. 
#[inline] pub async fn log_slow<O>( name: &str, threshold: Duration, f: Pin<&mut impl Future<Output = O>>, ) -> O { monitor_slow_future( threshold, threshold, // period = threshold f, |MonitorSlowFutureCallback { ready, is_slow, elapsed_total, elapsed_since_last_callback: _, }| { if !is_slow { return; } let elapsed = elapsed_total.as_secs_f64(); if ready { info!("slow {name} completed after {elapsed:.3}s"); } else { info!("slow {name} still running after {elapsed:.3}s"); } }, ) .await } /// Logs a periodic warning if a future is slow to complete. #[inline] pub async fn warn_slow<O>( name: &str, threshold: Duration, f: Pin<&mut impl Future<Output = O>>, ) -> O { monitor_slow_future( threshold, threshold, // period = threshold f, |MonitorSlowFutureCallback { ready, is_slow, elapsed_total, elapsed_since_last_callback: _, }| { if !is_slow { return; } let elapsed = elapsed_total.as_secs_f64(); if ready { warn!("slow {name} completed after {elapsed:.3}s"); } else { warn!("slow {name} still running after {elapsed:.3}s"); } }, ) .await } /// Poll future `fut` to completion, invoking callback `cb` at the given `threshold` and every /// `period` afterwards, and also unconditionally when the future completes. #[inline] pub async fn monitor_slow_future<F, O>( threshold: Duration, period: Duration, mut fut: Pin<&mut F>, mut cb: impl FnMut(MonitorSlowFutureCallback), ) -> O where F: Future<Output = O>, { let started = Instant::now(); let mut attempt = 1; let mut last_cb = started; loop { // NB: use timeout_at() instead of timeout() to avoid an extra clock reading in the common // case where the timeout doesn't fire. let deadline = started + threshold + (attempt - 1) * period; // TODO: still call the callback if the future panics? Copy how we do it for the page_service flush_in_progress counter. 
let res = tokio::time::timeout_at(deadline, &mut fut).await; let now = Instant::now(); let elapsed_total = now - started; cb(MonitorSlowFutureCallback { ready: res.is_ok(), is_slow: elapsed_total >= threshold, elapsed_total, elapsed_since_last_callback: now - last_cb, }); last_cb = now; if let Ok(output) = res { return output; } attempt += 1; } } /// See [`monitor_slow_future`]. pub struct MonitorSlowFutureCallback { /// Whether the future completed. If true, there will be no more callbacks. pub ready: bool, /// Whether the future is taking `>=` the specififed threshold duration to complete. /// Monotonic: if true in one callback invocation, true in all subsequent onces. pub is_slow: bool, /// The time elapsed since the [`monitor_slow_future`] was first polled. pub elapsed_total: Duration, /// The time elapsed since the last callback invocation. /// For the initial callback invocation, the time elapsed since the [`monitor_slow_future`] was first polled. pub elapsed_since_last_callback: Duration, } #[cfg(test)] mod tests { use metrics::IntCounterVec; use metrics::core::Opts; use crate::logging::{TracingEventCountLayer, TracingEventCountMetric}; #[test] fn tracing_event_count_metric() { let counter_vec = IntCounterVec::new(Opts::new("testmetric", "testhelp"), &["level"]).unwrap(); let metric = Box::leak(Box::new(TracingEventCountMetric::new(counter_vec.clone()))); let layer = TracingEventCountLayer(metric); use tracing_subscriber::prelude::*; tracing::subscriber::with_default(tracing_subscriber::registry().with(layer), || { tracing::trace!("foo"); tracing::debug!("foo"); tracing::info!("foo"); tracing::warn!("foo"); tracing::error!("foo"); }); assert_eq!(counter_vec.with_label_values(&["trace"]).get(), 1); assert_eq!(counter_vec.with_label_values(&["debug"]).get(), 1); assert_eq!(counter_vec.with_label_values(&["info"]).get(), 1); assert_eq!(counter_vec.with_label_values(&["warn"]).get(), 1); assert_eq!(counter_vec.with_label_values(&["error"]).get(), 1); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/fs_ext.rs
libs/utils/src/fs_ext.rs
/// Extensions to `std::fs` types. use std::{fs, io, path::Path}; use anyhow::Context; #[cfg(feature = "rename_noreplace")] mod rename_noreplace; #[cfg(feature = "rename_noreplace")] pub use rename_noreplace::rename_noreplace; pub trait PathExt { /// Returns an error if `self` is not a directory. fn is_empty_dir(&self) -> io::Result<bool>; } impl<P> PathExt for P where P: AsRef<Path>, { fn is_empty_dir(&self) -> io::Result<bool> { Ok(fs::read_dir(self)?.next().is_none()) } } pub async fn is_directory_empty(path: impl AsRef<Path>) -> anyhow::Result<bool> { let mut dir = tokio::fs::read_dir(&path) .await .context(format!("read_dir({})", path.as_ref().display()))?; Ok(dir.next_entry().await?.is_none()) } pub async fn list_dir(path: impl AsRef<Path>) -> anyhow::Result<Vec<String>> { let mut dir = tokio::fs::read_dir(&path) .await .context(format!("read_dir({})", path.as_ref().display()))?; let mut content = vec![]; while let Some(next) = dir.next_entry().await? { let file_name = next.file_name(); content.push(file_name.to_string_lossy().to_string()); } Ok(content) } pub fn ignore_not_found(e: io::Error) -> io::Result<()> { if e.kind() == io::ErrorKind::NotFound { Ok(()) } else { Err(e) } } pub fn ignore_absent_files<F>(fs_operation: F) -> io::Result<()> where F: Fn() -> io::Result<()>, { fs_operation().or_else(ignore_not_found) } #[cfg(test)] mod test { use super::ignore_absent_files; use crate::fs_ext::{is_directory_empty, list_dir}; #[test] fn is_empty_dir() { use super::PathExt; let dir = camino_tempfile::tempdir().unwrap(); let dir_path = dir.path(); // test positive case assert!( dir_path.is_empty_dir().expect("test failure"), "new tempdir should be empty" ); // invoke on a file to ensure it returns an error let file_path = dir_path.join("testfile"); let f = std::fs::File::create(&file_path).unwrap(); drop(f); assert!(file_path.is_empty_dir().is_err()); // do it again on a path, we know to be nonexistent std::fs::remove_file(&file_path).unwrap(); 
assert!(file_path.is_empty_dir().is_err()); } #[tokio::test] async fn is_empty_dir_async() { let dir = camino_tempfile::tempdir().unwrap(); let dir_path = dir.path(); // test positive case assert!( is_directory_empty(dir_path).await.expect("test failure"), "new tempdir should be empty" ); // invoke on a file to ensure it returns an error let file_path = dir_path.join("testfile"); let f = std::fs::File::create(&file_path).unwrap(); drop(f); assert!(is_directory_empty(&file_path).await.is_err()); // do it again on a path, we know to be nonexistent std::fs::remove_file(&file_path).unwrap(); assert!(is_directory_empty(file_path).await.is_err()); } #[test] fn ignore_absent_files_works() { let dir = camino_tempfile::tempdir().unwrap(); let file_path = dir.path().join("testfile"); ignore_absent_files(|| std::fs::remove_file(&file_path)).expect("should execute normally"); let f = std::fs::File::create(&file_path).unwrap(); drop(f); ignore_absent_files(|| std::fs::remove_file(&file_path)).expect("should execute normally"); assert!(!file_path.exists()); } #[tokio::test] async fn list_dir_works() { let dir = camino_tempfile::tempdir().unwrap(); let dir_path = dir.path(); assert!(list_dir(dir_path).await.unwrap().is_empty()); let file_path = dir_path.join("testfile"); let _ = std::fs::File::create(&file_path).unwrap(); assert_eq!(&list_dir(dir_path).await.unwrap(), &["testfile"]); let another_dir_path = dir_path.join("testdir"); std::fs::create_dir(another_dir_path).unwrap(); let expected = &["testdir", "testfile"]; let mut actual = list_dir(dir_path).await.unwrap(); actual.sort(); assert_eq!(actual, expected); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/linux_socket_ioctl.rs
libs/utils/src/linux_socket_ioctl.rs
//! Linux-specific socket ioctls. //! //! <https://elixir.bootlin.com/linux/v6.1.128/source/include/uapi/linux/sockios.h#L25-L27> use std::io; use std::mem::MaybeUninit; use std::os::fd::RawFd; use std::os::raw::c_int; use nix::libc::{FIONREAD, TIOCOUTQ}; unsafe fn do_ioctl(socket_fd: RawFd, cmd: nix::libc::Ioctl) -> io::Result<c_int> { let mut inq: MaybeUninit<c_int> = MaybeUninit::uninit(); // SAFETY: encapsulating fn is unsafe, we require `socket_fd` to be a valid file descriptor unsafe { let err = nix::libc::ioctl(socket_fd, cmd, inq.as_mut_ptr()); if err == 0 { Ok(inq.assume_init()) } else { Err(io::Error::last_os_error()) } } } /// # Safety /// /// Caller must ensure that `socket_fd` is a valid TCP socket file descriptor. pub unsafe fn inq(socket_fd: RawFd) -> io::Result<c_int> { // SAFETY: encapsulating fn is unsafe unsafe { do_ioctl(socket_fd, FIONREAD) } } /// # Safety /// /// Caller must ensure that `socket_fd` is a valid TCP socket file descriptor. pub unsafe fn outq(socket_fd: RawFd) -> io::Result<c_int> { // SAFETY: encapsulating fn is unsafe unsafe { do_ioctl(socket_fd, TIOCOUTQ) } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/postgres_client.rs
libs/utils/src/postgres_client.rs
//! Postgres client connection code common to other crates (safekeeper and //! pageserver) which depends on tenant/timeline ids and thus not fitting into //! postgres_connection crate. use anyhow::Context; use postgres_connection::{PgConnectionConfig, parse_host_port}; use crate::id::TenantTimelineId; #[derive(Copy, Clone, PartialEq, Eq, Debug, serde::Serialize, serde::Deserialize)] #[serde(rename_all = "kebab-case")] pub enum InterpretedFormat { Bincode, Protobuf, } #[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)] #[serde(rename_all = "kebab-case")] pub enum Compression { Zstd { level: i8 }, } #[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)] #[serde(tag = "type", content = "args")] #[serde(rename_all = "kebab-case")] pub enum PostgresClientProtocol { /// Usual Postgres replication protocol Vanilla, /// Custom shard-aware protocol that replicates interpreted records. /// Used to send wal from safekeeper to pageserver. Interpreted { format: InterpretedFormat, compression: Option<Compression>, }, } pub struct ConnectionConfigArgs<'a> { pub protocol: PostgresClientProtocol, pub ttid: TenantTimelineId, pub shard_number: Option<u8>, pub shard_count: Option<u8>, pub shard_stripe_size: Option<u32>, pub listen_pg_addr_str: &'a str, pub auth_token: Option<&'a str>, pub availability_zone: Option<&'a str>, } impl<'a> ConnectionConfigArgs<'a> { fn options(&'a self) -> Vec<String> { let mut options = vec![ "-c".to_owned(), format!("timeline_id={}", self.ttid.timeline_id), format!("tenant_id={}", self.ttid.tenant_id), format!( "protocol={}", serde_json::to_string(&self.protocol).unwrap() ), ]; if self.shard_number.is_some() { assert!(self.shard_count.is_some()); assert!(self.shard_stripe_size.is_some()); options.push(format!("shard_count={}", self.shard_count.unwrap())); options.push(format!("shard_number={}", self.shard_number.unwrap())); options.push(format!( "shard_stripe_size={}", 
self.shard_stripe_size.unwrap() )); } options } } /// Create client config for fetching WAL from safekeeper on particular timeline. /// listen_pg_addr_str is in form host:\[port\]. pub fn wal_stream_connection_config( args: ConnectionConfigArgs, ) -> anyhow::Result<PgConnectionConfig> { let (host, port) = parse_host_port(args.listen_pg_addr_str).context("Unable to parse listen_pg_addr_str")?; let port = port.unwrap_or(5432); let mut connstr = PgConnectionConfig::new_host_port(host, port) .extend_options(args.options()) .set_password(args.auth_token.map(|s| s.to_owned())); if let Some(availability_zone) = args.availability_zone { connstr = connstr.extend_options([format!("availability_zone={availability_zone}")]); } Ok(connstr) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/signals.rs
libs/utils/src/signals.rs
pub use signal_hook::consts::TERM_SIGNALS; pub use signal_hook::consts::signal::*; use signal_hook::iterator::Signals; use tokio::signal::unix::{SignalKind, signal}; use tracing::info; pub enum Signal { Quit, Interrupt, Terminate, } impl Signal { pub fn name(&self) -> &'static str { match self { Signal::Quit => "SIGQUIT", Signal::Interrupt => "SIGINT", Signal::Terminate => "SIGTERM", } } } pub struct ShutdownSignals; impl ShutdownSignals { pub fn handle(mut handler: impl FnMut(Signal) -> anyhow::Result<()>) -> anyhow::Result<()> { for raw_signal in Signals::new(TERM_SIGNALS)?.into_iter() { let signal = match raw_signal { SIGINT => Signal::Interrupt, SIGTERM => Signal::Terminate, SIGQUIT => Signal::Quit, other => panic!("unknown signal: {other}"), }; handler(signal)?; } Ok(()) } } /// Runs in a loop since we want to be responsive to multiple signals /// even after triggering shutdown (e.g. a SIGQUIT after a slow SIGTERM shutdown) /// <https://github.com/neondatabase/neon/issues/9740> pub async fn signal_handler(token: tokio_util::sync::CancellationToken) { let mut sigint = signal(SignalKind::interrupt()).unwrap(); let mut sigterm = signal(SignalKind::terminate()).unwrap(); let mut sigquit = signal(SignalKind::quit()).unwrap(); loop { let signal = tokio::select! { _ = sigquit.recv() => { info!("Got signal SIGQUIT. Terminating in immediate shutdown mode."); std::process::exit(111); } _ = sigint.recv() => "SIGINT", _ = sigterm.recv() => "SIGTERM", }; if !token.is_cancelled() { info!("Got signal {signal}. Terminating gracefully in fast shutdown mode."); token.cancel(); } else { info!("Got signal {signal}. Already shutting down."); } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/leaky_bucket.rs
libs/utils/src/leaky_bucket.rs
//! This module implements the Generic Cell Rate Algorithm for a simplified //! version of the Leaky Bucket rate limiting system. //! //! # Leaky Bucket //! //! If the bucket is full, no new requests are allowed and are throttled/errored. //! If the bucket is partially full/empty, new requests are added to the bucket in //! terms of "tokens". //! //! Over time, tokens are removed from the bucket, naturally allowing new requests at a steady rate. //! //! The bucket size tunes the burst support. The drain rate tunes the steady-rate requests per second. //! //! # [GCRA](https://en.wikipedia.org/wiki/Generic_cell_rate_algorithm) //! //! GCRA is a continuous rate leaky-bucket impl that stores minimal state and requires //! no background jobs to drain tokens, as the design utilises timestamps to drain automatically over time. //! //! We store an "empty_at" timestamp as the only state. As time progresses, we will naturally approach //! the empty state. The full-bucket state is calculated from `empty_at - config.bucket_width`. //! //! Another explaination can be found here: <https://brandur.org/rate-limiting> use std::sync::Mutex; use std::sync::atomic::{AtomicU64, Ordering}; use std::time::Duration; use tokio::sync::Notify; use tokio::time::Instant; #[derive(Clone, Copy)] pub struct LeakyBucketConfig { /// This is the "time cost" of a single request unit. /// Should loosely represent how long it takes to handle a request unit in active resource time. /// Loosely speaking this is the inverse of the steady-rate requests-per-second pub cost: Duration, /// total size of the bucket pub bucket_width: Duration, } impl LeakyBucketConfig { pub fn new(rps: f64, bucket_size: f64) -> Self { let cost = Duration::from_secs_f64(rps.recip()); let bucket_width = cost.mul_f64(bucket_size); Self { cost, bucket_width } } } pub struct LeakyBucketState { /// Bucket is represented by `allow_at..empty_at` where `allow_at = empty_at - config.bucket_width`. 
///
/// At any given time, `empty_at - now` represents the number of tokens in the bucket, multiplied by the "time_cost".
/// Adding `n` tokens to the bucket is done by moving `empty_at` forward by `n * config.time_cost`.
/// If `now < allow_at`, the bucket is considered filled and cannot accept any more tokens.
/// Draining the bucket will happen naturally as `now` moves forward.
///
/// Let `n` be some "time cost" for the request,
/// If now is after empty_at, the bucket is empty and the empty_at is reset to now,
/// If now is within the `bucket window + n`, we are within time budget.
/// If now is before the `bucket window + n`, we have run out of budget.
///
/// This is inspired by the generic cell rate algorithm (GCRA) and works
/// exactly the same as a leaky-bucket.
pub empty_at: Instant,
}

impl LeakyBucketState {
    /// Creates a state that already holds `initial_tokens` tokens:
    /// `empty_at` is pushed `initial_tokens * cost` into the future.
    pub fn with_initial_tokens(config: &LeakyBucketConfig, initial_tokens: f64) -> Self {
        LeakyBucketState {
            empty_at: Instant::now() + config.cost.mul_f64(initial_tokens),
        }
    }

    /// Returns true when the bucket holds no tokens at time `now`.
    pub fn bucket_is_empty(&self, now: Instant) -> bool {
        // if self.end is after now, the bucket is not empty
        self.empty_at <= now
    }

    /// Immediately adds tokens to the bucket, if there is space.
    ///
    /// In a scenario where you are waiting for available rate,
    /// rather than just erroring immediately, `started` corresponds to when this waiting started.
    ///
    /// `n` is the number of tokens that will be filled in the bucket.
    ///
    /// # Errors
    ///
    /// If there is not enough space, no tokens are added. Instead, an error is returned with the time when
    /// there will be space again.
    pub fn add_tokens(
        &mut self,
        config: &LeakyBucketConfig,
        started: Instant,
        n: f64,
    ) -> Result<(), Instant> {
        let now = Instant::now();

        // invariant: started <= now
        debug_assert!(started <= now);

        // If the bucket was empty when we started our search,
        // we should update the `empty_at` value accordingly.
        // this prevents us from having negative tokens in the bucket.
        let mut empty_at = self.empty_at;
        if empty_at < started {
            empty_at = started;
        }

        // convert the token count into its "time cost" equivalent
        let n = config.cost.mul_f64(n);
        let new_empty_at = empty_at + n;
        // checked_sub: None means new_empty_at is within bucket_width of the
        // clock epoch, i.e. the bucket trivially has room.
        let allow_at = new_empty_at.checked_sub(config.bucket_width);

        //                  empty_at
        //        allow_at     |   new_empty_at
        //           /         |   /
        // -------o-[---------o-|--]---------
        //  now1 ^       now2 ^
        //
        // at now1, the bucket would be completely filled if we add n tokens.
        // at now2, the bucket would be partially filled if we add n tokens.

        match allow_at {
            Some(allow_at) if now < allow_at => Err(allow_at),
            _ => {
                self.empty_at = new_empty_at;
                Ok(())
            }
        }
    }
}

/// Async rate limiter built on [`LeakyBucketState`] with FIFO-fair queuing via
/// a tokio [`Notify`].
pub struct RateLimiter {
    pub config: LeakyBucketConfig,
    // number of times any acquirer had to sleep; used by `acquire` to report
    // whether throttling happened during its wait.
    pub sleep_counter: AtomicU64,
    pub state: Mutex<LeakyBucketState>,
    /// a queue to provide this fair ordering.
    pub queue: Notify,
}

// Drop guard: wakes the next waiter in the queue when the current
// front-of-queue acquirer finishes (or is cancelled).
struct Requeue<'a>(&'a Notify);

impl Drop for Requeue<'_> {
    fn drop(&mut self) {
        self.0.notify_one();
    }
}

impl RateLimiter {
    /// Builds a limiter whose bucket starts with `initial_tokens` tokens.
    pub fn with_initial_tokens(config: LeakyBucketConfig, initial_tokens: f64) -> Self {
        RateLimiter {
            sleep_counter: AtomicU64::new(0),
            state: Mutex::new(LeakyBucketState::with_initial_tokens(
                &config,
                initial_tokens,
            )),
            config,
            queue: {
                let queue = Notify::new();
                // pre-store one notification so the first acquirer passes straight through
                queue.notify_one();
                queue
            },
        }
    }

    /// Steady-state requests per second this limiter sustains (1 / cost).
    pub fn steady_rps(&self) -> f64 {
        self.config.cost.as_secs_f64().recip()
    }

    /// returns true if we did throttle
    pub async fn acquire(&self, count: usize) -> bool {
        let start = tokio::time::Instant::now();

        let start_count = self.sleep_counter.load(Ordering::Acquire);
        let mut end_count = start_count;

        // wait until we are the first in the queue
        let mut notified = std::pin::pin!(self.queue.notified());
        if !notified.as_mut().enable() {
            notified.await;
            // re-read: sleeps may have happened while we were queued
            end_count = self.sleep_counter.load(Ordering::Acquire);
        }

        // notify the next waiter in the queue when we are done.
        let _guard = Requeue(&self.queue);

        loop {
            let res = self
                .state
                .lock()
                .unwrap()
                .add_tokens(&self.config, start, count as f64);
            match res {
                Ok(()) => return end_count > start_count,
                Err(ready_at) => {
                    struct Increment<'a>(&'a AtomicU64);
                    impl Drop for Increment<'_> {
                        fn drop(&mut self) {
                            self.0.fetch_add(1, Ordering::AcqRel);
                        }
                    }
                    // increment the counter after we finish sleeping (or cancel this task).
                    // this ensures that tasks that have already started the acquire will observe
                    // the new sleep count when they are allowed to resume on the notify.
                    let _inc = Increment(&self.sleep_counter);
                    end_count += 1;

                    tokio::time::sleep_until(ready_at).await;
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use std::time::Duration;

    use tokio::time::Instant;

    use super::{LeakyBucketConfig, LeakyBucketState};

    // Single scenario test driving the state machine with a paused tokio clock.
    #[tokio::test(start_paused = true)]
    async fn check() {
        let config = LeakyBucketConfig {
            // average 100rps
            cost: Duration::from_millis(10),
            // burst up to 100 requests
            bucket_width: Duration::from_millis(1000),
        };

        let mut state = LeakyBucketState {
            empty_at: Instant::now(),
        };

        // supports burst
        {
            // should work for 100 requests this instant
            for _ in 0..100 {
                state.add_tokens(&config, Instant::now(), 1.0).unwrap();
            }
            let ready = state.add_tokens(&config, Instant::now(), 1.0).unwrap_err();
            assert_eq!(ready - Instant::now(), Duration::from_millis(10));
        }

        // doesn't overfill
        {
            // after 1s we should have an empty bucket again.
            tokio::time::advance(Duration::from_secs(1)).await;
            assert!(state.bucket_is_empty(Instant::now()));

            // after 1s more, we should not over count the tokens and allow more than 200 requests.
            tokio::time::advance(Duration::from_secs(1)).await;
            for _ in 0..100 {
                state.add_tokens(&config, Instant::now(), 1.0).unwrap();
            }
            let ready = state.add_tokens(&config, Instant::now(), 1.0).unwrap_err();
            assert_eq!(ready - Instant::now(), Duration::from_millis(10));
        }

        // supports sustained rate over a long period
        {
            tokio::time::advance(Duration::from_secs(1)).await;

            // should sustain 100rps
            for _ in 0..2000 {
                tokio::time::advance(Duration::from_millis(10)).await;
                state.add_tokens(&config, Instant::now(), 1.0).unwrap();
            }
        }

        // supports requesting more tokens than can be stored in the bucket
        // we just wait a little bit longer upfront.
        {
            // start the bucket completely empty
            tokio::time::advance(Duration::from_secs(5)).await;
            assert!(state.bucket_is_empty(Instant::now()));

            // requesting 200 tokens of space should take 200*cost = 2s
            // but we already have 1s available, so we wait 1s from start.
            let start = Instant::now();

            let ready = state.add_tokens(&config, start, 200.0).unwrap_err();
            assert_eq!(ready - Instant::now(), Duration::from_secs(1));

            tokio::time::advance(Duration::from_millis(500)).await;
            let ready = state.add_tokens(&config, start, 200.0).unwrap_err();
            assert_eq!(ready - Instant::now(), Duration::from_millis(500));

            tokio::time::advance(Duration::from_millis(500)).await;
            state.add_tokens(&config, start, 200.0).unwrap();

            // bucket should be completely full now
            let ready = state.add_tokens(&config, Instant::now(), 1.0).unwrap_err();
            assert_eq!(ready - Instant::now(), Duration::from_millis(10));
        }
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/metrics_collector.rs
libs/utils/src/metrics_collector.rs
use std::{
    sync::{Arc, RwLock},
    time::{Duration, Instant},
};

use metrics::{IntGauge, proto::MetricFamily, register_int_gauge};
use once_cell::sync::Lazy;

// Gauge for reporting the age of the cached metrics snapshot.
// NOTE(review): only registered here; updated by a caller outside this file — confirm.
pub static METRICS_STALE_MILLIS: Lazy<IntGauge> = Lazy::new(|| {
    register_int_gauge!(
        "metrics_metrics_stale_milliseconds",
        "The current metrics stale time in milliseconds"
    )
    .expect("failed to define a metric")
});

/// A snapshot of gathered metric families and the instant it was taken.
#[derive(Debug)]
pub struct CollectedMetrics {
    pub metrics: Vec<MetricFamily>,
    // when this snapshot was gathered
    pub collected_at: Instant,
}

impl CollectedMetrics {
    /// Wraps freshly gathered metric families, stamping them with `Instant::now()`.
    fn new(metrics: Vec<MetricFamily>) -> Self {
        Self {
            metrics,
            collected_at: Instant::now(),
        }
    }
}

/// Gathers process metrics on demand and (optionally) caches the latest
/// snapshot behind an `RwLock` so concurrent readers can share one `Arc`.
#[derive(Debug)]
pub struct MetricsCollector {
    last_collected: RwLock<Arc<CollectedMetrics>>,
}

impl MetricsCollector {
    /// Creates a collector whose cached snapshot starts out empty.
    pub fn new() -> Self {
        Self {
            last_collected: RwLock::new(Arc::new(CollectedMetrics::new(vec![]))),
        }
    }

    /// Gathers all registered metrics once. When `cache_metrics` is true the
    /// snapshot also replaces the cached one returned by [`Self::last_collected`].
    #[tracing::instrument(name = "metrics_collector", skip_all)]
    pub fn run_once(&self, cache_metrics: bool) -> Arc<CollectedMetrics> {
        let started = Instant::now();
        let metrics = metrics::gather();
        let collected = Arc::new(CollectedMetrics::new(metrics));
        if cache_metrics {
            let mut guard = self.last_collected.write().unwrap();
            *guard = collected.clone();
        }
        tracing::info!(
            "Collected {} metric families in {} ms",
            collected.metrics.len(),
            started.elapsed().as_millis()
        );
        collected
    }

    /// Returns the most recently cached snapshot (cheap: clones an `Arc`).
    pub fn last_collected(&self) -> Arc<CollectedMetrics> {
        self.last_collected.read().unwrap().clone()
    }
}

impl Default for MetricsCollector {
    fn default() -> Self {
        Self::new()
    }
}

// Interval for metrics collection. Currently hard-coded to be the same as the metrics scrape interval from the obs agent
pub static METRICS_COLLECTION_INTERVAL: Duration = Duration::from_secs(30);

// Process-wide singleton collector instance.
pub static METRICS_COLLECTOR: Lazy<MetricsCollector> = Lazy::new(MetricsCollector::default);
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/toml_edit_ext.rs
libs/utils/src/toml_edit_ext.rs
#[derive(Debug, thiserror::Error)] pub enum Error { #[error("item is not a document")] ItemIsNotADocument, #[error(transparent)] Serde(toml_edit::de::Error), } pub fn deserialize_item<T>(item: &toml_edit::Item) -> Result<T, Error> where T: serde::de::DeserializeOwned, { let document: toml_edit::DocumentMut = match item { toml_edit::Item::Table(toml) => toml.clone().into(), toml_edit::Item::Value(toml_edit::Value::InlineTable(toml)) => { toml.clone().into_table().into() } _ => return Err(Error::ItemIsNotADocument), }; toml_edit::de::from_document(document).map_err(Error::Serde) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/circuit_breaker.rs
libs/utils/src/circuit_breaker.rs
use std::fmt::Display;
use std::time::{Duration, Instant};

use metrics::IntCounter;

/// Circuit breakers are for operations that are expensive and fallible.
///
/// If a circuit breaker fails repeatedly, we will stop attempting it for some
/// period of time, to avoid denial-of-service from retries, and
/// to mitigate the log spam from repeated failures.
pub struct CircuitBreaker {
    /// An identifier that enables us to log useful errors when a circuit is broken
    name: String,

    /// Consecutive failures since last success
    fail_count: usize,

    /// How many consecutive failures before we break the circuit
    fail_threshold: usize,

    /// If circuit is broken, when was it broken?
    broken_at: Option<Instant>,

    /// If set, we will auto-reset the circuit this long after it was broken. If None, broken
    /// circuits stay broken forever, or until success() is called.
    reset_period: Option<Duration>,

    /// If this is true, no actual circuit-breaking happens. This is for overriding a circuit breaker
    /// to permit something to keep running even if it would otherwise have tripped it.
    short_circuit: bool,
}

impl CircuitBreaker {
    /// Creates a breaker that trips after `fail_threshold` consecutive failures
    /// and (if `reset_period` is set) auto-resets that long after tripping.
    pub fn new(name: String, fail_threshold: usize, reset_period: Option<Duration>) -> Self {
        Self {
            name,
            fail_count: 0,
            fail_threshold,
            broken_at: None,
            reset_period,
            short_circuit: false,
        }
    }

    /// Construct an unbreakable circuit breaker, for use in unit tests etc.
    pub fn short_circuit() -> Self {
        Self {
            name: String::new(),
            fail_threshold: 0,
            fail_count: 0,
            broken_at: None,
            reset_period: None,
            short_circuit: true,
        }
    }

    /// Call this after a failed attempt. Trips the circuit (logging `error` and
    /// incrementing `metric`) once the consecutive-failure threshold is reached.
    pub fn fail<E>(&mut self, metric: &IntCounter, error: E)
    where
        E: Display,
    {
        if self.short_circuit {
            return;
        }

        self.fail_count += 1;
        // only break once: don't re-log/re-count while already broken
        if self.broken_at.is_none() && self.fail_count >= self.fail_threshold {
            self.break_circuit(metric, error);
        }
    }

    /// Call this after successfully executing an operation
    pub fn success(&mut self, metric: &IntCounter) {
        self.fail_count = 0;
        if let Some(broken_at) = &self.broken_at {
            // the circuit was broken and has now recovered; `metric` counts recoveries
            tracing::info!(breaker=%self.name, "Circuit breaker failure ended (was broken for {})",
                humantime::format_duration(broken_at.elapsed()));
            self.broken_at = None;
            metric.inc();
        }
    }

    /// Call this before attempting an operation, and skip the operation if we are currently broken.
    pub fn is_broken(&mut self) -> bool {
        if self.short_circuit {
            return false;
        }

        if let Some(broken_at) = self.broken_at {
            match self.reset_period {
                // auto-reset: the configured quiet period has elapsed
                Some(reset_period) if broken_at.elapsed() > reset_period => {
                    self.reset_circuit();
                    false
                }
                _ => true,
            }
        } else {
            false
        }
    }

    // Trips the circuit: records the break time, logs the triggering error and
    // increments the "broken" counter.
    fn break_circuit<E>(&mut self, metric: &IntCounter, error: E)
    where
        E: Display,
    {
        self.broken_at = Some(Instant::now());
        tracing::error!(breaker=%self.name, "Circuit breaker broken! Last error: {error}");
        metric.inc();
    }

    // Clears the broken state and the consecutive-failure count.
    fn reset_circuit(&mut self) {
        self.broken_at = None;
        self.fail_count = 0;
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/fs_ext/rename_noreplace.rs
libs/utils/src/fs_ext/rename_noreplace.rs
use nix::NixPath;

/// Rename a file without replacing an existing file.
///
/// This is a wrapper around platform-specific APIs.
/// On Linux/glibc it uses `renameat2(RENAME_NOREPLACE)`; on macOS it uses
/// `renamex_np(RENAME_EXCL)`. Fails with `EEXIST` when `dst` already exists.
pub fn rename_noreplace<P1: ?Sized + NixPath, P2: ?Sized + NixPath>(
    src: &P1,
    dst: &P2,
) -> nix::Result<()> {
    {
        #[cfg(all(target_os = "linux", target_env = "gnu"))]
        {
            nix::fcntl::renameat2(
                nix::fcntl::AT_FDCWD,
                src,
                nix::fcntl::AT_FDCWD,
                dst,
                nix::fcntl::RenameFlags::RENAME_NOREPLACE,
            )
        }
        #[cfg(target_os = "macos")]
        {
            // nix has no wrapper for renamex_np, so call libc directly.
            let res = src.with_nix_path(|src| {
                dst.with_nix_path(|dst|
                // SAFETY: `src` and `dst` are valid C strings as per the NixPath trait and they outlive the call to renamex_np.
                unsafe {
                    nix::libc::renamex_np(src.as_ptr(), dst.as_ptr(), nix::libc::RENAME_EXCL)
                })
            })??;
            // convert the raw libc return code into a nix::Result
            nix::errno::Errno::result(res).map(drop)
        }
        #[cfg(not(any(all(target_os = "linux", target_env = "gnu"), target_os = "macos")))]
        {
            std::compile_error!("OS does not support no-replace renames");
        }
    }
}

#[cfg(test)]
mod test {
    use std::fs;
    use std::path::PathBuf;

    use super::*;

    // Test dirs can be redirected (e.g. onto a specific filesystem) via env var.
    fn testdir() -> camino_tempfile::Utf8TempDir {
        match crate::env::var("NEON_UTILS_RENAME_NOREPLACE_TESTDIR") {
            Some(path) => {
                let path: camino::Utf8PathBuf = path;
                camino_tempfile::tempdir_in(path).unwrap()
            }
            None => camino_tempfile::tempdir().unwrap(),
        }
    }

    // Existing destination, absolute paths: must fail with EEXIST.
    #[test]
    fn test_absolute_paths() {
        let testdir = testdir();
        println!("testdir: {}", testdir.path());

        let src = testdir.path().join("src");
        let dst = testdir.path().join("dst");

        fs::write(&src, b"").unwrap();
        fs::write(&dst, b"").unwrap();

        let src = src.canonicalize().unwrap();
        assert!(src.is_absolute());
        let dst = dst.canonicalize().unwrap();
        assert!(dst.is_absolute());

        let result = rename_noreplace(&src, &dst);
        assert_eq!(result.unwrap_err(), nix::Error::EEXIST);
    }

    // Existing destination, paths relative to the current working directory.
    #[test]
    fn test_relative_paths() {
        let testdir = testdir();
        println!("testdir: {}", testdir.path());

        // this is fine because we run in nextest => process per test
        std::env::set_current_dir(testdir.path()).unwrap();

        let src = PathBuf::from("src");
        let dst = PathBuf::from("dst");
        fs::write(&src, b"").unwrap();
        fs::write(&dst, b"").unwrap();

        let result = rename_noreplace(&src, &dst);
        assert_eq!(result.unwrap_err(), nix::Error::EEXIST);
    }

    // Happy path: destination absent, rename succeeds and content moves over.
    #[test]
    fn test_works_when_not_exists() {
        let testdir = testdir();
        println!("testdir: {}", testdir.path());

        let src = testdir.path().join("src");
        let dst = testdir.path().join("dst");

        fs::write(&src, b"content").unwrap();

        rename_noreplace(src.as_std_path(), dst.as_std_path()).unwrap();
        assert_eq!(
            "content",
            String::from_utf8(std::fs::read(&dst).unwrap()).unwrap()
        );
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/sync/heavier_once_cell.rs
libs/utils/src/sync/heavier_once_cell.rs
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex, MutexGuard};

use tokio::sync::Semaphore;

/// Custom design like [`tokio::sync::OnceCell`] but using [`OwnedSemaphorePermit`] instead of
/// `SemaphorePermit`.
///
/// Allows use of `take` which does not require holding an outer mutex guard
/// for the duration of initialization.
///
/// Has no unsafe, builds upon [`tokio::sync::Semaphore`] and [`std::sync::Mutex`].
///
/// [`OwnedSemaphorePermit`]: tokio::sync::OwnedSemaphorePermit
pub struct OnceCell<T> {
    inner: Mutex<Inner<T>>,
    // number of tasks currently queued in get_or_init*; see initializer_count()
    initializers: AtomicUsize,
}

impl<T> Default for OnceCell<T> {
    /// Create new uninitialized [`OnceCell`].
    fn default() -> Self {
        Self {
            inner: Default::default(),
            initializers: AtomicUsize::new(0),
        }
    }
}

/// Semaphore is the current state:
/// - open semaphore means the value is `None`, not yet initialized
/// - closed semaphore means the value has been initialized
#[derive(Debug)]
struct Inner<T> {
    init_semaphore: Arc<Semaphore>,
    value: Option<T>,
}

impl<T> Default for Inner<T> {
    fn default() -> Self {
        Self {
            init_semaphore: Arc::new(Semaphore::new(1)),
            value: None,
        }
    }
}

impl<T> OnceCell<T> {
    /// Creates an already initialized `OnceCell` with the given value.
    pub fn new(value: T) -> Self {
        // closed semaphore == "already initialized" (see Inner's invariant)
        let sem = Semaphore::new(1);
        sem.close();
        Self {
            inner: Mutex::new(Inner {
                init_semaphore: Arc::new(sem),
                value: Some(value),
            }),
            initializers: AtomicUsize::new(0),
        }
    }

    /// Returns a guard to an existing initialized value, or uniquely initializes the value before
    /// returning the guard.
    ///
    /// Initializing might wait on any existing [`Guard::take_and_deinit`] deinitialization.
    ///
    /// Initialization is panic-safe and cancellation-safe.
    pub async fn get_or_init<F, Fut, E>(&self, factory: F) -> Result<Guard<'_, T>, E>
    where
        F: FnOnce(InitPermit) -> Fut,
        Fut: std::future::Future<Output = Result<(T, InitPermit), E>>,
    {
        loop {
            // grab the semaphore under the lock, but never hold the lock across an await
            let sem = {
                let guard = self.inner.lock().unwrap();
                if guard.value.is_some() {
                    return Ok(Guard(guard));
                }
                guard.init_semaphore.clone()
            };

            {
                let permit = {
                    // increment the count for the duration of queued
                    let _guard = CountWaitingInitializers::start(self);
                    sem.acquire().await
                };

                let Ok(permit) = permit else {
                    // acquire() errors only when the semaphore was closed, i.e. a
                    // concurrent initialization completed (or a deinit swapped it).
                    let guard = self.inner.lock().unwrap();
                    if !Arc::ptr_eq(&sem, &guard.init_semaphore) {
                        // there was a take_and_deinit in between
                        continue;
                    }
                    assert!(
                        guard.value.is_some(),
                        "semaphore got closed, must be initialized"
                    );
                    return Ok(Guard(guard));
                };

                // hand lifecycle control of the permit over to InitPermit below
                permit.forget();
            }

            let permit = InitPermit(sem);
            // if the factory errors or this future is dropped, the InitPermit's
            // Drop re-adds the permit, so another waiter can retry (cancel-safety)
            let (value, _permit) = factory(permit).await?;

            let guard = self.inner.lock().unwrap();

            return Ok(Self::set0(value, guard));
        }
    }

    /// Like [`Self::get_or_init_detached_measured`], but without out parameter for time spent waiting.
    pub async fn get_or_init_detached(&self) -> Result<Guard<'_, T>, InitPermit> {
        self.get_or_init_detached_measured(None).await
    }

    /// Returns a guard to an existing initialized value, or returns an unique initialization
    /// permit which can be used to initialize this `OnceCell` using `OnceCell::set`.
    pub async fn get_or_init_detached_measured(
        &self,
        mut wait_time: Option<&mut crate::elapsed_accum::ElapsedAccum>,
    ) -> Result<Guard<'_, T>, InitPermit> {
        // It looks like OnceCell::get_or_init could be implemented using this method instead of
        // duplication. However, that makes the future be !Send due to possibly holding on to the
        // MutexGuard over an await point.
        loop {
            let sem = {
                let guard = self.inner.lock().unwrap();
                if guard.value.is_some() {
                    return Ok(Guard(guard));
                }
                guard.init_semaphore.clone()
            };

            {
                let permit = {
                    // increment the count for the duration of queued
                    let _guard = CountWaitingInitializers::start(self);
                    let fut = sem.acquire();
                    // optionally accumulate the time spent waiting for the permit
                    if let Some(wait_time) = wait_time.as_mut() {
                        wait_time.measure(fut).await
                    } else {
                        fut.await
                    }
                };

                let Ok(permit) = permit else {
                    let guard = self.inner.lock().unwrap();
                    if !Arc::ptr_eq(&sem, &guard.init_semaphore) {
                        // there was a take_and_deinit in between
                        continue;
                    }
                    assert!(
                        guard.value.is_some(),
                        "semaphore got closed, must be initialized"
                    );
                    return Ok(Guard(guard));
                };

                permit.forget();
            }

            let permit = InitPermit(sem);
            return Err(permit);
        }
    }

    /// Assuming a permit is held after previous call to [`Guard::take_and_deinit`], it can be used
    /// to complete initializing the inner value.
    ///
    /// # Panics
    ///
    /// If the inner has already been initialized.
    pub fn set(&self, value: T, _permit: InitPermit) -> Guard<'_, T> {
        let guard = self.inner.lock().unwrap();

        // cannot assert that this permit is for self.inner.semaphore, but we can assert it cannot
        // give more permits right now.
        if guard.init_semaphore.try_acquire().is_ok() {
            drop(guard);
            panic!("permit is of wrong origin");
        }

        Self::set0(value, guard)
    }

    // Shared tail of set() and get_or_init(): store the value and close the
    // semaphore so every future acquire() observes "initialized".
    fn set0(value: T, mut guard: std::sync::MutexGuard<'_, Inner<T>>) -> Guard<'_, T> {
        if guard.value.is_some() {
            drop(guard);
            unreachable!("we won permit, must not be initialized");
        }
        guard.value = Some(value);
        guard.init_semaphore.close();
        Guard(guard)
    }

    /// Returns a guard to an existing initialized value, if any.
    pub fn get(&self) -> Option<Guard<'_, T>> {
        let guard = self.inner.lock().unwrap();
        if guard.value.is_some() {
            Some(Guard(guard))
        } else {
            None
        }
    }

    /// Like [`Guard::take_and_deinit`], but will return `None` if this OnceCell was never
    /// initialized.
    pub fn take_and_deinit(&mut self) -> Option<(T, InitPermit)> {
        // &mut self gives exclusive access, so no locking subtleties here
        let inner = self.inner.get_mut().unwrap();
        inner.take_and_deinit()
    }

    /// Return the number of [`Self::get_or_init`] calls waiting for initialization to complete.
    pub fn initializer_count(&self) -> usize {
        self.initializers.load(Ordering::Relaxed)
    }
}

/// DropGuard counter for queued tasks waiting to initialize, mainly accessible for the
/// initializing task for example at the end of initialization.
struct CountWaitingInitializers<'a, T>(&'a OnceCell<T>);

impl<'a, T> CountWaitingInitializers<'a, T> {
    fn start(target: &'a OnceCell<T>) -> Self {
        target.initializers.fetch_add(1, Ordering::Relaxed);
        CountWaitingInitializers(target)
    }
}

impl<T> Drop for CountWaitingInitializers<'_, T> {
    fn drop(&mut self) {
        // decrement even on panic/cancellation, keeping the count accurate
        self.0.initializers.fetch_sub(1, Ordering::Relaxed);
    }
}

/// Uninteresting guard object to allow short-lived access to inspect or clone the held,
/// initialized value.
#[derive(Debug)]
pub struct Guard<'a, T>(MutexGuard<'a, Inner<T>>);

impl<T> std::ops::Deref for Guard<'_, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.0
            .value
            .as_ref()
            .expect("guard is not created unless value has been initialized")
    }
}

impl<T> std::ops::DerefMut for Guard<'_, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.0
            .value
            .as_mut()
            .expect("guard is not created unless value has been initialized")
    }
}

impl<T> Guard<'_, T> {
    /// Take the current value, and a new permit for it's deinitialization.
    ///
    /// The permit will be on a semaphore part of the new internal value, and any following
    /// [`OnceCell::get_or_init`] will wait on it to complete.
    pub fn take_and_deinit(mut self) -> (T, InitPermit) {
        self.0
            .take_and_deinit()
            .expect("guard is not created unless value has been initialized")
    }
}

impl<T> Inner<T> {
    // Removes the value and swaps in a fresh Inner whose semaphore's single
    // permit is handed to the returned InitPermit, blocking re-initializers.
    pub fn take_and_deinit(&mut self) -> Option<(T, InitPermit)> {
        let value = self.value.take()?;

        let mut swapped = Inner::default();
        let sem = swapped.init_semaphore.clone();
        // acquire and forget right away, moving the control over to InitPermit
        sem.try_acquire().expect("we just created this").forget();
        let permit = InitPermit(sem);
        std::mem::swap(self, &mut swapped);
        Some((value, permit))
    }
}

/// Type held by OnceCell (de)initializing task.
///
/// On drop, this type will return the permit.
pub struct InitPermit(Arc<tokio::sync::Semaphore>);

impl std::fmt::Debug for InitPermit {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // print the semaphore's address, which identifies the cell generation
        let ptr = Arc::as_ptr(&self.0) as *const ();
        f.debug_tuple("InitPermit").field(&ptr).finish()
    }
}

impl Drop for InitPermit {
    fn drop(&mut self) {
        assert_eq!(
            self.0.available_permits(),
            0,
            "InitPermit should only exist as the unique permit"
        );
        self.0.add_permits(1);
    }
}

#[cfg(test)]
mod tests {
    use std::convert::Infallible;
    use std::pin::{Pin, pin};
    use std::time::Duration;

    use futures::Future;

    use super::*;

    // Exactly one of many racing initializers should run its factory and win.
    #[tokio::test]
    async fn many_initializers() {
        #[derive(Default, Debug)]
        struct Counters {
            factory_got_to_run: AtomicUsize,
            future_polled: AtomicUsize,
            winners: AtomicUsize,
        }

        let initializers = 100;

        let cell = Arc::new(OnceCell::default());
        let counters = Arc::new(Counters::default());
        let barrier = Arc::new(tokio::sync::Barrier::new(initializers + 1));

        let mut js = tokio::task::JoinSet::new();
        for i in 0..initializers {
            js.spawn({
                let cell = cell.clone();
                let counters = counters.clone();
                let barrier = barrier.clone();

                async move {
                    barrier.wait().await;
                    let won = {
                        let g = cell
                            .get_or_init(|permit| {
                                counters.factory_got_to_run.fetch_add(1, Ordering::Relaxed);
                                async {
                                    counters.future_polled.fetch_add(1, Ordering::Relaxed);
                                    Ok::<_, Infallible>((i, permit))
                                }
                            })
                            .await
                            .unwrap();

                        *g == i
                    };

                    if won {
                        counters.winners.fetch_add(1, Ordering::Relaxed);
                    }
                }
            });
        }

        barrier.wait().await;

        while let Some(next) = js.join_next().await {
            next.expect("no panics expected");
        }

        let mut counters = Arc::try_unwrap(counters).unwrap();

        assert_eq!(*counters.factory_got_to_run.get_mut(), 1);
        assert_eq!(*counters.future_polled.get_mut(), 1);
        assert_eq!(*counters.winners.get_mut(), 1);
    }

    // A re-initialization must block until the outstanding InitPermit from
    // take_and_deinit has been dropped.
    #[tokio::test(start_paused = true)]
    async fn reinit_waits_for_deinit() {
        // with the tokio::time paused, we will "sleep" for 1s while holding the reinitialization
        let sleep_for = Duration::from_secs(1);
        let initial = 42;
        let reinit = 1;
        let cell = Arc::new(OnceCell::new(initial));

        let deinitialization_started = Arc::new(tokio::sync::Barrier::new(2));

        let jh = tokio::spawn({
            let cell = cell.clone();
            let deinitialization_started = deinitialization_started.clone();
            async move {
                let (answer, _permit) = cell.get().expect("initialized to value").take_and_deinit();
                assert_eq!(answer, initial);

                deinitialization_started.wait().await;
                tokio::time::sleep(sleep_for).await;
            }
        });

        deinitialization_started.wait().await;

        let started_at = tokio::time::Instant::now();
        cell.get_or_init(|permit| async { Ok::<_, Infallible>((reinit, permit)) })
            .await
            .unwrap();

        let elapsed = started_at.elapsed();
        assert!(
            elapsed >= sleep_for,
            "initialization should had taken at least the time time slept with permit"
        );

        jh.await.unwrap();

        assert_eq!(*cell.get().unwrap(), reinit);
    }

    // The permit returned by take_and_deinit can be fed back into set().
    #[test]
    fn reinit_with_deinit_permit() {
        let cell = Arc::new(OnceCell::new(42));

        let (mol, permit) = cell.get().unwrap().take_and_deinit();
        cell.set(5, permit);
        assert_eq!(*cell.get().unwrap(), 5);

        let (five, permit) = cell.get().unwrap().take_and_deinit();
        assert_eq!(5, five);
        cell.set(mol, permit);
        assert_eq!(*cell.get().unwrap(), 42);
    }

    // A failing factory leaves the cell uninitialized and retryable.
    #[tokio::test]
    async fn initialization_attemptable_until_ok() {
        let cell = OnceCell::default();

        for _ in 0..10 {
            cell.get_or_init(|_permit| async { Err("whatever error") })
                .await
                .unwrap_err();
        }

        let g = cell
            .get_or_init(|permit| async { Ok::<_, Infallible>(("finally success", permit)) })
            .await
            .unwrap();
        assert_eq!(*g, "finally success");
    }

    // Dropping an in-flight initializer must not poison the cell.
    #[tokio::test]
    async fn initialization_is_cancellation_safe() {
        let cell = OnceCell::default();

        let barrier = tokio::sync::Barrier::new(2);

        let initializer = cell.get_or_init(|permit| async {
            barrier.wait().await;
            futures::future::pending::<()>().await;

            Ok::<_, Infallible>(("never reached", permit))
        });

        tokio::select! {
            _ = initializer => { unreachable!("cannot complete; stuck in pending().await") },
            _ = barrier.wait() => {}
        };

        // now initializer is dropped

        assert!(cell.get().is_none());

        let g = cell
            .get_or_init(|permit| async { Ok::<_, Infallible>(("now initialized", permit)) })
            .await
            .unwrap();
        assert_eq!(*g, "now initialized");
    }

    #[tokio::test(start_paused = true)]
    async fn reproduce_init_take_deinit_race() {
        init_take_deinit_scenario(|cell, factory| {
            Box::pin(async {
                cell.get_or_init(factory).await.unwrap();
            })
        })
        .await;
    }

    type BoxedInitFuture<T, E> = Pin<Box<dyn Future<Output = Result<(T, InitPermit), E>>>>;
    type BoxedInitFunction<T, E> = Box<dyn Fn(InitPermit) -> BoxedInitFuture<T, E>>;

    /// Reproduce an assertion failure.
    ///
    /// This has interesting generics to be generic between `get_or_init` and `get_mut_or_init`.
    /// We currently only have one, but the structure is kept.
    async fn init_take_deinit_scenario<F>(init_way: F)
    where
        F: for<'a> Fn(
            &'a OnceCell<&'static str>,
            BoxedInitFunction<&'static str, Infallible>,
        ) -> Pin<Box<dyn Future<Output = ()> + 'a>>,
    {
        let cell = OnceCell::default();

        // acquire the init_semaphore only permit to drive initializing tasks in order to waiting
        // on the same semaphore.
        let permit = cell
            .inner
            .lock()
            .unwrap()
            .init_semaphore
            .clone()
            .try_acquire_owned()
            .unwrap();

        let mut t1 = pin!(init_way(
            &cell,
            Box::new(|permit| Box::pin(async move { Ok(("t1", permit)) })),
        ));

        let mut t2 = pin!(init_way(
            &cell,
            Box::new(|permit| Box::pin(async move { Ok(("t2", permit)) })),
        ));

        // drive t2 first to the init_semaphore -- the timeout will be hit once t2 future can
        // no longer make progress
        tokio::select! {
            _ = &mut t2 => unreachable!("it cannot get permit"),
            _ = tokio::time::sleep(Duration::from_secs(3600 * 24 * 7 * 365)) => {}
        }

        // followed by t1 in the init_semaphore
        tokio::select! {
            _ = &mut t1 => unreachable!("it cannot get permit"),
            _ = tokio::time::sleep(Duration::from_secs(3600 * 24 * 7 * 365)) => {}
        }

        // now let t2 proceed and initialize
        drop(permit);
        t2.await;

        let (s, permit) = { cell.get().unwrap().take_and_deinit() };
        assert_eq!("t2", s);

        // now originally t1 would see the semaphore it has as closed. it cannot yet get a permit from
        // the new one.
        tokio::select! {
            _ = &mut t1 => unreachable!("it cannot get permit"),
            _ = tokio::time::sleep(Duration::from_secs(3600 * 24 * 7 * 365)) => {}
        }

        // only now we get to initialize it
        drop(permit);
        t1.await;

        assert_eq!("t1", *cell.get().unwrap());
    }

    // Detached permits block concurrent get_or_init until used via set().
    #[tokio::test(start_paused = true)]
    async fn detached_init_smoke() {
        let target = OnceCell::default();

        let Err(permit) = target.get_or_init_detached().await else {
            unreachable!("it is not initialized")
        };

        tokio::time::timeout(
            std::time::Duration::from_secs(3600 * 24 * 7 * 365),
            target.get_or_init(|permit2| async { Ok::<_, Infallible>((11, permit2)) }),
        )
        .await
        .expect_err("should timeout since we are already holding the permit");

        target.set(42, permit);

        let (_answer, permit) = {
            let guard = target
                .get_or_init(|permit| async { Ok::<_, Infallible>((11, permit)) })
                .await
                .unwrap();

            assert_eq!(*guard, 42);

            guard.take_and_deinit()
        };

        assert!(target.get().is_none());

        target.set(11, permit);

        assert_eq!(*target.get().unwrap(), 11);
    }

    // &mut take_and_deinit: None before init, Some(value) after, None again.
    #[tokio::test]
    async fn take_and_deinit_on_mut() {
        use std::convert::Infallible;

        let mut target = OnceCell::<u32>::default();
        assert!(target.take_and_deinit().is_none());

        target
            .get_or_init(|permit| async move { Ok::<_, Infallible>((42, permit)) })
            .await
            .unwrap();

        let again = target.take_and_deinit();
        assert!(matches!(again, Some((42, _))), "{again:?}");

        assert!(target.take_and_deinit().is_none());
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/sync/gate.rs
libs/utils/src/sync/gate.rs
//! A `Gate` tracks outstanding users of a resource (via RAII `GateGuard`s) so that
//! shutdown can wait for all of them to finish before tearing the resource down.

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;

/// Gates are a concurrency helper, primarily used for implementing safe shutdown.
///
/// Users of a resource call `enter()` to acquire a GateGuard, and the owner of
/// the resource calls `close()` when they want to ensure that all holders of guards
/// have released them, and that no future guards will be issued.
pub struct Gate {
    inner: Arc<GateInner>,
}

impl std::fmt::Debug for Gate {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Gate")
            // use this for identification
            .field("ptr", &Arc::as_ptr(&self.inner))
            .field("inner", &self.inner)
            .finish()
    }
}

// Shared state behind the `Gate` and all of its `GateGuard`s.
struct GateInner {
    // Each outstanding guard holds one (forgotten) permit; `close()` acquires all
    // MAX_UNITS permits to wait for guards, then closes the semaphore.
    sem: tokio::sync::Semaphore,
    // Observability only: set once `close()` has been waiting "too long", so that
    // guards dropped afterwards can log that they were holding the gate open.
    closing: std::sync::atomic::AtomicBool,
}

impl std::fmt::Debug for GateInner {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let avail = self.sem.available_permits();
        // Outstanding guards = MAX_UNITS - available permits, when that subtraction
        // is representable; otherwise fall back to printing the raw permit count.
        let guards = u32::try_from(avail)
            .ok()
            // the sem only supports 32-bit ish amount, but lets play it safe
            .and_then(|x| Gate::MAX_UNITS.checked_sub(x));

        let closing = self.closing.load(Ordering::Relaxed);

        if let Some(guards) = guards {
            f.debug_struct("Gate")
                .field("remaining_guards", &guards)
                .field("closing", &closing)
                .finish()
        } else {
            f.debug_struct("Gate")
                .field("avail_permits", &avail)
                .field("closing", &closing)
                .finish()
        }
    }
}

/// RAII guard for a [`Gate`]: as long as this exists, calls to [`Gate::close`] will
/// not complete.
#[derive(Debug)]
pub struct GateGuard {
    // Record the span where the gate was entered, so that we can identify who was blocking Gate::close
    span_at_enter: tracing::Span,
    gate: Arc<GateInner>,
}

impl GateGuard {
    /// Acquire an additional guard on the same gate; fails if the gate has already
    /// been observed closing (i.e. `close()` began acquiring permits).
    pub fn try_clone(&self) -> Result<Self, GateError> {
        Gate::enter_impl(self.gate.clone())
    }
}

impl Drop for GateGuard {
    fn drop(&mut self) {
        if self.gate.closing.load(Ordering::Relaxed) {
            // `close()` has been waiting on us: log (inside the span captured at
            // enter time) to identify who was keeping the gate open.
            self.span_at_enter.in_scope(
                || tracing::info!(gate = ?Arc::as_ptr(&self.gate), "kept the gate from closing"),
            );
        }

        // when the permit was acquired, it was forgotten to allow us to manage it's lifecycle
        // manually, so "return" the permit now.
        self.gate.sem.add_permits(1);
    }
}

#[derive(Debug, thiserror::Error)]
pub enum GateError {
    #[error("gate is closed")]
    GateClosed,
}

impl GateError {
    /// Whether this error represents a cancellation/shutdown condition (always
    /// true today — the only variant is `GateClosed`).
    pub fn is_cancel(&self) -> bool {
        match self {
            GateError::GateClosed => true,
        }
    }
}

impl Default for Gate {
    fn default() -> Self {
        Self {
            inner: Arc::new(GateInner {
                sem: tokio::sync::Semaphore::new(Self::MAX_UNITS as usize),
                closing: AtomicBool::new(false),
            }),
        }
    }
}

impl Gate {
    // Total permit budget; also the number of permits `close()` must acquire.
    const MAX_UNITS: u32 = u32::MAX;

    /// Acquire a guard that will prevent close() calls from completing. If close()
    /// was already called, this will return an error which should be interpreted
    /// as "shutting down".
    ///
    /// This function would typically be used from e.g. request handlers. While holding
    /// the guard returned from this function, it is important to respect a CancellationToken
    /// to avoid blocking close() indefinitely: typically types that contain a Gate will
    /// also contain a CancellationToken.
    pub fn enter(&self) -> Result<GateGuard, GateError> {
        Self::enter_impl(self.inner.clone())
    }

    // Shared implementation for `enter` and `GateGuard::try_clone`.
    fn enter_impl(gate: Arc<GateInner>) -> Result<GateGuard, GateError> {
        let permit = gate.sem.try_acquire().map_err(|_| GateError::GateClosed)?;

        // we now have the permit, let's disable the normal raii functionality and leave
        // "returning" the permit to our GateGuard::drop.
        //
        // this is done to avoid the need for multiple Arcs (one for semaphore, next for other
        // fields).
        permit.forget();

        Ok(GateGuard {
            span_at_enter: tracing::Span::current(),
            gate,
        })
    }

    /// Types with a shutdown() method and a gate should call this method at the
    /// end of shutdown, to ensure that all GateGuard holders are done.
    ///
    /// This will wait for all guards to be destroyed. For this to complete promptly, it is
    /// important that the holders of such guards are respecting a CancellationToken which has
    /// been cancelled before entering this function.
    pub async fn close(&self) {
        let started_at = std::time::Instant::now();
        let mut do_close = std::pin::pin!(self.do_close());

        // with 1s we rarely saw anything, let's try if we get more gate closing reasons with 100ms
        let nag_after = Duration::from_millis(100);

        let Err(_timeout) = tokio::time::timeout(nag_after, &mut do_close).await else {
            return;
        };

        tracing::info!(
            gate = ?self.as_ptr(),
            elapsed_ms = started_at.elapsed().as_millis(),
            "closing is taking longer than expected"
        );

        // close operation is not trying to be cancellation safe as pageserver does not need it.
        //
        // note: "closing" is not checked in Gate::enter -- it exists just for observability,
        // dropping of GateGuard after this will log who they were.
        self.inner.closing.store(true, Ordering::Relaxed);

        do_close.await;

        tracing::info!(
            gate = ?self.as_ptr(),
            elapsed_ms = started_at.elapsed().as_millis(),
            "close completed"
        );
    }

    /// Used as an identity of a gate. This identity will be resolved to something useful when
    /// it's actually closed in a hopefully sensible `tracing::Span` which will describe it even
    /// more.
    ///
    /// `GateGuard::drop` also logs this pointer when it has realized it has been keeping the gate
    /// open for too long.
    fn as_ptr(&self) -> *const GateInner {
        Arc::as_ptr(&self.inner)
    }

    /// Check if [`Self::close()`] has finished waiting for all [`Self::enter()`] users to finish. This
    /// is usually analoguous for "Did shutdown finish?" for types that include a Gate, whereas checking
    /// the CancellationToken on such types is analogous to "Did shutdown start?"
    pub fn close_complete(&self) -> bool {
        self.inner.sem.is_closed()
    }

    #[tracing::instrument(level = tracing::Level::DEBUG, skip_all, fields(gate = ?self.as_ptr()))]
    async fn do_close(&self) {
        tracing::debug!("Closing Gate...");

        // Acquiring all units blocks until every outstanding guard has returned its
        // forgotten permit via `GateGuard::drop`.
        match self.inner.sem.acquire_many(Self::MAX_UNITS).await {
            Ok(_permit) => {
                // While holding all units, close the semaphore. All subsequent calls to enter() will fail.
                self.inner.sem.close();
            }
            Err(_closed) => {
                // Semaphore closed: we are the only function that can do this, so it indicates a double-call.
                // This is legal. Timeline::shutdown for example is not protected from being called more than
                // once.
                tracing::debug!("Double close")
            }
        }

        tracing::debug!("Closed Gate.")
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn close_unused() {
        // Having taken no guards, we should not be blocked in close
        let gate = Gate::default();
        gate.close().await;
    }

    #[tokio::test]
    async fn close_idle() {
        // If a guard is dropped before entering, close should not be blocked
        let gate = Gate::default();
        let guard = gate.enter().unwrap();
        drop(guard);
        gate.close().await;

        // Entering a closed guard fails
        gate.enter().expect_err("enter should fail after close");
    }

    #[tokio::test(start_paused = true)]
    async fn close_busy_gate() {
        let gate = Gate::default();
        let forever = Duration::from_secs(24 * 7 * 365);

        let guard =
            tracing::info_span!("i am holding back the gate").in_scope(|| gate.enter().unwrap());

        let mut close_fut = std::pin::pin!(gate.close());

        // Close should be waiting for guards to drop
        tokio::time::timeout(forever, &mut close_fut)
            .await
            .unwrap_err();

        // Attempting to enter() should fail, even though close isn't done yet.
        gate.enter()
            .expect_err("enter should fail after entering close");

        // this will now log, which we cannot verify except manually
        drop(guard);

        // Guard is gone, close should finish
        close_fut.await;

        // Attempting to enter() is still forbidden
        gate.enter().expect_err("enter should fail finishing close");
    }

    #[tokio::test(start_paused = true)]
    async fn clone_gate_guard() {
        let gate = Gate::default();
        let forever = Duration::from_secs(24 * 7 * 365);

        let guard1 = gate.enter().expect("gate isn't closed");
        let guard2 = guard1.try_clone().expect("gate isn't clsoed");

        let mut close_fut = std::pin::pin!(gate.close());
        tokio::time::timeout(forever, &mut close_fut)
            .await
            .unwrap_err();

        // we polled close_fut once, that should prevent all later enters and clones
        gate.enter().unwrap_err();
        guard1.try_clone().unwrap_err();
        guard2.try_clone().unwrap_err();

        // guard2 keeps gate open even if guard1 is closed
        drop(guard1);
        tokio::time::timeout(forever, &mut close_fut)
            .await
            .unwrap_err();

        drop(guard2);
        // now that the last guard is dropped, closing should complete
        close_fut.await;

        // entering is still forbidden
        gate.enter().expect_err("enter should stilll fail");
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/sync/duplex.rs
libs/utils/src/sync/duplex.rs
//! Bi-directional ("duplex") channel helpers.

/// mpsc-backed duplex channel implementation.
pub mod mpsc;
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/sync/spsc_fold.rs
libs/utils/src/sync/spsc_fold.rs
//! A single-producer single-consumer channel with a single-value slot: `send`
//! tries to fold the new value into the value already buffered; if the fold is
//! rejected, the sender waits until the receiver consumes the buffered value.

use core::future::poll_fn;
use core::task::Poll;
use std::sync::{Arc, Mutex};

use diatomic_waker::DiatomicWaker;

/// Sending half; `send` requires `&mut self`, so there is exactly one sender task.
pub struct Sender<T> {
    state: Arc<Inner<T>>,
}

/// Receiving half; `recv` requires `&mut self`, so there is exactly one receiver task.
pub struct Receiver<T> {
    state: Arc<Inner<T>>,
}

// State shared between sender and receiver.
struct Inner<T> {
    // Waker storage for the receiver (registered while waiting for data).
    wake_receiver: DiatomicWaker,
    // Waker storage for the sender (registered while waiting for the receiver to consume).
    wake_sender: DiatomicWaker,
    // The channel's one-slot state machine, see [`State`].
    value: Mutex<State<T>>,
}

// The channel state machine. "transient state" variants only exist while a
// transition is in progress (inside `std::mem::replace`) and must never be
// observed by the other side.
enum State<T> {
    NoData,
    HasData(T),
    TryFoldFailed, // transient state
    SenderWaitsForReceiverToConsume(T),
    SenderGone(Option<T>),
    ReceiverGone,
    AllGone,
    SenderDropping,   // transient state
    ReceiverDropping, // transient state
}

/// Create a new channel pair starting in the empty (`NoData`) state.
pub fn channel<T: Send>() -> (Sender<T>, Receiver<T>) {
    let inner = Inner {
        wake_receiver: DiatomicWaker::new(),
        wake_sender: DiatomicWaker::new(),
        value: Mutex::new(State::NoData),
    };
    let state = Arc::new(inner);
    (
        Sender {
            state: state.clone(),
        },
        Receiver { state },
    )
}

#[derive(Debug, thiserror::Error)]
pub enum SendError {
    #[error("receiver is gone")]
    ReceiverGone,
}

impl<T: Send> Sender<T> {
    /// Send `value`, folding it into any still-buffered value via `try_fold`.
    /// If `try_fold` rejects the merge (returns `Err`), this waits until the
    /// receiver has consumed the buffered value, then buffers `value`.
    ///
    /// # Panics
    ///
    /// If `try_fold` panics, any subsequent call to `send` will panic.
    pub async fn send<F>(&mut self, value: T, try_fold: F) -> Result<(), SendError>
    where
        F: Fn(&mut T, T) -> Result<(), T>,
    {
        // `value` is moved into the slot (or folded away) exactly once across polls.
        let mut value = Some(value);
        poll_fn(|cx| {
            let mut guard = self.state.value.lock().unwrap();
            match &mut *guard {
                State::NoData => {
                    *guard = State::HasData(value.take().unwrap());
                    self.state.wake_receiver.notify();
                    Poll::Ready(Ok(()))
                }
                State::HasData(_) => {
                    let State::HasData(acc_mut) = &mut *guard else {
                        unreachable!("this match arm guarantees that the guard is HasData");
                    };
                    match try_fold(acc_mut, value.take().unwrap()) {
                        Ok(()) => {
                            // no need to wake receiver, if it was waiting it already
                            // got a wake-up when we transitioned from NoData to HasData
                            Poll::Ready(Ok(()))
                        }
                        Err(unfoldable_value) => {
                            // Keep the rejected value for the next poll; the slot's
                            // current value stays buffered for the receiver.
                            value = Some(unfoldable_value);
                            let State::HasData(acc) =
                                std::mem::replace(&mut *guard, State::TryFoldFailed)
                            else {
                                unreachable!("this match arm guarantees that the guard is HasData");
                            };
                            *guard = State::SenderWaitsForReceiverToConsume(acc);
                            // SAFETY: send is single threaded due to `&mut self` requirement,
                            // therefore register is not concurrent.
                            unsafe {
                                self.state.wake_sender.register(cx.waker());
                            }
                            Poll::Pending
                        }
                    }
                }
                State::SenderWaitsForReceiverToConsume(_data) => {
                    // SAFETY: send is single threaded due to `&mut self` requirement,
                    // therefore register is not concurrent.
                    unsafe {
                        self.state.wake_sender.register(cx.waker());
                    }
                    Poll::Pending
                }
                State::ReceiverGone => Poll::Ready(Err(SendError::ReceiverGone)),
                State::SenderGone(_)
                | State::AllGone
                | State::SenderDropping
                | State::ReceiverDropping
                | State::TryFoldFailed => {
                    unreachable!();
                }
            }
        })
        .await
    }
}

impl<T> Drop for Sender<T> {
    fn drop(&mut self) {
        // Always wake the receiver afterwards so a waiting `recv` observes SenderGone.
        scopeguard::defer! {
            self.state.wake_receiver.notify()
        };
        let Ok(mut guard) = self.state.value.lock() else {
            return;
        };
        *guard = match std::mem::replace(&mut *guard, State::SenderDropping) {
            State::NoData => State::SenderGone(None),
            State::HasData(data) | State::SenderWaitsForReceiverToConsume(data) => {
                // The receiver can still consume the last buffered value.
                State::SenderGone(Some(data))
            }
            State::ReceiverGone => State::AllGone,
            State::TryFoldFailed
            | State::SenderGone(_)
            | State::AllGone
            | State::SenderDropping
            | State::ReceiverDropping => {
                unreachable!("unreachable state {:?}", guard.discriminant_str())
            }
        }
    }
}

#[derive(Debug, thiserror::Error)]
pub enum RecvError {
    #[error("sender is gone")]
    SenderGone,
}

impl<T: Send> Receiver<T> {
    /// Receive the buffered value, waiting for the sender if the slot is empty.
    /// Returns `Err(SenderGone)` once the sender is dropped and the slot drained.
    pub async fn recv(&mut self) -> Result<T, RecvError> {
        poll_fn(|cx| {
            let mut guard = self.state.value.lock().unwrap();
            match &mut *guard {
                State::NoData => {
                    // SAFETY: recv is single threaded due to `&mut self` requirement,
                    // therefore register is not concurrent.
                    unsafe {
                        self.state.wake_receiver.register(cx.waker());
                    }
                    Poll::Pending
                }
                guard @ State::HasData(_)
                | guard @ State::SenderWaitsForReceiverToConsume(_)
                | guard @ State::SenderGone(Some(_)) => {
                    let data = guard
                        .take_data()
                        .expect("in these states, data is guaranteed to be present");
                    // Unblock a sender parked in SenderWaitsForReceiverToConsume.
                    self.state.wake_sender.notify();
                    Poll::Ready(Ok(data))
                }
                State::SenderGone(None) => Poll::Ready(Err(RecvError::SenderGone)),
                State::ReceiverGone
                | State::AllGone
                | State::SenderDropping
                | State::ReceiverDropping
                | State::TryFoldFailed => {
                    unreachable!("unreachable state {:?}", guard.discriminant_str());
                }
            }
        })
        .await
    }
}

impl<T> Drop for Receiver<T> {
    fn drop(&mut self) {
        // Always wake the sender afterwards so a waiting `send` observes ReceiverGone.
        scopeguard::defer! {
            self.state.wake_sender.notify()
        };
        let Ok(mut guard) = self.state.value.lock() else {
            return;
        };
        *guard = match std::mem::replace(&mut *guard, State::ReceiverDropping) {
            State::NoData => State::ReceiverGone,
            State::HasData(_) | State::SenderWaitsForReceiverToConsume(_) => State::ReceiverGone,
            State::SenderGone(_) => State::AllGone,
            State::TryFoldFailed
            | State::ReceiverGone
            | State::AllGone
            | State::SenderDropping
            | State::ReceiverDropping => {
                unreachable!("unreachable state {:?}", guard.discriminant_str())
            }
        }
    }
}

impl<T> State<T> {
    // Move the buffered value out, leaving NoData (or SenderGone(None) semantics
    // via the SenderGone arm, which empties the Option in place).
    fn take_data(&mut self) -> Option<T> {
        match self {
            State::HasData(_) => {
                let State::HasData(data) = std::mem::replace(self, State::NoData) else {
                    unreachable!("this match arm guarantees that the state is HasData");
                };
                Some(data)
            }
            State::SenderWaitsForReceiverToConsume(_) => {
                let State::SenderWaitsForReceiverToConsume(data) =
                    std::mem::replace(self, State::NoData)
                else {
                    unreachable!(
                        "this match arm guarantees that the state is SenderWaitsForReceiverToConsume"
                    );
                };
                Some(data)
            }
            State::SenderGone(data) => Some(data.take().unwrap()),
            State::NoData
            | State::TryFoldFailed
            | State::ReceiverGone
            | State::AllGone
            | State::SenderDropping
            | State::ReceiverDropping => None,
        }
    }

    // Variant name for panic messages (avoids requiring `T: Debug`).
    fn discriminant_str(&self) -> &'static str {
        match self {
            State::NoData => "NoData",
            State::HasData(_) => "HasData",
            State::TryFoldFailed => "TryFoldFailed",
            State::SenderWaitsForReceiverToConsume(_) => "SenderWaitsForReceiverToConsume",
            State::SenderGone(_) => "SenderGone",
            State::ReceiverGone => "ReceiverGone",
            State::AllGone => "AllGone",
            State::SenderDropping => "SenderDropping",
            State::ReceiverDropping => "ReceiverDropping",
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    const FOREVER: std::time::Duration = std::time::Duration::from_secs(u64::MAX);

    #[tokio::test]
    async fn test_send_recv() {
        let (mut sender, mut receiver) = channel();

        sender
            .send(42, |acc, val| {
                *acc += val;
                Ok(())
            })
            .await
            .unwrap();

        let received = receiver.recv().await.unwrap();
        assert_eq!(received, 42);
    }

    #[tokio::test]
    async fn test_send_recv_with_fold() {
        let (mut sender, mut receiver) = channel();

        sender
            .send(1, |acc, val| {
                *acc += val;
                Ok(())
            })
            .await
            .unwrap();
        sender
            .send(2, |acc, val| {
                *acc += val;
                Ok(())
            })
            .await
            .unwrap();

        let received = receiver.recv().await.unwrap();
        assert_eq!(received, 3);
    }

    #[tokio::test(start_paused = true)]
    async fn test_sender_waits_for_receiver_if_try_fold_fails() {
        let (mut sender, mut receiver) = channel();

        sender.send(23, |_, _| panic!("first send")).await.unwrap();

        let send_fut = sender.send(42, |_, val| Err(val));
        let mut send_fut = std::pin::pin!(send_fut);

        tokio::select! {
            _ = tokio::time::sleep(FOREVER) => {},
            _ = &mut send_fut => {
                panic!("send should not complete");
            },
        }

        let val = receiver.recv().await.unwrap();
        assert_eq!(val, 23);

        tokio::select! {
            _ = tokio::time::sleep(FOREVER) => {
                panic!("receiver should have consumed the value");
            },
            _ = &mut send_fut => {
            },
        }

        let val = receiver.recv().await.unwrap();
        assert_eq!(val, 42);
    }

    #[tokio::test(start_paused = true)]
    async fn test_sender_errors_if_waits_for_receiver_and_receiver_drops() {
        let (mut sender, receiver) = channel();

        sender.send(23, |_, _| unreachable!()).await.unwrap();

        let send_fut = sender.send(42, |_, val| Err(val));
        let send_fut = std::pin::pin!(send_fut);

        drop(receiver);

        let result = send_fut.await;
        assert!(matches!(result, Err(SendError::ReceiverGone)));
    }

    #[tokio::test(start_paused = true)]
    async fn test_receiver_errors_if_waits_for_sender_and_sender_drops() {
        let (sender, mut receiver) = channel::<()>();

        let recv_fut = receiver.recv();
        let recv_fut = std::pin::pin!(recv_fut);

        drop(sender);

        let result = recv_fut.await;
        assert!(matches!(result, Err(RecvError::SenderGone)));
    }

    #[tokio::test(start_paused = true)]
    async fn test_receiver_errors_if_waits_for_sender_and_sender_drops_with_data() {
        let (mut sender, mut receiver) = channel();

        sender.send(42, |_, _| unreachable!()).await.unwrap();

        {
            let recv_fut = receiver.recv();
            let recv_fut = std::pin::pin!(recv_fut);

            drop(sender);

            let val = recv_fut.await.unwrap();
            assert_eq!(val, 42);
        }

        let result = receiver.recv().await;
        assert!(matches!(result, Err(RecvError::SenderGone)));
    }

    #[tokio::test(start_paused = true)]
    async fn test_receiver_waits_for_sender_if_no_data() {
        let (mut sender, mut receiver) = channel();

        let recv_fut = receiver.recv();
        let mut recv_fut = std::pin::pin!(recv_fut);

        tokio::select! {
            _ = tokio::time::sleep(FOREVER) => {},
            _ = &mut recv_fut => {
                panic!("recv should not complete");
            },
        }

        sender.send(42, |_, _| Ok(())).await.unwrap();

        let val = recv_fut.await.unwrap();
        assert_eq!(val, 42);
    }

    #[tokio::test]
    async fn test_receiver_gone_while_nodata() {
        let (mut sender, receiver) = channel();
        drop(receiver);

        let result = sender.send(42, |_, _| Ok(())).await;
        assert!(matches!(result, Err(SendError::ReceiverGone)));
    }

    #[tokio::test]
    async fn test_sender_gone_while_nodata() {
        let (sender, mut receiver) = super::channel::<usize>();
        drop(sender);

        let result = receiver.recv().await;
        assert!(matches!(result, Err(RecvError::SenderGone)));
    }

    #[tokio::test(start_paused = true)]
    async fn test_receiver_drops_after_sender_went_to_sleep() {
        let (mut sender, receiver) = channel();
        let state = receiver.state.clone();

        sender.send(23, |_, _| unreachable!()).await.unwrap();

        let send_task = tokio::spawn(async move { sender.send(42, |_, v| Err(v)).await });

        tokio::time::sleep(FOREVER).await;

        assert!(matches!(
            &*state.value.lock().unwrap(),
            &State::SenderWaitsForReceiverToConsume(_)
        ));

        drop(receiver);

        let err = send_task
            .await
            .unwrap()
            .expect_err("should unblock immediately");
        assert!(matches!(err, SendError::ReceiverGone));
    }

    #[tokio::test(start_paused = true)]
    async fn test_sender_drops_after_receiver_went_to_sleep() {
        let (sender, mut receiver) = channel::<usize>();
        let state = sender.state.clone();

        let recv_task = tokio::spawn(async move { receiver.recv().await });

        tokio::time::sleep(FOREVER).await;

        assert!(matches!(&*state.value.lock().unwrap(), &State::NoData));

        drop(sender);

        let err = recv_task.await.unwrap().expect_err("should error");
        assert!(matches!(err, RecvError::SenderGone));
    }

    #[tokio::test(start_paused = true)]
    async fn test_receiver_drop_while_waiting_for_receiver_to_consume_unblocks_sender() {
        let (mut sender, receiver) = channel();
        let state = receiver.state.clone();

        sender.send((), |_, _| unreachable!()).await.unwrap();

        assert!(matches!(&*state.value.lock().unwrap(), &State::HasData(_)));

        let unmergeable = sender.send((), |_, _| Err(()));
        let mut unmergeable = std::pin::pin!(unmergeable);
        tokio::select! {
            _ = tokio::time::sleep(FOREVER) => {},
            _ = &mut unmergeable => {
                panic!("unmergeable should not complete");
            },
        }

        assert!(matches!(
            &*state.value.lock().unwrap(),
            &State::SenderWaitsForReceiverToConsume(_)
        ));

        drop(receiver);

        assert!(matches!(
            &*state.value.lock().unwrap(),
            &State::ReceiverGone
        ));

        unmergeable.await.unwrap_err();
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/src/sync/duplex/mpsc.rs
libs/utils/src/sync/duplex/mpsc.rs
use tokio::sync::mpsc; /// A bi-directional channel. pub struct Duplex<S, R> { pub tx: mpsc::Sender<S>, pub rx: mpsc::Receiver<R>, } /// Creates a bi-directional channel. /// /// The channel will buffer up to the provided number of messages. Once the buffer is full, /// attempts to send new messages will wait until a message is received from the channel. /// The provided buffer capacity must be at least 1. pub fn channel<A: Send, B: Send>(buffer: usize) -> (Duplex<A, B>, Duplex<B, A>) { let (tx_a, rx_a) = mpsc::channel::<A>(buffer); let (tx_b, rx_b) = mpsc::channel::<B>(buffer); (Duplex { tx: tx_a, rx: rx_b }, Duplex { tx: tx_b, rx: rx_a }) } impl<S: Send, R: Send> Duplex<S, R> { /// Sends a value, waiting until there is capacity. /// /// A successful send occurs when it is determined that the other end of the channel has not hung up already. pub async fn send(&self, x: S) -> Result<(), mpsc::error::SendError<S>> { self.tx.send(x).await } pub fn try_send(&self, x: S) -> Result<(), mpsc::error::TrySendError<S>> { self.tx.try_send(x) } /// Receives the next value for this receiver. /// /// This method returns `None` if the channel has been closed and there are /// no remaining messages in the channel's buffer. pub async fn recv(&mut self) -> Option<R> { self.rx.recv().await } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/tests/bin_ser_test.rs
libs/utils/tests/bin_ser_test.rs
//! Verifies that `utils::bin_ser::LeSer` deserializes a packed little-endian
//! struct identically to a hand-written `BytesMut`-based decoder.

use std::io::Read;

use bytes::{Buf, BytesMut};
use hex_literal::hex;
use serde::Deserialize;
use utils::bin_ser::LeSer;

// Fixed-layout record decoded field-by-field in declaration order:
// u16 + u16 + u32 + u64 + u32 = 20 bytes total, all little-endian.
#[derive(Debug, PartialEq, Eq, Deserialize)]
pub struct HeaderData {
    magic: u16,
    info: u16,
    tli: u32,
    pageaddr: u64,
    len: u32,
}

// A manual implementation using BytesMut, just so we can
// verify that we decode the same way.
pub fn decode_header_data(buf: &mut BytesMut) -> HeaderData {
    HeaderData {
        magic: buf.get_u16_le(),
        info: buf.get_u16_le(),
        tli: buf.get_u32_le(),
        pageaddr: buf.get_u64_le(),
        len: buf.get_u32_le(),
    }
}

// Deserialize via the LeSer derive-based path under test.
pub fn decode2<R: Read>(reader: &mut R) -> HeaderData {
    HeaderData::des_from(reader).unwrap()
}

#[test]
fn test1() {
    // 24 bytes of input: 20 bytes of header plus 4 trailing bytes, so we can
    // also check that both decoders consume exactly the same amount.
    let raw1 = hex!("8940 7890 5534 7890 1289 5379 8378 7893 4207 8923 4712 3218");
    let mut buf1 = BytesMut::from(&raw1[..]);
    let mut buf2 = &raw1[..];

    let dec1 = decode_header_data(&mut buf1);
    let dec2 = decode2(&mut buf2);

    // Same decoded values, and same leftover bytes in each buffer.
    assert_eq!(dec1, dec2);
    assert_eq!(buf1, buf2);
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/utils/benches/benchmarks.rs
libs/utils/benches/benchmarks.rs
//! Criterion benchmarks for `utils`: id stringification and `log_slow` overhead.

use std::time::Duration;

use criterion::{Bencher, Criterion, criterion_group, criterion_main};
use pprof::criterion::{Output, PProfProfiler};
use utils::id;
use utils::logging::log_slow;

// Register benchmarks with Criterion.
criterion_group!(
    name = benches;
    config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
    targets = bench_id_stringify, bench_log_slow,
);
criterion_main!(benches);

// Measures TenantId -> String conversion cost.
pub fn bench_id_stringify(c: &mut Criterion) {
    // Can only use public methods.
    let ttid = id::TenantTimelineId::generate();

    c.bench_function("id.to_string", |b| {
        b.iter(|| {
            // FIXME measurement overhead?
            //for _ in 0..1000 {
            //    ttid.tenant_id.to_string();
            //}
            ttid.tenant_id.to_string();
        })
    });
}

// Measures the overhead `log_slow` adds around awaiting a future, compared to
// awaiting the same future directly.
pub fn bench_log_slow(c: &mut Criterion) {
    for enabled in [false, true] {
        c.bench_function(&format!("log_slow/enabled={enabled}"), |b| {
            run_bench(b, enabled).unwrap()
        });
    }

    // The actual benchmark.
    fn run_bench(b: &mut Bencher, enabled: bool) -> anyhow::Result<()> {
        // Threshold is far above the benchmarked work, so the slow-path logging
        // itself never fires; we are measuring the wrapper overhead only.
        const THRESHOLD: Duration = Duration::from_secs(1);

        // Use a multi-threaded runtime to avoid thread parking overhead when yielding.
        let runtime = tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .build()?;

        // Test both with and without log_slow, since we're essentially measuring Tokio scheduling
        // performance too. Use a simple noop future that yields once, to avoid any scheduler fast
        // paths for a ready future.
        if enabled {
            b.iter(|| {
                runtime.block_on(log_slow(
                    "ready",
                    THRESHOLD,
                    std::pin::pin!(tokio::task::yield_now()),
                ))
            });
        } else {
            b.iter(|| runtime.block_on(tokio::task::yield_now()));
        }

        Ok(())
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/neon-shmem/src/lib.rs
libs/neon-shmem/src/lib.rs
/// Resizable hash table on top of byte-level storage (shared memory or a fixed buffer).
pub mod hash;
/// Shared memory area management. (NOTE(review): module body not visible here — confirm.)
pub mod shmem;
/// `std::sync`-like primitives designed to work with shared memory.
pub mod sync;
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/neon-shmem/src/sync.rs
libs/neon-shmem/src/sync.rs
//! Simple utilities akin to what's in [`std::sync`] but designed to work with shared memory. use std::mem::MaybeUninit; use std::ptr::NonNull; use nix::errno::Errno; pub type RwLock<T> = lock_api::RwLock<PthreadRwLock, T>; pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, PthreadRwLock, T>; pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, PthreadRwLock, T>; pub type ValueReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, PthreadRwLock, T>; pub type ValueWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, PthreadRwLock, T>; /// Shared memory read-write lock. pub struct PthreadRwLock(Option<NonNull<libc::pthread_rwlock_t>>); /// Simple macro that calls a function in the libc namespace and panics if return value is nonzero. macro_rules! libc_checked { ($fn_name:ident ( $($arg:expr),* )) => {{ let res = libc::$fn_name($($arg),*); if res != 0 { panic!("{} failed with {}", stringify!($fn_name), Errno::from_raw(res)); } }}; } impl PthreadRwLock { /// Creates a new `PthreadRwLock` on top of a pointer to a pthread rwlock. /// /// # Safety /// `lock` must be non-null. Every unsafe operation will panic in the event of an error. pub unsafe fn new(lock: *mut libc::pthread_rwlock_t) -> Self { unsafe { let mut attrs = MaybeUninit::uninit(); libc_checked!(pthread_rwlockattr_init(attrs.as_mut_ptr())); libc_checked!(pthread_rwlockattr_setpshared( attrs.as_mut_ptr(), libc::PTHREAD_PROCESS_SHARED )); libc_checked!(pthread_rwlock_init(lock, attrs.as_mut_ptr())); // Safety: POSIX specifies that "any function affecting the attributes // object (including destruction) shall not affect any previously // initialized read-write locks". 
libc_checked!(pthread_rwlockattr_destroy(attrs.as_mut_ptr())); Self(Some(NonNull::new_unchecked(lock))) } } fn inner(&self) -> NonNull<libc::pthread_rwlock_t> { match self.0 { None => { panic!("PthreadRwLock constructed badly - something likely used RawRwLock::INIT") } Some(x) => x, } } } unsafe impl lock_api::RawRwLock for PthreadRwLock { type GuardMarker = lock_api::GuardSend; const INIT: Self = Self(None); fn try_lock_shared(&self) -> bool { unsafe { let res = libc::pthread_rwlock_tryrdlock(self.inner().as_ptr()); match res { 0 => true, libc::EAGAIN => false, _ => panic!( "pthread_rwlock_tryrdlock failed with {}", Errno::from_raw(res) ), } } } fn try_lock_exclusive(&self) -> bool { unsafe { let res = libc::pthread_rwlock_trywrlock(self.inner().as_ptr()); match res { 0 => true, libc::EAGAIN => false, _ => panic!("try_wrlock failed with {}", Errno::from_raw(res)), } } } fn lock_shared(&self) { unsafe { libc_checked!(pthread_rwlock_rdlock(self.inner().as_ptr())); } } fn lock_exclusive(&self) { unsafe { libc_checked!(pthread_rwlock_wrlock(self.inner().as_ptr())); } } unsafe fn unlock_exclusive(&self) { unsafe { libc_checked!(pthread_rwlock_unlock(self.inner().as_ptr())); } } unsafe fn unlock_shared(&self) { unsafe { libc_checked!(pthread_rwlock_unlock(self.inner().as_ptr())); } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/neon-shmem/src/hash.rs
libs/neon-shmem/src/hash.rs
//! Resizable hash table implementation on top of byte-level storage (either a [`ShmemHandle`] or a fixed byte array). //! //! This hash table has two major components: the bucket array and the dictionary. Each bucket within the //! bucket array contains a `Option<(K, V)>` and an index of another bucket. In this way there is both an //! implicit freelist within the bucket array (`None` buckets point to other `None` entries) and various hash //! chains within the bucket array (a Some bucket will point to other Some buckets that had the same hash). //! //! Buckets are never moved unless they are within a region that is being shrunk, and so the actual hash- //! dependent component is done with the dictionary. When a new key is inserted into the map, a position //! within the dictionary is decided based on its hash, the data is inserted into an empty bucket based //! off of the freelist, and then the index of said bucket is placed in the dictionary. //! //! This map is resizable (if initialized on top of a [`ShmemHandle`]). Both growing and shrinking happen //! in-place and are at a high level achieved by expanding/reducing the bucket array and rebuilding the //! dictionary by rehashing all keys. //! //! Concurrency is managed very simply: the entire map is guarded by one shared-memory RwLock. use std::hash::{BuildHasher, Hash}; use std::mem::MaybeUninit; use crate::shmem::ShmemHandle; use crate::{shmem, sync::*}; mod core; pub mod entry; #[cfg(test)] mod tests; use core::{Bucket, CoreHashMap, INVALID_POS}; use entry::{Entry, OccupiedEntry, PrevPos, VacantEntry}; use thiserror::Error; /// Error type for a hashmap shrink operation. #[derive(Error, Debug)] pub enum HashMapShrinkError { /// There was an error encountered while resizing the memory area. #[error("shmem resize failed: {0}")] ResizeError(shmem::Error), /// Occupied entries in to-be-shrunk space were encountered beginning at the given index. 
#[error("occupied entry in deallocated space found at {0}")] RemainingEntries(usize), } /// This represents a hash table that (possibly) lives in shared memory. /// If a new process is launched with fork(), the child process inherits /// this struct. #[must_use] pub struct HashMapInit<'a, K, V, S = rustc_hash::FxBuildHasher> { shmem_handle: Option<ShmemHandle>, shared_ptr: *mut HashMapShared<'a, K, V>, shared_size: usize, hasher: S, num_buckets: u32, } /// This is a per-process handle to a hash table that (possibly) lives in shared memory. /// If a child process is launched with fork(), the child process should /// get its own HashMapAccess by calling HashMapInit::attach_writer/reader(). /// /// XXX: We're not making use of it at the moment, but this struct could /// hold process-local information in the future. pub struct HashMapAccess<'a, K, V, S = rustc_hash::FxBuildHasher> { shmem_handle: Option<ShmemHandle>, shared_ptr: *mut HashMapShared<'a, K, V>, hasher: S, } unsafe impl<K: Sync, V: Sync, S> Sync for HashMapAccess<'_, K, V, S> {} unsafe impl<K: Send, V: Send, S> Send for HashMapAccess<'_, K, V, S> {} impl<'a, K: Clone + Hash + Eq, V, S> HashMapInit<'a, K, V, S> { /// Change the 'hasher' used by the hash table. /// /// NOTE: This must be called right after creating the hash table, /// before inserting any entries and before calling attach_writer/reader. /// Otherwise different accessors could be using different hash function, /// with confusing results. pub fn with_hasher<T: BuildHasher>(self, hasher: T) -> HashMapInit<'a, K, V, T> { HashMapInit { hasher, shmem_handle: self.shmem_handle, shared_ptr: self.shared_ptr, shared_size: self.shared_size, num_buckets: self.num_buckets, } } /// Loosely (over)estimate the size needed to store a hash table with `num_buckets` buckets. pub fn estimate_size(num_buckets: u32) -> usize { // add some margin to cover alignment etc. 
CoreHashMap::<K, V>::estimate_size(num_buckets) + size_of::<HashMapShared<K, V>>() + 1000 } fn new( num_buckets: u32, shmem_handle: Option<ShmemHandle>, area_ptr: *mut u8, area_size: usize, hasher: S, ) -> Self { let mut ptr: *mut u8 = area_ptr; let end_ptr: *mut u8 = unsafe { ptr.add(area_size) }; // carve out area for the One Big Lock (TM) and the HashMapShared. ptr = unsafe { ptr.add(ptr.align_offset(align_of::<libc::pthread_rwlock_t>())) }; let raw_lock_ptr = ptr; ptr = unsafe { ptr.add(size_of::<libc::pthread_rwlock_t>()) }; ptr = unsafe { ptr.add(ptr.align_offset(align_of::<HashMapShared<K, V>>())) }; let shared_ptr: *mut HashMapShared<K, V> = ptr.cast(); ptr = unsafe { ptr.add(size_of::<HashMapShared<K, V>>()) }; // carve out the buckets ptr = unsafe { ptr.byte_add(ptr.align_offset(align_of::<core::Bucket<K, V>>())) }; let buckets_ptr = ptr; ptr = unsafe { ptr.add(size_of::<core::Bucket<K, V>>() * num_buckets as usize) }; // use remaining space for the dictionary ptr = unsafe { ptr.byte_add(ptr.align_offset(align_of::<u32>())) }; assert!(ptr.addr() < end_ptr.addr()); let dictionary_ptr = ptr; let dictionary_size = unsafe { end_ptr.byte_offset_from(ptr) / size_of::<u32>() as isize }; assert!(dictionary_size > 0); let buckets = unsafe { std::slice::from_raw_parts_mut(buckets_ptr.cast(), num_buckets as usize) }; let dictionary = unsafe { std::slice::from_raw_parts_mut(dictionary_ptr.cast(), dictionary_size as usize) }; let hashmap = CoreHashMap::new(buckets, dictionary); unsafe { let lock = RwLock::from_raw(PthreadRwLock::new(raw_lock_ptr.cast()), hashmap); std::ptr::write(shared_ptr, lock); } Self { num_buckets, shmem_handle, shared_ptr, shared_size: area_size, hasher, } } /// Attach to a hash table for writing. pub fn attach_writer(self) -> HashMapAccess<'a, K, V, S> { HashMapAccess { shmem_handle: self.shmem_handle, shared_ptr: self.shared_ptr, hasher: self.hasher, } } /// Initialize a table for reading. Currently identical to [`HashMapInit::attach_writer`]. 
/// /// This is a holdover from a previous implementation and is being kept around for /// backwards compatibility reasons. pub fn attach_reader(self) -> HashMapAccess<'a, K, V, S> { self.attach_writer() } } /// Hash table data that is actually stored in the shared memory area. /// /// NOTE: We carve out the parts from a contiguous chunk. Growing and shrinking the hash table /// relies on the memory layout! The data structures are laid out in the contiguous shared memory /// area as follows: /// /// [`libc::pthread_rwlock_t`] /// [`HashMapShared`] /// buckets /// dictionary /// /// In between the above parts, there can be padding bytes to align the parts correctly. type HashMapShared<'a, K, V> = RwLock<CoreHashMap<'a, K, V>>; impl<'a, K, V> HashMapInit<'a, K, V, rustc_hash::FxBuildHasher> where K: Clone + Hash + Eq, { /// Place the hash table within a user-supplied fixed memory area. pub fn with_fixed(num_buckets: u32, area: &'a mut [MaybeUninit<u8>]) -> Self { Self::new( num_buckets, None, area.as_mut_ptr().cast(), area.len(), rustc_hash::FxBuildHasher, ) } /// Place a new hash map in the given shared memory area /// /// # Panics /// Will panic on failure to resize area to expected map size. pub fn with_shmem(num_buckets: u32, shmem: ShmemHandle) -> Self { let size = Self::estimate_size(num_buckets); shmem .set_size(size) .expect("could not resize shared memory area"); let ptr = shmem.data_ptr.as_ptr().cast(); Self::new( num_buckets, Some(shmem), ptr, size, rustc_hash::FxBuildHasher, ) } /// Make a resizable hash map within a new shared memory area with the given name. 
pub fn new_resizeable_named(num_buckets: u32, max_buckets: u32, name: &str) -> Self { let size = Self::estimate_size(num_buckets); let max_size = Self::estimate_size(max_buckets); let shmem = ShmemHandle::new(name, size, max_size).expect("failed to make shared memory area"); let ptr = shmem.data_ptr.as_ptr().cast(); Self::new( num_buckets, Some(shmem), ptr, size, rustc_hash::FxBuildHasher, ) } /// Make a resizable hash map within a new anonymous shared memory area. pub fn new_resizeable(num_buckets: u32, max_buckets: u32) -> Self { use std::sync::atomic::{AtomicUsize, Ordering}; static COUNTER: AtomicUsize = AtomicUsize::new(0); let val = COUNTER.fetch_add(1, Ordering::Relaxed); let name = format!("neon_shmem_hmap{val}"); Self::new_resizeable_named(num_buckets, max_buckets, &name) } } impl<'a, K, V, S: BuildHasher> HashMapAccess<'a, K, V, S> where K: Clone + Hash + Eq, { /// Hash a key using the map's hasher. #[inline] fn get_hash_value(&self, key: &K) -> u64 { self.hasher.hash_one(key) } fn entry_with_hash(&self, key: K, hash: u64) -> Entry<'a, '_, K, V> { let mut map = unsafe { self.shared_ptr.as_ref() }.unwrap().write(); let dict_pos = hash as usize % map.dictionary.len(); let first = map.dictionary[dict_pos]; if first == INVALID_POS { // no existing entry return Entry::Vacant(VacantEntry { map, key, dict_pos: dict_pos as u32, }); } let mut prev_pos = PrevPos::First(dict_pos as u32); let mut next = first; loop { let bucket = &mut map.buckets[next as usize]; let (bucket_key, _bucket_value) = bucket.inner.as_mut().expect("entry is in use"); if *bucket_key == key { // found existing entry return Entry::Occupied(OccupiedEntry { map, _key: key, prev_pos, bucket_pos: next, }); } if bucket.next == INVALID_POS { // No existing entry return Entry::Vacant(VacantEntry { map, key, dict_pos: dict_pos as u32, }); } prev_pos = PrevPos::Chained(next); next = bucket.next; } } /// Get a reference to the corresponding value for a key. 
pub fn get<'e>(&'e self, key: &K) -> Option<ValueReadGuard<'e, V>> { let hash = self.get_hash_value(key); let map = unsafe { self.shared_ptr.as_ref() }.unwrap().read(); RwLockReadGuard::try_map(map, |m| m.get_with_hash(key, hash)).ok() } /// Get a reference to the entry containing a key. /// /// NB: THis takes a write lock as there's no way to distinguish whether the intention /// is to use the entry for reading or for writing in advance. pub fn entry(&self, key: K) -> Entry<'a, '_, K, V> { let hash = self.get_hash_value(&key); self.entry_with_hash(key, hash) } /// Remove a key given its hash. Returns the associated value if it existed. pub fn remove(&self, key: &K) -> Option<V> { let hash = self.get_hash_value(key); match self.entry_with_hash(key.clone(), hash) { Entry::Occupied(e) => Some(e.remove()), Entry::Vacant(_) => None, } } /// Insert/update a key. Returns the previous associated value if it existed. /// /// # Errors /// Will return [`core::FullError`] if there is no more space left in the map. pub fn insert(&self, key: K, value: V) -> Result<Option<V>, core::FullError> { let hash = self.get_hash_value(&key); match self.entry_with_hash(key.clone(), hash) { Entry::Occupied(mut e) => Ok(Some(e.insert(value))), Entry::Vacant(e) => { _ = e.insert(value)?; Ok(None) } } } /// Optionally return the entry for a bucket at a given index if it exists. /// /// Has more overhead than one would intuitively expect: performs both a clone of the key /// due to the [`OccupiedEntry`] type owning the key and also a hash of the key in order /// to enable repairing the hash chain if the entry is removed. 
pub fn entry_at_bucket(&self, pos: usize) -> Option<OccupiedEntry<'a, '_, K, V>> { let map = unsafe { self.shared_ptr.as_mut() }.unwrap().write(); if pos >= map.buckets.len() { return None; } let entry = map.buckets[pos].inner.as_ref(); match entry { Some((key, _)) => Some(OccupiedEntry { _key: key.clone(), bucket_pos: pos as u32, prev_pos: entry::PrevPos::Unknown(self.get_hash_value(key)), map, }), _ => None, } } /// Returns the number of buckets in the table. pub fn get_num_buckets(&self) -> usize { let map = unsafe { self.shared_ptr.as_ref() }.unwrap().read(); map.get_num_buckets() } /// Return the key and value stored in bucket with given index. This can be used to /// iterate through the hash map. // TODO: An Iterator might be nicer. The communicator's clock algorithm needs to // _slowly_ iterate through all buckets with its clock hand, without holding a lock. // If we switch to an Iterator, it must not hold the lock. pub fn get_at_bucket(&self, pos: usize) -> Option<ValueReadGuard<'_, (K, V)>> { let map = unsafe { self.shared_ptr.as_ref() }.unwrap().read(); if pos >= map.buckets.len() { return None; } RwLockReadGuard::try_map(map, |m| m.buckets[pos].inner.as_ref()).ok() } /// Returns the index of the bucket a given value corresponds to. pub fn get_bucket_for_value(&self, val_ptr: *const V) -> usize { let map = unsafe { self.shared_ptr.as_ref() }.unwrap().read(); let origin = map.buckets.as_ptr(); let idx = (val_ptr as usize - origin as usize) / size_of::<Bucket<K, V>>(); assert!(idx < map.buckets.len()); idx } /// Returns the number of occupied buckets in the table. pub fn get_num_buckets_in_use(&self) -> usize { let map = unsafe { self.shared_ptr.as_ref() }.unwrap().read(); map.buckets_in_use as usize } /// Clears all entries in a table. Does not reset any shrinking operations. 
pub fn clear(&self) { let mut map = unsafe { self.shared_ptr.as_mut() }.unwrap().write(); map.clear(); } /// Perform an in-place rehash of some region (0..`rehash_buckets`) of the table and reset /// the `buckets` and `dictionary` slices to be as long as `num_buckets`. Resets the freelist /// in the process. fn rehash_dict( &self, inner: &mut CoreHashMap<'a, K, V>, buckets_ptr: *mut core::Bucket<K, V>, end_ptr: *mut u8, num_buckets: u32, rehash_buckets: u32, ) { inner.free_head = INVALID_POS; let buckets; let dictionary; unsafe { let buckets_end_ptr = buckets_ptr.add(num_buckets as usize); let dictionary_ptr: *mut u32 = buckets_end_ptr .byte_add(buckets_end_ptr.align_offset(align_of::<u32>())) .cast(); let dictionary_size: usize = end_ptr.byte_offset_from(buckets_end_ptr) as usize / size_of::<u32>(); buckets = std::slice::from_raw_parts_mut(buckets_ptr, num_buckets as usize); dictionary = std::slice::from_raw_parts_mut(dictionary_ptr, dictionary_size); } for e in dictionary.iter_mut() { *e = INVALID_POS; } for (i, bucket) in buckets.iter_mut().enumerate().take(rehash_buckets as usize) { if bucket.inner.is_none() { bucket.next = inner.free_head; inner.free_head = i as u32; continue; } let hash = self.hasher.hash_one(&bucket.inner.as_ref().unwrap().0); let pos: usize = (hash % dictionary.len() as u64) as usize; bucket.next = dictionary[pos]; dictionary[pos] = i as u32; } inner.dictionary = dictionary; inner.buckets = buckets; } /// Rehash the map without growing or shrinking. pub fn shuffle(&self) { let mut map = unsafe { self.shared_ptr.as_mut() }.unwrap().write(); let num_buckets = map.get_num_buckets() as u32; let size_bytes = HashMapInit::<K, V, S>::estimate_size(num_buckets); let end_ptr: *mut u8 = unsafe { self.shared_ptr.byte_add(size_bytes).cast() }; let buckets_ptr = map.buckets.as_mut_ptr(); self.rehash_dict(&mut map, buckets_ptr, end_ptr, num_buckets, num_buckets); } /// Grow the number of buckets within the table. /// /// 1. 
Grows the underlying shared memory area /// 2. Initializes new buckets and overwrites the current dictionary /// 3. Rehashes the dictionary /// /// # Panics /// Panics if called on a map initialized with [`HashMapInit::with_fixed`]. /// /// # Errors /// Returns an [`shmem::Error`] if any errors occur resizing the memory region. pub fn grow(&self, num_buckets: u32) -> Result<(), shmem::Error> { let mut map = unsafe { self.shared_ptr.as_mut() }.unwrap().write(); let old_num_buckets = map.buckets.len() as u32; assert!( num_buckets >= old_num_buckets, "grow called with a smaller number of buckets" ); if num_buckets == old_num_buckets { return Ok(()); } let shmem_handle = self .shmem_handle .as_ref() .expect("grow called on a fixed-size hash table"); let size_bytes = HashMapInit::<K, V, S>::estimate_size(num_buckets); shmem_handle.set_size(size_bytes)?; let end_ptr: *mut u8 = unsafe { shmem_handle.data_ptr.as_ptr().add(size_bytes) }; // Initialize new buckets. The new buckets are linked to the free list. // NB: This overwrites the dictionary! let buckets_ptr = map.buckets.as_mut_ptr(); unsafe { for i in old_num_buckets..num_buckets { let bucket = buckets_ptr.add(i as usize); bucket.write(core::Bucket { next: if i < num_buckets - 1 { i + 1 } else { map.free_head }, inner: None, }); } } self.rehash_dict(&mut map, buckets_ptr, end_ptr, num_buckets, old_num_buckets); map.free_head = old_num_buckets; Ok(()) } /// Begin a shrink, limiting all new allocations to be in buckets with index below `num_buckets`. /// /// # Panics /// Panics if called on a map initialized with [`HashMapInit::with_fixed`] or if `num_buckets` is /// greater than the number of buckets in the map. 
pub fn begin_shrink(&mut self, num_buckets: u32) { let mut map = unsafe { self.shared_ptr.as_mut() }.unwrap().write(); assert!( num_buckets <= map.get_num_buckets() as u32, "shrink called with a larger number of buckets" ); _ = self .shmem_handle .as_ref() .expect("shrink called on a fixed-size hash table"); map.alloc_limit = num_buckets; } /// If a shrink operation is underway, returns the target size of the map. Otherwise, returns None. pub fn shrink_goal(&self) -> Option<usize> { let map = unsafe { self.shared_ptr.as_mut() }.unwrap().read(); let goal = map.alloc_limit; if goal == INVALID_POS { None } else { Some(goal as usize) } } /// Complete a shrink after caller has evicted entries, removing the unused buckets and rehashing. /// /// # Panics /// The following cases result in a panic: /// - Calling this function on a map initialized with [`HashMapInit::with_fixed`]. /// - Calling this function on a map when no shrink operation is in progress. pub fn finish_shrink(&self) -> Result<(), HashMapShrinkError> { let mut map = unsafe { self.shared_ptr.as_mut() }.unwrap().write(); assert!( map.alloc_limit != INVALID_POS, "called finish_shrink when no shrink is in progress" ); let num_buckets = map.alloc_limit; if map.get_num_buckets() == num_buckets as usize { return Ok(()); } assert!( map.buckets_in_use <= num_buckets, "called finish_shrink before enough entries were removed" ); for i in (num_buckets as usize)..map.buckets.len() { if map.buckets[i].inner.is_some() { return Err(HashMapShrinkError::RemainingEntries(i)); } } let shmem_handle = self .shmem_handle .as_ref() .expect("shrink called on a fixed-size hash table"); let size_bytes = HashMapInit::<K, V, S>::estimate_size(num_buckets); if let Err(e) = shmem_handle.set_size(size_bytes) { return Err(HashMapShrinkError::ResizeError(e)); } let end_ptr: *mut u8 = unsafe { shmem_handle.data_ptr.as_ptr().add(size_bytes) }; let buckets_ptr = map.buckets.as_mut_ptr(); self.rehash_dict(&mut map, buckets_ptr, end_ptr, 
num_buckets, num_buckets); map.alloc_limit = INVALID_POS; Ok(()) } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/neon-shmem/src/shmem.rs
libs/neon-shmem/src/shmem.rs
//! Dynamically resizable contiguous chunk of shared memory use std::num::NonZeroUsize; use std::os::fd::{AsFd, BorrowedFd, OwnedFd}; use std::ptr::NonNull; use std::sync::atomic::{AtomicUsize, Ordering}; use nix::errno::Errno; use nix::sys::mman::MapFlags; use nix::sys::mman::ProtFlags; use nix::sys::mman::mmap as nix_mmap; use nix::sys::mman::munmap as nix_munmap; use nix::unistd::ftruncate as nix_ftruncate; /// `ShmemHandle` represents a shared memory area that can be shared by processes over `fork()`. /// Unlike shared memory allocated by Postgres, this area is resizable, up to `max_size` that's /// specified at creation. /// /// The area is backed by an anonymous file created with `memfd_create()`. The full address space for /// `max_size` is reserved up-front with `mmap()`, but whenever you call [`ShmemHandle::set_size`], /// the underlying file is resized. Do not access the area beyond the current size. Currently, that /// will cause the file to be expanded, but we might use `mprotect()` etc. to enforce that in the /// future. pub struct ShmemHandle { /// memfd file descriptor fd: OwnedFd, max_size: usize, // Pointer to the beginning of the shared memory area. The header is stored there. shared_ptr: NonNull<SharedStruct>, // Pointer to the beginning of the user data pub data_ptr: NonNull<u8>, } /// This is stored at the beginning in the shared memory area. struct SharedStruct { max_size: usize, /// Current size of the backing file. The high-order bit is used for the [`RESIZE_IN_PROGRESS`] flag. current_size: AtomicUsize, } const RESIZE_IN_PROGRESS: usize = 1 << 63; const HEADER_SIZE: usize = std::mem::size_of::<SharedStruct>(); /// Error type returned by the [`ShmemHandle`] functions. #[derive(thiserror::Error, Debug)] #[error("{msg}: {errno}")] pub struct Error { pub msg: String, pub errno: Errno, } impl Error { fn new(msg: &str, errno: Errno) -> Self { Self { msg: msg.to_string(), errno, } } } impl ShmemHandle { /// Create a new shared memory area. 
To communicate between processes, the processes need to be /// `fork()`'d after calling this, so that the `ShmemHandle` is inherited by all processes. /// /// If the `ShmemHandle` is dropped, the memory is unmapped from the current process. Other /// processes can continue using it, however. pub fn new(name: &str, initial_size: usize, max_size: usize) -> Result<Self, Error> { // create the backing anonymous file. let fd = create_backing_file(name)?; Self::new_with_fd(fd, initial_size, max_size) } fn new_with_fd(fd: OwnedFd, initial_size: usize, max_size: usize) -> Result<Self, Error> { // We reserve the high-order bit for the `RESIZE_IN_PROGRESS` flag, and the actual size // is a little larger than this because of the SharedStruct header. Make the upper limit // somewhat smaller than that, because with anything close to that, you'll run out of // memory anyway. assert!(max_size < 1 << 48, "max size {max_size} too large"); assert!( initial_size <= max_size, "initial size {initial_size} larger than max size {max_size}" ); // The actual initial / max size is the one given by the caller, plus the size of // 'SharedStruct'. 
let initial_size = HEADER_SIZE + initial_size; let max_size = NonZeroUsize::new(HEADER_SIZE + max_size).unwrap(); // Reserve address space for it with mmap // // TODO: Use MAP_HUGETLB if possible let start_ptr = unsafe { nix_mmap( None, max_size, ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, MapFlags::MAP_SHARED, &fd, 0, ) } .map_err(|e| Error::new("mmap failed", e))?; // Reserve space for the initial size enlarge_file(fd.as_fd(), initial_size as u64)?; // Initialize the header let shared: NonNull<SharedStruct> = start_ptr.cast(); unsafe { shared.write(SharedStruct { max_size: max_size.into(), current_size: AtomicUsize::new(initial_size), }); } // The user data begins after the header let data_ptr = unsafe { start_ptr.cast().add(HEADER_SIZE) }; Ok(Self { fd, max_size: max_size.into(), shared_ptr: shared, data_ptr, }) } // return reference to the header fn shared(&self) -> &SharedStruct { unsafe { self.shared_ptr.as_ref() } } /// Resize the shared memory area. `new_size` must not be larger than the `max_size` specified /// when creating the area. /// /// This may only be called from one process/thread concurrently. We detect that case /// and return an [`shmem::Error`](Error). pub fn set_size(&self, new_size: usize) -> Result<(), Error> { let new_size = new_size + HEADER_SIZE; let shared = self.shared(); assert!( new_size <= self.max_size, "new size ({new_size}) is greater than max size ({})", self.max_size ); assert_eq!(self.max_size, shared.max_size); // Lock the area by setting the bit in `current_size` // // Ordering::Relaxed would probably be sufficient here, as we don't access any other memory // and the `posix_fallocate`/`ftruncate` call is surely a synchronization point anyway. But // since this is not performance-critical, better safe than sorry. 
let mut old_size = shared.current_size.load(Ordering::Acquire); loop { if (old_size & RESIZE_IN_PROGRESS) != 0 { return Err(Error::new( "concurrent resize detected", Errno::UnknownErrno, )); } match shared.current_size.compare_exchange( old_size, new_size, Ordering::Acquire, Ordering::Relaxed, ) { Ok(_) => break, Err(x) => old_size = x, } } // Ok, we got the lock. // // NB: If anything goes wrong, we *must* clear the bit! let result = { use std::cmp::Ordering::{Equal, Greater, Less}; match new_size.cmp(&old_size) { Less => nix_ftruncate(&self.fd, new_size as i64) .map_err(|e| Error::new("could not shrink shmem segment, ftruncate failed", e)), Equal => Ok(()), Greater => enlarge_file(self.fd.as_fd(), new_size as u64), } }; // Unlock shared.current_size.store( if result.is_ok() { new_size } else { old_size }, Ordering::Release, ); result } /// Returns the current user-visible size of the shared memory segment. /// /// NOTE: a concurrent [`ShmemHandle::set_size()`] call can change the size at any time. /// It is the caller's responsibility not to access the area beyond the current size. pub fn current_size(&self) -> usize { let total_current_size = self.shared().current_size.load(Ordering::Relaxed) & !RESIZE_IN_PROGRESS; total_current_size - HEADER_SIZE } } impl Drop for ShmemHandle { fn drop(&mut self) { // SAFETY: The pointer was obtained from mmap() with the given size. // We unmap the entire region. let _ = unsafe { nix_munmap(self.shared_ptr.cast(), self.max_size) }; // The fd is dropped automatically by OwnedFd. } } /// Create a "backing file" for the shared memory area. On Linux, use `memfd_create()`, to create an /// anonymous in-memory file. One macos, fall back to a regular file. That's good enough for /// development and testing, but in production we want the file to stay in memory. /// /// Disable unused variables warnings because `name` is unused in the macos path. 
#[allow(unused_variables)] fn create_backing_file(name: &str) -> Result<OwnedFd, Error> { #[cfg(not(target_os = "macos"))] { nix::sys::memfd::memfd_create(name, nix::sys::memfd::MFdFlags::empty()) .map_err(|e| Error::new("memfd_create failed", e)) } #[cfg(target_os = "macos")] { let file = tempfile::tempfile().map_err(|e| { Error::new( "could not create temporary file to back shmem area", nix::errno::Errno::from_raw(e.raw_os_error().unwrap_or(0)), ) })?; Ok(OwnedFd::from(file)) } } fn enlarge_file(fd: BorrowedFd, size: u64) -> Result<(), Error> { // Use posix_fallocate() to enlarge the file. It reserves the space correctly, so that // we don't get a segfault later when trying to actually use it. #[cfg(not(target_os = "macos"))] { nix::fcntl::posix_fallocate(fd, 0, size as i64) .map_err(|e| Error::new("could not grow shmem segment, posix_fallocate failed", e)) } // As a fallback on macos, which doesn't have posix_fallocate, use plain 'fallocate' #[cfg(target_os = "macos")] { nix::unistd::ftruncate(fd, size as i64) .map_err(|e| Error::new("could not grow shmem segment, ftruncate failed", e)) } } #[cfg(test)] mod tests { use super::*; use nix::unistd::ForkResult; use std::ops::Range; /// check that all bytes in given range have the expected value. 
fn assert_range(ptr: *const u8, expected: u8, range: Range<usize>) { for i in range { let b = unsafe { *(ptr.add(i)) }; assert_eq!(expected, b, "unexpected byte at offset {i}"); } } /// Write 'b' to all bytes in the given range fn write_range(ptr: *mut u8, b: u8, range: Range<usize>) { unsafe { std::ptr::write_bytes(ptr.add(range.start), b, range.end - range.start) }; } // simple single-process test of growing and shrinking #[test] fn test_shmem_resize() -> Result<(), Error> { let max_size = 1024 * 1024; let init_struct = ShmemHandle::new("test_shmem_resize", 0, max_size)?; assert_eq!(init_struct.current_size(), 0); // Initial grow let size1 = 10000; init_struct.set_size(size1).unwrap(); assert_eq!(init_struct.current_size(), size1); // Write some data let data_ptr = init_struct.data_ptr.as_ptr(); write_range(data_ptr, 0xAA, 0..size1); assert_range(data_ptr, 0xAA, 0..size1); // Shrink let size2 = 5000; init_struct.set_size(size2).unwrap(); assert_eq!(init_struct.current_size(), size2); // Grow again let size3 = 20000; init_struct.set_size(size3).unwrap(); assert_eq!(init_struct.current_size(), size3); // Try to read it. The area that was shrunk and grown again should read as all zeros now assert_range(data_ptr, 0xAA, 0..5000); assert_range(data_ptr, 0, 5000..size1); // Try to grow beyond max_size //let size4 = max_size + 1; //assert!(init_struct.set_size(size4).is_err()); // Dropping init_struct should unmap the memory drop(init_struct); Ok(()) } /// This is used in tests to coordinate between test processes. It's like `std::sync::Barrier`, /// but is stored in the shared memory area and works across processes. It's implemented by /// polling, because e.g. standard rust mutexes are not guaranteed to work across processes. 
struct SimpleBarrier { num_procs: usize, count: AtomicUsize, } impl SimpleBarrier { unsafe fn init(ptr: *mut SimpleBarrier, num_procs: usize) { unsafe { *ptr = SimpleBarrier { num_procs, count: AtomicUsize::new(0), } } } pub fn wait(&self) { let old = self.count.fetch_add(1, Ordering::Relaxed); let generation = old / self.num_procs; let mut current = old + 1; while current < (generation + 1) * self.num_procs { std::thread::sleep(std::time::Duration::from_millis(10)); current = self.count.load(Ordering::Relaxed); } } } #[test] fn test_multi_process() { // Initialize let max_size = 1_000_000_000_000; let init_struct = ShmemHandle::new("test_multi_process", 0, max_size).unwrap(); let ptr = init_struct.data_ptr.as_ptr(); // Store the SimpleBarrier in the first 1k of the area. init_struct.set_size(10000).unwrap(); let barrier_ptr: *mut SimpleBarrier = unsafe { ptr.add(ptr.align_offset(std::mem::align_of::<SimpleBarrier>())) .cast() }; unsafe { SimpleBarrier::init(barrier_ptr, 2) }; let barrier = unsafe { barrier_ptr.as_ref().unwrap() }; // Fork another test process. The code after this runs in both processes concurrently. let fork_result = unsafe { nix::unistd::fork().unwrap() }; // In the parent, fill bytes between 1000..2000. In the child, between 2000..3000 if fork_result.is_parent() { write_range(ptr, 0xAA, 1000..2000); } else { write_range(ptr, 0xBB, 2000..3000); } barrier.wait(); // Verify the contents. (in both processes) assert_range(ptr, 0xAA, 1000..2000); assert_range(ptr, 0xBB, 2000..3000); // Grow, from the child this time let size = 10_000_000; if !fork_result.is_parent() { init_struct.set_size(size).unwrap(); } barrier.wait(); // make some writes at the end if fork_result.is_parent() { write_range(ptr, 0xAA, (size - 10)..size); } else { write_range(ptr, 0xBB, (size - 20)..(size - 10)); } barrier.wait(); // Verify the contents. 
(This runs in both processes) assert_range(ptr, 0, (size - 1000)..(size - 20)); assert_range(ptr, 0xBB, (size - 20)..(size - 10)); assert_range(ptr, 0xAA, (size - 10)..size); if let ForkResult::Parent { child } = fork_result { nix::sys::wait::waitpid(child, None).unwrap(); } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false