repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/controller_upcall_client.rs
pageserver/src/controller_upcall_client.rs
use std::collections::HashMap; use std::net::IpAddr; use futures::Future; use pageserver_api::config::NodeMetadata; use pageserver_api::controller_api::{AvailabilityZone, NodeRegisterRequest}; use pageserver_api::models::ShardImportStatus; use pageserver_api::shard::TenantShardId; use pageserver_api::upcall_api::{ PutTimelineImportStatusRequest, ReAttachRequest, ReAttachResponse, ReAttachResponseTenant, TimelineImportStatusRequest, ValidateRequest, ValidateRequestTenant, ValidateResponse, }; use reqwest::Certificate; use serde::Serialize; use serde::de::DeserializeOwned; use tokio_util::sync::CancellationToken; use url::Url; use utils::generation::Generation; use utils::id::{NodeId, TimelineId}; use utils::{backoff, failpoint_support, ip_address}; use crate::config::PageServerConf; use crate::virtual_file::on_fatal_io_error; /// The Pageserver's client for using the storage controller upcall API: this is a small API /// for dealing with generations (see docs/rfcs/025-generation-numbers.md). pub struct StorageControllerUpcallClient { http_client: reqwest::Client, base_url: Url, node_id: NodeId, node_ip_addr: Option<IpAddr>, cancel: CancellationToken, } /// Represent operations which internally retry on all errors other than /// cancellation token firing: the only way they can fail is ShuttingDown. 
pub enum RetryForeverError { ShuttingDown, } pub trait StorageControllerUpcallApi { fn re_attach( &self, conf: &PageServerConf, empty_local_disk: bool, ) -> impl Future< Output = Result<HashMap<TenantShardId, ReAttachResponseTenant>, RetryForeverError>, > + Send; fn validate( &self, tenants: Vec<(TenantShardId, Generation)>, ) -> impl Future<Output = Result<HashMap<TenantShardId, bool>, RetryForeverError>> + Send; fn put_timeline_import_status( &self, tenant_shard_id: TenantShardId, timeline_id: TimelineId, generation: Generation, status: ShardImportStatus, ) -> impl Future<Output = Result<(), RetryForeverError>> + Send; fn get_timeline_import_status( &self, tenant_shard_id: TenantShardId, timeline_id: TimelineId, generation: Generation, ) -> impl Future<Output = Result<ShardImportStatus, RetryForeverError>> + Send; } impl StorageControllerUpcallClient { /// A None return value indicates that the input `conf` object does not have control /// plane API enabled. pub fn new(conf: &'static PageServerConf, cancel: &CancellationToken) -> Self { let mut url = conf.control_plane_api.clone(); if let Ok(mut segs) = url.path_segments_mut() { // This ensures that `url` ends with a slash if it doesn't already. // That way, we can subsequently use join() to safely attach extra path elements. segs.pop_if_empty().push(""); } let mut client = reqwest::ClientBuilder::new(); if let Some(jwt) = &conf.control_plane_api_token { let mut headers = reqwest::header::HeaderMap::new(); headers.insert( "Authorization", format!("Bearer {}", jwt.get_contents()).parse().unwrap(), ); client = client.default_headers(headers); } for cert in &conf.ssl_ca_certs { client = client.add_root_certificate( Certificate::from_der(cert.contents()).expect("Invalid certificate in config"), ); } // Intentionally panics if we encountered any errors parsing or reading the IP address. // Note that if the required environment variable is not set, `read_node_ip_addr_from_env` returns `Ok(None)` // instead of an error. 
let node_ip_addr = ip_address::read_node_ip_addr_from_env().expect("Error reading node IP address."); Self { http_client: client.build().expect("Failed to construct HTTP client"), base_url: url, node_id: conf.id, cancel: cancel.clone(), node_ip_addr, } } #[tracing::instrument(skip_all)] async fn retry_http_forever<R, T>( &self, url: &url::Url, request: R, method: reqwest::Method, ) -> Result<T, RetryForeverError> where R: Serialize, T: DeserializeOwned, { let res = backoff::retry( || async { let response = self .http_client .request(method.clone(), url.clone()) .json(&request) .send() .await?; response.error_for_status_ref()?; response.json::<T>().await }, |_| false, 3, u32::MAX, "storage controller upcall", &self.cancel, ) .await .ok_or(RetryForeverError::ShuttingDown)? .expect("We retry forever, this should never be reached"); Ok(res) } pub(crate) fn base_url(&self) -> &Url { &self.base_url } } impl StorageControllerUpcallApi for StorageControllerUpcallClient { /// Block until we get a successful response, or error out if we are shut down #[tracing::instrument(skip_all)] // so that warning logs from retry_http_forever have context async fn re_attach( &self, conf: &PageServerConf, empty_local_disk: bool, ) -> Result<HashMap<TenantShardId, ReAttachResponseTenant>, RetryForeverError> { let url = self .base_url .join("re-attach") .expect("Failed to build re-attach path"); // Include registration content in the re-attach request if a metadata file is readable let metadata_path = conf.metadata_path(); let register = match tokio::fs::read_to_string(&metadata_path).await { Ok(metadata_str) => match serde_json::from_str::<NodeMetadata>(&metadata_str) { Ok(m) => { // Since we run one time at startup, be generous in our logging and // dump all metadata. 
tracing::info!("Loaded node metadata: {m}"); let az_id = { let az_id_from_metadata = m .other .get("availability_zone_id") .and_then(|jv| jv.as_str().map(|str| str.to_owned())); match az_id_from_metadata { Some(az_id) => Some(AvailabilityZone(az_id)), None => { tracing::warn!( "metadata.json does not contain an 'availability_zone_id' field" ); conf.availability_zone.clone().map(AvailabilityZone) } } }; if az_id.is_none() { panic!( "Availablity zone id could not be inferred from metadata.json or pageserver config" ); } Some(NodeRegisterRequest { node_id: conf.id, listen_pg_addr: m.postgres_host, listen_pg_port: m.postgres_port, listen_grpc_addr: m.grpc_host, listen_grpc_port: m.grpc_port, listen_http_addr: m.http_host, listen_http_port: m.http_port, listen_https_port: m.https_port, node_ip_addr: self.node_ip_addr, availability_zone_id: az_id.expect("Checked above"), }) } Err(e) => { tracing::error!("Unreadable metadata in {metadata_path}: {e}"); None } }, Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { // This is legal: we may have been deployed with some external script // doing registration for us. 
tracing::info!("Metadata file not found at {metadata_path}"); } else { on_fatal_io_error(&e, &format!("Loading metadata at {metadata_path}")) } None } }; let request = ReAttachRequest { node_id: self.node_id, register: register.clone(), empty_local_disk: Some(empty_local_disk), }; let response: ReAttachResponse = self .retry_http_forever(&url, request, reqwest::Method::POST) .await?; tracing::info!( "Received re-attach response with {} tenants (node {}, register: {:?})", response.tenants.len(), self.node_id, register, ); failpoint_support::sleep_millis_async!("control-plane-client-re-attach"); Ok(response .tenants .into_iter() .map(|rart| (rart.id, rart)) .collect::<HashMap<_, _>>()) } /// Block until we get a successful response, or error out if we are shut down #[tracing::instrument(skip_all)] // so that warning logs from retry_http_forever have context async fn validate( &self, tenants: Vec<(TenantShardId, Generation)>, ) -> Result<HashMap<TenantShardId, bool>, RetryForeverError> { let url = self .base_url .join("validate") .expect("Failed to build validate path"); // When sending validate requests, break them up into chunks so that we // avoid possible edge cases of generating any HTTP requests that // require database I/O across many thousands of tenants. 
let mut result: HashMap<TenantShardId, bool> = HashMap::with_capacity(tenants.len()); for tenant_chunk in (tenants).chunks(128) { let request = ValidateRequest { tenants: tenant_chunk .iter() .map(|(id, generation)| ValidateRequestTenant { id: *id, r#gen: (*generation).into().expect( "Generation should always be valid for a Tenant doing deletions", ), }) .collect(), }; failpoint_support::sleep_millis_async!( "control-plane-client-validate-sleep", &self.cancel ); if self.cancel.is_cancelled() { return Err(RetryForeverError::ShuttingDown); } let response: ValidateResponse = self .retry_http_forever(&url, request, reqwest::Method::POST) .await?; for rt in response.tenants { result.insert(rt.id, rt.valid); } } Ok(result.into_iter().collect()) } /// Send a shard import status to the storage controller /// /// The implementation must have at-least-once delivery semantics. /// To this end, we retry the request until it succeeds. If the pageserver /// restarts or crashes, the shard import will start again from the beggining. 
#[tracing::instrument(skip_all)] // so that warning logs from retry_http_forever have context async fn put_timeline_import_status( &self, tenant_shard_id: TenantShardId, timeline_id: TimelineId, generation: Generation, status: ShardImportStatus, ) -> Result<(), RetryForeverError> { let url = self .base_url .join("timeline_import_status") .expect("Failed to build path"); let request = PutTimelineImportStatusRequest { tenant_shard_id, timeline_id, generation, status, }; self.retry_http_forever(&url, request, reqwest::Method::POST) .await } #[tracing::instrument(skip_all)] // so that warning logs from retry_http_forever have context async fn get_timeline_import_status( &self, tenant_shard_id: TenantShardId, timeline_id: TimelineId, generation: Generation, ) -> Result<ShardImportStatus, RetryForeverError> { let url = self .base_url .join("timeline_import_status") .expect("Failed to build path"); let request = TimelineImportStatusRequest { tenant_shard_id, timeline_id, generation, }; let response: ShardImportStatus = self .retry_http_forever(&url, request, reqwest::Method::GET) .await?; Ok(response) } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/aux_file.rs
pageserver/src/aux_file.rs
use std::sync::Arc; use ::metrics::IntGauge; use bytes::{Buf, BufMut, Bytes}; use pageserver_api::key::{AUX_KEY_PREFIX, Key, METADATA_KEY_SIZE}; use tracing::warn; // BEGIN Copyright (c) 2017 Servo Contributors /// Const version of FNV hash. #[inline] #[must_use] pub const fn fnv_hash(bytes: &[u8]) -> u128 { const INITIAL_STATE: u128 = 0x6c62272e07bb014262b821756295c58d; const PRIME: u128 = 0x0000000001000000000000000000013B; let mut hash = INITIAL_STATE; let mut i = 0; while i < bytes.len() { hash ^= bytes[i] as u128; hash = hash.wrapping_mul(PRIME); i += 1; } hash } // END Copyright (c) 2017 Servo Contributors /// Create a metadata key from a hash, encoded as [AUX_KEY_PREFIX, 2B directory prefix, least significant 13B of FNV hash]. fn aux_hash_to_metadata_key(dir_level1: u8, dir_level2: u8, data: &[u8]) -> Key { let mut key: [u8; 16] = [0; METADATA_KEY_SIZE]; let hash = fnv_hash(data).to_be_bytes(); key[0] = AUX_KEY_PREFIX; key[1] = dir_level1; key[2] = dir_level2; key[3..16].copy_from_slice(&hash[3..16]); Key::from_metadata_key_fixed_size(&key) } const AUX_DIR_PG_LOGICAL: u8 = 0x01; const AUX_DIR_PG_REPLSLOT: u8 = 0x02; const AUX_DIR_PG_STAT: u8 = 0x03; const AUX_DIR_PG_UNKNOWN: u8 = 0xFF; /// Encode the aux file into a fixed-size key. /// /// The first byte is the AUX key prefix. We use the next 2 bytes of the key for the directory / aux file type. /// We have one-to-one mapping for each of the aux file that we support. We hash the remaining part of the path /// (usually a single file name, or several components) into 13-byte hash. The way we determine the 2-byte prefix /// is roughly based on the first two components of the path, one unique number for one component. 
/// /// * pg_logical/mappings -> 0x0101 /// * pg_logical/snapshots -> 0x0102 /// * pg_logical/replorigin_checkpoint -> 0x0103 /// * pg_logical/others -> 0x01FF /// * pg_replslot/ -> 0x0201 /// * pg_stat/pgstat.stat -> 0x0301 /// * others -> 0xFFFF /// /// If you add new AUX files to this function, please also add a test case to `test_encoding_portable`. /// The new file type must have never been written to the storage before. Otherwise, there could be data /// corruptions as the new file belongs to a new prefix but it might have been stored under the `others` prefix. pub fn encode_aux_file_key(path: &str) -> Key { if let Some(fname) = path.strip_prefix("pg_logical/mappings/") { aux_hash_to_metadata_key(AUX_DIR_PG_LOGICAL, 0x01, fname.as_bytes()) } else if let Some(fname) = path.strip_prefix("pg_logical/snapshots/") { aux_hash_to_metadata_key(AUX_DIR_PG_LOGICAL, 0x02, fname.as_bytes()) } else if path == "pg_logical/replorigin_checkpoint" { aux_hash_to_metadata_key(AUX_DIR_PG_LOGICAL, 0x03, b"") } else if let Some(fname) = path.strip_prefix("pg_logical/") { if cfg!(debug_assertions) { warn!( "unsupported pg_logical aux file type: {}, putting to 0x01FF, would affect path scanning", path ); } aux_hash_to_metadata_key(AUX_DIR_PG_LOGICAL, 0xFF, fname.as_bytes()) } else if let Some(fname) = path.strip_prefix("pg_replslot/") { aux_hash_to_metadata_key(AUX_DIR_PG_REPLSLOT, 0x01, fname.as_bytes()) } else if let Some(fname) = path.strip_prefix("pg_stat/") { aux_hash_to_metadata_key(AUX_DIR_PG_STAT, 0x01, fname.as_bytes()) } else { if cfg!(debug_assertions) { warn!( "unsupported aux file type: {}, putting to 0xFFFF, would affect path scanning", path ); } aux_hash_to_metadata_key(AUX_DIR_PG_UNKNOWN, 0xFF, path.as_bytes()) } } const AUX_FILE_ENCODING_VERSION: u8 = 0x01; pub fn decode_file_value(val: &[u8]) -> anyhow::Result<Vec<(&str, &[u8])>> { let mut ptr = val; if ptr.is_empty() { // empty value = no files return Ok(Vec::new()); } assert_eq!( ptr.get_u8(), 
AUX_FILE_ENCODING_VERSION, "unsupported aux file value" ); let mut files = vec![]; while ptr.has_remaining() { let key_len = ptr.get_u32() as usize; let key = &ptr[..key_len]; ptr.advance(key_len); let val_len = ptr.get_u32() as usize; let content = &ptr[..val_len]; ptr.advance(val_len); let path = std::str::from_utf8(key)?; files.push((path, content)); } Ok(files) } /// Decode an aux file key-value pair into a list of files. The returned `Bytes` contains reference /// to the original value slice. Be cautious about memory consumption. pub fn decode_file_value_bytes(val: &Bytes) -> anyhow::Result<Vec<(String, Bytes)>> { let mut ptr = val.clone(); if ptr.is_empty() { // empty value = no files return Ok(Vec::new()); } assert_eq!( ptr.get_u8(), AUX_FILE_ENCODING_VERSION, "unsupported aux file value" ); let mut files = vec![]; while ptr.has_remaining() { let key_len = ptr.get_u32() as usize; let key = ptr.slice(..key_len); ptr.advance(key_len); let val_len = ptr.get_u32() as usize; let content = ptr.slice(..val_len); ptr.advance(val_len); let path = std::str::from_utf8(&key)?.to_string(); files.push((path, content)); } Ok(files) } pub fn encode_file_value(files: &[(&str, &[u8])]) -> anyhow::Result<Vec<u8>> { if files.is_empty() { // no files = empty value return Ok(Vec::new()); } let mut encoded = vec![]; encoded.put_u8(AUX_FILE_ENCODING_VERSION); for (path, content) in files { if path.len() > u32::MAX as usize { anyhow::bail!("{} exceeds path size limit", path); } encoded.put_u32(path.len() as u32); encoded.put_slice(path.as_bytes()); if content.len() > u32::MAX as usize { anyhow::bail!("{} exceeds content size limit", path); } encoded.put_u32(content.len() as u32); encoded.put_slice(content); } Ok(encoded) } /// An estimation of the size of aux files. 
pub struct AuxFileSizeEstimator { aux_file_size_gauge: IntGauge, size: Arc<std::sync::Mutex<Option<isize>>>, } impl AuxFileSizeEstimator { pub fn new(aux_file_size_gauge: IntGauge) -> Self { Self { aux_file_size_gauge, size: Arc::new(std::sync::Mutex::new(None)), } } /// When generating base backup or doing initial logical size calculation pub fn on_initial(&self, new_size: usize) { let mut guard = self.size.lock().unwrap(); *guard = Some(new_size as isize); self.report(new_size as isize); } pub fn on_add(&self, file_size: usize) { let mut guard = self.size.lock().unwrap(); if let Some(size) = &mut *guard { *size += file_size as isize; self.report(*size); } } pub fn on_remove(&self, file_size: usize) { let mut guard = self.size.lock().unwrap(); if let Some(size) = &mut *guard { *size -= file_size as isize; self.report(*size); } } pub fn on_update(&self, old_size: usize, new_size: usize) { let mut guard = self.size.lock().unwrap(); if let Some(size) = &mut *guard { *size += new_size as isize - old_size as isize; self.report(*size); } } pub fn report(&self, size: isize) { self.aux_file_size_gauge.set(size as i64); } } #[cfg(test)] mod tests { use super::*; #[test] fn test_hash_portable() { // AUX file encoding requires the hash to be portable across all platforms. This test case checks // if the algorithm produces the same hash across different environments. assert_eq!( 265160408618497461376862998434862070044, super::fnv_hash("test1".as_bytes()) ); assert_eq!( 295486155126299629456360817749600553988, super::fnv_hash("test/test2".as_bytes()) ); assert_eq!( 144066263297769815596495629667062367629, super::fnv_hash("".as_bytes()) ); } #[test] fn test_encoding_portable() { // To correct retrieve AUX files, the generated keys for the same file must be the same for all versions // of the page server. 
assert_eq!( "62000001017F8B83D94F7081693471ABF91C", encode_aux_file_key("pg_logical/mappings/test1").to_string(), ); assert_eq!( "62000001027F8E83D94F7081693471ABFCCD", encode_aux_file_key("pg_logical/snapshots/test2").to_string(), ); assert_eq!( "62000001032E07BB014262B821756295C58D", encode_aux_file_key("pg_logical/replorigin_checkpoint").to_string(), ); assert_eq!( "62000001FF4F38E1C74754E7D03C1A660178", encode_aux_file_key("pg_logical/unsupported").to_string(), ); assert_eq!( "62000002017F8D83D94F7081693471ABFB92", encode_aux_file_key("pg_replslot/test3").to_string() ); assert_eq!( "620000FFFF2B6ECC8AEF93F643DC44F15E03", encode_aux_file_key("other_file_not_supported").to_string(), ); } #[test] fn test_value_encoding() { let files = vec![ ("pg_logical/1.file", "1111".as_bytes()), ("pg_logical/2.file", "2222".as_bytes()), ]; assert_eq!( files, decode_file_value(&encode_file_value(&files).unwrap()).unwrap() ); let files = vec![]; assert_eq!( files, decode_file_value(&encode_file_value(&files).unwrap()).unwrap() ); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/utilization.rs
pageserver/src/utilization.rs
//! An utilization metric which is used to decide on which pageserver to put next tenant. //! //! The metric is exposed via `GET /v1/utilization`. Refer and maintain its openapi spec as the //! truth. use std::path::Path; use anyhow::Context; use pageserver_api::models::PageserverUtilization; use utils::serde_percent::Percent; use crate::config::PageServerConf; use crate::metrics::NODE_UTILIZATION_SCORE; use crate::tenant::mgr::TenantManager; pub(crate) fn regenerate( conf: &PageServerConf, tenants_path: &Path, tenant_manager: &TenantManager, ) -> anyhow::Result<PageserverUtilization> { let statvfs = nix::sys::statvfs::statvfs(tenants_path) .map_err(std::io::Error::from) .context("statvfs tenants directory")?; // https://unix.stackexchange.com/a/703650 let blocksz = if statvfs.fragment_size() > 0 { statvfs.fragment_size() } else { statvfs.block_size() }; #[cfg_attr(not(target_os = "macos"), allow(clippy::unnecessary_cast))] let free = statvfs.blocks_available() as u64 * blocksz; #[cfg_attr(not(target_os = "macos"), allow(clippy::unnecessary_cast))] let used = statvfs .blocks() // use blocks_free instead of available here to match df in case someone compares .saturating_sub(statvfs.blocks_free()) as u64 * blocksz; let captured_at = std::time::SystemTime::now(); // Calculate aggregate utilization from tenants on this pageserver let (disk_wanted_bytes, shard_count) = tenant_manager.calculate_utilization()?; // Fetch the fraction of disk space which may be used let disk_usable_pct = if conf.disk_usage_based_eviction.enabled { conf.disk_usage_based_eviction.max_usage_pct } else { Percent::new(100).unwrap() }; // Express a static value for how many shards we may schedule on one node const MAX_SHARDS: u32 = 2500; let mut doc = PageserverUtilization { disk_usage_bytes: used, free_space_bytes: free, disk_wanted_bytes, disk_usable_pct, shard_count, max_shard_count: MAX_SHARDS, utilization_score: None, captured_at: utils::serde_system_time::SystemTime(captured_at), }; // 
Initialize `PageserverUtilization::utilization_score` let score = doc.cached_score(); NODE_UTILIZATION_SCORE.set(score); Ok(doc) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/feature_resolver.rs
pageserver/src/feature_resolver.rs
use std::{ collections::HashMap, sync::{Arc, atomic::AtomicBool}, time::Duration, }; use arc_swap::ArcSwap; use pageserver_api::config::NodeMetadata; use posthog_client_lite::{ CaptureEvent, FeatureResolverBackgroundLoop, PostHogEvaluationError, PostHogFlagFilterPropertyValue, }; use rand::Rng; use remote_storage::RemoteStorageKind; use serde_json::json; use tokio_util::sync::CancellationToken; use utils::id::TenantId; use crate::{config::PageServerConf, metrics::FEATURE_FLAG_EVALUATION, tenant::TenantShard}; const DEFAULT_POSTHOG_REFRESH_INTERVAL: Duration = Duration::from_secs(600); #[derive(Clone)] pub struct FeatureResolver { inner: Option<Arc<FeatureResolverBackgroundLoop>>, internal_properties: Option<Arc<HashMap<String, PostHogFlagFilterPropertyValue>>>, force_overrides_for_testing: Arc<ArcSwap<HashMap<String, String>>>, } impl FeatureResolver { pub fn new_disabled() -> Self { Self { inner: None, internal_properties: None, force_overrides_for_testing: Arc::new(ArcSwap::new(Arc::new(HashMap::new()))), } } pub fn update(&self, spec: String) -> anyhow::Result<()> { if let Some(inner) = &self.inner { inner.update(spec)?; } Ok(()) } pub fn spawn( conf: &PageServerConf, shutdown_pageserver: CancellationToken, handle: &tokio::runtime::Handle, ) -> anyhow::Result<Self> { // DO NOT block in this function: make it return as fast as possible to avoid startup delays. if let Some(posthog_config) = &conf.posthog_config { let posthog_client_config = match posthog_config.clone().try_into_posthog_config() { Ok(config) => config, Err(e) => { tracing::warn!( "invalid posthog config, skipping posthog integration: {}", e ); return Ok(FeatureResolver { inner: None, internal_properties: None, force_overrides_for_testing: Arc::new(ArcSwap::new(Arc::new( HashMap::new(), ))), }); } }; let inner = FeatureResolverBackgroundLoop::new(posthog_client_config, shutdown_pageserver); let inner = Arc::new(inner); // The properties shared by all tenants on this pageserver. 
let internal_properties = { let mut properties = HashMap::new(); properties.insert( "pageserver_id".to_string(), PostHogFlagFilterPropertyValue::String(conf.id.to_string()), ); if let Some(availability_zone) = &conf.availability_zone { properties.insert( "availability_zone".to_string(), PostHogFlagFilterPropertyValue::String(availability_zone.clone()), ); } // Infer region based on the remote storage config. if let Some(remote_storage) = &conf.remote_storage_config { match &remote_storage.storage { RemoteStorageKind::AwsS3(config) => { properties.insert( "region".to_string(), PostHogFlagFilterPropertyValue::String(format!( "aws-{}", config.bucket_region )), ); } RemoteStorageKind::AzureContainer(config) => { properties.insert( "region".to_string(), PostHogFlagFilterPropertyValue::String(format!( "azure-{}", config.container_region )), ); } RemoteStorageKind::LocalFs { .. } => { properties.insert( "region".to_string(), PostHogFlagFilterPropertyValue::String("local".to_string()), ); } RemoteStorageKind::GCS { .. 
} => { properties.insert( "region".to_string(), PostHogFlagFilterPropertyValue::String("local".to_string()), ); } } } // TODO: move this to a background task so that we don't block startup in case of slow disk let metadata_path = conf.metadata_path(); match std::fs::read_to_string(&metadata_path) { Ok(metadata_str) => match serde_json::from_str::<NodeMetadata>(&metadata_str) { Ok(metadata) => { properties.insert( "hostname".to_string(), PostHogFlagFilterPropertyValue::String(metadata.http_host), ); if let Some(cplane_region) = metadata.other.get("region_id") { if let Some(cplane_region) = cplane_region.as_str() { // This region contains the cell number properties.insert( "neon_region".to_string(), PostHogFlagFilterPropertyValue::String( cplane_region.to_string(), ), ); } } } Err(e) => { tracing::warn!("Failed to parse metadata.json: {}", e); } }, Err(e) => { tracing::warn!("Failed to read metadata.json: {}", e); } } Arc::new(properties) }; let fake_tenants = { let mut tenants = Vec::new(); for i in 0..10 { let distinct_id = format!( "fake_tenant_{}_{}_{}", conf.availability_zone.as_deref().unwrap_or_default(), conf.id, i ); let tenant_properties = PerTenantProperties { remote_size_mb: Some(rand::rng().random_range(100.0..1000000.00)), db_count_max: Some(rand::rng().random_range(1..1000)), rel_count_max: Some(rand::rng().random_range(1..1000)), } .into_posthog_properties(); let properties = Self::collect_properties_inner( distinct_id.clone(), Some(&internal_properties), &tenant_properties, ); tenants.push(CaptureEvent { event: "initial_tenant_report".to_string(), distinct_id, properties: json!({ "$set": properties }), // use `$set` to set the person properties instead of the event properties }); } tenants }; inner.clone().spawn( handle, posthog_config .refresh_interval .unwrap_or(DEFAULT_POSTHOG_REFRESH_INTERVAL), fake_tenants, ); Ok(FeatureResolver { inner: Some(inner), internal_properties: Some(internal_properties), force_overrides_for_testing: 
Arc::new(ArcSwap::new(Arc::new(HashMap::new()))), }) } else { Ok(FeatureResolver { inner: None, internal_properties: None, force_overrides_for_testing: Arc::new(ArcSwap::new(Arc::new(HashMap::new()))), }) } } fn collect_properties_inner( tenant_id: String, internal_properties: Option<&HashMap<String, PostHogFlagFilterPropertyValue>>, tenant_properties: &HashMap<String, PostHogFlagFilterPropertyValue>, ) -> HashMap<String, PostHogFlagFilterPropertyValue> { let mut properties = HashMap::new(); if let Some(internal_properties) = internal_properties { for (key, value) in internal_properties.iter() { properties.insert(key.clone(), value.clone()); } } properties.insert( "tenant_id".to_string(), PostHogFlagFilterPropertyValue::String(tenant_id), ); for (key, value) in tenant_properties.iter() { properties.insert(key.clone(), value.clone()); } properties } /// Collect all properties availble for the feature flag evaluation. pub(crate) fn collect_properties( &self, tenant_id: TenantId, tenant_properties: &HashMap<String, PostHogFlagFilterPropertyValue>, ) -> HashMap<String, PostHogFlagFilterPropertyValue> { Self::collect_properties_inner( tenant_id.to_string(), self.internal_properties.as_deref(), tenant_properties, ) } /// Evaluate a multivariate feature flag. Currently, we do not support any properties. /// /// Error handling: the caller should inspect the error and decide the behavior when a feature flag /// cannot be evaluated (i.e., default to false if it cannot be resolved). The error should *not* be /// propagated beyond where the feature flag gets resolved. 
pub fn evaluate_multivariate( &self, flag_key: &str, tenant_id: TenantId, tenant_properties: &HashMap<String, PostHogFlagFilterPropertyValue>, ) -> Result<String, PostHogEvaluationError> { let force_overrides = self.force_overrides_for_testing.load(); if let Some(value) = force_overrides.get(flag_key) { return Ok(value.clone()); } if let Some(inner) = &self.inner { let res = inner.feature_store().evaluate_multivariate( flag_key, &tenant_id.to_string(), &self.collect_properties(tenant_id, tenant_properties), ); match &res { Ok(value) => { FEATURE_FLAG_EVALUATION .with_label_values(&[flag_key, "ok", value]) .inc(); } Err(e) => { FEATURE_FLAG_EVALUATION .with_label_values(&[flag_key, "error", e.as_variant_str()]) .inc(); } } res } else { Err(PostHogEvaluationError::NotAvailable( "PostHog integration is not enabled".to_string(), )) } } /// Evaluate a boolean feature flag. Currently, we do not support any properties. /// /// Returns `Ok(())` if the flag is evaluated to true, otherwise returns an error. /// /// Error handling: the caller should inspect the error and decide the behavior when a feature flag /// cannot be evaluated (i.e., default to false if it cannot be resolved). The error should *not* be /// propagated beyond where the feature flag gets resolved. 
pub fn evaluate_boolean( &self, flag_key: &str, tenant_id: TenantId, tenant_properties: &HashMap<String, PostHogFlagFilterPropertyValue>, ) -> Result<(), PostHogEvaluationError> { let force_overrides = self.force_overrides_for_testing.load(); if let Some(value) = force_overrides.get(flag_key) { return if value == "true" { Ok(()) } else { Err(PostHogEvaluationError::NoConditionGroupMatched) }; } if let Some(inner) = &self.inner { let res = inner.feature_store().evaluate_boolean( flag_key, &tenant_id.to_string(), &self.collect_properties(tenant_id, tenant_properties), ); match &res { Ok(()) => { FEATURE_FLAG_EVALUATION .with_label_values(&[flag_key, "ok", "true"]) .inc(); } Err(e) => { FEATURE_FLAG_EVALUATION .with_label_values(&[flag_key, "error", e.as_variant_str()]) .inc(); } } res } else { Err(PostHogEvaluationError::NotAvailable( "PostHog integration is not enabled".to_string(), )) } } pub fn is_feature_flag_boolean(&self, flag_key: &str) -> Result<bool, PostHogEvaluationError> { if let Some(inner) = &self.inner { inner.feature_store().is_feature_flag_boolean(flag_key) } else { Err(PostHogEvaluationError::NotAvailable( "PostHog integration is not enabled, cannot auto-determine the flag type" .to_string(), )) } } /// Force override a feature flag for testing. This is only for testing purposes. Assume the caller only call it /// from a single thread so it won't race. 
pub fn force_override_for_testing(&self, flag_key: &str, value: Option<&str>) { let mut force_overrides = self.force_overrides_for_testing.load().as_ref().clone(); if let Some(value) = value { force_overrides.insert(flag_key.to_string(), value.to_string()); } else { force_overrides.remove(flag_key); } self.force_overrides_for_testing .store(Arc::new(force_overrides)); } } struct PerTenantProperties { pub remote_size_mb: Option<f64>, pub db_count_max: Option<usize>, pub rel_count_max: Option<usize>, } impl PerTenantProperties { pub fn into_posthog_properties(self) -> HashMap<String, PostHogFlagFilterPropertyValue> { let mut properties = HashMap::new(); if let Some(remote_size_mb) = self.remote_size_mb { properties.insert( "tenant_remote_size_mb".to_string(), PostHogFlagFilterPropertyValue::Number(remote_size_mb), ); } if let Some(db_count) = self.db_count_max { properties.insert( "tenant_db_count_max".to_string(), PostHogFlagFilterPropertyValue::Number(db_count as f64), ); } if let Some(rel_count) = self.rel_count_max { properties.insert( "tenant_rel_count_max".to_string(), PostHogFlagFilterPropertyValue::Number(rel_count as f64), ); } properties } } pub struct TenantFeatureResolver { inner: FeatureResolver, tenant_id: TenantId, cached_tenant_properties: ArcSwap<HashMap<String, PostHogFlagFilterPropertyValue>>, // Add feature flag on the critical path below. // // If a feature flag will be used on the critical path, we will update it in the tenant housekeeping loop insetad of // resolving directly by calling `evaluate_multivariate` or `evaluate_boolean`. Remember to update the flag in the // housekeeping loop. The user should directly read this atomic flag instead of using the set of evaluate functions. 
pub feature_test_remote_size_flag: AtomicBool, } impl TenantFeatureResolver { pub fn new(inner: FeatureResolver, tenant_id: TenantId) -> Self { Self { inner, tenant_id, cached_tenant_properties: ArcSwap::new(Arc::new(HashMap::new())), feature_test_remote_size_flag: AtomicBool::new(false), } } pub fn evaluate_multivariate(&self, flag_key: &str) -> Result<String, PostHogEvaluationError> { self.inner.evaluate_multivariate( flag_key, self.tenant_id, &self.cached_tenant_properties.load(), ) } pub fn evaluate_boolean(&self, flag_key: &str) -> Result<(), PostHogEvaluationError> { self.inner.evaluate_boolean( flag_key, self.tenant_id, &self.cached_tenant_properties.load(), ) } pub fn collect_properties(&self) -> HashMap<String, PostHogFlagFilterPropertyValue> { self.inner .collect_properties(self.tenant_id, &self.cached_tenant_properties.load()) } pub fn is_feature_flag_boolean(&self, flag_key: &str) -> Result<bool, PostHogEvaluationError> { self.inner.is_feature_flag_boolean(flag_key) } /// Refresh the cached properties and flags on the critical path. pub fn refresh_properties_and_flags(&self, tenant_shard: &TenantShard) { // Any of the remote size is none => this property is none. let mut remote_size_mb = Some(0.0); // Any of the db or rel count is available => this property is available. 
let mut db_count_max = None; let mut rel_count_max = None; for timeline in tenant_shard.list_timelines() { let size = timeline.metrics.resident_physical_size_get(); if size == 0 { remote_size_mb = None; break; } if let Some(ref mut remote_size_mb) = remote_size_mb { *remote_size_mb += size as f64 / 1024.0 / 1024.0; } if let Some(data) = timeline.db_rel_count.load_full() { let (db_count, rel_count) = *data.as_ref(); if db_count_max.is_none() { db_count_max = Some(db_count); } if rel_count_max.is_none() { rel_count_max = Some(rel_count); } db_count_max = db_count_max.map(|max| max.max(db_count)); rel_count_max = rel_count_max.map(|max| max.max(rel_count)); } } self.cached_tenant_properties.store(Arc::new( PerTenantProperties { remote_size_mb, db_count_max, rel_count_max, } .into_posthog_properties(), )); // BEGIN: Update the feature flag on the critical path. self.feature_test_remote_size_flag.store( self.evaluate_boolean("test-remote-size-flag").is_ok(), std::sync::atomic::Ordering::Relaxed, ); // END: Update the feature flag on the critical path. } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/metrics.rs
pageserver/src/metrics.rs
use std::cell::Cell; use std::collections::HashMap; use std::num::NonZeroUsize; use std::os::fd::RawFd; use std::sync::atomic::AtomicU64; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use enum_map::{Enum as _, EnumMap}; use futures::Future; use metrics::{ Counter, CounterVec, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterPair, IntCounterPairVec, IntCounterVec, IntGauge, IntGaugeVec, UIntGauge, UIntGaugeVec, register_counter_vec, register_gauge_vec, register_histogram, register_histogram_vec, register_int_counter, register_int_counter_pair_vec, register_int_counter_vec, register_int_gauge, register_int_gauge_vec, register_uint_gauge, register_uint_gauge_vec, }; use once_cell::sync::Lazy; use pageserver_api::config::defaults::DEFAULT_MAX_GET_VECTORED_KEYS; use pageserver_api::config::{ PageServicePipeliningConfig, PageServicePipeliningConfigPipelined, PageServiceProtocolPipelinedBatchingStrategy, PageServiceProtocolPipelinedExecutionStrategy, }; use pageserver_api::models::InMemoryLayerInfo; use pageserver_api::shard::TenantShardId; use postgres_backend::{QueryError, is_expected_io_error}; use pq_proto::framed::ConnectionError; use strum::{EnumCount, IntoEnumIterator as _, VariantNames}; use strum_macros::{IntoStaticStr, VariantNames}; use utils::id::TimelineId; use crate::config; use crate::config::PageServerConf; use crate::context::{PageContentKind, RequestContext}; use crate::pgdatadir_mapping::DatadirModificationStats; use crate::task_mgr::TaskKind; use crate::tenant::layer_map::LayerMap; use crate::tenant::mgr::TenantSlot; use crate::tenant::storage_layer::{InMemoryLayer, PersistentLayerDesc}; use crate::tenant::tasks::BackgroundLoopKind; use crate::tenant::throttle::ThrottleResult; /// Prometheus histogram buckets (in seconds) for operations in the critical /// path. In other words, operations that directly affect that latency of user /// queries. 
/// /// The buckets capture the majority of latencies in the microsecond and /// millisecond range but also extend far enough up to distinguish "bad" from /// "really bad". const CRITICAL_OP_BUCKETS: &[f64] = &[ 0.000_001, 0.000_010, 0.000_100, // 1 us, 10 us, 100 us 0.001_000, 0.010_000, 0.100_000, // 1 ms, 10 ms, 100 ms 1.0, 10.0, 100.0, // 1 s, 10 s, 100 s ]; // Metrics collected on operations on the storage repository. #[derive(Debug, VariantNames, IntoStaticStr)] #[strum(serialize_all = "kebab_case")] pub(crate) enum StorageTimeOperation { #[strum(serialize = "layer flush")] LayerFlush, #[strum(serialize = "layer flush delay")] LayerFlushDelay, #[strum(serialize = "compact")] Compact, #[strum(serialize = "create images")] CreateImages, #[strum(serialize = "logical size")] LogicalSize, #[strum(serialize = "imitate logical size")] ImitateLogicalSize, #[strum(serialize = "load layer map")] LoadLayerMap, #[strum(serialize = "gc")] Gc, #[strum(serialize = "find gc cutoffs")] FindGcCutoffs, } pub(crate) static STORAGE_TIME_SUM_PER_TIMELINE: Lazy<CounterVec> = Lazy::new(|| { register_counter_vec!( "pageserver_storage_operations_seconds_sum", "Total time spent on storage operations with operation, tenant and timeline dimensions", &["operation", "tenant_id", "shard_id", "timeline_id"], ) .expect("failed to define a metric") }); pub(crate) static STORAGE_TIME_COUNT_PER_TIMELINE: Lazy<IntCounterVec> = Lazy::new(|| { register_int_counter_vec!( "pageserver_storage_operations_seconds_count", "Count of storage operations with operation, tenant and timeline dimensions", &["operation", "tenant_id", "shard_id", "timeline_id"], ) .expect("failed to define a metric") }); /* BEGIN_HADRON */ pub(crate) static STORAGE_ACTIVE_COUNT_PER_TIMELINE: Lazy<IntGaugeVec> = Lazy::new(|| { register_int_gauge_vec!( "pageserver_active_storage_operations_count", "Count of active storage operations with operation, tenant and timeline dimensions", &["operation", "tenant_id", "shard_id", 
"timeline_id"], ) .expect("failed to define a metric") }); /*END_HADRON */ // Buckets for background operations like compaction, GC, size calculation const STORAGE_OP_BUCKETS: &[f64] = &[0.010, 0.100, 1.0, 10.0, 100.0, 1000.0]; pub(crate) static STORAGE_TIME_GLOBAL: Lazy<HistogramVec> = Lazy::new(|| { register_histogram_vec!( "pageserver_storage_operations_seconds_global", "Time spent on storage operations", &["operation"], STORAGE_OP_BUCKETS.into(), ) .expect("failed to define a metric") }); /// Measures layers visited per read (i.e. read amplification). /// /// NB: for a batch, we count all visited layers towards each read. While the cost of layer visits /// are amortized across the batch, and some layers may not intersect with a given key, each visited /// layer contributes directly to the observed latency for every read in the batch, which is what we /// care about. pub(crate) static LAYERS_PER_READ: Lazy<HistogramVec> = Lazy::new(|| { register_histogram_vec!( "pageserver_layers_per_read", "Layers visited to serve a single read (read amplification). In a batch, all visited layers count towards every read.", &["tenant_id", "shard_id", "timeline_id"], // Low resolution to reduce cardinality. vec![4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0], ) .expect("failed to define a metric") }); pub(crate) static LAYERS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| { register_histogram!( "pageserver_layers_per_read_global", "Layers visited to serve a single read (read amplification). 
In a batch, all visited layers count towards every read.", vec![1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0], ) .expect("failed to define a metric") }); pub(crate) static LAYERS_PER_READ_BATCH_GLOBAL: Lazy<Histogram> = Lazy::new(|| { register_histogram!( "pageserver_layers_per_read_batch_global", "Layers visited to serve a single read batch (read amplification), regardless of number of reads.", vec![ 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0 ], ) .expect("failed to define a metric") }); pub(crate) static LAYERS_PER_READ_AMORTIZED_GLOBAL: Lazy<Histogram> = Lazy::new(|| { register_histogram!( "pageserver_layers_per_read_amortized_global", "Layers visited to serve a single read (read amplification). Amortized across a batch: \ all visited layers are divided by number of reads.", vec![ 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0 ], ) .expect("failed to define a metric") }); pub(crate) static DELTAS_PER_READ_GLOBAL: Lazy<Histogram> = Lazy::new(|| { // We expect this to be low because of Postgres checkpoints. Let's see if that holds. 
register_histogram!( "pageserver_deltas_per_read_global", "Number of delta pages applied to image page per read", vec![0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0], ) .expect("failed to define a metric") }); pub(crate) static CONCURRENT_INITDBS: Lazy<UIntGauge> = Lazy::new(|| { register_uint_gauge!( "pageserver_concurrent_initdb", "Number of initdb processes running" ) .expect("failed to define a metric") }); pub(crate) static INITDB_SEMAPHORE_ACQUISITION_TIME: Lazy<Histogram> = Lazy::new(|| { register_histogram!( "pageserver_initdb_semaphore_seconds_global", "Time spent getting a permit from the global initdb semaphore", STORAGE_OP_BUCKETS.into() ) .expect("failed to define metric") }); pub(crate) static INITDB_RUN_TIME: Lazy<Histogram> = Lazy::new(|| { register_histogram!( "pageserver_initdb_seconds_global", "Time spent performing initdb", STORAGE_OP_BUCKETS.into() ) .expect("failed to define metric") }); pub(crate) struct GetVectoredLatency { map: EnumMap<TaskKind, Option<Histogram>>, } #[allow(dead_code)] pub(crate) struct ScanLatency { map: EnumMap<TaskKind, Option<Histogram>>, } impl GetVectoredLatency { // Only these task types perform vectored gets. Filter all other tasks out to reduce total // cardinality of the metric. const TRACKED_TASK_KINDS: [TaskKind; 2] = [TaskKind::Compaction, TaskKind::PageRequestHandler]; pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> { self.map[task_kind].as_ref() } } impl ScanLatency { // Only these task types perform vectored gets. Filter all other tasks out to reduce total // cardinality of the metric. 
const TRACKED_TASK_KINDS: [TaskKind; 1] = [TaskKind::PageRequestHandler]; pub(crate) fn for_task_kind(&self, task_kind: TaskKind) -> Option<&Histogram> { self.map[task_kind].as_ref() } } pub(crate) struct ScanLatencyOngoingRecording<'a> { parent: &'a Histogram, start: std::time::Instant, } impl<'a> ScanLatencyOngoingRecording<'a> { pub(crate) fn start_recording(parent: &'a Histogram) -> ScanLatencyOngoingRecording<'a> { let start = Instant::now(); ScanLatencyOngoingRecording { parent, start } } pub(crate) fn observe(self) { let elapsed = self.start.elapsed(); self.parent.observe(elapsed.as_secs_f64()); } } pub(crate) static GET_VECTORED_LATENCY: Lazy<GetVectoredLatency> = Lazy::new(|| { let inner = register_histogram_vec!( "pageserver_get_vectored_seconds", "Time spent in get_vectored.", &["task_kind"], CRITICAL_OP_BUCKETS.into(), ) .expect("failed to define a metric"); GetVectoredLatency { map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| { let task_kind = TaskKind::from_usize(task_kind_idx); if GetVectoredLatency::TRACKED_TASK_KINDS.contains(&task_kind) { let task_kind = task_kind.into(); Some(inner.with_label_values(&[task_kind])) } else { None } })), } }); pub(crate) static SCAN_LATENCY: Lazy<ScanLatency> = Lazy::new(|| { let inner = register_histogram_vec!( "pageserver_scan_seconds", "Time spent in scan.", &["task_kind"], CRITICAL_OP_BUCKETS.into(), ) .expect("failed to define a metric"); ScanLatency { map: EnumMap::from_array(std::array::from_fn(|task_kind_idx| { let task_kind = TaskKind::from_usize(task_kind_idx); if ScanLatency::TRACKED_TASK_KINDS.contains(&task_kind) { let task_kind = task_kind.into(); Some(inner.with_label_values(&[task_kind])) } else { None } })), } }); pub(crate) struct PageCacheMetricsForTaskKind { pub read_accesses_immutable: IntCounter, pub read_hits_immutable: IntCounter, } pub(crate) struct PageCacheMetrics { map: EnumMap<TaskKind, EnumMap<PageContentKind, PageCacheMetricsForTaskKind>>, } static PAGE_CACHE_READ_HITS: 
Lazy<IntCounterVec> = Lazy::new(|| { register_int_counter_vec!( "pageserver_page_cache_read_hits_total", "Number of read accesses to the page cache that hit", &["task_kind", "key_kind", "content_kind", "hit_kind"] ) .expect("failed to define a metric") }); static PAGE_CACHE_READ_ACCESSES: Lazy<IntCounterVec> = Lazy::new(|| { register_int_counter_vec!( "pageserver_page_cache_read_accesses_total", "Number of read accesses to the page cache", &["task_kind", "key_kind", "content_kind"] ) .expect("failed to define a metric") }); pub(crate) static PAGE_CACHE: Lazy<PageCacheMetrics> = Lazy::new(|| PageCacheMetrics { map: EnumMap::from_array(std::array::from_fn(|task_kind| { let task_kind = TaskKind::from_usize(task_kind); let task_kind: &'static str = task_kind.into(); EnumMap::from_array(std::array::from_fn(|content_kind| { let content_kind = PageContentKind::from_usize(content_kind); let content_kind: &'static str = content_kind.into(); PageCacheMetricsForTaskKind { read_accesses_immutable: { PAGE_CACHE_READ_ACCESSES .get_metric_with_label_values(&[task_kind, "immutable", content_kind]) .unwrap() }, read_hits_immutable: { PAGE_CACHE_READ_HITS .get_metric_with_label_values(&[task_kind, "immutable", content_kind, "-"]) .unwrap() }, } })) })), }); impl PageCacheMetrics { pub(crate) fn for_ctx(&self, ctx: &RequestContext) -> &PageCacheMetricsForTaskKind { &self.map[ctx.task_kind()][ctx.page_content_kind()] } } pub(crate) struct PageCacheSizeMetrics { pub max_bytes: UIntGauge, pub current_bytes_immutable: UIntGauge, } static PAGE_CACHE_SIZE_CURRENT_BYTES: Lazy<UIntGaugeVec> = Lazy::new(|| { register_uint_gauge_vec!( "pageserver_page_cache_size_current_bytes", "Current size of the page cache in bytes, by key kind", &["key_kind"] ) .expect("failed to define a metric") }); pub(crate) static PAGE_CACHE_SIZE: Lazy<PageCacheSizeMetrics> = Lazy::new(|| PageCacheSizeMetrics { max_bytes: { register_uint_gauge!( "pageserver_page_cache_size_max_bytes", "Maximum size of the page cache 
in bytes" ) .expect("failed to define a metric") }, current_bytes_immutable: { PAGE_CACHE_SIZE_CURRENT_BYTES .get_metric_with_label_values(&["immutable"]) .unwrap() }, }); pub(crate) mod page_cache_eviction_metrics { use std::num::NonZeroUsize; use metrics::{IntCounter, IntCounterVec, register_int_counter_vec}; use once_cell::sync::Lazy; #[derive(Clone, Copy)] pub(crate) enum Outcome { FoundSlotUnused { iters: NonZeroUsize }, FoundSlotEvicted { iters: NonZeroUsize }, ItersExceeded { iters: NonZeroUsize }, } static ITERS_TOTAL_VEC: Lazy<IntCounterVec> = Lazy::new(|| { register_int_counter_vec!( "pageserver_page_cache_find_victim_iters_total", "Counter for the number of iterations in the find_victim loop", &["outcome"], ) .expect("failed to define a metric") }); static CALLS_VEC: Lazy<IntCounterVec> = Lazy::new(|| { register_int_counter_vec!( "pageserver_page_cache_find_victim_calls", "Incremented at the end of each find_victim() call.\ Filter by outcome to get e.g., eviction rate.", &["outcome"] ) .unwrap() }); pub(crate) fn observe(outcome: Outcome) { macro_rules! 
dry { ($label:literal, $iters:expr) => {{ static LABEL: &'static str = $label; static ITERS_TOTAL: Lazy<IntCounter> = Lazy::new(|| ITERS_TOTAL_VEC.with_label_values(&[LABEL])); static CALLS: Lazy<IntCounter> = Lazy::new(|| CALLS_VEC.with_label_values(&[LABEL])); ITERS_TOTAL.inc_by(($iters.get()) as u64); CALLS.inc(); }}; } match outcome { Outcome::FoundSlotUnused { iters } => dry!("found_empty", iters), Outcome::FoundSlotEvicted { iters } => { dry!("found_evicted", iters) } Outcome::ItersExceeded { iters } => { dry!("err_iters_exceeded", iters); super::page_cache_errors_inc(super::PageCacheErrorKind::EvictIterLimit); } } } } static PAGE_CACHE_ERRORS: Lazy<IntCounterVec> = Lazy::new(|| { register_int_counter_vec!( "page_cache_errors_total", "Number of timeouts while acquiring a pinned slot in the page cache", &["error_kind"] ) .expect("failed to define a metric") }); pub(crate) static FEATURE_FLAG_EVALUATION: Lazy<CounterVec> = Lazy::new(|| { register_counter_vec!( "pageserver_feature_flag_evaluation", "Number of times a feature flag is evaluated", &["flag_key", "status", "value"], ) .unwrap() }); #[derive(IntoStaticStr)] #[strum(serialize_all = "kebab_case")] pub(crate) enum PageCacheErrorKind { AcquirePinnedSlotTimeout, EvictIterLimit, } pub(crate) fn page_cache_errors_inc(error_kind: PageCacheErrorKind) { PAGE_CACHE_ERRORS .get_metric_with_label_values(&[error_kind.into()]) .unwrap() .inc(); } pub(crate) static WAIT_LSN_TIME: Lazy<Histogram> = Lazy::new(|| { register_histogram!( "pageserver_wait_lsn_seconds", "Time spent waiting for WAL to arrive. 
Updated on completion of the wait_lsn operation.", CRITICAL_OP_BUCKETS.into(), ) .expect("failed to define a metric") }); pub(crate) static WAIT_LSN_START_FINISH_COUNTERPAIR: Lazy<IntCounterPairVec> = Lazy::new(|| { register_int_counter_pair_vec!( "pageserver_wait_lsn_started_count", "Number of wait_lsn operations started.", "pageserver_wait_lsn_finished_count", "Number of wait_lsn operations finished.", &["tenant_id", "shard_id", "timeline_id"], ) .expect("failed to define a metric") }); pub(crate) static WAIT_LSN_IN_PROGRESS_MICROS: Lazy<IntCounterVec> = Lazy::new(|| { register_int_counter_vec!( "pageserver_wait_lsn_in_progress_micros", "Time spent waiting for WAL to arrive, by timeline_id. Updated periodically while waiting.", &["tenant_id", "shard_id", "timeline_id"], ) .expect("failed to define a metric") }); pub(crate) static WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS: Lazy<IntCounter> = Lazy::new(|| { register_int_counter!( "pageserver_wait_lsn_in_progress_micros_global", "Time spent waiting for WAL to arrive, globally. Updated periodically while waiting." 
) .expect("failed to define a metric") }); pub(crate) static ONDEMAND_DOWNLOAD_BYTES: Lazy<IntCounterVec> = Lazy::new(|| { register_int_counter_vec!( "pageserver_ondemand_download_bytes_total", "Total bytes of layers on-demand downloaded", &["task_kind"] ) .expect("failed to define a metric") }); pub(crate) static ONDEMAND_DOWNLOAD_COUNT: Lazy<IntCounterVec> = Lazy::new(|| { register_int_counter_vec!( "pageserver_ondemand_download_count", "Total count of layers on-demand downloaded", &["task_kind"] ) .expect("failed to define a metric") }); pub(crate) mod wait_ondemand_download_time { use super::*; const WAIT_ONDEMAND_DOWNLOAD_TIME_BUCKETS: &[f64] = &[ 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, // 10 ms - 100ms 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, // 100ms to 1s 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, // 1s to 10s 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, // 10s to 1m ]; /// The task kinds for which we want to track wait times for on-demand downloads. /// Other task kinds' wait times are accumulated in label value `unknown`. pub(crate) const WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS: [TaskKind; 2] = [ TaskKind::PageRequestHandler, TaskKind::WalReceiverConnectionHandler, ]; pub(crate) static WAIT_ONDEMAND_DOWNLOAD_TIME_GLOBAL: Lazy<Vec<Histogram>> = Lazy::new(|| { let histo = register_histogram_vec!( "pageserver_wait_ondemand_download_seconds_global", "Observations are individual tasks' wait times for on-demand downloads. 
\ If N tasks coalesce on an on-demand download, and it takes 10s, than we observe N * 10s.", &["task_kind"], WAIT_ONDEMAND_DOWNLOAD_TIME_BUCKETS.into(), ) .expect("failed to define a metric"); WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS .iter() .map(|task_kind| histo.with_label_values(&[task_kind.into()])) .collect::<Vec<_>>() }); pub(crate) static WAIT_ONDEMAND_DOWNLOAD_TIME_SUM: Lazy<CounterVec> = Lazy::new(|| { register_counter_vec!( // use a name that _could_ be evolved into a per-timeline histogram later "pageserver_wait_ondemand_download_seconds_sum", "Like `pageserver_wait_ondemand_download_seconds_global` but per timeline", &["tenant_id", "shard_id", "timeline_id", "task_kind"], ) .unwrap() }); pub struct WaitOndemandDownloadTimeSum { counters: [Counter; WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS.len()], } impl WaitOndemandDownloadTimeSum { pub(crate) fn new(tenant_id: &str, shard_id: &str, timeline_id: &str) -> Self { let counters = WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS .iter() .map(|task_kind| { WAIT_ONDEMAND_DOWNLOAD_TIME_SUM .get_metric_with_label_values(&[ tenant_id, shard_id, timeline_id, task_kind.into(), ]) .unwrap() }) .collect::<Vec<_>>(); Self { counters: counters.try_into().unwrap(), } } pub(crate) fn observe(&self, task_kind: TaskKind, duration: Duration) { let maybe = WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS .iter() .enumerate() .find(|(_, kind)| **kind == task_kind); let Some((idx, _)) = maybe else { return; }; WAIT_ONDEMAND_DOWNLOAD_TIME_GLOBAL[idx].observe(duration.as_secs_f64()); let counter = &self.counters[idx]; counter.inc_by(duration.as_secs_f64()); } } pub(crate) fn shutdown_timeline(tenant_id: &str, shard_id: &str, timeline_id: &str) { for task_kind in WAIT_ONDEMAND_DOWNLOAD_METRIC_TASK_KINDS { let _ = WAIT_ONDEMAND_DOWNLOAD_TIME_SUM.remove_label_values(&[ tenant_id, shard_id, timeline_id, task_kind.into(), ]); } } pub(crate) fn preinitialize_global_metrics() { Lazy::force(&WAIT_ONDEMAND_DOWNLOAD_TIME_GLOBAL); } } static 
LAST_RECORD_LSN: Lazy<IntGaugeVec> = Lazy::new(|| { register_int_gauge_vec!( "pageserver_last_record_lsn", "Last record LSN grouped by timeline", &["tenant_id", "shard_id", "timeline_id"] ) .expect("failed to define a metric") }); static DISK_CONSISTENT_LSN: Lazy<IntGaugeVec> = Lazy::new(|| { register_int_gauge_vec!( "pageserver_disk_consistent_lsn", "Disk consistent LSN grouped by timeline", &["tenant_id", "shard_id", "timeline_id"] ) .expect("failed to define a metric") }); pub(crate) static PROJECTED_REMOTE_CONSISTENT_LSN: Lazy<UIntGaugeVec> = Lazy::new(|| { register_uint_gauge_vec!( "pageserver_projected_remote_consistent_lsn", "Projected remote consistent LSN grouped by timeline", &["tenant_id", "shard_id", "timeline_id"] ) .expect("failed to define a metric") }); static PITR_HISTORY_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| { register_uint_gauge_vec!( "pageserver_pitr_history_size", "Data written since PITR cutoff on this timeline", &["tenant_id", "shard_id", "timeline_id"] ) .expect("failed to define a metric") }); #[derive( strum_macros::EnumIter, strum_macros::EnumString, strum_macros::Display, strum_macros::IntoStaticStr, )] #[strum(serialize_all = "kebab_case")] pub(crate) enum LayerKind { Delta, Image, } #[derive( strum_macros::EnumIter, strum_macros::EnumString, strum_macros::Display, strum_macros::IntoStaticStr, )] #[strum(serialize_all = "kebab_case")] pub(crate) enum LayerLevel { // We don't track the currently open ephemeral layer, since there's always exactly 1 and its // size changes. See `TIMELINE_EPHEMERAL_BYTES`. 
Frozen, L0, L1, } static TIMELINE_LAYER_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| { register_uint_gauge_vec!( "pageserver_layer_bytes", "Sum of frozen, L0, and L1 layer physical sizes in bytes (excluding the open ephemeral layer)", &["tenant_id", "shard_id", "timeline_id", "level", "kind"] ) .expect("failed to define a metric") }); static TIMELINE_LAYER_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| { register_uint_gauge_vec!( "pageserver_layer_count", "Number of frozen, L0, and L1 layers (excluding the open ephemeral layer)", &["tenant_id", "shard_id", "timeline_id", "level", "kind"] ) .expect("failed to define a metric") }); static TIMELINE_ARCHIVE_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| { register_uint_gauge_vec!( "pageserver_archive_size", "Timeline's logical size if it is considered eligible for archival (outside PITR window), else zero", &["tenant_id", "shard_id", "timeline_id"] ) .expect("failed to define a metric") }); static STANDBY_HORIZON: Lazy<IntGaugeVec> = Lazy::new(|| { register_int_gauge_vec!( "pageserver_standby_horizon", "Standby apply LSN for which GC is hold off, by timeline.", &["tenant_id", "shard_id", "timeline_id"] ) .expect("failed to define a metric") }); static RESIDENT_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| { register_uint_gauge_vec!( "pageserver_resident_physical_size", "The size of the layer files present in the pageserver's filesystem, for attached locations.", &["tenant_id", "shard_id", "timeline_id"] ) .expect("failed to define a metric") }); static VISIBLE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| { register_uint_gauge_vec!( "pageserver_visible_physical_size", "The size of the layer files present in the pageserver's filesystem.", &["tenant_id", "shard_id", "timeline_id"] ) .expect("failed to define a metric") }); pub(crate) static RESIDENT_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| { register_uint_gauge!( "pageserver_resident_physical_size_global", "Like `pageserver_resident_physical_size`, but without 
tenant/timeline dimensions." ) .expect("failed to define a metric") }); static REMOTE_PHYSICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| { register_uint_gauge_vec!( "pageserver_remote_physical_size", "The size of the layer files present in the remote storage that are listed in the remote index_part.json.", // Corollary: If any files are missing from the index part, they won't be included here. &["tenant_id", "shard_id", "timeline_id"] ) .expect("failed to define a metric") }); static REMOTE_PHYSICAL_SIZE_GLOBAL: Lazy<UIntGauge> = Lazy::new(|| { register_uint_gauge!( "pageserver_remote_physical_size_global", "Like `pageserver_remote_physical_size`, but without tenant/timeline dimensions." ) .expect("failed to define a metric") }); pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_LAYERS: Lazy<IntCounter> = Lazy::new(|| { register_int_counter!( "pageserver_remote_ondemand_downloaded_layers_total", "Total on-demand downloaded layers" ) .unwrap() }); pub(crate) static REMOTE_ONDEMAND_DOWNLOADED_BYTES: Lazy<IntCounter> = Lazy::new(|| { register_int_counter!( "pageserver_remote_ondemand_downloaded_bytes_total", "Total bytes of layers on-demand downloaded", ) .unwrap() }); static CURRENT_LOGICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| { register_uint_gauge_vec!( "pageserver_current_logical_size", "Current logical size grouped by timeline", &["tenant_id", "shard_id", "timeline_id"] ) .expect("failed to define current logical size metric") }); static AUX_FILE_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| { register_int_gauge_vec!( "pageserver_aux_file_estimated_size", "The size of all aux files for a timeline in aux file v2 store.", &["tenant_id", "shard_id", "timeline_id"] ) .expect("failed to define a metric") }); static VALID_LSN_LEASE_COUNT: Lazy<UIntGaugeVec> = Lazy::new(|| { register_uint_gauge_vec!( "pageserver_valid_lsn_lease_count", "The number of valid leases after refreshing gc info.", &["tenant_id", "shard_id", "timeline_id"], ) .expect("failed to define a metric") }); 
pub(crate) static CIRCUIT_BREAKERS_BROKEN: Lazy<IntCounter> = Lazy::new(|| { register_int_counter!( "pageserver_circuit_breaker_broken", "How many times a circuit breaker has broken" ) .expect("failed to define a metric") }); pub(crate) static CIRCUIT_BREAKERS_UNBROKEN: Lazy<IntCounter> = Lazy::new(|| { register_int_counter!( "pageserver_circuit_breaker_unbroken", "How many times a circuit breaker has been un-broken (recovered)" ) .expect("failed to define a metric") }); pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| { register_int_counter!( "pageserver_compression_image_in_bytes_total", "Size of data written into image layers before compression" ) .expect("failed to define a metric") }); pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CONSIDERED: Lazy<IntCounter> = Lazy::new(|| { register_int_counter!( "pageserver_compression_image_in_bytes_considered", "Size of potentially compressible data written into image layers before compression" ) .expect("failed to define a metric") }); pub(crate) static COMPRESSION_IMAGE_INPUT_BYTES_CHOSEN: Lazy<IntCounter> = Lazy::new(|| { register_int_counter!( "pageserver_compression_image_in_bytes_chosen", "Size of data whose compressed form was written into image layers" ) .expect("failed to define a metric") }); pub(crate) static COMPRESSION_IMAGE_OUTPUT_BYTES: Lazy<IntCounter> = Lazy::new(|| { register_int_counter!( "pageserver_compression_image_out_bytes_total", "Size of compressed image layer written" ) .expect("failed to define a metric") }); pub(crate) static RELSIZE_LATEST_CACHE_ENTRIES: Lazy<UIntGauge> = Lazy::new(|| { register_uint_gauge!( "pageserver_relsize_latest_cache_entries", "Number of entries in the latest relation size cache", ) .expect("failed to define a metric") }); pub(crate) static RELSIZE_LATEST_CACHE_HITS: Lazy<IntCounter> = Lazy::new(|| { register_int_counter!( "pageserver_relsize_latest_cache_hits", "Latest relation size cache hits", ) .expect("failed to define a metric") 
}); pub(crate) static RELSIZE_LATEST_CACHE_MISSES: Lazy<IntCounter> = Lazy::new(|| { register_int_counter!( "pageserver_relsize_latest_cache_misses", "Relation size latest cache misses", ) .expect("failed to define a metric") }); pub(crate) static RELSIZE_SNAPSHOT_CACHE_ENTRIES: Lazy<UIntGauge> = Lazy::new(|| { register_uint_gauge!( "pageserver_relsize_snapshot_cache_entries", "Number of entries in the pitr relation size cache", ) .expect("failed to define a metric") }); pub(crate) static RELSIZE_SNAPSHOT_CACHE_HITS: Lazy<IntCounter> = Lazy::new(|| { register_int_counter!( "pageserver_relsize_snapshot_cache_hits", "Pitr relation size cache hits", ) .expect("failed to define a metric") }); pub(crate) static RELSIZE_SNAPSHOT_CACHE_MISSES: Lazy<IntCounter> = Lazy::new(|| { register_int_counter!( "pageserver_relsize_snapshot_cache_misses", "Relation size snapshot cache misses", ) .expect("failed to define a metric") }); pub(crate) static RELSIZE_CACHE_MISSES_OLD: Lazy<IntCounter> = Lazy::new(|| { register_int_counter!( "pageserver_relsize_cache_misses_old", "Relation size cache misses where the lookup LSN is older than the last relation update" ) .expect("failed to define a metric") }); pub(crate) mod initial_logical_size { use metrics::{IntCounter, IntCounterVec, register_int_counter, register_int_counter_vec}; use once_cell::sync::Lazy;
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/context.rs
pageserver/src/context.rs
//! Defines [`RequestContext`]. //! //! It is a structure that we use throughout the pageserver to propagate //! high-level context from places that _originate_ activity down to the //! shared code paths at the heart of the pageserver. It's inspired by //! Golang's `context.Context`. //! //! For example, in `Timeline::get(page_nr, lsn)` we need to answer the following questions: //! 1. What high-level activity ([`TaskKind`]) needs this page? //! We need that information as a categorical dimension for page access //! statistics, which we, in turn, need to guide layer eviction policy design. //! 2. How should we behave if, to produce the page image, we need to //! on-demand download a layer file ([`DownloadBehavior`]). //! //! [`RequestContext`] satisfies those needs. //! The current implementation is a small `struct` that is passed through //! the call chain by reference. //! //! ### Future Work //! //! However, we do not intend to stop here, since there are other needs that //! require carrying information from high to low levels of the app. //! //! Most importantly, **cancellation signaling** in response to //! 1. timeouts (page_service max response time) and //! 2. lifecycle requests (detach tenant, delete timeline). //! //! Related to that, there is sometimes a need to ensure that all tokio tasks spawned //! by the transitive callees of a request have finished. The keyword here //! is **Structured Concurrency**, and right now, we use `task_mgr` in most places, //! `TaskHandle` in some places, and careful code review around `FuturesUnordered` //! or `JoinSet` in other places. //! //! We do not yet have a systematic cancellation story in pageserver, and it is //! pretty clear that [`RequestContext`] will be responsible for that. //! So, the API already prepares for this role through the //! [`RequestContext::detached_child`] and [`RequestContext::attached_child`] methods. //! See their doc comments for details on how we will use them in the future. //! //! 
It is not clear whether or how we will enforce Structured Concurrency, and //! what role [`RequestContext`] will play there. //! So, the API doesn't prepare us for this topic. //! //! Other future uses of `RequestContext`: //! - Communicate compute & IO priorities (user-initiated request vs. background-loop) //! - Request IDs for distributed tracing //! - Request/Timeline/Tenant-scoped log levels //! //! RequestContext might look quite different once it supports those features. //! Likely, it will have a shape similar to Golang's `context.Context`. //! //! ### Why A Struct Instead Of Method Parameters //! //! What's typical about such information is that it needs to be passed down //! along the call chain from high level to low level, but few of the functions //! in the middle need to understand it. //! Further, it is to be expected that we will need to propagate more data //! in the future (see the earlier section on future work). //! Hence, for functions in the middle of the call chain, we have the following //! requirements: //! 1. It should be easy to forward the context to callees. //! 2. To propagate more data from high-level to low-level code, the functions in //! the middle should not need to be modified. //! //! The solution is to have a container structure ([`RequestContext`]) that //! carries the information. Functions that don't care about what's in it //! pass it along to callees. //! //! ### Why Not Task-Local Variables //! //! One could use task-local variables (the equivalent of thread-local variables) //! to address the immediate needs outlined above. //! However, we reject task-local variables because: //! 1. they are implicit, thereby making it harder to trace the data flow in code //! reviews and during debugging, //! 2. they can be mutable, which enables implicit return data flow, //! 3. they are restrictive in that code which fans out into multiple tasks, //! or even threads, needs to carefully propagate the state. //! //! 
In contrast, information flow with [`RequestContext`] is //! 1. always explicit, //! 2. strictly uni-directional because RequestContext is immutable, //! 3. tangible because a [`RequestContext`] is just a value. //! When creating child activities, regardless of whether it's a task, //! thread, or even an RPC to another service, the value can //! be used like any other argument. //! //! The solution is that all code paths are infected with precisely one //! [`RequestContext`] argument. Functions in the middle of the call chain //! only need to pass it on. use std::{sync::Arc, time::Duration}; use once_cell::sync::Lazy; use tracing::warn; use utils::{id::TimelineId, shard::TenantShardId}; use crate::{ metrics::{StorageIoSizeMetrics, TimelineMetrics}, task_mgr::TaskKind, tenant::Timeline, }; use futures::FutureExt; use futures::future::BoxFuture; use std::future::Future; use tracing_utils::perf_span::{PerfInstrument, PerfSpan}; use tracing::{Dispatch, Span}; // The main structure of this module, see module-level comment. pub struct RequestContext { task_kind: TaskKind, download_behavior: DownloadBehavior, access_stats_behavior: AccessStatsBehavior, page_content_kind: PageContentKind, read_path_debug: bool, scope: Scope, perf_span: Option<PerfSpan>, perf_span_dispatch: Option<Dispatch>, } #[derive(Clone)] pub(crate) enum Scope { Global { io_size_metrics: &'static crate::metrics::StorageIoSizeMetrics, }, SecondaryTenant { io_size_metrics: &'static crate::metrics::StorageIoSizeMetrics, }, SecondaryTimeline { io_size_metrics: crate::metrics::StorageIoSizeMetrics, }, Timeline { // We wrap the `Arc<TimelineMetrics>`s inside another Arc to avoid child // context creation contending for the ref counters of the Arc<TimelineMetrics>, // which are shared among all tasks that operate on the timeline, especially // concurrent page_service connections. 
#[allow(clippy::redundant_allocation)] arc_arc: Arc<Arc<TimelineMetrics>>, }, #[cfg(test)] UnitTest { io_size_metrics: &'static crate::metrics::StorageIoSizeMetrics, }, DebugTools { io_size_metrics: &'static crate::metrics::StorageIoSizeMetrics, }, } static GLOBAL_IO_SIZE_METRICS: Lazy<crate::metrics::StorageIoSizeMetrics> = Lazy::new(|| crate::metrics::StorageIoSizeMetrics::new("*", "*", "*")); impl Scope { pub(crate) fn new_global() -> Self { Scope::Global { io_size_metrics: &GLOBAL_IO_SIZE_METRICS, } } /// NB: this allocates, so, use only at relatively long-lived roots, e.g., at start /// of a compaction iteration. pub(crate) fn new_timeline(timeline: &Timeline) -> Self { Scope::Timeline { arc_arc: Arc::new(Arc::clone(&timeline.metrics)), } } pub(crate) fn new_page_service_pagestream( timeline_handle: &crate::tenant::timeline::handle::Handle< crate::page_service::TenantManagerTypes, >, ) -> Self { Scope::Timeline { arc_arc: Arc::clone(&timeline_handle.metrics), } } pub(crate) fn new_secondary_timeline( tenant_shard_id: &TenantShardId, timeline_id: &TimelineId, ) -> Self { // TODO(https://github.com/neondatabase/neon/issues/11156): secondary timelines have no infrastructure for metrics lifecycle. let tenant_id = tenant_shard_id.tenant_id.to_string(); let shard_id = tenant_shard_id.shard_slug().to_string(); let timeline_id = timeline_id.to_string(); let io_size_metrics = crate::metrics::StorageIoSizeMetrics::new(&tenant_id, &shard_id, &timeline_id); Scope::SecondaryTimeline { io_size_metrics } } pub(crate) fn new_secondary_tenant(_tenant_shard_id: &TenantShardId) -> Self { // Before propagating metrics via RequestContext, the labels were inferred from file path. // The only user of VirtualFile at tenant scope is the heatmap download & read. // The inferred labels for the path of the heatmap file on local disk were that of the global metric (*,*,*). // Thus, we do the same here, and extend that for anything secondary-tenant scoped. 
// // If we want to have (tenant_id, shard_id, '*') labels for secondary tenants in the future, // we will need to think about the metric lifecycle, i.e., remove them during secondary tenant shutdown, // like we do for attached timelines. (We don't have attached-tenant-scoped usage of VirtualFile // at this point, so, we were able to completely side-step tenant-scoped stuff there). Scope::SecondaryTenant { io_size_metrics: &GLOBAL_IO_SIZE_METRICS, } } #[cfg(test)] pub(crate) fn new_unit_test() -> Self { Scope::UnitTest { io_size_metrics: &GLOBAL_IO_SIZE_METRICS, } } pub(crate) fn new_debug_tools() -> Self { Scope::DebugTools { io_size_metrics: &GLOBAL_IO_SIZE_METRICS, } } } /// The kind of access to the page cache. #[derive(Clone, Copy, PartialEq, Eq, Debug, enum_map::Enum, strum_macros::IntoStaticStr)] pub enum PageContentKind { Unknown, DeltaLayerSummary, DeltaLayerBtreeNode, DeltaLayerValue, ImageLayerSummary, ImageLayerBtreeNode, ImageLayerValue, InMemoryLayer, } /// Desired behavior if the operation requires an on-demand download /// to proceed. #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum DownloadBehavior { /// Download the layer file. It can take a while. Download, /// Download the layer file, but print a warning to the log. This should be used /// in code where the layer file is expected to already exist locally. Warn, /// Return a PageReconstructError::NeedsDownload error Error, } /// Whether this request should update access times used in LRU eviction #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub(crate) enum AccessStatsBehavior { /// Update access times: this request's access to data should be taken /// as a hint that the accessed layer is likely to be accessed again Update, /// Do not update access times: this request is accessing the layer /// but does not want to indicate that the layer should be retained in cache, /// perhaps because the requestor is a compaction routine that will soon cover /// this layer with another. 
Skip, } pub struct RequestContextBuilder { inner: RequestContext, } impl RequestContextBuilder { /// A new builder with default settings pub fn new(task_kind: TaskKind) -> Self { Self { inner: RequestContext { task_kind, download_behavior: DownloadBehavior::Download, access_stats_behavior: AccessStatsBehavior::Update, page_content_kind: PageContentKind::Unknown, read_path_debug: false, scope: Scope::new_global(), perf_span: None, perf_span_dispatch: None, }, } } pub fn from(original: &RequestContext) -> Self { Self { inner: original.clone(), } } pub fn task_kind(mut self, k: TaskKind) -> Self { self.inner.task_kind = k; self } /// Configure the DownloadBehavior of the context: whether to /// download missing layers, and/or warn on the download. pub fn download_behavior(mut self, b: DownloadBehavior) -> Self { self.inner.download_behavior = b; self } /// Configure the AccessStatsBehavior of the context: whether layer /// accesses should update the access time of the layer. pub(crate) fn access_stats_behavior(mut self, b: AccessStatsBehavior) -> Self { self.inner.access_stats_behavior = b; self } pub(crate) fn page_content_kind(mut self, k: PageContentKind) -> Self { self.inner.page_content_kind = k; self } pub(crate) fn read_path_debug(mut self, b: bool) -> Self { self.inner.read_path_debug = b; self } pub(crate) fn scope(mut self, s: Scope) -> Self { self.inner.scope = s; self } pub(crate) fn perf_span_dispatch(mut self, dispatch: Option<Dispatch>) -> Self { self.inner.perf_span_dispatch = dispatch; self } pub fn root_perf_span<Fn>(mut self, make_span: Fn) -> Self where Fn: FnOnce() -> Span, { assert!(self.inner.perf_span.is_none()); assert!(self.inner.perf_span_dispatch.is_some()); let dispatcher = self.inner.perf_span_dispatch.as_ref().unwrap(); let new_span = tracing::dispatcher::with_default(dispatcher, make_span); self.inner.perf_span = Some(PerfSpan::new(new_span, dispatcher.clone())); self } pub fn perf_span<Fn>(mut self, make_span: Fn) -> Self where Fn: 
FnOnce(&Span) -> Span, { if let Some(ref perf_span) = self.inner.perf_span { assert!(self.inner.perf_span_dispatch.is_some()); let dispatcher = self.inner.perf_span_dispatch.as_ref().unwrap(); let new_span = tracing::dispatcher::with_default(dispatcher, || make_span(perf_span.inner())); self.inner.perf_span = Some(PerfSpan::new(new_span, dispatcher.clone())); } self } pub fn root(self) -> RequestContext { self.inner } pub fn attached_child(self) -> RequestContext { self.inner } pub fn detached_child(self) -> RequestContext { self.inner } } impl RequestContext { /// Private clone implementation /// /// Callers should use the [`RequestContextBuilder`] or child spaning APIs of /// [`RequestContext`]. fn clone(&self) -> Self { Self { task_kind: self.task_kind, download_behavior: self.download_behavior, access_stats_behavior: self.access_stats_behavior, page_content_kind: self.page_content_kind, read_path_debug: self.read_path_debug, scope: self.scope.clone(), perf_span: self.perf_span.clone(), perf_span_dispatch: self.perf_span_dispatch.clone(), } } /// Create a new RequestContext that has no parent. /// /// The function is called `new` because, once we add children /// to it using `detached_child` or `attached_child`, the context /// form a tree (not implemented yet since cancellation will be /// the first feature that requires a tree). /// /// # Future: Cancellation /// /// The only reason why a context like this one can be canceled is /// because someone explicitly canceled it. /// It has no parent, so it cannot inherit cancellation from there. pub fn new(task_kind: TaskKind, download_behavior: DownloadBehavior) -> Self { RequestContextBuilder::new(task_kind) .download_behavior(download_behavior) .root() } /// Create a detached child context for a task that may outlive `self`. /// /// Use this when spawning new background activity that should complete /// even if the current request is canceled. 
/// /// # Future: Cancellation /// /// Cancellation of `self` will not propagate to the child context returned /// by this method. /// /// # Future: Structured Concurrency /// /// We could add the Future as a parameter to this function, spawn it as a task, /// and pass to the new task the child context as an argument. /// That would be an ergonomic improvement. /// /// We could make new calls to this function fail if `self` is already canceled. pub fn detached_child(&self, task_kind: TaskKind, download_behavior: DownloadBehavior) -> Self { RequestContextBuilder::from(self) .task_kind(task_kind) .download_behavior(download_behavior) .detached_child() } /// Create a child of context `self` for a task that shall not outlive `self`. /// /// Use this when fanning-out work to other async tasks. /// /// # Future: Cancellation /// /// Cancelling a context will propagate to its attached children. /// /// # Future: Structured Concurrency /// /// We could add the Future as a parameter to this function, spawn it as a task, /// and track its `JoinHandle` inside the `RequestContext`. /// /// We could then provide another method to allow waiting for all child tasks /// to finish. /// /// We could make new calls to this function fail if `self` is already canceled. /// Alternatively, we could allow the creation but not spawn the task. /// The method to wait for child tasks would return an error, indicating /// that the child task was not started because the context was canceled. pub fn attached_child(&self) -> Self { RequestContextBuilder::from(self).attached_child() } /// Use this function when you should be creating a child context using /// [`attached_child`] or [`detached_child`], but your caller doesn't provide /// a context and you are unwilling to change all callers to provide one. /// /// Before we add cancellation, we should get rid of this method. 
/// /// [`attached_child`]: Self::attached_child /// [`detached_child`]: Self::detached_child pub fn todo_child(task_kind: TaskKind, download_behavior: DownloadBehavior) -> Self { Self::new(task_kind, download_behavior) } pub fn with_scope_timeline(&self, timeline: &Arc<Timeline>) -> Self { RequestContextBuilder::from(self) .scope(Scope::new_timeline(timeline)) .attached_child() } pub(crate) fn with_scope_page_service_pagestream( &self, timeline_handle: &crate::tenant::timeline::handle::Handle< crate::page_service::TenantManagerTypes, >, ) -> Self { RequestContextBuilder::from(self) .scope(Scope::new_page_service_pagestream(timeline_handle)) .attached_child() } pub fn with_scope_secondary_timeline( &self, tenant_shard_id: &TenantShardId, timeline_id: &TimelineId, ) -> Self { RequestContextBuilder::from(self) .scope(Scope::new_secondary_timeline(tenant_shard_id, timeline_id)) .attached_child() } pub fn with_scope_secondary_tenant(&self, tenant_shard_id: &TenantShardId) -> Self { RequestContextBuilder::from(self) .scope(Scope::new_secondary_tenant(tenant_shard_id)) .attached_child() } #[cfg(test)] pub fn with_scope_unit_test(&self) -> Self { RequestContextBuilder::from(self) .task_kind(TaskKind::UnitTest) .scope(Scope::new_unit_test()) .attached_child() } pub fn with_scope_debug_tools(&self) -> Self { RequestContextBuilder::from(self) .task_kind(TaskKind::DebugTool) .scope(Scope::new_debug_tools()) .attached_child() } pub fn task_kind(&self) -> TaskKind { self.task_kind } pub fn download_behavior(&self) -> DownloadBehavior { self.download_behavior } pub(crate) fn access_stats_behavior(&self) -> AccessStatsBehavior { self.access_stats_behavior } pub(crate) fn page_content_kind(&self) -> PageContentKind { self.page_content_kind } pub(crate) fn read_path_debug(&self) -> bool { self.read_path_debug } pub(crate) fn io_size_metrics(&self) -> &StorageIoSizeMetrics { match &self.scope { Scope::Global { io_size_metrics } => { let is_unit_test = cfg!(test); let 
is_regress_test_build = cfg!(feature = "testing"); if is_unit_test || is_regress_test_build { panic!("all VirtualFile instances are timeline-scoped"); } else { use once_cell::sync::Lazy; use std::sync::Mutex; use std::time::Duration; use utils::rate_limit::RateLimit; static LIMIT: Lazy<Mutex<RateLimit>> = Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(1)))); let mut guard = LIMIT.lock().unwrap(); guard.call2(|rate_limit_stats| { warn!( %rate_limit_stats, backtrace=%std::backtrace::Backtrace::force_capture(), "all VirtualFile instances are timeline-scoped", ); }); io_size_metrics } } Scope::Timeline { arc_arc } => &arc_arc.storage_io_size, Scope::SecondaryTimeline { io_size_metrics } => io_size_metrics, Scope::SecondaryTenant { io_size_metrics } => io_size_metrics, #[cfg(test)] Scope::UnitTest { io_size_metrics } => io_size_metrics, Scope::DebugTools { io_size_metrics } => io_size_metrics, } } pub(crate) fn ondemand_download_wait_observe(&self, duration: Duration) { if duration == Duration::ZERO { return; } match &self.scope { Scope::Timeline { arc_arc } => arc_arc .wait_ondemand_download_time .observe(self.task_kind, duration), _ => { use once_cell::sync::Lazy; use std::sync::Mutex; use std::time::Duration; use utils::rate_limit::RateLimit; static LIMIT: Lazy<Mutex<RateLimit>> = Lazy::new(|| Mutex::new(RateLimit::new(Duration::from_secs(1)))); let mut guard = LIMIT.lock().unwrap(); guard.call2(|rate_limit_stats| { warn!( %rate_limit_stats, backtrace=%std::backtrace::Backtrace::force_capture(), "ondemand downloads should always happen within timeline scope", ); }); } } } pub(crate) fn perf_follows_from(&self, from: &RequestContext) { if let (Some(span), Some(from_span)) = (&self.perf_span, &from.perf_span) { span.inner().follows_from(from_span.inner()); } } pub(crate) fn has_perf_span(&self) -> bool { self.perf_span.is_some() } } /// [`Future`] extension trait that allow for creating performance /// spans on sampled requests pub(crate) trait 
PerfInstrumentFutureExt<'a>: Future + Send { /// Instrument this future with a new performance span when the /// provided request context indicates the originator request /// was sampled. Otherwise, just box the future and return it as is. fn maybe_perf_instrument<Fn>( self, ctx: &RequestContext, make_span: Fn, ) -> BoxFuture<'a, Self::Output> where Self: Sized + 'a, Fn: FnOnce(&Span) -> Span, { match &ctx.perf_span { Some(perf_span) => { assert!(ctx.perf_span_dispatch.is_some()); let dispatcher = ctx.perf_span_dispatch.as_ref().unwrap(); let new_span = tracing::dispatcher::with_default(dispatcher, || make_span(perf_span.inner())); let new_perf_span = PerfSpan::new(new_span, dispatcher.clone()); self.instrument(new_perf_span).boxed() } None => self.boxed(), } } } // Implement the trait for all types that satisfy the trait bounds impl<'a, T: Future + Send + 'a> PerfInstrumentFutureExt<'a> for T {}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file.rs
pageserver/src/virtual_file.rs
//! VirtualFile is like a normal File, but it's not bound directly to //! a file descriptor. //! //! Instead, the file is opened when it's read from, //! and if too many files are open globally in the system, least-recently //! used ones are closed. //! //! To track which files have been recently used, we use the clock algorithm //! with a 'recently_used' flag on each slot. //! //! This is similar to PostgreSQL's virtual file descriptor facility in //! src/backend/storage/file/fd.c //! use std::fs::File; use std::io::{Error, ErrorKind}; use std::os::fd::{AsRawFd, FromRawFd, IntoRawFd, OwnedFd, RawFd}; use std::sync::LazyLock; use std::sync::atomic::{AtomicBool, AtomicU8, AtomicUsize, Ordering}; use camino::{Utf8Path, Utf8PathBuf}; use once_cell::sync::OnceCell; use owned_buffers_io::aligned_buffer::buffer::AlignedBuffer; use owned_buffers_io::aligned_buffer::{AlignedBufferMut, AlignedSlice, ConstAlign}; use owned_buffers_io::io_buf_aligned::{IoBufAligned, IoBufAlignedMut}; use owned_buffers_io::io_buf_ext::FullSlice; use pageserver_api::config::defaults::DEFAULT_IO_BUFFER_ALIGNMENT; use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; use tokio::time::Instant; use tokio_epoll_uring::{BoundedBuf, IoBuf, IoBufMut, Slice}; use self::owned_buffers_io::write::OwnedAsyncWriter; use crate::assert_u64_eq_usize::UsizeIsU64; use crate::context::RequestContext; use crate::metrics::{STORAGE_IO_TIME_METRIC, StorageIoOperation}; use crate::page_cache::{PAGE_SZ, PageWriteGuard}; pub(crate) use api::IoMode; pub(crate) use io_engine::IoEngineKind; pub use io_engine::{ FeatureTestResult as IoEngineFeatureTestResult, feature_test as io_engine_feature_test, io_engine_for_bench, }; pub(crate) use metadata::Metadata; pub(crate) use open_options::*; pub use pageserver_api::models::virtual_file as api; pub use temporary::TempVirtualFile; pub(crate) mod io_engine; mod metadata; mod open_options; mod temporary; pub(crate) mod owned_buffers_io { //! 
Abstractions for IO with owned buffers. //! //! Not actually tied to [`crate::virtual_file`] specifically, but, it's the primary //! reason we need this abstraction. //! //! Over time, this could move into the `tokio-epoll-uring` crate, maybe `uring-common`, //! but for the time being we're proving out the primitives in the neon.git repo //! for faster iteration. pub(crate) mod aligned_buffer; pub(crate) mod io_buf_aligned; pub(crate) mod io_buf_ext; pub(crate) mod slice; pub(crate) mod write; } #[derive(Debug)] pub struct VirtualFile { inner: VirtualFileInner, _mode: IoMode, } impl VirtualFile { /// Open a file in read-only mode. Like File::open. /// /// Insensitive to `virtual_file_io_mode` setting. pub async fn open<P: AsRef<Utf8Path>>( path: P, ctx: &RequestContext, ) -> Result<Self, std::io::Error> { let inner = VirtualFileInner::open(path, ctx).await?; Ok(VirtualFile { inner, _mode: IoMode::Buffered, }) } /// Open a file in read-only mode. Like File::open. /// /// `O_DIRECT` will be enabled base on `virtual_file_io_mode`. pub async fn open_v2<P: AsRef<Utf8Path>>( path: P, ctx: &RequestContext, ) -> Result<Self, std::io::Error> { Self::open_with_options_v2(path.as_ref(), OpenOptions::new().read(true), ctx).await } /// `O_DIRECT` will be enabled base on `virtual_file_io_mode`. 
pub async fn open_with_options_v2<P: AsRef<Utf8Path>>( path: P, mut open_options: OpenOptions, ctx: &RequestContext, ) -> Result<Self, std::io::Error> { let mode = get_io_mode(); let direct = match (mode, open_options.is_write()) { (IoMode::Buffered, _) => false, (IoMode::Direct, false) => true, (IoMode::Direct, true) => false, (IoMode::DirectRw, _) => true, }; open_options = open_options.direct(direct); let inner = VirtualFileInner::open_with_options(path, open_options, ctx).await?; Ok(VirtualFile { inner, _mode: mode }) } pub fn path(&self) -> &Utf8Path { self.inner.path.as_path() } pub async fn crashsafe_overwrite<B: BoundedBuf<Buf = Buf> + Send, Buf: IoBuf + Send>( final_path: Utf8PathBuf, tmp_path: Utf8PathBuf, content: B, ) -> std::io::Result<()> { VirtualFileInner::crashsafe_overwrite(final_path, tmp_path, content).await } pub async fn sync_all(&self) -> Result<(), Error> { if SYNC_MODE.load(std::sync::atomic::Ordering::Relaxed) == SyncMode::UnsafeNoSync as u8 { return Ok(()); } self.inner.sync_all().await } pub async fn sync_data(&self) -> Result<(), Error> { if SYNC_MODE.load(std::sync::atomic::Ordering::Relaxed) == SyncMode::UnsafeNoSync as u8 { return Ok(()); } self.inner.sync_data().await } pub async fn set_len(&self, len: u64, ctx: &RequestContext) -> Result<(), Error> { self.inner.set_len(len, ctx).await } pub async fn metadata(&self) -> Result<Metadata, Error> { self.inner.metadata().await } pub async fn read_exact_at<Buf>( &self, slice: Slice<Buf>, offset: u64, ctx: &RequestContext, ) -> Result<Slice<Buf>, Error> where Buf: IoBufAlignedMut + Send, { self.inner.read_exact_at(slice, offset, ctx).await } pub async fn read_exact_at_page( &self, page: PageWriteGuard<'static>, offset: u64, ctx: &RequestContext, ) -> Result<PageWriteGuard<'static>, Error> { self.inner.read_exact_at_page(page, offset, ctx).await } pub async fn write_all_at<Buf: IoBufAligned + Send>( &self, buf: FullSlice<Buf>, offset: u64, ctx: &RequestContext, ) -> (FullSlice<Buf>, 
Result<(), Error>) { self.inner.write_all_at(buf, offset, ctx).await } pub(crate) async fn read_to_string<P: AsRef<Utf8Path>>( path: P, ctx: &RequestContext, ) -> std::io::Result<String> { let file = VirtualFile::open(path, ctx).await?; // TODO: open_v2 let mut buf = Vec::new(); let mut tmp = vec![0; 128]; let mut pos: u64 = 0; loop { let slice = tmp.slice(..128); let (slice, res) = file.inner.read_at(slice, pos, ctx).await; match res { Ok(0) => break, Ok(n) => { pos += n as u64; buf.extend_from_slice(&slice[..n]); } Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {} Err(e) => return Err(e), } tmp = slice.into_inner(); } String::from_utf8(buf).map_err(|_| { std::io::Error::new(ErrorKind::InvalidData, "file contents are not valid UTF-8") }) } } /// Indicates whether to enable fsync, fdatasync, or O_SYNC/O_DSYNC when writing /// files. Switching this off is unsafe and only used for testing on machines /// with slow drives. #[repr(u8)] pub enum SyncMode { Sync, UnsafeNoSync, } impl TryFrom<u8> for SyncMode { type Error = u8; fn try_from(value: u8) -> Result<Self, Self::Error> { Ok(match value { v if v == (SyncMode::Sync as u8) => SyncMode::Sync, v if v == (SyncMode::UnsafeNoSync as u8) => SyncMode::UnsafeNoSync, x => return Err(x), }) } } /// /// A virtual file descriptor. You can use this just like std::fs::File, but internally /// the underlying file is closed if the system is low on file descriptors, /// and re-opened when it's accessed again. /// /// Like with std::fs::File, multiple threads can read/write the file concurrently, /// holding just a shared reference the same VirtualFile, using the read_at() / write_at() /// functions from the FileExt trait. But the functions from the Read/Write/Seek traits /// require a mutable reference, because they modify the "current position". /// /// Each VirtualFile has a physical file descriptor in the global OPEN_FILES array, at the /// slot that 'handle points to, if the underlying file is currently open. 
If it's not /// currently open, the 'handle' can still point to the slot where it was last kept. The /// 'tag' field is used to detect whether the handle still is valid or not. /// #[derive(Debug)] pub struct VirtualFileInner { /// Lazy handle to the global file descriptor cache. The slot that this points to /// might contain our File, or it may be empty, or it may contain a File that /// belongs to a different VirtualFile. handle: RwLock<SlotHandle>, /// File path and options to use to open it. /// /// Note: this only contains the options needed to re-open it. For example, /// if a new file is created, we only pass the create flag when it's initially /// opened, in the VirtualFile::create() function, and strip the flag before /// storing it here. pub path: Utf8PathBuf, open_options: OpenOptions, } #[derive(Debug, PartialEq, Clone, Copy)] struct SlotHandle { /// Index into OPEN_FILES.slots index: usize, /// Value of 'tag' in the slot. If slot's tag doesn't match, then the slot has /// been recycled and no longer contains the FD for this virtual file. tag: u64, } /// OPEN_FILES is the global array that holds the physical file descriptors that /// are currently open. Each slot in the array is protected by a separate lock, /// so that different files can be accessed independently. The lock must be held /// in write mode to replace the slot with a different file, but a read mode /// is enough to operate on the file, whether you're reading or writing to it. /// /// OPEN_FILES starts in uninitialized state, and it's initialized by /// the virtual_file::init() function. It must be called exactly once at page /// server startup. static OPEN_FILES: OnceCell<OpenFiles> = OnceCell::new(); struct OpenFiles { slots: &'static [Slot], /// clock arm for the clock algorithm next: AtomicUsize, } struct Slot { inner: RwLock<SlotInner>, /// has this file been used since last clock sweep? 
recently_used: AtomicBool, } struct SlotInner { /// Counter that's incremented every time a different file is stored here. /// To avoid the ABA problem. tag: u64, /// the underlying file file: Option<OwnedFd>, } /// Impl of [`tokio_epoll_uring::IoBuf`] and [`tokio_epoll_uring::IoBufMut`] for [`PageWriteGuard`]. struct PageWriteGuardBuf { page: PageWriteGuard<'static>, } // Safety: the [`PageWriteGuard`] gives us exclusive ownership of the page cache slot, // and the location remains stable even if [`Self`] or the [`PageWriteGuard`] is moved. // Page cache pages are zero-initialized, so, wrt uninitialized memory we're good. // (Page cache tracks separately whether the contents are valid, see `PageWriteGuard::mark_valid`.) unsafe impl tokio_epoll_uring::IoBuf for PageWriteGuardBuf { fn stable_ptr(&self) -> *const u8 { self.page.as_ptr() } fn bytes_init(&self) -> usize { self.page.len() } fn bytes_total(&self) -> usize { self.page.len() } } // Safety: see above, plus: the ownership of [`PageWriteGuard`] means exclusive access, // hence it's safe to hand out the `stable_mut_ptr()`. unsafe impl tokio_epoll_uring::IoBufMut for PageWriteGuardBuf { fn stable_mut_ptr(&mut self) -> *mut u8 { self.page.as_mut_ptr() } unsafe fn set_init(&mut self, pos: usize) { // There shouldn't really be any reason to call this API since bytes_init() == bytes_total(). assert!(pos <= self.page.len()); } } impl OpenFiles { /// Find a slot to use, evicting an existing file descriptor if needed. /// /// On return, we hold a lock on the slot, and its 'tag' has been updated /// recently_used has been set. It's all ready for reuse. async fn find_victim_slot(&self) -> (SlotHandle, RwLockWriteGuard<SlotInner>) { // // Run the clock algorithm to find a slot to replace. 
// let num_slots = self.slots.len(); let mut retries = 0; let mut slot; let mut slot_guard; let index; loop { let next = self.next.fetch_add(1, Ordering::AcqRel) % num_slots; slot = &self.slots[next]; // If the recently_used flag on this slot is set, continue the clock // sweep. Otherwise try to use this slot. If we cannot acquire the // lock, also continue the clock sweep. // // We only continue in this manner for a while, though. If we loop // through the array twice without finding a victim, just pick the // next slot and wait until we can reuse it. This way, we avoid // spinning in the extreme case that all the slots are busy with an // I/O operation. if retries < num_slots * 2 { if !slot.recently_used.swap(false, Ordering::Release) { if let Ok(guard) = slot.inner.try_write() { slot_guard = guard; index = next; break; } } retries += 1; } else { slot_guard = slot.inner.write().await; index = next; break; } } // // We now have the victim slot locked. If it was in use previously, close the // old file. // if let Some(old_file) = slot_guard.file.take() { // the normal path of dropping VirtualFile uses "close", use "close-by-replace" here to // distinguish the two. STORAGE_IO_TIME_METRIC .get(StorageIoOperation::CloseByReplace) .observe_closure_duration(|| drop(old_file)); } // Prepare the slot for reuse and return it slot_guard.tag += 1; slot.recently_used.store(true, Ordering::Relaxed); ( SlotHandle { index, tag: slot_guard.tag, }, slot_guard, ) } } /// Identify error types that should alwways terminate the process. Other /// error types may be elegible for retry. pub(crate) fn is_fatal_io_error(e: &std::io::Error) -> bool { use nix::errno::Errno::*; match e.raw_os_error().map(nix::errno::Errno::from_raw) { Some(EIO) => { // Terminate on EIO because we no longer trust the device to store // data safely, or to uphold persistence guarantees on fsync. 
true } Some(EROFS) => { // Terminate on EROFS because a filesystem is usually remounted // readonly when it has experienced some critical issue, so the same // logic as EIO applies. true } Some(EACCES) => { // Terminate on EACCESS because we should always have permissions // for our own data dir: if we don't, then we can't do our job and // need administrative intervention to fix permissions. Terminating // is the best way to make sure we stop cleanly rather than going // into infinite retry loops, and will make it clear to the outside // world that we need help. true } _ => { // Treat all other local file I/O errors are retryable. This includes: // - ENOSPC: we stay up and wait for eviction to free some space // - EINVAL, EBADF, EBADFD: this is a code bug, not a filesystem/hardware issue // - WriteZero, Interrupted: these are used internally VirtualFile false } } } /// Call this when the local filesystem gives us an error with an external /// cause: this includes EIO, EROFS, and EACCESS: all these indicate either /// bad storage or bad configuration, and we can't fix that from inside /// a running process. pub(crate) fn on_fatal_io_error(e: &std::io::Error, context: &str) -> ! { let backtrace = std::backtrace::Backtrace::force_capture(); tracing::error!("Fatal I/O error: {e}: {context})\n{backtrace}"); std::process::abort(); } pub(crate) trait MaybeFatalIo<T> { fn maybe_fatal_err(self, context: &str) -> std::io::Result<T>; fn fatal_err(self, context: &str) -> T; } impl<T> MaybeFatalIo<T> for std::io::Result<T> { /// Terminate the process if the result is an error of a fatal type, else pass it through /// /// This is appropriate for writes, where we typically want to die on EIO/ACCES etc, but /// not on ENOSPC. fn maybe_fatal_err(self, context: &str) -> std::io::Result<T> { if let Err(e) = &self { if is_fatal_io_error(e) { on_fatal_io_error(e, context); } } self } /// Terminate the process on any I/O error. 
/// /// This is appropriate for reads on files that we know exist: they should always work. fn fatal_err(self, context: &str) -> T { match self { Ok(v) => v, Err(e) => { on_fatal_io_error(&e, context); } } } } /// Observe duration for the given storage I/O operation /// /// Unlike `observe_closure_duration`, this supports async, /// where "support" means that we measure wall clock time. macro_rules! observe_duration { ($op:expr, $($body:tt)*) => {{ let instant = Instant::now(); let result = $($body)*; let elapsed = instant.elapsed().as_secs_f64(); STORAGE_IO_TIME_METRIC .get($op) .observe(elapsed); result }} } macro_rules! with_file { ($this:expr, $op:expr, | $ident:ident | $($body:tt)*) => {{ let $ident = $this.lock_file().await?; observe_duration!($op, $($body)*) }}; ($this:expr, $op:expr, | mut $ident:ident | $($body:tt)*) => {{ let mut $ident = $this.lock_file().await?; observe_duration!($op, $($body)*) }}; } impl VirtualFileInner { /// Open a file in read-only mode. Like File::open. pub async fn open<P: AsRef<Utf8Path>>( path: P, ctx: &RequestContext, ) -> Result<VirtualFileInner, std::io::Error> { Self::open_with_options(path.as_ref(), OpenOptions::new().read(true), ctx).await } /// Open a file with given options. /// /// Note: If any custom flags were set in 'open_options' through OpenOptionsExt, /// they will be applied also when the file is subsequently re-opened, not only /// on the first time. Make sure that's sane! pub async fn open_with_options<P: AsRef<Utf8Path>>( path: P, open_options: OpenOptions, _ctx: &RequestContext, ) -> Result<VirtualFileInner, std::io::Error> { let path = path.as_ref(); let (handle, mut slot_guard) = get_open_files().find_victim_slot().await; // NB: there is also StorageIoOperation::OpenAfterReplace which is for the case // where our caller doesn't get to use the returned VirtualFile before its // slot gets re-used by someone else. 
let file = observe_duration!(StorageIoOperation::Open, { open_options.open(path.as_std_path()).await? }); // Strip all options other than read and write. // // It would perhaps be nicer to check just for the read and write flags // explicitly, but OpenOptions doesn't contain any functions to read flags, // only to set them. let reopen_options = open_options .clone() .create(false) .create_new(false) .truncate(false); let vfile = VirtualFileInner { handle: RwLock::new(handle), path: path.to_owned(), open_options: reopen_options, }; // TODO: Under pressure, it's likely the slot will get re-used and // the underlying file closed before they get around to using it. // => https://github.com/neondatabase/neon/issues/6065 slot_guard.file.replace(file); Ok(vfile) } /// Async version of [`::utils::crashsafe::overwrite`]. /// /// # NB: /// /// Doesn't actually use the [`VirtualFile`] file descriptor cache, but, /// it did at an earlier time. /// And it will use this module's [`io_engine`] in the near future, so, leaving it here. pub async fn crashsafe_overwrite<B: BoundedBuf<Buf = Buf> + Send, Buf: IoBuf + Send>( final_path: Utf8PathBuf, tmp_path: Utf8PathBuf, content: B, ) -> std::io::Result<()> { // TODO: use tokio_epoll_uring if configured as `io_engine`. // See https://github.com/neondatabase/neon/issues/6663 tokio::task::spawn_blocking(move || { let slice_storage; let content_len = content.bytes_init(); let content = if content.bytes_init() > 0 { slice_storage = Some(content.slice(0..content_len)); slice_storage.as_deref().expect("just set it to Some()") } else { &[] }; utils::crashsafe::overwrite(&final_path, &tmp_path, content) .maybe_fatal_err("crashsafe_overwrite") }) .await .expect("blocking task is never aborted") } /// Call File::sync_all() on the underlying File. 
pub async fn sync_all(&self) -> Result<(), Error> { with_file!(self, StorageIoOperation::Fsync, |file_guard| { let (_file_guard, res) = io_engine::get().sync_all(file_guard).await; res.maybe_fatal_err("sync_all") }) } /// Call File::sync_data() on the underlying File. pub async fn sync_data(&self) -> Result<(), Error> { with_file!(self, StorageIoOperation::Fsync, |file_guard| { let (_file_guard, res) = io_engine::get().sync_data(file_guard).await; res.maybe_fatal_err("sync_data") }) } pub async fn metadata(&self) -> Result<Metadata, Error> { with_file!(self, StorageIoOperation::Metadata, |file_guard| { let (_file_guard, res) = io_engine::get().metadata(file_guard).await; res }) } pub async fn set_len(&self, len: u64, _ctx: &RequestContext) -> Result<(), Error> { with_file!(self, StorageIoOperation::SetLen, |file_guard| { let (_file_guard, res) = io_engine::get().set_len(file_guard, len).await; res.maybe_fatal_err("set_len") }) } /// Helper function internal to `VirtualFile` that looks up the underlying File, /// opens it and evicts some other File if necessary. The passed parameter is /// assumed to be a function available for the physical `File`. /// /// We are doing it via a macro as Rust doesn't support async closures that /// take on parameters with lifetimes. async fn lock_file(&self) -> Result<FileGuard, Error> { let open_files = get_open_files(); let mut handle_guard = { // Read the cached slot handle, and see if the slot that it points to still // contains our File. // // We only need to hold the handle lock while we read the current handle. If // another thread closes the file and recycles the slot for a different file, // we will notice that the handle we read is no longer valid and retry. 
let mut handle = *self.handle.read().await; loop { // Check if the slot contains our File { let slot = &open_files.slots[handle.index]; let slot_guard = slot.inner.read().await; if slot_guard.tag == handle.tag && slot_guard.file.is_some() { // Found a cached file descriptor. slot.recently_used.store(true, Ordering::Relaxed); return Ok(FileGuard { slot_guard }); } } // The slot didn't contain our File. We will have to open it ourselves, // but before that, grab a write lock on handle in the VirtualFile, so // that no other thread will try to concurrently open the same file. let handle_guard = self.handle.write().await; // If another thread changed the handle while we were not holding the lock, // then the handle might now be valid again. Loop back to retry. if *handle_guard != handle { handle = *handle_guard; continue; } break handle_guard; } }; // We need to open the file ourselves. The handle in the VirtualFile is // now locked in write-mode. Find a free slot to put it in. let (handle, mut slot_guard) = open_files.find_victim_slot().await; // Re-open the physical file. // NB: we use StorageIoOperation::OpenAferReplace for this to distinguish this // case from StorageIoOperation::Open. This helps with identifying thrashing // of the virtual file descriptor cache. let file = observe_duration!(StorageIoOperation::OpenAfterReplace, { self.open_options.open(self.path.as_std_path()).await? }); // Store the File in the slot and update the handle in the VirtualFile // to point to it. slot_guard.file.replace(file); *handle_guard = handle; Ok(FileGuard { slot_guard: slot_guard.downgrade(), }) } /// Read the file contents in range `offset..(offset + slice.bytes_total())` into `slice[0..slice.bytes_total()]`. /// /// The returned `Slice<Buf>` is equivalent to the input `slice`, i.e., it's the same view into the same buffer. 
pub async fn read_exact_at<Buf>( &self, slice: Slice<Buf>, offset: u64, ctx: &RequestContext, ) -> Result<Slice<Buf>, Error> where Buf: IoBufAlignedMut + Send, { let assert_we_return_original_bounds = if cfg!(debug_assertions) { Some((slice.stable_ptr() as usize, slice.bytes_total())) } else { None }; let original_bounds = slice.bounds(); let (buf, res) = read_exact_at_impl(slice, offset, |buf, offset| self.read_at(buf, offset, ctx)).await; let res = res.map(|_| buf.slice(original_bounds)); if let Some(original_bounds) = assert_we_return_original_bounds { if let Ok(slice) = &res { let returned_bounds = (slice.stable_ptr() as usize, slice.bytes_total()); assert_eq!(original_bounds, returned_bounds); } } res } /// Like [`Self::read_exact_at`] but for [`PageWriteGuard`]. pub async fn read_exact_at_page( &self, page: PageWriteGuard<'static>, offset: u64, ctx: &RequestContext, ) -> Result<PageWriteGuard<'static>, Error> { let buf = PageWriteGuardBuf { page }.slice_full(); debug_assert_eq!(buf.bytes_total(), PAGE_SZ); self.read_exact_at(buf, offset, ctx) .await .map(|slice| slice.into_inner().page) } // Copied from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#219-235 pub async fn write_all_at<Buf: IoBuf + Send>( &self, buf: FullSlice<Buf>, mut offset: u64, ctx: &RequestContext, ) -> (FullSlice<Buf>, Result<(), Error>) { let buf = buf.into_raw_slice(); let bounds = buf.bounds(); let restore = |buf: Slice<_>| FullSlice::must_new(Slice::from_buf_bounds(buf.into_inner(), bounds)); let mut buf = buf; while !buf.is_empty() { let (tmp, res) = self.write_at(FullSlice::must_new(buf), offset, ctx).await; buf = tmp.into_raw_slice(); match res { Ok(0) => { return ( restore(buf), Err(Error::new( std::io::ErrorKind::WriteZero, "failed to write whole buffer", )), ); } Ok(n) => { buf = buf.slice(n..); offset += n as u64; } Err(e) if e.kind() == std::io::ErrorKind::Interrupted => {} Err(e) => return (restore(buf), Err(e)), } } (restore(buf), Ok(())) } pub(super) async fn 
read_at<Buf>( &self, buf: tokio_epoll_uring::Slice<Buf>, offset: u64, ctx: &RequestContext, ) -> (tokio_epoll_uring::Slice<Buf>, Result<usize, Error>) where Buf: tokio_epoll_uring::IoBufMut + Send, { self.validate_direct_io( Slice::stable_ptr(&buf).addr(), Slice::bytes_total(&buf), offset, ); let file_guard = match self .lock_file() .await .maybe_fatal_err("lock_file inside VirtualFileInner::read_at") { Ok(file_guard) => file_guard, Err(e) => return (buf, Err(e)), }; observe_duration!(StorageIoOperation::Read, { let ((_file_guard, buf), res) = io_engine::get().read_at(file_guard, offset, buf).await; let res = res.maybe_fatal_err("io_engine read_at inside VirtualFileInner::read_at"); if let Ok(size) = res { ctx.io_size_metrics().read.add(size.into_u64()); } (buf, res) }) } async fn write_at<B: IoBuf + Send>( &self, buf: FullSlice<B>, offset: u64, ctx: &RequestContext, ) -> (FullSlice<B>, Result<usize, Error>) { self.validate_direct_io(buf.as_ptr().addr(), buf.len(), offset); let file_guard = match self.lock_file().await { Ok(file_guard) => file_guard, Err(e) => return (buf, Err(e)), }; observe_duration!(StorageIoOperation::Write, { let ((_file_guard, buf), result) = io_engine::get().write_at(file_guard, offset, buf).await; let result = result.maybe_fatal_err("write_at"); if let Ok(size) = result { ctx.io_size_metrics().write.add(size.into_u64()); } (buf, result) }) } /// Validate all reads and writes to adhere to the O_DIRECT requirements of our production systems. /// /// Validating it iin userspace sets a consistent bar, independent of what actual OS/filesystem/block device is in use. fn validate_direct_io(&self, addr: usize, size: usize, offset: u64) { // TODO: eventually enable validation in the builds we use in real environments like staging, preprod, and prod. if !(cfg!(feature = "testing") || cfg!(test)) { return; } if !self.open_options.is_direct() { return; } // Validate buffer memory alignment. 
// // What practically matters as of Linux 6.1 is bdev_dma_alignment() // which is practically between 512 and 4096. // On our production systems, the value is 512. // The IoBuffer/IoBufferMut hard-code that value. // // Because the alloctor might return _more_ aligned addresses than requested, // there is a chance that testing would not catch violations of a runtime requirement stricter than 512. { let requirement = get_io_buffer_alignment(); let remainder = addr % requirement; assert!( remainder == 0, "Direct I/O buffer must be aligned: buffer_addr=0x{addr:x} % 0x{requirement:x} = 0x{remainder:x}" ); } // Validate offset alignment. // // We hard-code 512 throughout the code base. // So enforce just that and not anything more restrictive. // Even the shallowest testing will expose more restrictive requirements if those ever arise. { let requirement = get_io_buffer_alignment() as u64; let remainder = offset % requirement; assert!( remainder == 0,
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/page_cache.rs
pageserver/src/page_cache.rs
//! //! Global page cache //! //! The page cache uses up most of the memory in the page server. It is shared //! by all tenants, and it is used to store different kinds of pages. Sharing //! the cache allows memory to be dynamically allocated where it's needed the //! most. //! //! The page cache consists of fixed-size buffers, 8 kB each to match the //! PostgreSQL buffer size, and a Slot struct for each buffer to contain //! information about what's stored in the buffer. //! //! # Types Of Pages //! //! [`PageCache`] only supports immutable pages. //! Hence there is no need to worry about coherency. //! //! Two types of pages are supported: //! //! * **Immutable File pages**, filled & used by [`crate::tenant::block_io`] and [`crate::tenant::ephemeral_file`]. //! //! Note that [`crate::tenant::ephemeral_file::EphemeralFile`] is generally mutable, but, it's append-only. //! It uses the page cache only for the blocks that are already fully written and immutable. //! //! # Filling The Page Cache //! //! Page cache maps from a cache key to a buffer slot. //! The cache key uniquely identifies the piece of data that is being cached. //! //! The cache key for **immutable file** pages is [`FileId`] and a block number. //! Users of page cache that wish to page-cache an arbitrary (immutable!) on-disk file do the following: //! * Have a mechanism to deterministically associate the on-disk file with a [`FileId`]. //! * Get a [`FileId`] using [`next_file_id`]. //! * Use the mechanism to associate the on-disk file with the returned [`FileId`]. //! * Use [`PageCache::read_immutable_buf`] to get a [`ReadBufResult`]. //! * If the page was already cached, it'll be the [`ReadBufResult::Found`] variant that contains //! a read guard for the page. Just use it. //! * If the page was not cached, it'll be the [`ReadBufResult::NotFound`] variant that contains //! a write guard for the page. Fill the page with the contents of the on-disk file. //! 
Then call [`PageWriteGuard::mark_valid`] to mark the page as valid. //! Then try again to [`PageCache::read_immutable_buf`]. //! Unless there's high cache pressure, the page should now be cached. //! (TODO: allow downgrading the write guard to a read guard to ensure forward progress.) //! //! # Locking //! //! There are two levels of locking involved: There's one lock for the "mapping" //! from page identifier (tenant ID, timeline ID, rel, block, LSN) to the buffer //! slot, and a separate lock on each slot. To read or write the contents of a //! slot, you must hold the lock on the slot in read or write mode, //! respectively. To change the mapping of a slot, i.e. to evict a page or to //! assign a buffer for a page, you must hold the mapping lock and the lock on //! the slot at the same time. //! //! Whenever you need to hold both locks simultaneously, the slot lock must be //! acquired first. This consistent ordering avoids deadlocks. To look up a page //! in the cache, you would first look up the mapping, while holding the mapping //! lock, and then lock the slot. You must release the mapping lock in between, //! to obey the lock ordering and avoid deadlock. //! //! A slot can momentarily have invalid contents, even if it's already been //! inserted to the mapping, but you must hold the write-lock on the slot until //! the contents are valid. If you need to release the lock without initializing //! the contents, you must remove the mapping first. We make that easy for the //! callers with PageWriteGuard: the caller must explicitly call guard.mark_valid() after it has //! initialized it. If the guard is dropped without calling mark_valid(), the //! mapping is automatically removed and the slot is marked free. //! 
use std::collections::HashMap; use std::collections::hash_map::Entry; use std::sync::atomic::{AtomicU8, AtomicU64, AtomicUsize, Ordering}; use std::sync::{Arc, Weak}; use std::time::Duration; use anyhow::Context; use once_cell::sync::OnceCell; use crate::context::RequestContext; use crate::metrics::{PageCacheSizeMetrics, page_cache_eviction_metrics}; use crate::virtual_file::{IoBufferMut, IoPageSlice}; static PAGE_CACHE: OnceCell<PageCache> = OnceCell::new(); const TEST_PAGE_CACHE_SIZE: usize = 50; /// /// Initialize the page cache. This must be called once at page server startup. /// pub fn init(size: usize) { if PAGE_CACHE.set(PageCache::new(size)).is_err() { panic!("page cache already initialized"); } } /// /// Get a handle to the page cache. /// pub fn get() -> &'static PageCache { // // In unit tests, page server startup doesn't happen and no one calls // page_cache::init(). Initialize it here with a tiny cache, so that the // page cache is usable in unit tests. // if cfg!(test) { PAGE_CACHE.get_or_init(|| PageCache::new(TEST_PAGE_CACHE_SIZE)) } else { PAGE_CACHE.get().expect("page cache not initialized") } } pub const PAGE_SZ: usize = postgres_ffi::BLCKSZ as usize; const MAX_USAGE_COUNT: u8 = 5; /// See module-level comment. #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct FileId(u64); static NEXT_ID: AtomicU64 = AtomicU64::new(1); /// See module-level comment. pub fn next_file_id() -> FileId { FileId(NEXT_ID.fetch_add(1, Ordering::Relaxed)) } /// /// CacheKey uniquely identifies a "thing" to cache in the page cache. 
/// #[derive(Debug, PartialEq, Eq, Clone)] #[allow(clippy::enum_variant_names)] enum CacheKey { ImmutableFilePage { file_id: FileId, blkno: u32 }, } struct Slot { inner: tokio::sync::RwLock<SlotInner>, usage_count: AtomicU8, } struct SlotInner { key: Option<CacheKey>, // for `coalesce_readers_permit` permit: std::sync::Mutex<Weak<PinnedSlotsPermit>>, buf: IoPageSlice<'static>, } impl Slot { /// Increment usage count on the buffer, with ceiling at MAX_USAGE_COUNT. fn inc_usage_count(&self) { let _ = self .usage_count .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |val| { if val == MAX_USAGE_COUNT { None } else { Some(val + 1) } }); } /// Decrement usage count on the buffer, unless it's already zero. Returns /// the old usage count. fn dec_usage_count(&self) -> u8 { let count_res = self.usage_count .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |val| { if val == 0 { None } else { Some(val - 1) } }); match count_res { Ok(usage_count) => usage_count, Err(usage_count) => usage_count, } } /// Sets the usage count to a specific value. fn set_usage_count(&self, count: u8) { self.usage_count.store(count, Ordering::Relaxed); } } impl SlotInner { /// If there is aready a reader, drop our permit and share its permit, just like we share read access. fn coalesce_readers_permit(&self, permit: PinnedSlotsPermit) -> Arc<PinnedSlotsPermit> { let mut guard = self.permit.lock().unwrap(); if let Some(existing_permit) = guard.upgrade() { drop(guard); drop(permit); existing_permit } else { let permit = Arc::new(permit); *guard = Arc::downgrade(&permit); permit } } } pub struct PageCache { immutable_page_map: std::sync::RwLock<HashMap<(FileId, u32), usize>>, /// The actual buffers with their metadata. slots: Box<[Slot]>, pinned_slots: Arc<tokio::sync::Semaphore>, /// Index of the next candidate to evict, for the Clock replacement algorithm. /// This is interpreted modulo the page cache size. 
next_evict_slot: AtomicUsize, size_metrics: &'static PageCacheSizeMetrics, } struct PinnedSlotsPermit { _permit: tokio::sync::OwnedSemaphorePermit, } /// /// PageReadGuard is a "lease" on a buffer, for reading. The page is kept locked /// until the guard is dropped. /// pub struct PageReadGuard<'i> { _permit: Arc<PinnedSlotsPermit>, slot_guard: tokio::sync::RwLockReadGuard<'i, SlotInner>, } impl std::ops::Deref for PageReadGuard<'_> { type Target = [u8; PAGE_SZ]; fn deref(&self) -> &Self::Target { self.slot_guard.buf.deref() } } impl AsRef<[u8; PAGE_SZ]> for PageReadGuard<'_> { fn as_ref(&self) -> &[u8; PAGE_SZ] { self.slot_guard.buf.as_ref() } } /// /// PageWriteGuard is a lease on a buffer for modifying it. The page is kept locked /// until the guard is dropped. /// /// Counterintuitively, this is used even for a read, if the requested page is not /// currently found in the page cache. In that case, the caller of lock_for_read() /// is expected to fill in the page contents and call mark_valid(). pub struct PageWriteGuard<'i> { state: PageWriteGuardState<'i>, } enum PageWriteGuardState<'i> { Invalid { inner: tokio::sync::RwLockWriteGuard<'i, SlotInner>, _permit: PinnedSlotsPermit, }, Downgraded, } impl std::ops::DerefMut for PageWriteGuard<'_> { fn deref_mut(&mut self) -> &mut Self::Target { match &mut self.state { PageWriteGuardState::Invalid { inner, _permit } => inner.buf.deref_mut(), PageWriteGuardState::Downgraded => unreachable!(), } } } impl std::ops::Deref for PageWriteGuard<'_> { type Target = [u8; PAGE_SZ]; fn deref(&self) -> &Self::Target { match &self.state { PageWriteGuardState::Invalid { inner, _permit } => inner.buf.deref(), PageWriteGuardState::Downgraded => unreachable!(), } } } impl<'a> PageWriteGuard<'a> { /// Mark that the buffer contents are now valid. 
#[must_use] pub fn mark_valid(mut self) -> PageReadGuard<'a> { let prev = std::mem::replace(&mut self.state, PageWriteGuardState::Downgraded); match prev { PageWriteGuardState::Invalid { inner, _permit } => { assert!(inner.key.is_some()); PageReadGuard { _permit: Arc::new(_permit), slot_guard: inner.downgrade(), } } PageWriteGuardState::Downgraded => unreachable!(), } } } impl Drop for PageWriteGuard<'_> { /// /// If the buffer was allocated for a page that was not already in the /// cache, but the lock_for_read/write() caller dropped the buffer without /// initializing it, remove the mapping from the page cache. /// fn drop(&mut self) { match &mut self.state { PageWriteGuardState::Invalid { inner, _permit } => { assert!(inner.key.is_some()); let self_key = inner.key.as_ref().unwrap(); PAGE_CACHE.get().unwrap().remove_mapping(self_key); inner.key = None; } PageWriteGuardState::Downgraded => {} } } } /// lock_for_read() return value pub enum ReadBufResult<'a> { Found(PageReadGuard<'a>), NotFound(PageWriteGuard<'a>), } impl PageCache { pub async fn read_immutable_buf( &self, file_id: FileId, blkno: u32, ctx: &RequestContext, ) -> anyhow::Result<ReadBufResult> { self.lock_for_read(&(CacheKey::ImmutableFilePage { file_id, blkno }), ctx) .await } // // Section 2: Internal interface functions for lookup/update. // // To add support for a new kind of "thing" to cache, you will need // to add public interface routines above, and code to deal with the // "mappings" after this section. But the routines in this section should // not require changes. async fn try_get_pinned_slot_permit(&self) -> anyhow::Result<PinnedSlotsPermit> { match tokio::time::timeout( // Choose small timeout, neon_smgr does its own retries. 
// https://neondb.slack.com/archives/C04DGM6SMTM/p1694786876476869 Duration::from_secs(10), Arc::clone(&self.pinned_slots).acquire_owned(), ) .await { Ok(res) => Ok(PinnedSlotsPermit { _permit: res.expect("this semaphore is never closed"), }), Err(_timeout) => { crate::metrics::page_cache_errors_inc( crate::metrics::PageCacheErrorKind::AcquirePinnedSlotTimeout, ); anyhow::bail!("timeout: there were page guards alive for all page cache slots") } } } /// Look up a page in the cache. /// async fn try_lock_for_read( &self, cache_key: &CacheKey, permit: &mut Option<PinnedSlotsPermit>, ) -> Option<PageReadGuard> { if let Some(slot_idx) = self.search_mapping(cache_key) { // The page was found in the mapping. Lock the slot, and re-check // that it's still what we expected (because we released the mapping // lock already, another thread could have evicted the page) let slot = &self.slots[slot_idx]; let inner = slot.inner.read().await; if inner.key.as_ref() == Some(cache_key) { slot.inc_usage_count(); return Some(PageReadGuard { _permit: inner.coalesce_readers_permit(permit.take().unwrap()), slot_guard: inner, }); } } None } /// Return a locked buffer for given block. /// /// Like try_lock_for_read(), if the search criteria is not exact and the /// page is already found in the cache, *cache_key is updated. /// /// If the page is not found in the cache, this allocates a new buffer for /// it. The caller may then initialize the buffer with the contents, and /// call mark_valid(). /// /// Example usage: /// /// ```ignore /// let cache = page_cache::get(); /// /// match cache.lock_for_read(&key) { /// ReadBufResult::Found(read_guard) => { /// // The page was found in cache. Use it /// }, /// ReadBufResult::NotFound(write_guard) => { /// // The page was not found in cache. Read it from disk into the /// // buffer. /// //read_my_page_from_disk(write_guard); /// /// // The buffer contents are now valid. Tell the page cache. 
/// write_guard.mark_valid(); /// }, /// } /// ``` /// async fn lock_for_read( &self, cache_key: &CacheKey, ctx: &RequestContext, ) -> anyhow::Result<ReadBufResult> { let mut permit = Some(self.try_get_pinned_slot_permit().await?); let (read_access, hit) = match cache_key { CacheKey::ImmutableFilePage { .. } => ( &crate::metrics::PAGE_CACHE .for_ctx(ctx) .read_accesses_immutable, &crate::metrics::PAGE_CACHE.for_ctx(ctx).read_hits_immutable, ), }; read_access.inc(); let mut is_first_iteration = true; loop { // First check if the key already exists in the cache. if let Some(read_guard) = self.try_lock_for_read(cache_key, &mut permit).await { debug_assert!(permit.is_none()); if is_first_iteration { hit.inc(); } return Ok(ReadBufResult::Found(read_guard)); } debug_assert!(permit.is_some()); is_first_iteration = false; // Not found. Find a victim buffer let (slot_idx, mut inner) = self .find_victim(permit.as_ref().unwrap()) .await .context("Failed to find evict victim")?; // Insert mapping for this. At this point, we may find that another // thread did the same thing concurrently. In that case, we evicted // our victim buffer unnecessarily. Put it into the free list and // continue with the slot that the other thread chose. if let Some(_existing_slot_idx) = self.try_insert_mapping(cache_key, slot_idx) { // TODO: put to free list // We now just loop back to start from beginning. This is not // optimal, we'll perform the lookup in the mapping again, which // is not really necessary because we already got // 'existing_slot_idx'. But this shouldn't happen often enough // to matter much. 
continue; } // Make the slot ready let slot = &self.slots[slot_idx]; inner.key = Some(cache_key.clone()); slot.set_usage_count(1); debug_assert!( { let guard = inner.permit.lock().unwrap(); guard.upgrade().is_none() }, "we hold a write lock, so, no one else should have a permit" ); return Ok(ReadBufResult::NotFound(PageWriteGuard { state: PageWriteGuardState::Invalid { _permit: permit.take().unwrap(), inner, }, })); } } // // Section 3: Mapping functions // /// Search for a page in the cache using the given search key. /// /// Returns the slot index, if any. /// /// NOTE: We don't hold any lock on the mapping on return, so the slot might /// get recycled for an unrelated page immediately after this function /// returns. The caller is responsible for re-checking that the slot still /// contains the page with the same key before using it. /// fn search_mapping(&self, cache_key: &CacheKey) -> Option<usize> { match cache_key { CacheKey::ImmutableFilePage { file_id, blkno } => { let map = self.immutable_page_map.read().unwrap(); Some(*map.get(&(*file_id, *blkno))?) } } } /// /// Remove mapping for given key. /// fn remove_mapping(&self, old_key: &CacheKey) { match old_key { CacheKey::ImmutableFilePage { file_id, blkno } => { let mut map = self.immutable_page_map.write().unwrap(); map.remove(&(*file_id, *blkno)) .expect("could not find old key in mapping"); self.size_metrics.current_bytes_immutable.sub_page_sz(1); } } } /// /// Insert mapping for given key. /// /// If a mapping already existed for the given key, returns the slot index /// of the existing mapping and leaves it untouched. 
fn try_insert_mapping(&self, new_key: &CacheKey, slot_idx: usize) -> Option<usize> { match new_key { CacheKey::ImmutableFilePage { file_id, blkno } => { let mut map = self.immutable_page_map.write().unwrap(); match map.entry((*file_id, *blkno)) { Entry::Occupied(entry) => Some(*entry.get()), Entry::Vacant(entry) => { entry.insert(slot_idx); self.size_metrics.current_bytes_immutable.add_page_sz(1); None } } } } } // // Section 4: Misc internal helpers // /// Find a slot to evict. /// /// On return, the slot is empty and write-locked. async fn find_victim( &self, _permit_witness: &PinnedSlotsPermit, ) -> anyhow::Result<(usize, tokio::sync::RwLockWriteGuard<SlotInner>)> { let iter_limit = self.slots.len() * 10; let mut iters = 0; loop { iters += 1; let slot_idx = self.next_evict_slot.fetch_add(1, Ordering::Relaxed) % self.slots.len(); let slot = &self.slots[slot_idx]; if slot.dec_usage_count() == 0 { let mut inner = match slot.inner.try_write() { Ok(inner) => inner, Err(_err) => { if iters > iter_limit { // NB: Even with the permits, there's no hard guarantee that we will find a slot with // any particular number of iterations: other threads might race ahead and acquire and // release pins just as we're scanning the array. // // Imagine that nslots is 2, and as starting point, usage_count==1 on all // slots. There are two threads running concurrently, A and B. A has just // acquired the permit from the semaphore. // // A: Look at slot 1. Its usage_count == 1, so decrement it to zero, and continue the search // B: Acquire permit. // B: Look at slot 2, decrement its usage_count to zero and continue the search // B: Look at slot 1. Its usage_count is zero, so pin it and bump up its usage_count to 1. // B: Release pin and permit again // B: Acquire permit. // B: Look at slot 2. Its usage_count is zero, so pin it and bump up its usage_count to 1. 
// B: Release pin and permit again // // Now we're back in the starting situation that both slots have // usage_count 1, but A has now been through one iteration of the // find_victim() loop. This can repeat indefinitely and on each // iteration, A's iteration count increases by one. // // So, even though the semaphore for the permits is fair, the victim search // itself happens in parallel and is not fair. // Hence even with a permit, a task can theoretically be starved. // To avoid this, we'd need tokio to give priority to tasks that are holding // permits for longer. // Note that just yielding to tokio during iteration without such // priority boosting is likely counter-productive. We'd just give more opportunities // for B to bump usage count, further starving A. page_cache_eviction_metrics::observe( page_cache_eviction_metrics::Outcome::ItersExceeded { iters: iters.try_into().unwrap(), }, ); anyhow::bail!("exceeded evict iter limit"); } continue; } }; if let Some(old_key) = &inner.key { // remove mapping for old buffer self.remove_mapping(old_key); inner.key = None; page_cache_eviction_metrics::observe( page_cache_eviction_metrics::Outcome::FoundSlotEvicted { iters: iters.try_into().unwrap(), }, ); } else { page_cache_eviction_metrics::observe( page_cache_eviction_metrics::Outcome::FoundSlotUnused { iters: iters.try_into().unwrap(), }, ); } return Ok((slot_idx, inner)); } } } /// Initialize a new page cache /// /// This should be called only once at page server startup. fn new(num_pages: usize) -> Self { assert!(num_pages > 0, "page cache size must be > 0"); // We could use Vec::leak here, but that potentially also leaks // uninitialized reserved capacity. With into_boxed_slice and Box::leak // this is avoided. 
let page_buffer = IoBufferMut::with_capacity_zeroed(num_pages * PAGE_SZ).leak(); let size_metrics = &crate::metrics::PAGE_CACHE_SIZE; size_metrics.max_bytes.set_page_sz(num_pages); size_metrics.current_bytes_immutable.set_page_sz(0); let slots = page_buffer .chunks_exact_mut(PAGE_SZ) .map(|chunk| { // SAFETY: Each chunk has `PAGE_SZ` (8192) bytes, greater than 512, still aligned. let buf = unsafe { IoPageSlice::new_unchecked(chunk.try_into().unwrap()) }; Slot { inner: tokio::sync::RwLock::new(SlotInner { key: None, buf, permit: std::sync::Mutex::new(Weak::new()), }), usage_count: AtomicU8::new(0), } }) .collect(); Self { immutable_page_map: Default::default(), slots, next_evict_slot: AtomicUsize::new(0), size_metrics, pinned_slots: Arc::new(tokio::sync::Semaphore::new(num_pages)), } } } trait PageSzBytesMetric { fn set_page_sz(&self, count: usize); fn add_page_sz(&self, count: usize); fn sub_page_sz(&self, count: usize); } #[inline(always)] fn count_times_page_sz(count: usize) -> u64 { u64::try_from(count).unwrap() * u64::try_from(PAGE_SZ).unwrap() } impl PageSzBytesMetric for metrics::UIntGauge { fn set_page_sz(&self, count: usize) { self.set(count_times_page_sz(count)); } fn add_page_sz(&self, count: usize) { self.add(count_times_page_sz(count)); } fn sub_page_sz(&self, count: usize) { self.sub(count_times_page_sz(count)); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/pgdatadir_mapping.rs
pageserver/src/pgdatadir_mapping.rs
//! //! This provides an abstraction to store PostgreSQL relations and other files //! in the key-value store that implements the Repository interface. //! //! (TODO: The line between PUT-functions here and walingest.rs is a bit blurry, as //! walingest.rs handles a few things like implicit relation creation and extension. //! Clarify that) //! use std::collections::{BTreeSet, HashMap, HashSet, hash_map}; use std::ops::{ControlFlow, Range}; use std::sync::Arc; use crate::walingest::{WalIngestError, WalIngestErrorKind}; use crate::{PERF_TRACE_TARGET, ensure_walingest}; use anyhow::Context; use bytes::{Buf, Bytes, BytesMut}; use enum_map::Enum; use pageserver_api::key::{ AUX_FILES_KEY, CHECKPOINT_KEY, CONTROLFILE_KEY, CompactKey, DBDIR_KEY, Key, RelDirExists, TWOPHASEDIR_KEY, dbdir_key_range, rel_block_to_key, rel_dir_to_key, rel_key_range, rel_size_to_key, rel_tag_sparse_key, rel_tag_sparse_key_range, relmap_file_key, repl_origin_key, repl_origin_key_range, slru_block_to_key, slru_dir_to_key, slru_segment_key_range, slru_segment_size_to_key, twophase_file_key, twophase_key_range, }; use pageserver_api::keyspace::{KeySpaceRandomAccum, SparseKeySpace}; use pageserver_api::models::RelSizeMigration; use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind}; use pageserver_api::shard::ShardIdentity; use postgres_ffi::{BLCKSZ, PgMajorVersion, TransactionId}; use postgres_ffi_types::forknum::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM}; use postgres_ffi_types::{Oid, RepOriginId, TimestampTz}; use serde::{Deserialize, Serialize}; use strum::IntoEnumIterator; use tokio_util::sync::CancellationToken; use tracing::{debug, info, info_span, trace, warn}; use utils::bin_ser::{BeSer, DeserializeError}; use utils::lsn::Lsn; use utils::pausable_failpoint; use wal_decoder::models::record::NeonWalRecord; use wal_decoder::models::value::Value; use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta}; use super::tenant::{PageReconstructError, Timeline}; use crate::aux_file; use 
crate::context::{PerfInstrumentFutureExt, RequestContext, RequestContextBuilder}; use crate::keyspace::{KeySpace, KeySpaceAccum}; use crate::metrics::{ RELSIZE_CACHE_MISSES_OLD, RELSIZE_LATEST_CACHE_ENTRIES, RELSIZE_LATEST_CACHE_HITS, RELSIZE_LATEST_CACHE_MISSES, RELSIZE_SNAPSHOT_CACHE_ENTRIES, RELSIZE_SNAPSHOT_CACHE_HITS, RELSIZE_SNAPSHOT_CACHE_MISSES, }; use crate::span::{ debug_assert_current_span_has_tenant_and_timeline_id, debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id, }; use crate::tenant::storage_layer::IoConcurrency; use crate::tenant::timeline::{GetVectoredError, VersionedKeySpaceQuery}; /// Max delta records appended to the AUX_FILES_KEY (for aux v1). The write path will write a full image once this threshold is reached. pub const MAX_AUX_FILE_DELTAS: usize = 1024; /// Max number of aux-file-related delta layers. The compaction will create a new image layer once this threshold is reached. pub const MAX_AUX_FILE_V2_DELTAS: usize = 16; #[derive(Debug)] pub enum LsnForTimestamp { /// Found commits both before and after the given timestamp Present(Lsn), /// Found no commits after the given timestamp, this means /// that the newest data in the branch is older than the given /// timestamp. /// /// All commits <= LSN happened before the given timestamp Future(Lsn), /// The queried timestamp is past our horizon we look back at (PITR) /// /// All commits > LSN happened after the given timestamp, /// but any commits < LSN might have happened before or after /// the given timestamp. We don't know because no data before /// the given lsn is available. Past(Lsn), /// We have found no commit with a timestamp, /// so we can't return anything meaningful. /// /// The associated LSN is the lower bound value we can safely /// create branches on, but no statement is made if it is /// older or newer than the timestamp. /// /// This variant can e.g. be returned right after a /// cluster import. 
NoData(Lsn), } /// Each request to page server contains LSN range: `not_modified_since..request_lsn`. /// See comments libs/pageserver_api/src/models.rs. /// Based on this range and `last_record_lsn` PS calculates `effective_lsn`. /// But to distinguish requests from primary and replicas we need also to pass `request_lsn`. #[derive(Debug, Clone, Copy, Default)] pub struct LsnRange { pub effective_lsn: Lsn, pub request_lsn: Lsn, } impl LsnRange { pub fn at(lsn: Lsn) -> LsnRange { LsnRange { effective_lsn: lsn, request_lsn: lsn, } } pub fn is_latest(&self) -> bool { self.request_lsn == Lsn::MAX } } #[derive(Debug, thiserror::Error)] pub(crate) enum CalculateLogicalSizeError { #[error("cancelled")] Cancelled, /// Something went wrong while reading the metadata we use to calculate logical size /// Note that cancellation variants of `PageReconstructError` are transformed to [`Self::Cancelled`] /// in the `From` implementation for this variant. #[error(transparent)] PageRead(PageReconstructError), /// Something went wrong deserializing metadata that we read to calculate logical size #[error("decode error: {0}")] Decode(#[from] DeserializeError), } #[derive(Debug, thiserror::Error)] pub(crate) enum CollectKeySpaceError { #[error(transparent)] Decode(#[from] DeserializeError), #[error(transparent)] PageRead(PageReconstructError), #[error("cancelled")] Cancelled, } impl CollectKeySpaceError { pub(crate) fn is_cancel(&self) -> bool { match self { CollectKeySpaceError::Decode(_) => false, CollectKeySpaceError::PageRead(e) => e.is_cancel(), CollectKeySpaceError::Cancelled => true, } } pub(crate) fn into_anyhow(self) -> anyhow::Error { match self { CollectKeySpaceError::Decode(e) => anyhow::Error::new(e), CollectKeySpaceError::PageRead(e) => anyhow::Error::new(e), CollectKeySpaceError::Cancelled => anyhow::Error::new(self), } } } impl From<PageReconstructError> for CollectKeySpaceError { fn from(err: PageReconstructError) -> Self { match err { PageReconstructError::Cancelled => 
Self::Cancelled, err => Self::PageRead(err), } } } impl From<PageReconstructError> for CalculateLogicalSizeError { fn from(pre: PageReconstructError) -> Self { match pre { PageReconstructError::Cancelled => Self::Cancelled, _ => Self::PageRead(pre), } } } #[derive(Debug, thiserror::Error)] pub enum RelationError { #[error("invalid relnode")] InvalidRelnode, } /// /// This impl provides all the functionality to store PostgreSQL relations, SLRUs, /// and other special kinds of files, in a versioned key-value store. The /// Timeline struct provides the key-value store. /// /// This is a separate impl, so that we can easily include all these functions in a Timeline /// implementation, and might be moved into a separate struct later. impl Timeline { /// Start ingesting a WAL record, or other atomic modification of /// the timeline. /// /// This provides a transaction-like interface to perform a bunch /// of modifications atomically. /// /// To ingest a WAL record, call begin_modification(lsn) to get a /// DatadirModification object. Use the functions in the object to /// modify the repository state, updating all the pages and metadata /// that the WAL record affects. When you're done, call commit() to /// commit the changes. /// /// Lsn stored in modification is advanced by `ingest_record` and /// is used by `commit()` to update `last_record_lsn`. /// /// Calling commit() will flush all the changes and reset the state, /// so the `DatadirModification` struct can be reused to perform the next modification. /// /// Note that any pending modifications you make through the /// modification object won't be visible to calls to the 'get' and list /// functions of the timeline until you finish! And if you update the /// same page twice, the last update wins. 
/// pub fn begin_modification(&self, lsn: Lsn) -> DatadirModification where Self: Sized, { DatadirModification { tline: self, pending_lsns: Vec::new(), pending_metadata_pages: HashMap::new(), pending_data_batch: None, pending_deletions: Vec::new(), pending_nblocks: 0, pending_directory_entries: Vec::new(), pending_metadata_bytes: 0, is_importing_pgdata: false, lsn, } } pub fn begin_modification_for_import(&self, lsn: Lsn) -> DatadirModification where Self: Sized, { DatadirModification { tline: self, pending_lsns: Vec::new(), pending_metadata_pages: HashMap::new(), pending_data_batch: None, pending_deletions: Vec::new(), pending_nblocks: 0, pending_directory_entries: Vec::new(), pending_metadata_bytes: 0, is_importing_pgdata: true, lsn, } } //------------------------------------------------------------------------------ // Public GET functions //------------------------------------------------------------------------------ /// Look up given page version. pub(crate) async fn get_rel_page_at_lsn( &self, tag: RelTag, blknum: BlockNumber, version: Version<'_>, ctx: &RequestContext, io_concurrency: IoConcurrency, ) -> Result<Bytes, PageReconstructError> { match version { Version::LsnRange(lsns) => { let pages: smallvec::SmallVec<[_; 1]> = smallvec::smallvec![(tag, blknum)]; let res = self .get_rel_page_at_lsn_batched( pages .iter() .map(|(tag, blknum)| (tag, blknum, lsns, ctx.attached_child())), io_concurrency.clone(), ctx, ) .await; assert_eq!(res.len(), 1); res.into_iter().next().unwrap() } Version::Modified(modification) => { if tag.relnode == 0 { return Err(PageReconstructError::Other( RelationError::InvalidRelnode.into(), )); } let nblocks = self.get_rel_size(tag, version, ctx).await?; if blknum >= nblocks { debug!( "read beyond EOF at {} blk {} at {}, size is {}: returning all-zeros page", tag, blknum, version.get_lsn(), nblocks ); return Ok(ZERO_PAGE.clone()); } let key = rel_block_to_key(tag, blknum); modification.get(key, ctx).await } } } /// Like 
[`Self::get_rel_page_at_lsn`], but returns a batch of pages. /// /// The ordering of the returned vec corresponds to the ordering of `pages`. /// /// NB: the read path must be cancellation-safe. The Tonic gRPC service will drop the future /// if the client goes away (e.g. due to timeout or cancellation). /// TODO: verify that it actually is cancellation-safe. pub(crate) async fn get_rel_page_at_lsn_batched( &self, pages: impl ExactSizeIterator<Item = (&RelTag, &BlockNumber, LsnRange, RequestContext)>, io_concurrency: IoConcurrency, ctx: &RequestContext, ) -> Vec<Result<Bytes, PageReconstructError>> { debug_assert_current_span_has_tenant_and_timeline_id(); let mut slots_filled = 0; let page_count = pages.len(); // Would be nice to use smallvec here but it doesn't provide the spare_capacity_mut() API. let mut result = Vec::with_capacity(pages.len()); let result_slots = result.spare_capacity_mut(); let mut keys_slots: HashMap<Key, smallvec::SmallVec<[(usize, RequestContext); 1]>> = HashMap::with_capacity(pages.len()); let mut req_keyspaces: HashMap<Lsn, KeySpaceRandomAccum> = HashMap::with_capacity(pages.len()); for (response_slot_idx, (tag, blknum, lsns, ctx)) in pages.enumerate() { if tag.relnode == 0 { result_slots[response_slot_idx].write(Err(PageReconstructError::Other( RelationError::InvalidRelnode.into(), ))); slots_filled += 1; continue; } let lsn = lsns.effective_lsn; let nblocks = { let ctx = RequestContextBuilder::from(&ctx) .perf_span(|crnt_perf_span| { info_span!( target: PERF_TRACE_TARGET, parent: crnt_perf_span, "GET_REL_SIZE", reltag=%tag, lsn=%lsn, ) }) .attached_child(); match self .get_rel_size(*tag, Version::LsnRange(lsns), &ctx) .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone()) .await { Ok(nblocks) => nblocks, Err(err) => { result_slots[response_slot_idx].write(Err(err)); slots_filled += 1; continue; } } }; if *blknum >= nblocks { debug!( "read beyond EOF at {} blk {} at {}, size is {}: returning all-zeros page", tag, blknum, 
lsn, nblocks ); result_slots[response_slot_idx].write(Ok(ZERO_PAGE.clone())); slots_filled += 1; continue; } let key = rel_block_to_key(*tag, *blknum); let ctx = RequestContextBuilder::from(&ctx) .perf_span(|crnt_perf_span| { info_span!( target: PERF_TRACE_TARGET, parent: crnt_perf_span, "GET_BATCH", batch_size = %page_count, ) }) .attached_child(); let key_slots = keys_slots.entry(key).or_default(); key_slots.push((response_slot_idx, ctx)); let acc = req_keyspaces.entry(lsn).or_default(); acc.add_key(key); } let query: Vec<(Lsn, KeySpace)> = req_keyspaces .into_iter() .map(|(lsn, acc)| (lsn, acc.to_keyspace())) .collect(); let query = VersionedKeySpaceQuery::scattered(query); let res = self .get_vectored(query, io_concurrency, ctx) .maybe_perf_instrument(ctx, |current_perf_span| current_perf_span.clone()) .await; match res { Ok(results) => { for (key, res) in results { let mut key_slots = keys_slots.remove(&key).unwrap().into_iter(); let (first_slot, first_req_ctx) = key_slots.next().unwrap(); for (slot, req_ctx) in key_slots { let clone = match &res { Ok(buf) => Ok(buf.clone()), Err(err) => Err(match err { PageReconstructError::Cancelled => PageReconstructError::Cancelled, x @ PageReconstructError::Other(_) | x @ PageReconstructError::AncestorLsnTimeout(_) | x @ PageReconstructError::WalRedo(_) | x @ PageReconstructError::MissingKey(_) => { PageReconstructError::Other(anyhow::anyhow!( "there was more than one request for this key in the batch, error logged once: {x:?}" )) } }), }; result_slots[slot].write(clone); // There is no standardized way to express that the batched span followed from N request spans. // So, abuse the system and mark the request contexts as follows_from the batch span, so we get // some linkage in our trace viewer. It allows us to answer: which GET_VECTORED did this GET_PAGE wait for. 
req_ctx.perf_follows_from(ctx); slots_filled += 1; } result_slots[first_slot].write(res); first_req_ctx.perf_follows_from(ctx); slots_filled += 1; } } Err(err) => { // this cannot really happen because get_vectored only errors globally on invalid LSN or too large batch size // (We enforce the max batch size outside of this function, in the code that constructs the batch request.) for (slot, req_ctx) in keys_slots.values().flatten() { // this whole `match` is a lot like `From<GetVectoredError> for PageReconstructError` // but without taking ownership of the GetVectoredError let err = match &err { GetVectoredError::Cancelled => Err(PageReconstructError::Cancelled), // TODO: restructure get_vectored API to make this error per-key GetVectoredError::MissingKey(err) => { Err(PageReconstructError::Other(anyhow::anyhow!( "whole vectored get request failed because one or more of the requested keys were missing: {err:?}" ))) } // TODO: restructure get_vectored API to make this error per-key GetVectoredError::GetReadyAncestorError(err) => { Err(PageReconstructError::Other(anyhow::anyhow!( "whole vectored get request failed because one or more key required ancestor that wasn't ready: {err:?}" ))) } // TODO: restructure get_vectored API to make this error per-key GetVectoredError::Other(err) => Err(PageReconstructError::Other( anyhow::anyhow!("whole vectored get request failed: {err:?}"), )), // TODO: we can prevent this error class by moving this check into the type system GetVectoredError::InvalidLsn(e) => { Err(anyhow::anyhow!("invalid LSN: {e:?}").into()) } // NB: this should never happen in practice because we limit batch size to be smaller than max_get_vectored_keys // TODO: we can prevent this error class by moving this check into the type system GetVectoredError::Oversized(err, max) => { Err(anyhow::anyhow!("batching oversized: {err} > {max}").into()) } }; req_ctx.perf_follows_from(ctx); result_slots[*slot].write(err); } slots_filled += keys_slots.values().map(|slots| 
slots.len()).sum::<usize>(); } }; assert_eq!(slots_filled, page_count); // SAFETY: // 1. `result` and any of its uninint members are not read from until this point // 2. The length below is tracked at run-time and matches the number of requested pages. unsafe { result.set_len(page_count); } result } /// Get size of a database in blocks. This is only accurate on shard 0. It will undercount on /// other shards, by only accounting for relations the shard has pages for, and only accounting /// for pages up to the highest page number it has stored. pub(crate) async fn get_db_size( &self, spcnode: Oid, dbnode: Oid, version: Version<'_>, ctx: &RequestContext, ) -> Result<usize, PageReconstructError> { let mut total_blocks = 0; let rels = self.list_rels(spcnode, dbnode, version, ctx).await?; if rels.is_empty() { return Ok(0); } // Pre-deserialize the rel directory to avoid duplicated work in `get_relsize_cached`. let reldir_key = rel_dir_to_key(spcnode, dbnode); let buf = version.get(self, reldir_key, ctx).await?; let reldir = RelDirectory::des(&buf)?; for rel in rels { let n_blocks = self .get_rel_size_in_reldir(rel, version, Some((reldir_key, &reldir)), false, ctx) .await? .expect("allow_missing=false"); total_blocks += n_blocks as usize; } Ok(total_blocks) } /// Get size of a relation file. The relation must exist, otherwise an error is returned. /// /// This is only accurate on shard 0. On other shards, it will return the size up to the highest /// page number stored in the shard. pub(crate) async fn get_rel_size( &self, tag: RelTag, version: Version<'_>, ctx: &RequestContext, ) -> Result<BlockNumber, PageReconstructError> { Ok(self .get_rel_size_in_reldir(tag, version, None, false, ctx) .await? .expect("allow_missing=false")) } /// Get size of a relation file. If `allow_missing` is true, returns None for missing relations, /// otherwise errors. /// /// INVARIANT: never returns None if `allow_missing=false`. 
/// /// See [`Self::get_rel_exists_in_reldir`] on why we need `deserialized_reldir_v1`. pub(crate) async fn get_rel_size_in_reldir( &self, tag: RelTag, version: Version<'_>, deserialized_reldir_v1: Option<(Key, &RelDirectory)>, allow_missing: bool, ctx: &RequestContext, ) -> Result<Option<BlockNumber>, PageReconstructError> { if tag.relnode == 0 { return Err(PageReconstructError::Other( RelationError::InvalidRelnode.into(), )); } if let Some(nblocks) = self.get_cached_rel_size(&tag, version) { return Ok(Some(nblocks)); } if allow_missing && !self .get_rel_exists_in_reldir(tag, version, deserialized_reldir_v1, ctx) .await? { return Ok(None); } if (tag.forknum == FSM_FORKNUM || tag.forknum == VISIBILITYMAP_FORKNUM) && !self .get_rel_exists_in_reldir(tag, version, deserialized_reldir_v1, ctx) .await? { // FIXME: Postgres sometimes calls smgrcreate() to create // FSM, and smgrnblocks() on it immediately afterwards, // without extending it. Tolerate that by claiming that // any non-existent FSM fork has size 0. return Ok(Some(0)); } let key = rel_size_to_key(tag); let mut buf = version.get(self, key, ctx).await?; let nblocks = buf.get_u32_le(); self.update_cached_rel_size(tag, version, nblocks); Ok(Some(nblocks)) } /// Does the relation exist? /// /// Only shard 0 has a full view of the relations. Other shards only know about relations that /// the shard stores pages for. 
/// pub(crate) async fn get_rel_exists( &self, tag: RelTag, version: Version<'_>, ctx: &RequestContext, ) -> Result<bool, PageReconstructError> { self.get_rel_exists_in_reldir(tag, version, None, ctx).await } async fn get_rel_exists_in_reldir_v1( &self, tag: RelTag, version: Version<'_>, deserialized_reldir_v1: Option<(Key, &RelDirectory)>, ctx: &RequestContext, ) -> Result<bool, PageReconstructError> { let key = rel_dir_to_key(tag.spcnode, tag.dbnode); if let Some((cached_key, dir)) = deserialized_reldir_v1 { if cached_key == key { return Ok(dir.rels.contains(&(tag.relnode, tag.forknum))); } else if cfg!(test) || cfg!(feature = "testing") { panic!("cached reldir key mismatch: {cached_key} != {key}"); } else { warn!("cached reldir key mismatch: {cached_key} != {key}"); } // Fallback to reading the directory from the datadir. } let buf = version.get(self, key, ctx).await?; let dir = RelDirectory::des(&buf)?; Ok(dir.rels.contains(&(tag.relnode, tag.forknum))) } async fn get_rel_exists_in_reldir_v2( &self, tag: RelTag, version: Version<'_>, ctx: &RequestContext, ) -> Result<bool, PageReconstructError> { let key = rel_tag_sparse_key(tag.spcnode, tag.dbnode, tag.relnode, tag.forknum); let buf = RelDirExists::decode_option(version.sparse_get(self, key, ctx).await?).map_err( |_| { PageReconstructError::Other(anyhow::anyhow!( "invalid reldir key: decode failed, {}", key )) }, )?; let exists_v2 = buf == RelDirExists::Exists; Ok(exists_v2) } /// Does the relation exist? With a cached deserialized `RelDirectory`. /// /// There are some cases where the caller loops across all relations. In that specific case, /// the caller should obtain the deserialized `RelDirectory` first and then call this function /// to avoid duplicated work of deserliazation. This is a hack and should be removed by introducing /// a new API (e.g., `get_rel_exists_batched`). 
pub(crate) async fn get_rel_exists_in_reldir( &self, tag: RelTag, version: Version<'_>, deserialized_reldir_v1: Option<(Key, &RelDirectory)>, ctx: &RequestContext, ) -> Result<bool, PageReconstructError> { if tag.relnode == 0 { return Err(PageReconstructError::Other( RelationError::InvalidRelnode.into(), )); } // first try to lookup relation in cache if let Some(_nblocks) = self.get_cached_rel_size(&tag, version) { return Ok(true); } // then check if the database was already initialized. // get_rel_exists can be called before dbdir is created. let buf = version.get(self, DBDIR_KEY, ctx).await?; let dbdirs = DbDirectory::des(&buf)?.dbdirs; if !dbdirs.contains_key(&(tag.spcnode, tag.dbnode)) { return Ok(false); } let (v2_status, migrated_lsn) = self.get_rel_size_v2_status(); match v2_status { RelSizeMigration::Legacy => { let v1_exists = self .get_rel_exists_in_reldir_v1(tag, version, deserialized_reldir_v1, ctx) .await?; Ok(v1_exists) } RelSizeMigration::Migrating | RelSizeMigration::Migrated if version.get_lsn() < migrated_lsn.unwrap_or(Lsn(0)) => { // For requests below the migrated LSN, we still use the v1 read path. 
let v1_exists = self .get_rel_exists_in_reldir_v1(tag, version, deserialized_reldir_v1, ctx) .await?; Ok(v1_exists) } RelSizeMigration::Migrating => { let v1_exists = self .get_rel_exists_in_reldir_v1(tag, version, deserialized_reldir_v1, ctx) .await?; let v2_exists_res = self.get_rel_exists_in_reldir_v2(tag, version, ctx).await; match v2_exists_res { Ok(v2_exists) if v1_exists == v2_exists => {} Ok(v2_exists) => { tracing::warn!( "inconsistent v1/v2 reldir keyspace for rel {}: v1_exists={}, v2_exists={}", tag, v1_exists, v2_exists ); } Err(e) => { tracing::warn!("failed to get rel exists in v2: {e}"); } } Ok(v1_exists) } RelSizeMigration::Migrated => { let v2_exists = self.get_rel_exists_in_reldir_v2(tag, version, ctx).await?; Ok(v2_exists) } } } async fn list_rels_v1( &self, spcnode: Oid, dbnode: Oid, version: Version<'_>, ctx: &RequestContext, ) -> Result<HashSet<RelTag>, PageReconstructError> { let key = rel_dir_to_key(spcnode, dbnode); let buf = version.get(self, key, ctx).await?; let dir = RelDirectory::des(&buf)?; let rels_v1: HashSet<RelTag> = HashSet::from_iter(dir.rels.iter().map(|(relnode, forknum)| RelTag { spcnode, dbnode, relnode: *relnode, forknum: *forknum, })); Ok(rels_v1) } async fn list_rels_v2( &self, spcnode: Oid, dbnode: Oid, version: Version<'_>, ctx: &RequestContext, ) -> Result<HashSet<RelTag>, PageReconstructError> { let key_range = rel_tag_sparse_key_range(spcnode, dbnode); let io_concurrency = IoConcurrency::spawn_from_conf( self.conf.get_vectored_concurrent_io, self.gate .enter() .map_err(|_| PageReconstructError::Cancelled)?, ); let results = self .scan( KeySpace::single(key_range), version.get_lsn(), ctx, io_concurrency, ) .await?; let mut rels = HashSet::new(); for (key, val) in results { let val = RelDirExists::decode(&val?).map_err(|_| { PageReconstructError::Other(anyhow::anyhow!( "invalid reldir key: decode failed, {}", key )) })?; if key.field6 != 1 { return Err(PageReconstructError::Other(anyhow::anyhow!( "invalid reldir key: 
field6 != 1, {}", key ))); } if key.field2 != spcnode { return Err(PageReconstructError::Other(anyhow::anyhow!( "invalid reldir key: field2 != spcnode, {}", key ))); } if key.field3 != dbnode { return Err(PageReconstructError::Other(anyhow::anyhow!( "invalid reldir key: field3 != dbnode, {}", key ))); } let tag = RelTag { spcnode, dbnode, relnode: key.field4, forknum: key.field5, }; if val == RelDirExists::Removed { debug_assert!(!rels.contains(&tag), "removed reltag in v2"); continue; } let did_not_contain = rels.insert(tag); debug_assert!(did_not_contain, "duplicate reltag in v2"); } Ok(rels) } /// Get a list of all existing relations in given tablespace and database. /// /// Only shard 0 has a full view of the relations. Other shards only know about relations that /// the shard stores pages for. /// /// # Cancel-Safety /// /// This method is cancellation-safe.
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/config.rs
pageserver/src/tenant/config.rs
//! Functions for handling per-tenant configuration options
//!
//! If tenant is created with --config option,
//! the tenant-specific config will be stored in tenant's directory.
//! Otherwise, global pageserver's config is used.
//!
//! If the tenant config file is corrupted, the tenant will be disabled.
//! We cannot use global or default config instead, because wrong settings
//! may lead to a data loss.
//!

use pageserver_api::models;
use pageserver_api::shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize};
use serde::{Deserialize, Serialize};
use utils::critical;
use utils::generation::Generation;

/// How this pageserver is attached to the tenant, relative to other attached
/// pageservers. The mode governs the advisory deletion/upload hints below.
#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub(crate) enum AttachmentMode {
    /// Our generation is current as far as we know, and as far as we know we are the only attached
    /// pageserver. This is the "normal" attachment mode.
    Single,
    /// Our generation number is current as far as we know, but we are advised that another
    /// pageserver is still attached, and therefore to avoid executing deletions. This is
    /// the attachment mode of a pageserver that is the destination of a migration.
    Multi,
    /// Our generation number is superseded, or about to be superseded. We are advised
    /// to avoid remote storage writes if possible, and to avoid sending billing data. This
    /// is the attachment mode of a pageserver that is the origin of a migration.
    Stale,
}

/// Configuration for an attached location: the generation we hold and the
/// attachment mode that modulates deletion/upload behavior.
#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub(crate) struct AttachedLocationConfig {
    pub(crate) generation: Generation,
    pub(crate) attach_mode: AttachmentMode,
    // TODO: add a flag to override AttachmentMode's policies under
    // disk pressure (i.e. unblock uploads under disk pressure in Stale
    // state, unblock deletions after timeout in Multi state)
}

/// Configuration for a secondary (non-attached, cache-warming) location.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub(crate) struct SecondaryLocationConfig {
    /// If true, keep the local cache warm by polling remote storage
    pub(crate) warm: bool,
}

/// Operating mode of this pageserver for a tenant: either attached (serving
/// reads/writes with a generation) or secondary (warming a local cache).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub(crate) enum LocationMode {
    Attached(AttachedLocationConfig),
    Secondary(SecondaryLocationConfig),
}

/// Per-tenant, per-pageserver configuration. All pageservers use the same TenantConf,
/// but have distinct LocationConf.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub(crate) struct LocationConf {
    /// The location-specific part of the configuration, describes the operating
    /// mode of this pageserver for this tenant.
    pub(crate) mode: LocationMode,

    /// The detailed shard identity. This structure is already scoped within
    /// a TenantShardId, but we need the full ShardIdentity to enable calculating
    /// key->shard mappings.
    ///
    /// NB: we store this even for unsharded tenants, so that we agree with storcon on the intended
    /// stripe size. Otherwise, a split request that does not specify a stripe size may use a
    /// different default than storcon, which can lead to incorrect stripe sizes and corruption.
    pub(crate) shard: ShardIdentity,

    /// The pan-cluster tenant configuration, the same on all locations
    pub(crate) tenant_conf: pageserver_api::models::TenantConfig,
}

impl std::fmt::Debug for LocationConf {
    // Compact one-line summary: attachment mode + generation, or secondary warmth.
    // Intentionally omits the shard identity and full tenant_conf to keep logs terse.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match &self.mode {
            LocationMode::Attached(conf) => {
                write!(
                    f,
                    "Attached {:?}, gen={:?}",
                    conf.attach_mode, conf.generation
                )
            }
            LocationMode::Secondary(conf) => {
                write!(f, "Secondary, warm={}", conf.warm)
            }
        }
    }
}

impl AttachedLocationConfig {
    /// Consult attachment mode to determine whether we are currently permitted
    /// to delete layers. This is only advisory, not required for data safety.
    /// See [`AttachmentMode`] for more context.
    pub(crate) fn may_delete_layers_hint(&self) -> bool {
        // TODO: add an override for disk pressure in AttachedLocationConfig,
        // and respect it here.
        match &self.attach_mode {
            AttachmentMode::Single => true,
            AttachmentMode::Multi | AttachmentMode::Stale => {
                // In Multi mode we avoid doing deletions because some other
                // attached pageserver might get 404 while trying to read
                // a layer we delete which is still referenced in their metadata.
                //
                // In Stale mode, we avoid doing deletions because we expect
                // that they would ultimately fail validation in the deletion
                // queue due to our stale generation.
                false
            }
        }
    }

    /// Whether we are currently hinted that it is worthwhile to upload layers.
    /// This is only advisory, not required for data safety.
    /// See [`AttachmentMode`] for more context.
    pub(crate) fn may_upload_layers_hint(&self) -> bool {
        // TODO: add an override for disk pressure in AttachedLocationConfig,
        // and respect it here.
        match &self.attach_mode {
            AttachmentMode::Single | AttachmentMode::Multi => true,
            AttachmentMode::Stale => {
                // In Stale mode, we avoid doing uploads because we expect that
                // our replacement pageserver will already have started its own
                // IndexPart that will never reference layers we upload: it is
                // wasteful.
                false
            }
        }
    }
}

impl LocationConf {
    /// For use when loading from a legacy configuration: presence of a tenant
    /// implies it is in AttachmentMode::Single, which used to be the only
    /// possible state. This function should eventually be removed.
    pub(crate) fn attached_single(
        tenant_conf: pageserver_api::models::TenantConfig,
        generation: Generation,
        shard_params: models::ShardParameters,
    ) -> Self {
        Self {
            mode: LocationMode::Attached(AttachedLocationConfig {
                generation,
                attach_mode: AttachmentMode::Single,
            }),
            // Legacy configs predate sharding: always shard number 0.
            shard: ShardIdentity::from_params(ShardNumber(0), shard_params),
            tenant_conf,
        }
    }

    /// For use when attaching/re-attaching: update the generation stored in this
    /// structure. If we were in a secondary state, promote to attached (possession
    /// of a fresh generation implies this).
    pub(crate) fn attach_in_generation(
        &mut self,
        mode: AttachmentMode,
        generation: Generation,
        stripe_size: ShardStripeSize,
    ) {
        match &mut self.mode {
            LocationMode::Attached(attach_conf) => {
                attach_conf.generation = generation;
                attach_conf.attach_mode = mode;
            }
            LocationMode::Secondary(_) => {
                // We are promoted to attached by the control plane's re-attach response
                self.mode = LocationMode::Attached(AttachedLocationConfig {
                    generation,
                    attach_mode: mode,
                })
            }
        }

        // A stripe size differing from the persisted one should never happen:
        // log it loudly, but still take the control plane's value as authoritative.
        // TODO: turn this into a proper assertion.
        if stripe_size != self.shard.stripe_size {
            critical!(
                "stripe size mismatch: {} != {}",
                self.shard.stripe_size,
                stripe_size,
            );
        }
        self.shard.stripe_size = stripe_size;
    }

    /// Decode an API [`models::LocationConfig`] into the internal representation.
    ///
    /// Errors if an attached mode lacks a generation, a secondary mode carries a
    /// generation, the mode is `Detached` (callers must handle detach separately),
    /// or the shard parameters are invalid.
    pub(crate) fn try_from(conf: &'_ models::LocationConfig) -> anyhow::Result<Self> {
        let tenant_conf = conf.tenant_conf.clone();

        // Attached modes require a generation; absence is a caller error.
        fn get_generation(conf: &'_ models::LocationConfig) -> Result<Generation, anyhow::Error> {
            conf.generation
                .map(Generation::new)
                .ok_or_else(|| anyhow::anyhow!("Generation must be set when attaching"))
        }

        let mode = match &conf.mode {
            models::LocationConfigMode::AttachedMulti => {
                LocationMode::Attached(AttachedLocationConfig {
                    generation: get_generation(conf)?,
                    attach_mode: AttachmentMode::Multi,
                })
            }
            models::LocationConfigMode::AttachedSingle => {
                LocationMode::Attached(AttachedLocationConfig {
                    generation: get_generation(conf)?,
                    attach_mode: AttachmentMode::Single,
                })
            }
            models::LocationConfigMode::AttachedStale => {
                LocationMode::Attached(AttachedLocationConfig {
                    generation: get_generation(conf)?,
                    attach_mode: AttachmentMode::Stale,
                })
            }
            models::LocationConfigMode::Secondary => {
                // Secondary locations hold no generation by definition.
                anyhow::ensure!(conf.generation.is_none());

                // Missing secondary_conf defaults to a cold (non-polling) secondary.
                let warm = conf
                    .secondary_conf
                    .as_ref()
                    .map(|c| c.warm)
                    .unwrap_or(false);
                LocationMode::Secondary(SecondaryLocationConfig { warm })
            }
            models::LocationConfigMode::Detached => {
                // Should not have been called: API code should translate this mode
                // into a detach rather than trying to decode it as a LocationConf
                return Err(anyhow::anyhow!("Cannot decode a Detached configuration"));
            }
        };

        let shard = if conf.shard_count == 0 {
            // NB: carry over the persisted stripe size instead of using the default. This doesn't
            // matter for most practical purposes, since unsharded tenants don't use the stripe
            // size, but can cause inconsistencies between storcon and Pageserver and cause manual
            // splits without `new_stripe_size` to use an unintended stripe size.
            ShardIdentity::unsharded_with_stripe_size(ShardStripeSize(conf.shard_stripe_size))
        } else {
            ShardIdentity::new(
                ShardNumber(conf.shard_number),
                ShardCount::new(conf.shard_count),
                ShardStripeSize(conf.shard_stripe_size),
            )?
        };

        Ok(Self {
            shard,
            mode,
            tenant_conf,
        })
    }
}

impl Default for LocationConf {
    // TODO: this should be removed once tenant loading can guarantee that we are never
    // loading from a directory without a configuration.
    // => tech debt since https://github.com/neondatabase/neon/issues/1555
    fn default() -> Self {
        Self {
            mode: LocationMode::Attached(AttachedLocationConfig {
                generation: Generation::none(),
                attach_mode: AttachmentMode::Single,
            }),
            tenant_conf: pageserver_api::models::TenantConfig::default(),
            shard: ShardIdentity::unsharded(),
        }
    }
}

#[cfg(test)]
mod tests {
    // Sanity-check that a sparse TenantConfig round-trips through both TOML and
    // JSON without emitting unset (None) fields.
    #[test]
    fn serde_roundtrip_tenant_conf_opt() {
        let small_conf = pageserver_api::models::TenantConfig {
            gc_horizon: Some(42),
            ..Default::default()
        };

        let toml_form = toml_edit::ser::to_string(&small_conf).unwrap();
        assert_eq!(toml_form, "gc_horizon = 42\n");
        assert_eq!(small_conf, toml_edit::de::from_str(&toml_form).unwrap());

        let json_form = serde_json::to_string(&small_conf).unwrap();
        assert_eq!(json_form, "{\"gc_horizon\":42}");
        assert_eq!(small_conf, serde_json::from_str(&json_form).unwrap());
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/storage_layer.rs
pageserver/src/tenant/storage_layer.rs
//! Common traits and structs for layers pub mod batch_split_writer; pub mod delta_layer; pub mod errors; pub mod filter_iterator; pub mod image_layer; pub mod inmemory_layer; pub(crate) mod layer; mod layer_desc; mod layer_name; pub mod merge_iterator; use std::cmp::Ordering; use std::collections::hash_map::Entry; use std::collections::{BinaryHeap, HashMap}; use std::ops::Range; use std::pin::Pin; use std::sync::Arc; use std::sync::atomic::AtomicUsize; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use crate::PERF_TRACE_TARGET; pub use batch_split_writer::{BatchLayerWriter, SplitDeltaLayerWriter, SplitImageLayerWriter}; use bytes::Bytes; pub use delta_layer::{DeltaLayer, DeltaLayerWriter, ValueRef}; use futures::StreamExt; use futures::stream::FuturesUnordered; pub use image_layer::{ImageLayer, ImageLayerWriter}; pub use inmemory_layer::InMemoryLayer; pub(crate) use layer::{EvictionError, Layer, ResidentLayer}; pub use layer_desc::{PersistentLayerDesc, PersistentLayerKey}; pub use layer_name::{DeltaLayerName, ImageLayerName, LayerName}; use pageserver_api::config::GetVectoredConcurrentIo; use pageserver_api::key::Key; use pageserver_api::keyspace::{KeySpace, KeySpaceRandomAccum}; use tracing::{Instrument, info_span, trace}; use utils::lsn::Lsn; use utils::sync::gate::GateGuard; use wal_decoder::models::record::NeonWalRecord; use wal_decoder::models::value::Value; use self::inmemory_layer::InMemoryLayerFileId; use super::PageReconstructError; use super::layer_map::InMemoryLayerDesc; use super::timeline::{GetVectoredError, ReadPath}; use crate::context::{ AccessStatsBehavior, PerfInstrumentFutureExt, RequestContext, RequestContextBuilder, }; pub fn range_overlaps<T>(a: &Range<T>, b: &Range<T>) -> bool where T: PartialOrd<T>, { if a.start < b.start { a.end > b.start } else { b.end > a.start } } /// Struct used to communicate across calls to 'get_value_reconstruct_data'. 
/// /// Before first call, you can fill in 'page_img' if you have an older cached /// version of the page available. That can save work in /// 'get_value_reconstruct_data', as it can stop searching for page versions /// when all the WAL records going back to the cached image have been collected. /// /// When get_value_reconstruct_data returns Complete, 'img' is set to an image /// of the page, or the oldest WAL record in 'records' is a will_init-type /// record that initializes the page without requiring a previous image. /// /// If 'get_page_reconstruct_data' returns Continue, some 'records' may have /// been collected, but there are more records outside the current layer. Pass /// the same ValueReconstructState struct in the next 'get_value_reconstruct_data' /// call, to collect more records. /// #[derive(Debug, Default, Clone)] pub(crate) struct ValueReconstructState { pub(crate) records: Vec<(Lsn, NeonWalRecord)>, pub(crate) img: Option<(Lsn, Bytes)>, } impl ValueReconstructState { /// Returns the number of page deltas applied to the page image. pub fn num_deltas(&self) -> usize { match self.img { Some(_) => self.records.len(), None => self.records.len() - 1, // omit will_init record } } } #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] pub(crate) enum ValueReconstructSituation { Complete, #[default] Continue, } /// On disk representation of a value loaded in a buffer #[derive(Debug)] pub(crate) enum OnDiskValue { /// Unencoded [`Value::Image`] RawImage(Bytes), /// Encoded [`Value`]. 
Can deserialize into an image or a WAL record WalRecordOrImage(Bytes), } /// Reconstruct data accumulated for a single key during a vectored get #[derive(Debug, Default)] pub struct VectoredValueReconstructState { pub(crate) on_disk_values: Vec<(Lsn, OnDiskValueIoWaiter)>, pub(crate) situation: ValueReconstructSituation, } #[derive(Debug)] pub(crate) struct OnDiskValueIoWaiter { rx: tokio::sync::oneshot::Receiver<OnDiskValueIoResult>, } #[derive(Debug)] #[must_use] pub(crate) enum OnDiskValueIo { /// Traversal identified this IO as required to complete the vectored get. Required { num_active_ios: Arc<AtomicUsize>, tx: tokio::sync::oneshot::Sender<OnDiskValueIoResult>, }, /// Sparse keyspace reads always read all the values for a given key, /// even though only the first value is needed. /// /// This variant represents the unnecessary IOs for those values at lower LSNs /// that aren't needed, but are currently still being done. /// /// The execution of unnecessary IOs was a pre-existing behavior before concurrent IO. /// We added this explicit representation here so that we can drop /// unnecessary IO results immediately, instead of buffering them in /// `oneshot` channels inside [`VectoredValueReconstructState`] until /// [`VectoredValueReconstructState::collect_pending_ios`] gets called. Unnecessary, } type OnDiskValueIoResult = Result<OnDiskValue, std::io::Error>; impl OnDiskValueIo { pub(crate) fn complete(self, res: OnDiskValueIoResult) { match self { OnDiskValueIo::Required { num_active_ios, tx } => { num_active_ios.fetch_sub(1, std::sync::atomic::Ordering::Release); let _ = tx.send(res); } OnDiskValueIo::Unnecessary => { // Nobody cared, see variant doc comment. 
} } } } #[derive(Debug, thiserror::Error)] pub(crate) enum WaitCompletionError { #[error("OnDiskValueIo was dropped without completing, likely the sidecar task panicked")] IoDropped, } impl OnDiskValueIoWaiter { pub(crate) async fn wait_completion(self) -> Result<OnDiskValueIoResult, WaitCompletionError> { // NB: for Unnecessary IOs, this method never gets called because we don't add them to `on_disk_values`. self.rx.await.map_err(|_| WaitCompletionError::IoDropped) } } impl VectoredValueReconstructState { /// # Cancel-Safety /// /// Technically fine to stop polling this future, but, the IOs will still /// be executed to completion by the sidecar task and hold on to / consume resources. /// Better not do it to make reasonsing about the system easier. pub(crate) async fn collect_pending_ios( self, ) -> Result<ValueReconstructState, PageReconstructError> { use utils::bin_ser::BeSer; let mut res = Ok(ValueReconstructState::default()); // We should try hard not to bail early, so that by the time we return from this // function, all IO for this value is done. It's not required -- we could totally // stop polling the IO futures in the sidecar task, they need to support that, // but just stopping to poll doesn't reduce the IO load on the disk. It's easier // to reason about the system if we just wait for all IO to complete, even if // we're no longer interested in the result. // // Revisit this when IO futures are replaced with a more sophisticated IO system // and an IO scheduler, where we know which IOs were submitted and which ones // just queued. Cf the comment on IoConcurrency::spawn_io. for (lsn, waiter) in self.on_disk_values { let value_recv_res = waiter .wait_completion() // we rely on the caller to poll us to completion, so this is not a bail point .await; // Force not bailing early by wrapping the code into a closure. 
#[allow(clippy::redundant_closure_call)] let _: () = (|| { match (&mut res, value_recv_res) { (Err(_), _) => { // We've already failed, no need to process more. } (Ok(_), Err(wait_err)) => { // This shouldn't happen - likely the sidecar task panicked. res = Err(PageReconstructError::Other(wait_err.into())); } (Ok(_), Ok(Err(err))) => { let err: std::io::Error = err; // TODO: returning IO error here will fail a compute query. // Probably not what we want, we're not doing `maybe_fatal_err` // in the IO futures. // But it's been like that for a long time, not changing it // as part of concurrent IO. // => https://github.com/neondatabase/neon/issues/10454 res = Err(PageReconstructError::Other(err.into())); } (Ok(ok), Ok(Ok(OnDiskValue::RawImage(img)))) => { assert!(ok.img.is_none()); ok.img = Some((lsn, img)); } (Ok(ok), Ok(Ok(OnDiskValue::WalRecordOrImage(buf)))) => { match Value::des(&buf) { Ok(Value::WalRecord(rec)) => { ok.records.push((lsn, rec)); } Ok(Value::Image(img)) => { assert!(ok.img.is_none()); ok.img = Some((lsn, img)); } Err(err) => { res = Err(PageReconstructError::Other(err.into())); } } } } })(); } res } /// Benchmarking utility to await for the completion of all pending ios /// /// # Cancel-Safety /// /// Technically fine to stop polling this future, but, the IOs will still /// be executed to completion by the sidecar task and hold on to / consume resources. /// Better not do it to make reasonsing about the system easier. #[cfg(feature = "benchmarking")] pub async fn sink_pending_ios(self) -> Result<(), std::io::Error> { let mut res = Ok(()); // We should try hard not to bail early, so that by the time we return from this // function, all IO for this value is done. It's not required -- we could totally // stop polling the IO futures in the sidecar task, they need to support that, // but just stopping to poll doesn't reduce the IO load on the disk. 
It's easier // to reason about the system if we just wait for all IO to complete, even if // we're no longer interested in the result. // // Revisit this when IO futures are replaced with a more sophisticated IO system // and an IO scheduler, where we know which IOs were submitted and which ones // just queued. Cf the comment on IoConcurrency::spawn_io. for (_lsn, waiter) in self.on_disk_values { let value_recv_res = waiter .wait_completion() // we rely on the caller to poll us to completion, so this is not a bail point .await; match (&mut res, value_recv_res) { (Err(_), _) => { // We've already failed, no need to process more. } (Ok(_), Err(_wait_err)) => { // This shouldn't happen - likely the sidecar task panicked. unreachable!(); } (Ok(_), Ok(Err(err))) => { let err: std::io::Error = err; res = Err(err); } (Ok(_ok), Ok(Ok(OnDiskValue::RawImage(_img)))) => {} (Ok(_ok), Ok(Ok(OnDiskValue::WalRecordOrImage(_buf)))) => {} } } res } } /// Bag of data accumulated during a vectored get.. pub struct ValuesReconstructState { /// The keys will be removed after `get_vectored` completes. The caller outside `Timeline` /// should not expect to get anything from this hashmap. pub keys: HashMap<Key, VectoredValueReconstructState>, /// The keys which are already retrieved keys_done: KeySpaceRandomAccum, /// The keys covered by the image layers keys_with_image_coverage: Option<Range<Key>>, // Statistics that are still accessible as a caller of `get_vectored_impl`. layers_visited: u32, delta_layers_visited: u32, pub(crate) enable_debug: bool, pub(crate) debug_state: ValueReconstructState, pub(crate) io_concurrency: IoConcurrency, num_active_ios: Arc<AtomicUsize>, pub(crate) read_path: Option<ReadPath>, } /// The level of IO concurrency to be used on the read path /// /// The desired end state is that we always do parallel IO. /// This struct and the dispatching in the impl will be removed once /// we've built enough confidence. 
pub enum IoConcurrency { Sequential, SidecarTask { task_id: usize, ios_tx: tokio::sync::mpsc::UnboundedSender<IoFuture>, }, } type IoFuture = Pin<Box<dyn Send + Future<Output = ()>>>; pub(crate) enum SelectedIoConcurrency { Sequential, SidecarTask(GateGuard), } impl std::fmt::Debug for IoConcurrency { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { IoConcurrency::Sequential => write!(f, "Sequential"), IoConcurrency::SidecarTask { .. } => write!(f, "SidecarTask"), } } } impl std::fmt::Debug for SelectedIoConcurrency { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { SelectedIoConcurrency::Sequential => write!(f, "Sequential"), SelectedIoConcurrency::SidecarTask(_) => write!(f, "SidecarTask"), } } } impl IoConcurrency { /// Force sequential IO. This is a temporary workaround until we have /// moved plumbing-through-the-call-stack /// of IoConcurrency into `RequestContextq. /// /// DO NOT USE for new code. /// /// Tracking issue: <https://github.com/neondatabase/neon/issues/10460>. pub(crate) fn sequential() -> Self { Self::spawn(SelectedIoConcurrency::Sequential) } pub fn spawn_from_conf(conf: GetVectoredConcurrentIo, gate_guard: GateGuard) -> IoConcurrency { let selected = match conf { GetVectoredConcurrentIo::Sequential => SelectedIoConcurrency::Sequential, GetVectoredConcurrentIo::SidecarTask => SelectedIoConcurrency::SidecarTask(gate_guard), }; Self::spawn(selected) } pub(crate) fn spawn(io_concurrency: SelectedIoConcurrency) -> Self { match io_concurrency { SelectedIoConcurrency::Sequential => IoConcurrency::Sequential, SelectedIoConcurrency::SidecarTask(gate_guard) => { let (ios_tx, ios_rx) = tokio::sync::mpsc::unbounded_channel(); static TASK_ID: AtomicUsize = AtomicUsize::new(0); let task_id = TASK_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed); // TODO: enrich the span with more context (tenant,shard,timeline) + (basebackup|pagestream|...) 
let span = tracing::info_span!(parent: None, "IoConcurrency_sidecar", task_id = task_id); trace!(task_id, "spawning sidecar task"); tokio::spawn(async move { trace!("start"); scopeguard::defer!{ trace!("end") }; type IosRx = tokio::sync::mpsc::UnboundedReceiver<IoFuture>; enum State { Waiting { // invariant: is_empty(), but we recycle the allocation empty_futures: FuturesUnordered<IoFuture>, ios_rx: IosRx, }, Executing { futures: FuturesUnordered<IoFuture>, ios_rx: IosRx, }, ShuttingDown { futures: FuturesUnordered<IoFuture>, }, } let mut state = State::Waiting { empty_futures: FuturesUnordered::new(), ios_rx, }; loop { match state { State::Waiting { empty_futures, mut ios_rx, } => { assert!(empty_futures.is_empty()); tokio::select! { fut = ios_rx.recv() => { if let Some(fut) = fut { trace!("received new io future"); empty_futures.push(fut); state = State::Executing { futures: empty_futures, ios_rx }; } else { state = State::ShuttingDown { futures: empty_futures } } } } } State::Executing { mut futures, mut ios_rx, } => { tokio::select! { res = futures.next() => { trace!("io future completed"); assert!(res.is_some()); if futures.is_empty() { state = State::Waiting { empty_futures: futures, ios_rx}; } else { state = State::Executing { futures, ios_rx }; } } fut = ios_rx.recv() => { if let Some(fut) = fut { trace!("received new io future"); futures.push(fut); state = State::Executing { futures, ios_rx}; } else { state = State::ShuttingDown { futures }; } } } } State::ShuttingDown { mut futures, } => { trace!("shutting down"); while let Some(()) = futures.next().await { trace!("io future completed (shutdown)"); // drain } trace!("shutdown complete"); break; } } } drop(gate_guard); // drop it right before we exit }.instrument(span)); IoConcurrency::SidecarTask { task_id, ios_tx } } } } /// Submit an IO to be executed in the background. DEADLOCK RISK, read the full doc string. /// /// The IO is represented as an opaque future. 
/// IO completion must be handled inside the future, e.g., through a oneshot channel. /// /// The API seems simple but there are multiple **pitfalls** involving /// DEADLOCK RISK. /// /// First, there are no guarantees about the exexecution of the IO. /// It may be `await`ed in-place before this function returns. /// It may be polled partially by this task and handed off to another task to be finished. /// It may be polled and then dropped before returning ready. /// /// This means that submitted IOs must not be interedependent. /// Interdependence may be through shared limited resources, e.g., /// - VirtualFile file descriptor cache slot acquisition /// - tokio-epoll-uring slot /// /// # Why current usage is safe from deadlocks /// /// Textbook condition for a deadlock is that _all_ of the following be given /// - Mutual exclusion /// - Hold and wait /// - No preemption /// - Circular wait /// /// The current usage is safe because: /// - Mutual exclusion: IO futures definitely use mutexes, no way around that for now /// - Hold and wait: IO futures currently hold two kinds of locks/resources while waiting /// for acquisition of other resources: /// - VirtualFile file descriptor cache slot tokio mutex /// - tokio-epoll-uring slot (uses tokio notify => wait queue, much like mutex) /// - No preemption: there's no taking-away of acquired locks/resources => given /// - Circular wait: this is the part of the condition that isn't met: all IO futures /// first acquire VirtualFile mutex, then tokio-epoll-uring slot. /// There is no IO future that acquires slot before VirtualFile. /// Hence there can be no circular waiting. /// Hence there cannot be a deadlock. /// /// This is a very fragile situation and must be revisited whenver any code called from /// inside the IO futures is changed. 
/// /// We will move away from opaque IO futures towards well-defined IOs at some point in /// the future when we have shipped this first version of concurrent IO to production /// and are ready to retire the Sequential mode which runs the futures in place. /// Right now, while brittle, the opaque IO approach allows us to ship the feature /// with minimal changes to the code and minimal changes to existing behavior in Sequential mode. /// /// Also read the comment in `collect_pending_ios`. pub(crate) async fn spawn_io<F>(&mut self, fut: F) where F: std::future::Future<Output = ()> + Send + 'static, { match self { IoConcurrency::Sequential => fut.await, IoConcurrency::SidecarTask { ios_tx, .. } => { let fut = Box::pin(fut); // NB: experiments showed that doing an opportunistic poll of `fut` here was bad for throughput // while insignificant for latency. // It would make sense to revisit the tokio-epoll-uring API in the future such that we can try // a submission here, but never poll the future. That way, io_uring can make proccess while // the future sits in the ios_tx queue. match ios_tx.send(fut) { Ok(()) => {} Err(_) => { unreachable!("the io task must have exited, likely it panicked") } } } } } #[cfg(test)] pub(crate) fn spawn_for_test() -> impl std::ops::DerefMut<Target = Self> { use std::ops::{Deref, DerefMut}; use tracing::info; use utils::sync::gate::Gate; // Spawn needs a Gate, give it one. struct Wrapper { inner: IoConcurrency, #[allow(dead_code)] gate: Box<Gate>, } impl Deref for Wrapper { type Target = IoConcurrency; fn deref(&self) -> &Self::Target { &self.inner } } impl DerefMut for Wrapper { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } let gate = Box::new(Gate::default()); // The default behavior when running Rust unit tests without any further // flags is to use the new behavior. // The CI uses the following environment variable to unit test both old // and new behavior. 
// NB: the Python regression & perf tests take the `else` branch // below and have their own defaults management. let selected = { // The pageserver_api::config type is unsuitable because it's internally tagged. #[derive(serde::Deserialize)] #[serde(rename_all = "kebab-case")] enum TestOverride { Sequential, SidecarTask, } use once_cell::sync::Lazy; static TEST_OVERRIDE: Lazy<TestOverride> = Lazy::new(|| { utils::env::var_serde_json_string( "NEON_PAGESERVER_UNIT_TEST_GET_VECTORED_CONCURRENT_IO", ) .unwrap_or(TestOverride::SidecarTask) }); match *TEST_OVERRIDE { TestOverride::Sequential => SelectedIoConcurrency::Sequential, TestOverride::SidecarTask => { SelectedIoConcurrency::SidecarTask(gate.enter().expect("just created it")) } } }; info!(?selected, "get_vectored_concurrent_io test"); Wrapper { inner: Self::spawn(selected), gate, } } } impl Clone for IoConcurrency { fn clone(&self) -> Self { match self { IoConcurrency::Sequential => IoConcurrency::Sequential, IoConcurrency::SidecarTask { task_id, ios_tx } => IoConcurrency::SidecarTask { task_id: *task_id, ios_tx: ios_tx.clone(), }, } } } /// Make noise in case the [`ValuesReconstructState`] gets dropped while /// there are still IOs in flight. /// Refer to `collect_pending_ios` for why we prefer not to do that. // /// We log from here instead of from the sidecar task because the [`ValuesReconstructState`] /// gets dropped in a tracing span with more context. /// We repeat the sidecar tasks's `task_id` so we can correlate what we emit here with /// the logs / panic handler logs from the sidecar task, which also logs the `task_id`. impl Drop for ValuesReconstructState { fn drop(&mut self) { let num_active_ios = self .num_active_ios .load(std::sync::atomic::Ordering::Acquire); if num_active_ios == 0 { return; } let sidecar_task_id = match &self.io_concurrency { IoConcurrency::Sequential => None, IoConcurrency::SidecarTask { task_id, .. 
} => Some(*task_id), }; tracing::warn!( num_active_ios, ?sidecar_task_id, backtrace=%std::backtrace::Backtrace::force_capture(), "dropping ValuesReconstructState while some IOs have not been completed", ); } } impl ValuesReconstructState { pub fn new(io_concurrency: IoConcurrency) -> Self { Self { keys: HashMap::new(), keys_done: KeySpaceRandomAccum::new(), keys_with_image_coverage: None, layers_visited: 0, delta_layers_visited: 0, io_concurrency, enable_debug: false, debug_state: ValueReconstructState::default(), num_active_ios: Arc::new(AtomicUsize::new(0)), read_path: None, } } pub(crate) fn new_with_debug(io_concurrency: IoConcurrency) -> Self { Self { keys: HashMap::new(), keys_done: KeySpaceRandomAccum::new(), keys_with_image_coverage: None, layers_visited: 0, delta_layers_visited: 0, io_concurrency, enable_debug: true, debug_state: ValueReconstructState::default(), num_active_ios: Arc::new(AtomicUsize::new(0)), read_path: None, } } /// Absolutely read [`IoConcurrency::spawn_io`] to learn about assumptions & pitfalls. pub(crate) async fn spawn_io<F>(&mut self, fut: F) where F: std::future::Future<Output = ()> + Send + 'static, { self.io_concurrency.spawn_io(fut).await; } pub(crate) fn set_debug_state(&mut self, debug_state: &ValueReconstructState) { if self.enable_debug { self.debug_state = debug_state.clone(); } } pub(crate) fn on_layer_visited(&mut self, layer: &ReadableLayer) { self.layers_visited += 1; if let ReadableLayer::PersistentLayer(layer) = layer { if layer.layer_desc().is_delta() { self.delta_layers_visited += 1; } } } pub(crate) fn get_delta_layers_visited(&self) -> u32 { self.delta_layers_visited } pub(crate) fn get_layers_visited(&self) -> u32 { self.layers_visited } /// On hitting image layer, we can mark all keys in this range as done, because /// if the image layer does not contain a key, it is deleted/never added. 
pub(crate) fn on_image_layer_visited(&mut self, key_range: &Range<Key>) { let prev_val = self.keys_with_image_coverage.replace(key_range.clone()); assert_eq!( prev_val, None, "should consume the keyspace before the next iteration" ); } /// Update the state collected for a given key. /// Returns true if this was the last value needed for the key and false otherwise. /// /// If the key is done after the update, mark it as such. /// /// If the key is in the sparse keyspace (i.e., aux files), we do not track them in /// `key_done`. // TODO: rename this method & update description. pub(crate) fn update_key(&mut self, key: &Key, lsn: Lsn, completes: bool) -> OnDiskValueIo { let state = self.keys.entry(*key).or_default(); let is_sparse_key = key.is_sparse(); let required_io = match state.situation { ValueReconstructSituation::Complete => { if is_sparse_key { // Sparse keyspace might be visited multiple times because // we don't track unmapped keyspaces. return OnDiskValueIo::Unnecessary; } else { unreachable!() } } ValueReconstructSituation::Continue => { self.num_active_ios .fetch_add(1, std::sync::atomic::Ordering::Release); let (tx, rx) = tokio::sync::oneshot::channel(); state.on_disk_values.push((lsn, OnDiskValueIoWaiter { rx })); OnDiskValueIo::Required { tx, num_active_ios: Arc::clone(&self.num_active_ios), } } }; if completes && state.situation == ValueReconstructSituation::Continue { state.situation = ValueReconstructSituation::Complete; if !is_sparse_key { self.keys_done.add_key(*key); } } required_io } /// Returns the key space describing the keys that have /// been marked as completed since the last call to this function. /// Returns individual keys done, and the image layer coverage. 
pub(crate) fn consume_done_keys(&mut self) -> (KeySpace, Option<Range<Key>>) { ( self.keys_done.consume_keyspace(), self.keys_with_image_coverage.take(), ) } } /// A key that uniquely identifies a layer in a timeline #[derive(Debug, PartialEq, Eq, Clone, Hash)] pub(crate) enum LayerId { PersitentLayerId(PersistentLayerKey), InMemoryLayerId(InMemoryLayerFileId), } /// Uniquely identify a layer visit by the layer /// and LSN range of the reads. Note that the end of the range is exclusive. /// /// The layer itself is not enough since we may have different LSN lower /// bounds for delta layer reads. Scenarios where this can happen are: /// /// 1. Layer overlaps: imagine an image layer inside and in-memory layer /// and a query that only partially hits the image layer. Part of the query /// needs to read the whole in-memory layer and the other part needs to read /// only up to the image layer. Hence, they'll have different LSN floor values /// for the read. /// /// 2. Scattered reads: the read path supports starting at different LSNs. Imagine /// The start LSN for one range is inside a layer and the start LSN for another range /// Is above the layer (includes all of it). Both ranges need to read the layer all the
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/throttle.rs
pageserver/src/tenant/throttle.rs
use std::sync::Arc; use std::sync::atomic::{AtomicU64, Ordering}; use std::time::Instant; use arc_swap::ArcSwap; use utils::leaky_bucket::{LeakyBucketConfig, RateLimiter}; /// Throttle for `async` functions. /// /// Runtime reconfigurable. /// /// To share a throttle among multiple entities, wrap it in an [`Arc`]. /// /// The intial use case for this is tenant-wide throttling of getpage@lsn requests. pub struct Throttle { inner: ArcSwap<Inner>, /// will be turned into [`Stats::count_accounted_start`] count_accounted_start: AtomicU64, /// will be turned into [`Stats::count_accounted_finish`] count_accounted_finish: AtomicU64, /// will be turned into [`Stats::count_throttled`] count_throttled: AtomicU64, /// will be turned into [`Stats::sum_throttled_usecs`] sum_throttled_usecs: AtomicU64, } pub struct Inner { enabled: bool, rate_limiter: Arc<RateLimiter>, } pub type Config = pageserver_api::models::ThrottleConfig; /// See [`Throttle::reset_stats`]. pub struct Stats { /// Number of requests that started [`Throttle::throttle`] calls. pub count_accounted_start: u64, /// Number of requests that finished [`Throttle::throttle`] calls. pub count_accounted_finish: u64, /// Subset of the `accounted` requests that were actually throttled. /// Note that the numbers are stored as two independent atomics, so, there might be a slight drift. pub count_throttled: u64, /// Sum of microseconds that throttled requests spent waiting for throttling. 
pub sum_throttled_usecs: u64, } pub enum ThrottleResult { NotThrottled { end: Instant }, Throttled { end: Instant }, } impl Throttle { pub fn new(config: Config) -> Self { Self { inner: ArcSwap::new(Arc::new(Self::new_inner(config))), count_accounted_start: AtomicU64::new(0), count_accounted_finish: AtomicU64::new(0), count_throttled: AtomicU64::new(0), sum_throttled_usecs: AtomicU64::new(0), } } fn new_inner(config: Config) -> Inner { let Config { enabled, initial, refill_interval, refill_amount, max, } = config; // steady rate, we expect `refill_amount` requests per `refill_interval`. // dividing gives us the rps. let rps = f64::from(refill_amount.get()) / refill_interval.as_secs_f64(); let config = LeakyBucketConfig::new(rps, f64::from(max)); // initial tracks how many tokens are available to put in the bucket // we want how many tokens are currently in the bucket let initial_tokens = max - initial; let rate_limiter = RateLimiter::with_initial_tokens(config, f64::from(initial_tokens)); Inner { enabled: enabled.is_enabled(), rate_limiter: Arc::new(rate_limiter), } } pub fn reconfigure(&self, config: Config) { self.inner.store(Arc::new(Self::new_inner(config))); } /// The [`Throttle`] keeps an internal flag that is true if there was ever any actual throttling. /// This method allows retrieving & resetting that flag. /// Useful for periodic reporting. pub fn reset_stats(&self) -> Stats { let count_accounted_start = self.count_accounted_start.swap(0, Ordering::Relaxed); let count_accounted_finish = self.count_accounted_finish.swap(0, Ordering::Relaxed); let count_throttled = self.count_throttled.swap(0, Ordering::Relaxed); let sum_throttled_usecs = self.sum_throttled_usecs.swap(0, Ordering::Relaxed); Stats { count_accounted_start, count_accounted_finish, count_throttled, sum_throttled_usecs, } } /// See [`Config::steady_rps`]. pub fn steady_rps(&self) -> f64 { self.inner.load().rate_limiter.steady_rps() } /// `start` must be [`Instant::now`] or earlier. 
pub async fn throttle(&self, key_count: usize, start: Instant) -> ThrottleResult { let inner = self.inner.load_full(); // clones the `Inner` Arc if !inner.enabled { return ThrottleResult::NotThrottled { end: start }; } self.count_accounted_start.fetch_add(1, Ordering::Relaxed); let did_throttle = inner.rate_limiter.acquire(key_count).await; self.count_accounted_finish.fetch_add(1, Ordering::Relaxed); if did_throttle { self.count_throttled.fetch_add(1, Ordering::Relaxed); let end = Instant::now(); let wait_time = end - start; self.sum_throttled_usecs .fetch_add(wait_time.as_micros() as u64, Ordering::Relaxed); ThrottleResult::Throttled { end } } else { ThrottleResult::NotThrottled { end: start } } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/tasks.rs
pageserver/src/tenant/tasks.rs
//! This module contains per-tenant background processes, e.g. compaction and GC. use std::cmp::max; use std::future::Future; use std::ops::{ControlFlow, RangeInclusive}; use std::pin::pin; use std::sync::Arc; use std::time::{Duration, Instant}; use once_cell::sync::Lazy; use pageserver_api::config::tenant_conf_defaults::DEFAULT_COMPACTION_PERIOD; use rand::Rng; use scopeguard::defer; use tokio::sync::{Semaphore, SemaphorePermit}; use tokio_util::sync::CancellationToken; use tracing::*; use utils::backoff::exponential_backoff_duration; use utils::completion::Barrier; use utils::pausable_failpoint; use crate::context::{DownloadBehavior, RequestContext}; use crate::metrics::{self, BackgroundLoopSemaphoreMetricsRecorder, TENANT_TASK_EVENTS}; use crate::task_mgr::{self, BACKGROUND_RUNTIME, TOKIO_WORKER_THREADS, TaskKind}; use crate::tenant::throttle::Stats; use crate::tenant::timeline::CompactionError; use crate::tenant::timeline::compaction::CompactionOutcome; use crate::tenant::{TenantShard, TenantState}; /// Semaphore limiting concurrent background tasks (across all tenants). /// /// We use 3/4 Tokio threads, to avoid blocking all threads in case we do any CPU-heavy work. static CONCURRENT_BACKGROUND_TASKS: Lazy<Semaphore> = Lazy::new(|| { let total_threads = TOKIO_WORKER_THREADS.get(); /*BEGIN_HADRON*/ // ideally we should run at least one compaction task per tenant in order to (1) maximize // compaction throughput (2) avoid head-of-line blocking of large compactions. However doing // that may create too many compaction tasks with lots of memory overheads. So we limit the // number of compaction tasks based on the available CPU core count. // Need to revisit. 
// let tasks_per_thread = std::env::var("BG_TASKS_PER_THREAD") // .ok() // .and_then(|s| s.parse().ok()) // .unwrap_or(4); // let permits = usize::max(1, total_threads * tasks_per_thread); // // assert!(permits < total_threads, "need threads for other work"); /*END_HADRON*/ let permits = max(1, (total_threads * 3).checked_div(4).unwrap_or(0)); assert_ne!(permits, 0, "we will not be adding in permits later"); assert!(permits < total_threads, "need threads for other work"); Semaphore::new(permits) }); /// Semaphore limiting concurrent L0 compaction tasks (across all tenants). This is only used if /// both `compaction_l0_semaphore` and `compaction_l0_first` are enabled. /// /// This is a separate semaphore from background tasks, because L0 compaction needs to be responsive /// to avoid high read amp during heavy write workloads. Regular image/GC compaction is less /// important (e.g. due to page images in delta layers) and can wait for other background tasks. /// /// We use 3/4 Tokio threads, to avoid blocking all threads in case we do any CPU-heavy work. Note /// that this runs on the same Tokio runtime as `CONCURRENT_BACKGROUND_TASKS`, and shares the same /// thread pool. static CONCURRENT_L0_COMPACTION_TASKS: Lazy<Semaphore> = Lazy::new(|| { let total_threads = TOKIO_WORKER_THREADS.get(); let permits = max(1, (total_threads * 3).checked_div(4).unwrap_or(0)); assert_ne!(permits, 0, "we will not be adding in permits later"); assert!(permits < total_threads, "need threads for other work"); Semaphore::new(permits) }); /// Background jobs. /// /// NB: not all of these acquire a CONCURRENT_BACKGROUND_TASKS semaphore permit, only the ones that /// do any significant IO or CPU work. #[derive( Debug, PartialEq, Eq, Clone, Copy, strum_macros::IntoStaticStr, strum_macros::Display, enum_map::Enum, )] #[strum(serialize_all = "snake_case")] pub(crate) enum BackgroundLoopKind { /// L0Compaction runs as a separate pass within the Compaction loop, not a separate loop. 
It is /// used to request the `CONCURRENT_L0_COMPACTION_TASKS` semaphore and associated metrics. L0Compaction, Compaction, Gc, Eviction, TenantHouseKeeping, ConsumptionMetricsCollectMetrics, ConsumptionMetricsSyntheticSizeWorker, InitialLogicalSizeCalculation, HeatmapUpload, SecondaryDownload, } pub struct BackgroundLoopSemaphorePermit<'a> { _permit: SemaphorePermit<'static>, _recorder: BackgroundLoopSemaphoreMetricsRecorder<'a>, } /// Acquires a semaphore permit, to limit concurrent background jobs. pub(crate) async fn acquire_concurrency_permit( loop_kind: BackgroundLoopKind, _ctx: &RequestContext, ) -> BackgroundLoopSemaphorePermit<'static> { let mut recorder = metrics::BACKGROUND_LOOP_SEMAPHORE.record(loop_kind); if loop_kind == BackgroundLoopKind::InitialLogicalSizeCalculation { pausable_failpoint!("initial-size-calculation-permit-pause"); } // TODO: assert that we run on BACKGROUND_RUNTIME; requires tokio_unstable Handle::id(); let semaphore = match loop_kind { BackgroundLoopKind::L0Compaction => &CONCURRENT_L0_COMPACTION_TASKS, _ => &CONCURRENT_BACKGROUND_TASKS, }; let permit = semaphore.acquire().await.expect("should never close"); recorder.acquired(); BackgroundLoopSemaphorePermit { _permit: permit, _recorder: recorder, } } /// Start per tenant background loops: compaction, GC, and ingest housekeeping. pub fn start_background_loops(tenant: &Arc<TenantShard>, can_start: Option<&Barrier>) { let tenant_shard_id = tenant.tenant_shard_id; task_mgr::spawn( BACKGROUND_RUNTIME.handle(), TaskKind::Compaction, tenant_shard_id, None, &format!("compactor for tenant {tenant_shard_id}"), { let tenant = Arc::clone(tenant); let can_start = can_start.cloned(); async move { let cancel = task_mgr::shutdown_token(); // NB: must be in async context tokio::select! 
{ _ = cancel.cancelled() => return Ok(()), _ = Barrier::maybe_wait(can_start) => {} }; TENANT_TASK_EVENTS.with_label_values(&["start"]).inc(); defer!(TENANT_TASK_EVENTS.with_label_values(&["stop"]).inc()); compaction_loop(tenant, cancel) // If you rename this span, change the RUST_LOG env variable in test_runner/performance/test_branch_creation.py .instrument(info_span!("compaction_loop", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug())) .await; Ok(()) } }, ); task_mgr::spawn( BACKGROUND_RUNTIME.handle(), TaskKind::GarbageCollector, tenant_shard_id, None, &format!("garbage collector for tenant {tenant_shard_id}"), { let tenant = Arc::clone(tenant); let can_start = can_start.cloned(); async move { let cancel = task_mgr::shutdown_token(); // NB: must be in async context tokio::select! { _ = cancel.cancelled() => return Ok(()), _ = Barrier::maybe_wait(can_start) => {} }; TENANT_TASK_EVENTS.with_label_values(&["start"]).inc(); defer!(TENANT_TASK_EVENTS.with_label_values(&["stop"]).inc()); gc_loop(tenant, cancel) .instrument(info_span!("gc_loop", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug())) .await; Ok(()) } }, ); task_mgr::spawn( BACKGROUND_RUNTIME.handle(), TaskKind::TenantHousekeeping, tenant_shard_id, None, &format!("housekeeping for tenant {tenant_shard_id}"), { let tenant = Arc::clone(tenant); let can_start = can_start.cloned(); async move { let cancel = task_mgr::shutdown_token(); // NB: must be in async context tokio::select! { _ = cancel.cancelled() => return Ok(()), _ = Barrier::maybe_wait(can_start) => {} }; TENANT_TASK_EVENTS.with_label_values(&["start"]).inc(); defer!(TENANT_TASK_EVENTS.with_label_values(&["stop"]).inc()); tenant_housekeeping_loop(tenant, cancel) .instrument(info_span!("tenant_housekeeping_loop", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug())) .await; Ok(()) } }, ); } /// Compaction task's main loop. 
async fn compaction_loop(tenant: Arc<TenantShard>, cancel: CancellationToken) { const BASE_BACKOFF_SECS: f64 = 1.0; const MAX_BACKOFF_SECS: f64 = 300.0; const RECHECK_CONFIG_INTERVAL: Duration = Duration::from_secs(10); let ctx = RequestContext::todo_child(TaskKind::Compaction, DownloadBehavior::Download); let mut period = tenant.get_compaction_period(); let mut error_run = 0; // consecutive errors // Stagger the compaction loop across tenants. if wait_for_active_tenant(&tenant, &cancel).await.is_break() { return; } if sleep_random(period, &cancel).await.is_err() { return; } loop { // Recheck that we're still active. if wait_for_active_tenant(&tenant, &cancel).await.is_break() { return; } // Refresh the period. If compaction is disabled, check again in a bit. period = tenant.get_compaction_period(); if period == Duration::ZERO { #[cfg(not(feature = "testing"))] info!("automatic compaction is disabled"); tokio::select! { _ = tokio::time::sleep(RECHECK_CONFIG_INTERVAL) => {}, _ = cancel.cancelled() => return, } continue; } // Wait for the next compaction run. let backoff = exponential_backoff_duration(error_run, BASE_BACKOFF_SECS, MAX_BACKOFF_SECS); tokio::select! { _ = tokio::time::sleep(backoff), if error_run > 0 => {}, _ = tokio::time::sleep(period), if error_run == 0 => {}, _ = tenant.l0_compaction_trigger.notified(), if error_run == 0 => {}, _ = cancel.cancelled() => return, } // Run compaction. let iteration = Iteration { started_at: Instant::now(), period, kind: BackgroundLoopKind::Compaction, }; let IterationResult { output, elapsed } = iteration .run(tenant.compaction_iteration(&cancel, &ctx)) .await; match output { Ok(outcome) => { error_run = 0; // If there's more compaction work, L0 or not, schedule an immediate run. 
match outcome { CompactionOutcome::Done => {} CompactionOutcome::Skipped => {} CompactionOutcome::YieldForL0 => tenant.l0_compaction_trigger.notify_one(), CompactionOutcome::Pending => tenant.l0_compaction_trigger.notify_one(), } } Err(err) => { error_run += 1; let backoff = exponential_backoff_duration(error_run, BASE_BACKOFF_SECS, MAX_BACKOFF_SECS); log_compaction_error( &err, Some((error_run, backoff)), cancel.is_cancelled(), false, ); continue; } } // NB: this log entry is recorded by performance tests. debug!( elapsed_ms = elapsed.as_millis(), "compaction iteration complete" ); } } pub(crate) fn log_compaction_error( err: &CompactionError, retry_info: Option<(u32, Duration)>, task_cancelled: bool, degrade_to_warning: bool, ) { let is_cancel = err.is_cancel(); let level = if is_cancel || task_cancelled { Level::INFO } else { Level::ERROR }; if let Some((error_count, sleep_duration)) = retry_info { match level { Level::ERROR => { error!( "Compaction failed {error_count} times, retrying in {sleep_duration:?}: {err:#}" ) } Level::INFO => { info!( "Compaction failed {error_count} times, retrying in {sleep_duration:?}: {err:#}" ) } level => unimplemented!("unexpected level {level:?}"), } } else { match level { Level::ERROR if degrade_to_warning => warn!("Compaction failed and discarded: {err:#}"), Level::ERROR => error!("Compaction failed: {err:?}"), Level::INFO => info!("Compaction failed: {err:#}"), level => unimplemented!("unexpected level {level:?}"), } } } /// GC task's main loop. async fn gc_loop(tenant: Arc<TenantShard>, cancel: CancellationToken) { const MAX_BACKOFF_SECS: f64 = 300.0; let mut error_run = 0; // consecutive errors // GC might require downloading, to find the cutoff LSN that corresponds to the // cutoff specified as time. 
let ctx = RequestContext::todo_child(TaskKind::GarbageCollector, DownloadBehavior::Download); let mut first = true; loop { if wait_for_active_tenant(&tenant, &cancel).await.is_break() { return; } let period = tenant.get_gc_period(); if first { first = false; if sleep_random(period, &cancel).await.is_err() { break; } } let gc_horizon = tenant.get_gc_horizon(); let sleep_duration; if period == Duration::ZERO || gc_horizon == 0 { #[cfg(not(feature = "testing"))] info!("automatic GC is disabled"); // check again in 10 seconds, in case it's been enabled again. sleep_duration = Duration::from_secs(10); } else { let iteration = Iteration { started_at: Instant::now(), period, kind: BackgroundLoopKind::Gc, }; // Run gc let IterationResult { output, elapsed: _ } = iteration .run(tenant.gc_iteration( None, gc_horizon, tenant.get_pitr_interval(), &cancel, &ctx, )) .await; match output { Ok(_) => { error_run = 0; sleep_duration = period; } Err(crate::tenant::GcError::TenantCancelled) => { return; } Err(e) => { error_run += 1; let wait_duration = exponential_backoff_duration(error_run, 1.0, MAX_BACKOFF_SECS); if matches!(e, crate::tenant::GcError::TimelineCancelled) { // Timeline was cancelled during gc. We might either be in an event // that affects the entire tenant (tenant deletion, pageserver shutdown), // or in one that affects the timeline only (timeline deletion). // Therefore, don't exit the loop. info!("Gc failed {error_run} times, retrying in {wait_duration:?}: {e:?}"); } else { error!("Gc failed {error_run} times, retrying in {wait_duration:?}: {e:?}"); } sleep_duration = wait_duration; } } }; if tokio::time::timeout(sleep_duration, cancel.cancelled()) .await .is_ok() { break; } } } /// Tenant housekeeping's main loop. 
async fn tenant_housekeeping_loop(tenant: Arc<TenantShard>, cancel: CancellationToken) { let mut last_throttle_flag_reset_at = Instant::now(); loop { if wait_for_active_tenant(&tenant, &cancel).await.is_break() { return; } // Use the same period as compaction; it's not worth a separate setting. But if it's set to // zero (to disable compaction), then use a reasonable default. Jitter it by 5%. let period = match tenant.get_compaction_period() { Duration::ZERO => humantime::parse_duration(DEFAULT_COMPACTION_PERIOD).unwrap(), period => period, }; let Ok(period) = sleep_jitter(period, period * 5 / 100, &cancel).await else { break; }; // Do tenant housekeeping. let iteration = Iteration { started_at: Instant::now(), period, kind: BackgroundLoopKind::TenantHouseKeeping, }; iteration.run(tenant.housekeeping()).await; // Log any getpage throttling. info_span!(parent: None, "pagestream_throttle", tenant_id=%tenant.tenant_shard_id, shard_id=%tenant.tenant_shard_id.shard_slug()).in_scope(|| { let now = Instant::now(); let prev = std::mem::replace(&mut last_throttle_flag_reset_at, now); let Stats { count_accounted_start, count_accounted_finish, count_throttled, sum_throttled_usecs} = tenant.pagestream_throttle.reset_stats(); if count_throttled == 0 { return; } let allowed_rps = tenant.pagestream_throttle.steady_rps(); let delta = now - prev; info!( n_seconds=%format_args!("{:.3}", delta.as_secs_f64()), count_accounted = count_accounted_finish, // don't break existing log scraping count_throttled, sum_throttled_usecs, count_accounted_start, // log after pre-existing fields to not break existing log scraping allowed_rps=%format_args!("{allowed_rps:.0}"), "shard was throttled in the last n_seconds" ); }); } } /// Waits until the tenant becomes active, or returns `ControlFlow::Break()` to shut down. 
async fn wait_for_active_tenant( tenant: &Arc<TenantShard>, cancel: &CancellationToken, ) -> ControlFlow<()> { if tenant.current_state() == TenantState::Active { return ControlFlow::Continue(()); } let mut update_rx = tenant.subscribe_for_state_updates(); tokio::select! { result = update_rx.wait_for(|s| s == &TenantState::Active) => { if result.is_err() { return ControlFlow::Break(()); } debug!("Tenant state changed to active, continuing the task loop"); ControlFlow::Continue(()) }, _ = cancel.cancelled() => ControlFlow::Break(()), } } #[derive(thiserror::Error, Debug)] #[error("cancelled")] pub(crate) struct Cancelled; /// Sleeps for a random interval up to the given max value. /// /// This delay prevents a thundering herd of background tasks and will likely keep them running on /// different periods for more stable load. pub(crate) async fn sleep_random( max: Duration, cancel: &CancellationToken, ) -> Result<Duration, Cancelled> { sleep_random_range(Duration::ZERO..=max, cancel).await } /// Sleeps for a random interval in the given range. Returns the duration. pub(crate) async fn sleep_random_range( interval: RangeInclusive<Duration>, cancel: &CancellationToken, ) -> Result<Duration, Cancelled> { let delay = rand::rng().random_range(interval); if delay == Duration::ZERO { return Ok(delay); } tokio::select! { _ = cancel.cancelled() => Err(Cancelled), _ = tokio::time::sleep(delay) => Ok(delay), } } /// Sleeps for an interval with a random jitter. 
pub(crate) async fn sleep_jitter( duration: Duration, jitter: Duration, cancel: &CancellationToken, ) -> Result<Duration, Cancelled> { let from = duration.saturating_sub(jitter); let to = duration.saturating_add(jitter); sleep_random_range(from..=to, cancel).await } struct Iteration { started_at: Instant, period: Duration, kind: BackgroundLoopKind, } struct IterationResult<O> { output: O, elapsed: Duration, } impl Iteration { #[instrument(skip_all)] pub(crate) async fn run<F: Future<Output = O>, O>(self, fut: F) -> IterationResult<O> { let mut fut = pin!(fut); // Wrap `fut` into a future that logs a message every `period` so that we get a // very obvious breadcrumb in the logs _while_ a slow iteration is happening. let output = loop { match tokio::time::timeout(self.period, &mut fut).await { Ok(r) => break r, Err(_) => info!("still running"), } }; let elapsed = self.started_at.elapsed(); warn_when_period_overrun(elapsed, self.period, self.kind); IterationResult { output, elapsed } } } // NB: the `task` and `period` are used for metrics labels. pub(crate) fn warn_when_period_overrun( elapsed: Duration, period: Duration, task: BackgroundLoopKind, ) { // Duration::ZERO will happen because it's the "disable [bgtask]" value. if elapsed >= period && period != Duration::ZERO { // humantime does no significant digits clamping whereas Duration's debug is a bit more // intelligent. however it makes sense to keep the "configuration format" for period, even // though there's no way to output the actual config value. info!( ?elapsed, period = %humantime::format_duration(period), ?task, "task iteration took longer than the configured period" ); metrics::BACKGROUND_LOOP_PERIOD_OVERRUN_COUNT .with_label_values(&[task.into(), &format!("{}", period.as_secs())]) .inc(); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/gc_result.rs
pageserver/src/tenant/gc_result.rs
use std::ops::AddAssign; use std::time::Duration; use anyhow::Result; use serde::Serialize; /// /// Result of performing GC /// #[derive(Default, Serialize, Debug)] pub struct GcResult { pub layers_total: u64, pub layers_needed_by_cutoff: u64, pub layers_needed_by_pitr: u64, pub layers_needed_by_branches: u64, pub layers_needed_by_leases: u64, pub layers_not_updated: u64, pub layers_removed: u64, // # of layer files removed because they have been made obsolete by newer ondisk files. #[serde(serialize_with = "serialize_duration_as_millis")] pub elapsed: Duration, /// The layers which were garbage collected. /// /// Used in `/v1/tenant/:tenant_id/timeline/:timeline_id/do_gc` to wait for the layers to be /// dropped in tests. #[cfg(feature = "testing")] #[serde(skip)] pub(crate) doomed_layers: Vec<crate::tenant::storage_layer::Layer>, } // helper function for `GcResult`, serializing a `Duration` as an integer number of milliseconds fn serialize_duration_as_millis<S>(d: &Duration, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { d.as_millis().serialize(serializer) } impl AddAssign for GcResult { fn add_assign(&mut self, other: Self) { self.layers_total += other.layers_total; self.layers_needed_by_pitr += other.layers_needed_by_pitr; self.layers_needed_by_cutoff += other.layers_needed_by_cutoff; self.layers_needed_by_branches += other.layers_needed_by_branches; self.layers_needed_by_leases += other.layers_needed_by_leases; self.layers_not_updated += other.layers_not_updated; self.layers_removed += other.layers_removed; self.elapsed += other.elapsed; #[cfg(feature = "testing")] { let mut other = other; self.doomed_layers.append(&mut other.doomed_layers); } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/mgr.rs
pageserver/src/tenant/mgr.rs
//! This module acts as a switchboard to access different repositories managed by this //! page server. use std::borrow::Cow; use std::cmp::Ordering; use std::collections::{BTreeMap, HashMap, HashSet}; use std::ops::Deref; use std::sync::Arc; use std::time::Duration; use anyhow::Context; use camino::{Utf8DirEntry, Utf8Path, Utf8PathBuf}; use futures::StreamExt; use itertools::Itertools; use pageserver_api::key::Key; use pageserver_api::models::{DetachBehavior, LocationConfigMode}; use pageserver_api::shard::{ ShardCount, ShardIdentity, ShardIndex, ShardNumber, ShardStripeSize, TenantShardId, }; use pageserver_api::upcall_api::ReAttachResponseTenant; use rand::Rng; use rand::distr::Alphanumeric; use remote_storage::TimeoutOrCancel; use sysinfo::SystemExt; use tokio::fs; use tokio::task::JoinSet; use tokio_util::sync::CancellationToken; use tracing::*; use utils::crashsafe::path_with_suffix_extension; use utils::fs_ext::PathExt; use utils::generation::Generation; use utils::id::{TenantId, TimelineId}; use utils::{backoff, completion, crashsafe}; use super::remote_timeline_client::remote_tenant_path; use super::secondary::SecondaryTenant; use super::timeline::detach_ancestor::{self, PreparedTimelineDetach}; use super::{GlobalShutDown, TenantSharedResources}; use crate::config::PageServerConf; use crate::context::{DownloadBehavior, RequestContext}; use crate::controller_upcall_client::{ RetryForeverError, StorageControllerUpcallApi, StorageControllerUpcallClient, }; use crate::deletion_queue::DeletionQueueClient; use crate::http::routes::ACTIVE_TENANT_TIMEOUT; use crate::metrics::{LOCAL_DATA_LOSS_SUSPECTED, TENANT, TENANT_MANAGER as METRICS}; use crate::task_mgr::{BACKGROUND_RUNTIME, TaskKind}; use crate::tenant::config::{ AttachedLocationConfig, AttachmentMode, LocationConf, LocationMode, SecondaryLocationConfig, }; use crate::tenant::span::debug_assert_current_span_has_tenant_id; use crate::tenant::storage_layer::inmemory_layer; use 
crate::tenant::timeline::ShutdownMode; use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; use crate::tenant::{ AttachedTenantConf, GcError, LoadConfigError, SpawnMode, TenantShard, TenantState, }; use crate::virtual_file::MaybeFatalIo; use crate::{InitializationOrder, TEMP_FILE_SUFFIX}; /// For a tenant that appears in TenantsMap, it may either be /// - `Attached`: has a full Tenant object, is elegible to service /// reads and ingest WAL. /// - `Secondary`: is only keeping a local cache warm. /// /// Secondary is a totally distinct state rather than being a mode of a `Tenant`, because /// that way we avoid having to carefully switch a tenant's ingestion etc on and off during /// its lifetime, and we can preserve some important safety invariants like `Tenant` always /// having a properly acquired generation (Secondary doesn't need a generation) #[derive(Clone)] pub(crate) enum TenantSlot { Attached(Arc<TenantShard>), Secondary(Arc<SecondaryTenant>), /// In this state, other administrative operations acting on the TenantId should /// block, or return a retry indicator equivalent to HTTP 503. InProgress(utils::completion::Barrier), } impl std::fmt::Debug for TenantSlot { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Attached(tenant) => write!(f, "Attached({})", tenant.current_state()), Self::Secondary(_) => write!(f, "Secondary"), Self::InProgress(_) => write!(f, "InProgress"), } } } impl TenantSlot { /// Return the `Tenant` in this slot if attached, else None fn get_attached(&self) -> Option<&Arc<TenantShard>> { match self { Self::Attached(t) => Some(t), Self::Secondary(_) => None, Self::InProgress(_) => None, } } } /// The tenants known to the pageserver. /// The enum variants are used to distinguish the different states that the pageserver can be in. pub(crate) enum TenantsMap { /// [`init_tenant_mgr`] is not done yet. Initializing, /// [`init_tenant_mgr`] is done, all on-disk tenants have been loaded. 
/// New tenants can be added using [`TenantManager::tenant_map_acquire_slot`]. Open(BTreeMap<TenantShardId, TenantSlot>), /// The pageserver has entered shutdown mode via [`TenantManager::shutdown`]. /// Existing tenants are still accessible, but no new tenants can be created. ShuttingDown(BTreeMap<TenantShardId, TenantSlot>), } /// When resolving a TenantId to a shard, we may be looking for the 0th /// shard, or we might be looking for whichever shard holds a particular page. #[derive(Copy, Clone)] pub(crate) enum ShardSelector { /// Only return the 0th shard, if it is present. If a non-0th shard is present, /// ignore it. Zero, /// Pick the shard that holds this key Page(Key), /// The shard ID is known: pick the given shard Known(ShardIndex), } /// A convenience for use with the re_attach ControllerUpcallClient function: rather /// than the serializable struct, we build this enum that encapsulates /// the invariant that attached tenants always have generations. /// /// This represents the subset of a LocationConfig that we receive during re-attach. pub(crate) enum TenantStartupMode { Attached((AttachmentMode, Generation, ShardStripeSize)), Secondary, } impl TenantStartupMode { /// Return the generation & mode that should be used when starting /// this tenant. /// /// If this returns None, the re-attach struct is in an invalid state and /// should be ignored in the response. 
fn from_reattach_tenant(rart: ReAttachResponseTenant) -> Option<Self> { match (rart.mode, rart.r#gen) { (LocationConfigMode::Detached, _) => None, (LocationConfigMode::Secondary, _) => Some(Self::Secondary), (LocationConfigMode::AttachedMulti, Some(g)) => Some(Self::Attached(( AttachmentMode::Multi, Generation::new(g), rart.stripe_size, ))), (LocationConfigMode::AttachedSingle, Some(g)) => Some(Self::Attached(( AttachmentMode::Single, Generation::new(g), rart.stripe_size, ))), (LocationConfigMode::AttachedStale, Some(g)) => Some(Self::Attached(( AttachmentMode::Stale, Generation::new(g), rart.stripe_size, ))), _ => { tracing::warn!( "Received invalid re-attach state for tenant {}: {rart:?}", rart.id ); None } } } } /// Result type for looking up a TenantId to a specific shard pub(crate) enum ShardResolveResult { NotFound, Found(Arc<TenantShard>), // Wait for this barrrier, then query again InProgress(utils::completion::Barrier), } impl TenantsMap { /// Convenience function for typical usage, where we want to get a `Tenant` object, for /// working with attached tenants. If the TenantId is in the map but in Secondary state, /// None is returned. pub(crate) fn get(&self, tenant_shard_id: &TenantShardId) -> Option<&Arc<TenantShard>> { match self { TenantsMap::Initializing => None, TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => { m.get(tenant_shard_id).and_then(|slot| slot.get_attached()) } } } #[cfg(all(debug_assertions, not(test)))] pub(crate) fn len(&self) -> usize { match self { TenantsMap::Initializing => 0, TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => m.len(), } } } /// Precursor to deletion of a tenant dir: we do a fast rename to a tmp path, and then /// the slower actual deletion in the background. /// /// This is "safe" in that that it won't leave behind a partially deleted directory /// at the original path, because we rename with TEMP_FILE_SUFFIX before starting deleting /// the contents. 
/// /// This is pageserver-specific, as it relies on future processes after a crash to check /// for TEMP_FILE_SUFFIX when loading things. async fn safe_rename_tenant_dir(path: impl AsRef<Utf8Path>) -> std::io::Result<Utf8PathBuf> { let parent = path .as_ref() .parent() // It is invalid to call this function with a relative path. Tenant directories // should always have a parent. .ok_or(std::io::Error::new( std::io::ErrorKind::InvalidInput, "Path must be absolute", ))?; let rand_suffix = rand::rng() .sample_iter(&Alphanumeric) .take(8) .map(char::from) .collect::<String>() + TEMP_FILE_SUFFIX; let tmp_path = path_with_suffix_extension(&path, &rand_suffix); fs::rename(path.as_ref(), &tmp_path).await?; fs::File::open(parent) .await? .sync_all() .await .maybe_fatal_err("safe_rename_tenant_dir")?; Ok(tmp_path) } /// See [`Self::spawn`]. #[derive(Clone, Default)] pub struct BackgroundPurges(tokio_util::task::TaskTracker); impl BackgroundPurges { /// When we have moved a tenant's content to a temporary directory, we may delete it lazily in /// the background, and thereby avoid blocking any API requests on this deletion completing. /// /// Although we are cleaning up the tenant, this task is not meant to be bound by the lifetime of the tenant in memory. /// Thus the [`BackgroundPurges`] type to keep track of these tasks. pub fn spawn(&self, tmp_path: Utf8PathBuf) { // because on shutdown we close and wait, we are misusing TaskTracker a bit. // // so first acquire a token, then check if the tracker has been closed. the tracker might get closed // right after, but at least the shutdown will wait for what we are spawning next. 
let token = self.0.token(); if self.0.is_closed() { warn!( %tmp_path, "trying to spawn background purge during shutdown, ignoring" ); return; } let span = info_span!(parent: None, "background_purge", %tmp_path); let task = move || { let _token = token; let _entered = span.entered(); if let Err(error) = std::fs::remove_dir_all(tmp_path.as_path()) { // should we fatal_io_error here? warn!(%error, "failed to purge tenant directory"); } }; BACKGROUND_RUNTIME.spawn_blocking(task); } /// When this future completes, all background purges have completed. /// The first poll of the future will already lock out new background purges spawned via [`Self::spawn`]. /// /// Concurrent calls will coalesce. /// /// # Cancellation-Safety /// /// If this future is dropped before polled to completion, concurrent and subsequent /// instances of this future will continue to be correct. #[instrument(skip_all)] pub async fn shutdown(&self) { // forbid new tasks (can be called many times) self.0.close(); self.0.wait().await; } } /// Responsible for storing and mutating the collection of all tenants /// that this pageserver has state for. /// /// Every Tenant and SecondaryTenant instance lives inside the TenantManager. /// /// The most important role of the TenantManager is to prevent conflicts: e.g. trying to attach /// the same tenant twice concurrently, or trying to configure the same tenant into secondary /// and attached modes concurrently. pub struct TenantManager { conf: &'static PageServerConf, tenants: std::sync::RwLock<TenantsMap>, resources: TenantSharedResources, // Long-running operations that happen outside of a [`Tenant`] lifetime should respect this token. // This is for edge cases like tenant deletion. In normal cases (within a Tenant lifetime), // tenants have their own cancellation tokens, which we fire individually in [`Self::shutdown`], or // when the tenant detaches. 
cancel: CancellationToken, background_purges: BackgroundPurges, } fn emergency_generations( tenant_confs: &HashMap<TenantShardId, Result<LocationConf, LoadConfigError>>, ) -> HashMap<TenantShardId, TenantStartupMode> { tenant_confs .iter() .filter_map(|(tid, lc)| { let lc = match lc { Ok(lc) => lc, Err(_) => return None, }; Some(( *tid, match &lc.mode { LocationMode::Attached(alc) => TenantStartupMode::Attached(( alc.attach_mode, alc.generation, lc.shard.stripe_size, )), LocationMode::Secondary(_) => TenantStartupMode::Secondary, }, )) }) .collect() } async fn init_load_generations( conf: &'static PageServerConf, tenant_confs: &HashMap<TenantShardId, Result<LocationConf, LoadConfigError>>, resources: &TenantSharedResources, cancel: &CancellationToken, ) -> anyhow::Result<Option<HashMap<TenantShardId, TenantStartupMode>>> { let generations = if conf.control_plane_emergency_mode { error!( "Emergency mode! Tenants will be attached unsafely using their last known generation" ); emergency_generations(tenant_confs) } else { let client = StorageControllerUpcallClient::new(conf, cancel); info!("Calling {} API to re-attach tenants", client.base_url()); // If we are configured to use the control plane API, then it is the source of truth for what tenants to load. let empty_local_disk = tenant_confs.is_empty(); match client.re_attach(conf, empty_local_disk).await { Ok(tenants) => tenants .into_iter() .flat_map(|(id, rart)| { TenantStartupMode::from_reattach_tenant(rart).map(|tsm| (id, tsm)) }) .collect(), Err(RetryForeverError::ShuttingDown) => { anyhow::bail!("Shut down while waiting for control plane re-attach response") } } }; // The deletion queue needs to know about the startup attachment state to decide which (if any) stored // deletion list entries may still be valid. We provide that by pushing a recovery operation into // the queue. 
Sequential processing of the queue ensures that recovery is done before any new tenant deletions
    // are processed, even though we don't block on recovery completing here.
    let attached_tenants = generations
        .iter()
        .flat_map(|(id, start_mode)| {
            match start_mode {
                // Only attached tenants have a generation relevant to the deletion queue.
                TenantStartupMode::Attached((_mode, generation, _stripe_size)) => Some(generation),
                TenantStartupMode::Secondary => None,
            }
            .map(|gen_| (*id, *gen_))
        })
        .collect();
    resources.deletion_queue_client.recover(attached_tenants)?;

    Ok(Some(generations))
}

/// Given a directory discovered in the pageserver's tenants/ directory, attempt
/// to load a tenant config from it.
///
/// If we cleaned up something expected (like an empty dir or a temp dir), return None.
fn load_tenant_config(
    conf: &'static PageServerConf,
    tenant_shard_id: TenantShardId,
    dentry: Utf8DirEntry,
) -> Option<Result<LocationConf, LoadConfigError>> {
    let tenant_dir_path = dentry.path().to_path_buf();
    if crate::is_temporary(&tenant_dir_path) {
        info!("Found temporary tenant directory, removing: {tenant_dir_path}");
        // No need to use safe_remove_tenant_dir_all because this is already
        // a temporary path
        std::fs::remove_dir_all(&tenant_dir_path).fatal_err("delete temporary tenant dir");
        return None;
    }

    // This case happens if we crash during attachment before writing a config into the dir
    let is_empty = tenant_dir_path
        .is_empty_dir()
        .fatal_err("Checking for empty tenant dir");
    if is_empty {
        info!("removing empty tenant directory {tenant_dir_path:?}");
        std::fs::remove_dir(&tenant_dir_path).fatal_err("delete empty tenant dir");
        return None;
    }

    Some(TenantShard::load_tenant_config(conf, &tenant_shard_id))
}

/// Initial stage of load: walk the local tenants directory, clean up any temp files,
/// and load configurations for the tenants we found.
///
/// Do this in parallel, because we expect 10k+ tenants, so serial execution can take
/// seconds even on reasonably fast drives.
async fn init_load_tenant_configs(
    conf: &'static PageServerConf,
) -> HashMap<TenantShardId, Result<LocationConf, LoadConfigError>> {
    let tenants_dir = conf.tenants_path();

    // Reading the directory listing is synchronous filesystem I/O: keep it off the
    // async executor by running it on a blocking thread.
    let dentries = tokio::task::spawn_blocking(move || -> Vec<Utf8DirEntry> {
        let context = format!("read tenants dir {tenants_dir}");
        let dir_entries = tenants_dir.read_dir_utf8().fatal_err(&context);

        dir_entries
            .collect::<Result<Vec<_>, std::io::Error>>()
            .fatal_err(&context)
    })
    .await
    .expect("Config load task panicked");

    let mut configs = HashMap::new();

    let mut join_set = JoinSet::new();
    for dentry in dentries {
        let tenant_shard_id = match dentry.file_name().parse::<TenantShardId>() {
            Ok(id) => id,
            Err(_) => {
                // Non-fatal: skip directory entries whose names don't parse as a shard id.
                warn!(
                    "Invalid tenant path (garbage in our repo directory?): '{}'",
                    dentry.file_name()
                );
                continue;
            }
        };

        // Fan out per-tenant config loading (more synchronous I/O) onto blocking threads.
        join_set.spawn_blocking(move || {
            (
                tenant_shard_id,
                load_tenant_config(conf, tenant_shard_id, dentry),
            )
        });
    }

    while let Some(r) = join_set.join_next().await {
        let (tenant_shard_id, tenant_config) = r.expect("Panic in config load task");
        // `None` means load_tenant_config cleaned up an expected leftover
        // (temp/empty dir); there is nothing to insert in that case.
        if let Some(tenant_config) = tenant_config {
            configs.insert(tenant_shard_id, tenant_config);
        }
    }

    configs
}

#[derive(Debug, thiserror::Error)]
pub(crate) enum DeleteTenantError {
    #[error("Tenant map slot error {0}")]
    SlotError(#[from] TenantSlotError),

    #[error("Cancelled")]
    Cancelled,

    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

/// Initialize repositories at `Initializing` state.
pub fn init(
    conf: &'static PageServerConf,
    background_purges: BackgroundPurges,
    resources: TenantSharedResources,
    cancel: CancellationToken,
) -> TenantManager {
    TenantManager {
        conf,
        tenants: std::sync::RwLock::new(TenantsMap::Initializing),
        resources,
        cancel,
        background_purges,
    }
}

/// Transition repositories from `Initializing` state to `Open` state with locally available timelines.
/// Timelines that are only partially available locally (remote storage has more data than this pageserver) /// are scheduled for download and added to the tenant once download is completed. #[instrument(skip_all)] pub async fn init_tenant_mgr( tenant_manager: Arc<TenantManager>, init_order: InitializationOrder, ) -> anyhow::Result<()> { debug_assert!(matches!( *tenant_manager.tenants.read().unwrap(), TenantsMap::Initializing )); let mut tenants = BTreeMap::new(); let ctx = RequestContext::todo_child(TaskKind::Startup, DownloadBehavior::Warn); let conf = tenant_manager.conf; let resources = &tenant_manager.resources; let cancel = &tenant_manager.cancel; let background_purges = &tenant_manager.background_purges; // Initialize dynamic limits that depend on system resources let system_memory = sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_memory()) .total_memory(); let max_ephemeral_layer_bytes = conf.ephemeral_bytes_per_memory_kb as u64 * (system_memory / 1024); tracing::info!( "Initialized ephemeral layer size limit to {max_ephemeral_layer_bytes}, for {system_memory} bytes of memory" ); inmemory_layer::GLOBAL_RESOURCES.max_dirty_bytes.store( max_ephemeral_layer_bytes, std::sync::atomic::Ordering::Relaxed, ); // Scan local filesystem for attached tenants let tenant_configs = init_load_tenant_configs(conf).await; // Determine which tenants are to be secondary or attached, and in which generation let tenant_modes = init_load_generations(conf, &tenant_configs, resources, cancel).await?; // Hadron local SSD check: Raise an alert if our local filesystem does not contain any tenants but the re-attach request returned tenants. // This can happen if the PS suffered a Kubernetes node failure resulting in loss of all local data, but recovered quickly on another node // so the Storage Controller has not had the time to move tenants out. 
let data_loss_suspected = if let Some(tenant_modes) = &tenant_modes { tenant_configs.is_empty() && !tenant_modes.is_empty() } else { false }; if data_loss_suspected { tracing::error!( "Local data loss suspected: no tenants found on local filesystem, but re-attach request returned tenants" ); } LOCAL_DATA_LOSS_SUSPECTED.set(if data_loss_suspected { 1 } else { 0 }); tracing::info!( "Attaching {} tenants at startup, warming up {} at a time", tenant_configs.len(), conf.concurrent_tenant_warmup.initial_permits() ); TENANT.startup_scheduled.inc_by(tenant_configs.len() as u64); // Accumulate futures for writing tenant configs, so that we can execute in parallel let mut config_write_futs = Vec::new(); // Update the location configs according to the re-attach response and persist them to disk tracing::info!("Updating {} location configs", tenant_configs.len()); for (tenant_shard_id, location_conf) in tenant_configs { let tenant_dir_path = conf.tenant_path(&tenant_shard_id); let mut location_conf = match location_conf { Ok(l) => l, Err(e) => { // This should only happen in the case of a serialization bug or critical local I/O error: we cannot load this tenant error!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Failed to load tenant config, failed to {e:#}"); continue; } }; // FIXME: if we were attached, and get demoted to secondary on re-attach, we // don't have a place to get a config. // (https://github.com/neondatabase/neon/issues/5377) const DEFAULT_SECONDARY_CONF: SecondaryLocationConfig = SecondaryLocationConfig { warm: true }; if let Some(tenant_modes) = &tenant_modes { // We have a generation map: treat it as the authority for whether // this tenant is really attached. 
match tenant_modes.get(&tenant_shard_id) { None => { info!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Detaching tenant, control plane omitted it in re-attach response"); match safe_rename_tenant_dir(&tenant_dir_path).await { Ok(tmp_path) => { background_purges.spawn(tmp_path); } Err(e) => { error!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Failed to move detached tenant directory '{tenant_dir_path}': {e:?}"); } }; // We deleted local content: move on to next tenant, don't try and spawn this one. continue; } Some(TenantStartupMode::Secondary) => { if !matches!(location_conf.mode, LocationMode::Secondary(_)) { location_conf.mode = LocationMode::Secondary(DEFAULT_SECONDARY_CONF); } } Some(TenantStartupMode::Attached((attach_mode, generation, stripe_size))) => { let old_gen_higher = match &location_conf.mode { LocationMode::Attached(AttachedLocationConfig { generation: old_generation, attach_mode: _attach_mode, }) => { if old_generation > generation { Some(old_generation) } else { None } } _ => None, }; if let Some(old_generation) = old_gen_higher { tracing::error!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Control plane gave decreasing generation ({generation:?}) in re-attach response for tenant that was attached in generation {:?}, demoting to secondary", old_generation ); // We cannot safely attach this tenant given a bogus generation number, but let's avoid throwing away // local disk content: demote to secondary rather than detaching. 
                        location_conf.mode = LocationMode::Secondary(DEFAULT_SECONDARY_CONF);
                    } else {
                        location_conf.attach_in_generation(*attach_mode, *generation, *stripe_size);
                    }
                }
            }
        } else {
            // Legacy mode: no generation information, any tenant present
            // on local disk may activate
            info!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Starting tenant in legacy mode, no generation",);
        };

        // Presence of a generation number implies attachment: attach the tenant
        // if it wasn't already, and apply the generation number.
        config_write_futs.push(async move {
            let r = TenantShard::persist_tenant_config(conf, &tenant_shard_id, &location_conf).await;
            (tenant_shard_id, location_conf, r)
        });
    }

    // Execute config writes with concurrency, to avoid bottlenecking on local FS write latency
    tracing::info!(
        "Writing {} location config files...",
        config_write_futs.len()
    );
    let config_write_results = futures::stream::iter(config_write_futs)
        .buffer_unordered(16)
        .collect::<Vec<_>>()
        .await;

    tracing::info!(
        "Spawning {} tenant shard locations...",
        config_write_results.len()
    );
    // For those shards that have live configurations, construct `Tenant` or `SecondaryTenant` objects and start them running
    for (tenant_shard_id, location_conf, config_write_result) in config_write_results {
        // Writing a config to local disk is foundational to starting up tenants: panic if we can't.
config_write_result.fatal_err("write tenant shard config file"); let tenant_dir_path = conf.tenant_path(&tenant_shard_id); let shard_identity = location_conf.shard; let slot = match location_conf.mode { LocationMode::Attached(attached_conf) => TenantSlot::Attached( tenant_spawn( conf, tenant_shard_id, &tenant_dir_path, resources.clone(), AttachedTenantConf::new(conf, location_conf.tenant_conf, attached_conf), shard_identity, Some(init_order.clone()), SpawnMode::Lazy, &ctx, ) .expect("global shutdown during init_tenant_mgr cannot happen"), ), LocationMode::Secondary(secondary_conf) => { info!( tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), "Starting secondary tenant" ); TenantSlot::Secondary(SecondaryTenant::new( tenant_shard_id, shard_identity, location_conf.tenant_conf, &secondary_conf, )) } }; METRICS.slot_inserted(&slot); tenants.insert(tenant_shard_id, slot); } info!("Processed {} local tenants at startup", tenants.len()); let mut tenant_map = tenant_manager.tenants.write().unwrap(); *tenant_map = TenantsMap::Open(tenants); Ok(()) } /// Wrapper for Tenant::spawn that checks invariants before running #[allow(clippy::too_many_arguments)] fn tenant_spawn( conf: &'static PageServerConf, tenant_shard_id: TenantShardId, tenant_path: &Utf8Path, resources: TenantSharedResources, location_conf: AttachedTenantConf, shard_identity: ShardIdentity, init_order: Option<InitializationOrder>, mode: SpawnMode, ctx: &RequestContext, ) -> Result<Arc<TenantShard>, GlobalShutDown> { // All these conditions should have been satisfied by our caller: the tenant dir exists, is a well formed // path, and contains a configuration file. Assertions that do synchronous I/O are limited to debug mode // to avoid impacting prod runtime performance. 
    assert!(!crate::is_temporary(tenant_path));
    debug_assert!(tenant_path.is_dir());
    debug_assert!(
        conf.tenant_location_config_path(&tenant_shard_id)
            .try_exists()
            .unwrap()
    );

    TenantShard::spawn(
        conf,
        tenant_shard_id,
        resources,
        location_conf,
        shard_identity,
        init_order,
        mode,
        ctx,
    )
}

#[derive(thiserror::Error, Debug)]
pub(crate) enum UpsertLocationError {
    #[error("Bad config request: {0}")]
    BadRequest(anyhow::Error),

    #[error("Cannot change config in this state: {0}")]
    Unavailable(#[from] TenantMapError),

    #[error("Tenant is already being modified")]
    InProgress,

    #[error("Failed to flush: {0}")]
    Flush(anyhow::Error),

    /// This error variant is for unexpected situations (soft assertions) where the system is in an unexpected state.
    #[error("Internal error: {0}")]
    InternalError(anyhow::Error),
}

impl TenantManager {
    /// Convenience function so that anyone with a TenantManager can get at the global configuration, without
    /// having to pass it around everywhere as a separate object.
    pub(crate) fn get_conf(&self) -> &'static PageServerConf {
        self.conf
    }

    /// Gets the attached tenant from the in-memory data, erroring if it's absent, in secondary mode, or currently
    /// undergoing a state change (i.e. slot is InProgress).
    ///
    /// The returned TenantShard is not guaranteed to be active: check its status after obtaining it, or
    /// use [`TenantShard::wait_to_become_active`] before using it if you will do I/O on it.
pub(crate) fn get_attached_tenant_shard( &self, tenant_shard_id: TenantShardId, ) -> Result<Arc<TenantShard>, GetTenantError> { let locked = self.tenants.read().unwrap(); let peek_slot = tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read)?; match peek_slot { Some(TenantSlot::Attached(tenant)) => Ok(Arc::clone(tenant)), Some(TenantSlot::InProgress(_)) => Err(GetTenantError::NotActive(tenant_shard_id)), None | Some(TenantSlot::Secondary(_)) => { Err(GetTenantError::ShardNotFound(tenant_shard_id)) } } } pub(crate) fn get_secondary_tenant_shard( &self, tenant_shard_id: TenantShardId, ) -> Option<Arc<SecondaryTenant>> { let locked = self.tenants.read().unwrap(); let peek_slot = tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read) .ok() .flatten(); match peek_slot { Some(TenantSlot::Secondary(s)) => Some(s.clone()), _ => None, } } /// Whether the `TenantManager` is responsible for the tenant shard pub(crate) fn manages_tenant_shard(&self, tenant_shard_id: TenantShardId) -> bool { let locked = self.tenants.read().unwrap();
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/remote_timeline_client.rs
pageserver/src/tenant/remote_timeline_client.rs
//! This module manages synchronizing local FS with remote storage. //! //! # Overview //! //! * [`RemoteTimelineClient`] provides functions related to upload/download of a particular timeline. //! It contains a queue of pending uploads, and manages the queue, performing uploads in parallel //! when it's safe to do so. //! //! * Stand-alone function, [`list_remote_timelines`], to get list of timelines of a tenant. //! //! These functions use the low-level remote storage client, [`remote_storage::RemoteStorage`]. //! //! # APIs & How To Use Them //! //! There is a [RemoteTimelineClient] for each [Timeline][`crate::tenant::Timeline`] in the system, //! unless the pageserver is configured without remote storage. //! //! We allocate the client instance in [Timeline][`crate::tenant::Timeline`], i.e., //! either in [`crate::tenant::mgr`] during startup or when creating a new //! timeline. //! However, the client does not become ready for use until we've initialized its upload queue: //! //! - For timelines that already have some state on the remote storage, we use //! [`RemoteTimelineClient::init_upload_queue`] . //! - For newly created timelines, we use //! [`RemoteTimelineClient::init_upload_queue_for_empty_remote`]. //! //! The former takes the remote's [`IndexPart`] as an argument, possibly retrieved //! using [`list_remote_timelines`]. We'll elaborate on [`IndexPart`] in the next section. //! //! Whenever we've created/updated/deleted a file in a timeline directory, we schedule //! the corresponding remote operation with the timeline's [`RemoteTimelineClient`]: //! //! - [`RemoteTimelineClient::schedule_layer_file_upload`] when we've created a new layer file. //! - [`RemoteTimelineClient::schedule_index_upload_for_metadata_update`] when we've updated the timeline metadata file. //! - [`RemoteTimelineClient::schedule_index_upload_for_file_changes`] to upload an updated index file, after we've scheduled file uploads //! 
- [`RemoteTimelineClient::schedule_layer_file_deletion`] when we've deleted one or more layer files.
//!
//! Internally, these functions create [`UploadOp`]s and put them in a queue.
//!
//! There are also APIs for downloading files.
//! These are not part of the aforementioned queuing and will not be discussed
//! further here, except in the section covering tenant attach.
//!
//! # Remote Storage Structure & [`IndexPart`] Index File
//!
//! The "directory structure" in the remote storage mirrors the local directory structure, with paths
//! like `tenants/<tenant_id>/timelines/<timeline_id>/<layer filename>`.
//! Yet instead of keeping the `metadata` file remotely, we wrap it with more
//! data in an "index file" aka [`IndexPart`], containing the list of **all** remote
//! files for a given timeline.
//! If a file is not referenced from [`IndexPart`], it's not part of the remote storage state.
//!
//! Having the `IndexPart` also avoids expensive and slow `S3 list` commands.
//!
//! # Consistency
//!
//! To have a consistent remote structure, it's important that uploads and
//! deletions are performed in the right order. For example, the index file
//! contains a list of layer files, so it must not be uploaded until all the
//! layer files that are in its list have been successfully uploaded.
//!
//! The contract between client and its user is that the user is responsible for
//! scheduling operations in an order that keeps the remote consistent as
//! described above.
//!
//! From the user's perspective, the operations are executed sequentially.
//! Internally, the client knows which operations can be performed in parallel,
//! and which operations act like a "barrier" that requires preceding operations
//! to finish. The calling code just needs to call the schedule-functions in the
//! correct order, and the client will parallelize the operations in a way that
//! is safe. For more details, see `UploadOp::can_bypass`.
//!
//! 
All of this relies on the following invariants:
//!
//! - We rely on read-after-write consistency in the remote storage.
//! - Layer files are immutable.
//!
//! NB: Pageserver assumes that it has exclusive write access to the tenant in remote
//! storage. Different tenants can be attached to different pageservers, but if the
//! same tenant is attached to two pageservers at the same time, they will overwrite
//! each other's index file updates, and confusion will ensue. There's no interlock or
//! mechanism to detect that in the pageserver; we rely on the control plane to ensure
//! that that doesn't happen.
//!
//! ## Implementation Note
//!
//! The *actual* remote state lags behind the *desired* remote state while
//! there are in-flight operations.
//! We keep track of the desired remote state in [`UploadQueueInitialized::dirty`].
//! It is initialized based on the [`IndexPart`] that was passed during init
//! and updated with every `schedule_*` function call.
//! All this is necessary to compute the future [`IndexPart`]s
//! when scheduling an operation while other operations that also affect the
//! remote [`IndexPart`] are in flight.
//!
//! # Retries & Error Handling
//!
//! The client retries operations indefinitely, using exponential back-off.
//! There is no way to force a retry, i.e., interrupt the back-off.
//! This could be built easily.
//!
//! # Cancellation
//!
//! The operations execute as plain [`task_mgr`] tasks, scoped to
//! the client's tenant and timeline.
//! Dropping the client will drop queued operations but not executing operations.
//! These will complete unless the `task_mgr` tasks are cancelled using `task_mgr`
//! APIs, e.g., during pageserver shutdown, timeline delete, or tenant detach.
//!
//! # Completion
//!
//! Once an operation has completed, we update [`UploadQueueInitialized::clean`] immediately,
//! and submit a request through the DeletionQueue to update
//! 
[`UploadQueueInitialized::visible_remote_consistent_lsn`] after it has
//! validated that our generation is not stale. It is this visible value
//! that is advertised to safekeepers as a signal that they can
//! delete the WAL up to that LSN.
//!
//! The [`RemoteTimelineClient::wait_completion`] method can be used to wait
//! for all pending operations to complete. It does not prevent more
//! operations from getting scheduled.
//!
//! # Crash Consistency
//!
//! We do not persist the upload queue state.
//! If we drop the client, or crash, all unfinished operations are lost.
//!
//! To recover, the following steps need to be taken:
//! - Retrieve the current remote [`IndexPart`]. This gives us a
//!   consistent remote state, assuming the user scheduled the operations in
//!   the correct order.
//! - Initiate the upload queue with that [`IndexPart`].
//! - Reschedule all lost operations by comparing the local filesystem state
//!   and remote state as per [`IndexPart`]. This is done in
//!   [`TenantShard::timeline_init_and_sync`].
//!
//! Note that if we crash during file deletion between the index update
//! that removes the file from the list of files, and deleting the remote file,
//! the file is leaked in the remote storage. Similarly, if a new file is created
//! and uploaded, but the pageserver dies permanently before updating the
//! remote index file, the new file is leaked in remote storage. We accept and
//! tolerate that for now.
//! Note further that we cannot easily fix this by scheduling deletes for every
//! file that is present only on the remote, because we cannot distinguish the
//! following two cases:
//! - (1) We had the file locally, deleted it locally, scheduled a remote delete,
//!   but crashed before it finished remotely.
//! - (2) We never had the file locally because we haven't on-demand downloaded
//!   it yet.
//!
//! # Downloads
//!
//! In addition to the upload queue, [`RemoteTimelineClient`] has functions for
//! 
downloading files from the remote storage. Downloads are performed immediately //! against the `RemoteStorage`, independently of the upload queue. //! //! When we attach a tenant, we perform the following steps: //! - create `Tenant` object in `TenantState::Attaching` state //! - List timelines that are present in remote storage, and for each: //! - download their remote [`IndexPart`]s //! - create `Timeline` struct and a `RemoteTimelineClient` //! - initialize the client's upload queue with its `IndexPart` //! - schedule uploads for layers that are only present locally. //! - After the above is done for each timeline, open the tenant for business by //! transitioning it from `TenantState::Attaching` to `TenantState::Active` state. //! This starts the timelines' WAL-receivers and the tenant's GC & Compaction loops. //! //! # Operating Without Remote Storage //! //! If no remote storage configuration is provided, the [`RemoteTimelineClient`] is //! not created and the uploads are skipped. //! //! [`TenantShard::timeline_init_and_sync`]: super::TenantShard::timeline_init_and_sync //! 
[`Timeline::load_layer_map`]: super::Timeline::load_layer_map pub(crate) mod download; pub mod index; pub mod manifest; pub(crate) mod upload; use std::collections::{HashMap, HashSet, VecDeque}; use std::ops::DerefMut; use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::{Arc, Mutex, OnceLock}; use std::time::Duration; use anyhow::Context; use camino::Utf8Path; use chrono::{NaiveDateTime, Utc}; pub(crate) use download::{ download_index_part, download_initdb_tar_zst, download_tenant_manifest, is_temp_download_file, list_remote_tenant_shards, list_remote_timelines, }; use index::GcCompactionState; pub(crate) use index::LayerFileMetadata; use pageserver_api::models::{RelSizeMigration, TimelineArchivalState, TimelineVisibilityState}; use pageserver_api::shard::{ShardIndex, TenantShardId}; use regex::Regex; use remote_storage::{ DownloadError, GenericRemoteStorage, ListingMode, RemotePath, TimeoutOrCancel, }; use scopeguard::ScopeGuard; use tokio_util::sync::CancellationToken; use tracing::{Instrument, debug, error, info, info_span, instrument, warn}; pub(crate) use upload::upload_initdb_dir; use utils::backoff::{ self, DEFAULT_BASE_BACKOFF_SECONDS, DEFAULT_MAX_BACKOFF_SECONDS, exponential_backoff, }; use utils::id::{TenantId, TimelineId}; use utils::lsn::Lsn; use utils::pausable_failpoint; use utils::shard::ShardNumber; use self::index::IndexPart; use super::config::AttachedLocationConfig; use super::metadata::MetadataUpdate; use super::storage_layer::{Layer, LayerName, ResidentLayer}; use super::timeline::import_pgdata; use super::upload_queue::{NotInitialized, SetDeletedFlagProgress}; use super::{DeleteTimelineError, Generation}; use crate::config::PageServerConf; use crate::context::RequestContext; use crate::deletion_queue::{DeletionQueueClient, DeletionQueueError}; use crate::metrics::{ MeasureRemoteOp, REMOTE_ONDEMAND_DOWNLOADED_BYTES, REMOTE_ONDEMAND_DOWNLOADED_LAYERS, RemoteOpFileKind, RemoteOpKind, RemoteTimelineClientMetrics, 
    RemoteTimelineClientMetricsCallTrackSize,
};
use crate::task_mgr::{BACKGROUND_RUNTIME, TaskKind, shutdown_token};
use crate::tenant::metadata::TimelineMetadata;
use crate::tenant::remote_timeline_client::download::download_retry;
use crate::tenant::storage_layer::AsLayerDesc;
use crate::tenant::upload_queue::{
    Delete, OpType, UploadOp, UploadQueue, UploadQueueInitialized, UploadQueueStopped,
    UploadQueueStoppedDeletable, UploadTask,
};
use crate::tenant::{TIMELINES_SEGMENT_NAME, debug_assert_current_span_has_tenant_and_timeline_id};
use crate::{TENANT_HEATMAP_BASENAME, task_mgr};

// Occasional network issues and such can cause remote operations to fail, and
// that's expected. If a download fails, we log it at info-level, and retry.
// But after FAILED_DOWNLOAD_WARN_THRESHOLD retries, we start to log it at WARN
// level instead, as repeated failures can mean a more serious problem. If it
// fails more than FAILED_REMOTE_OP_RETRIES times, we give up.
pub(crate) const FAILED_DOWNLOAD_WARN_THRESHOLD: u32 = 3;

pub(crate) const FAILED_REMOTE_OP_RETRIES: u32 = 10;

// Similarly log failed uploads and deletions at WARN level, after this many
// retries. Uploads and deletions are retried forever, though.
pub(crate) const FAILED_UPLOAD_WARN_THRESHOLD: u32 = 3;

pub(crate) const INITDB_PATH: &str = "initdb.tar.zst";
pub(crate) const INITDB_PRESERVED_PATH: &str = "initdb-preserved.tar.zst";

/// Default buffer size when interfacing with [`tokio::fs::File`].
pub(crate) const BUFFER_SIZE: usize = 32 * 1024;

/// Doing non-essential flushes of deletion queue is subject to this timeout, after
/// which we warn and skip.
const DELETION_QUEUE_FLUSH_TIMEOUT: Duration = Duration::from_secs(10); pub enum MaybeDeletedIndexPart { IndexPart(IndexPart), Deleted(IndexPart), } #[derive(Debug, thiserror::Error)] pub enum PersistIndexPartWithDeletedFlagError { #[error("another task is already setting the deleted_flag, started at {0:?}")] AlreadyInProgress(NaiveDateTime), #[error("the deleted_flag was already set, value is {0:?}")] AlreadyDeleted(NaiveDateTime), #[error(transparent)] Other(#[from] anyhow::Error), } #[derive(Debug, thiserror::Error)] pub enum WaitCompletionError { #[error(transparent)] NotInitialized(NotInitialized), #[error("wait_completion aborted because upload queue was stopped")] UploadQueueShutDownOrStopped, } #[derive(Debug, thiserror::Error)] #[error("Upload queue either in unexpected state or hasn't downloaded manifest yet")] pub struct UploadQueueNotReadyError; #[derive(Debug, thiserror::Error)] pub enum ShutdownIfArchivedError { #[error(transparent)] NotInitialized(NotInitialized), #[error("timeline is not archived")] NotArchived, } /// Behavioral modes that enable seamless live migration. /// /// See docs/rfcs/028-pageserver-migration.md to understand how these fit in. struct RemoteTimelineClientConfig { /// If this is false, then update to remote_consistent_lsn are dropped rather /// than being submitted to DeletionQueue for validation. This behavior is /// used when a tenant attachment is known to have a stale generation number, /// such that validation attempts will always fail. This is not necessary /// for correctness, but avoids spamming error statistics with failed validations /// when doing migrations of tenants. process_remote_consistent_lsn_updates: bool, /// If this is true, then object deletions are held in a buffer in RemoteTimelineClient /// rather than being submitted to the DeletionQueue. 
This behavior is used when a tenant /// is known to be multi-attached, in order to avoid disrupting other attached tenants /// whose generations' metadata refers to the deleted objects. block_deletions: bool, } /// RemoteTimelineClientConfig's state is entirely driven by LocationConf, but we do /// not carry the entire LocationConf structure: it's much more than we need. The From /// impl extracts the subset of the LocationConf that is interesting to RemoteTimelineClient. impl From<&AttachedLocationConfig> for RemoteTimelineClientConfig { fn from(lc: &AttachedLocationConfig) -> Self { Self { block_deletions: !lc.may_delete_layers_hint(), process_remote_consistent_lsn_updates: lc.may_upload_layers_hint(), } } } /// A client for accessing a timeline's data in remote storage. /// /// This takes care of managing the number of connections, and balancing them /// across tenants. This also handles retries of failed uploads. /// /// Upload and delete requests are ordered so that before a deletion is /// performed, we wait for all preceding uploads to finish. This ensures sure /// that if you perform a compaction operation that reshuffles data in layer /// files, we don't have a transient state where the old files have already been /// deleted, but new files have not yet been uploaded. /// /// Similarly, this enforces an order between index-file uploads, and layer /// uploads. Before an index-file upload is performed, all preceding layer /// uploads must be finished. /// /// This also maintains a list of remote files, and automatically includes that /// in the index part file, whenever timeline metadata is uploaded. /// /// Downloads are not queued, they are performed immediately. 
pub(crate) struct RemoteTimelineClient { conf: &'static PageServerConf, runtime: tokio::runtime::Handle, tenant_shard_id: TenantShardId, timeline_id: TimelineId, generation: Generation, upload_queue: Mutex<UploadQueue>, pub(crate) metrics: Arc<RemoteTimelineClientMetrics>, storage_impl: GenericRemoteStorage, deletion_queue_client: DeletionQueueClient, /// Subset of tenant configuration used to control upload behaviors during migrations config: std::sync::RwLock<RemoteTimelineClientConfig>, cancel: CancellationToken, } impl Drop for RemoteTimelineClient { fn drop(&mut self) { debug!("dropping RemoteTimelineClient"); } } impl RemoteTimelineClient { /// /// Create a remote storage client for given timeline /// /// Note: the caller must initialize the upload queue before any uploads can be scheduled, /// by calling init_upload_queue. /// pub(crate) fn new( remote_storage: GenericRemoteStorage, deletion_queue_client: DeletionQueueClient, conf: &'static PageServerConf, tenant_shard_id: TenantShardId, timeline_id: TimelineId, generation: Generation, location_conf: &AttachedLocationConfig, ) -> RemoteTimelineClient { RemoteTimelineClient { conf, runtime: if cfg!(test) { // remote_timeline_client.rs tests rely on current-thread runtime tokio::runtime::Handle::current() } else { BACKGROUND_RUNTIME.handle().clone() }, tenant_shard_id, timeline_id, generation, storage_impl: remote_storage, deletion_queue_client, upload_queue: Mutex::new(UploadQueue::Uninitialized), metrics: Arc::new(RemoteTimelineClientMetrics::new( &tenant_shard_id, &timeline_id, )), config: std::sync::RwLock::new(RemoteTimelineClientConfig::from(location_conf)), cancel: CancellationToken::new(), } } /// Initialize the upload queue for a remote storage that already received /// an index file upload, i.e., it's not empty. /// The given `index_part` must be the one on the remote. 
pub fn init_upload_queue(&self, index_part: &IndexPart) -> anyhow::Result<()> { // Set the maximum number of inprogress tasks to the remote storage concurrency. There's // certainly no point in starting more upload tasks than this. let inprogress_limit = self .conf .remote_storage_config .as_ref() .map_or(0, |r| r.concurrency_limit()); let mut upload_queue = self.upload_queue.lock().unwrap(); upload_queue.initialize_with_current_remote_index_part(index_part, inprogress_limit)?; self.update_remote_physical_size_gauge(Some(index_part)); info!( "initialized upload queue from remote index with {} layer files", index_part.layer_metadata.len() ); Ok(()) } /// Initialize the upload queue for the case where the remote storage is empty, /// i.e., it doesn't have an `IndexPart`. /// /// `rel_size_v2_status` needs to be carried over during branching, and that's why /// it's passed in here. pub fn init_upload_queue_for_empty_remote( &self, local_metadata: &TimelineMetadata, rel_size_v2_migration: Option<RelSizeMigration>, rel_size_migrated_at: Option<Lsn>, ) -> anyhow::Result<()> { // Set the maximum number of inprogress tasks to the remote storage concurrency. There's // certainly no point in starting more upload tasks than this. let inprogress_limit = self .conf .remote_storage_config .as_ref() .map_or(0, |r| r.concurrency_limit()); let mut upload_queue = self.upload_queue.lock().unwrap(); let initialized_queue = upload_queue.initialize_empty_remote(local_metadata, inprogress_limit)?; initialized_queue.dirty.rel_size_migration = rel_size_v2_migration; initialized_queue.dirty.rel_size_migrated_at = rel_size_migrated_at; self.update_remote_physical_size_gauge(None); info!("initialized upload queue as empty"); Ok(()) } /// Initialize the queue in stopped state. Used in startup path /// to continue deletion operation interrupted by pageserver crash or restart. 
pub fn init_upload_queue_stopped_to_continue_deletion( &self, index_part: &IndexPart, ) -> anyhow::Result<()> { // FIXME: consider newtype for DeletedIndexPart. let deleted_at = index_part.deleted_at.ok_or(anyhow::anyhow!( "bug: it is responsibility of the caller to provide index part from MaybeDeletedIndexPart::Deleted" ))?; let inprogress_limit = self .conf .remote_storage_config .as_ref() .map_or(0, |r| r.concurrency_limit()); let mut upload_queue = self.upload_queue.lock().unwrap(); upload_queue.initialize_with_current_remote_index_part(index_part, inprogress_limit)?; self.update_remote_physical_size_gauge(Some(index_part)); self.stop_impl(&mut upload_queue); upload_queue .stopped_mut() .expect("stopped above") .deleted_at = SetDeletedFlagProgress::Successful(deleted_at); Ok(()) } /// Notify this client of a change to its parent tenant's config, as this may cause us to /// take action (unblocking deletions when transitioning from AttachedMulti to AttachedSingle) pub(super) fn update_config(&self, location_conf: &AttachedLocationConfig) { let new_conf = RemoteTimelineClientConfig::from(location_conf); let unblocked = !new_conf.block_deletions; // Update config before draining deletions, so that we don't race with more being // inserted. This can result in deletions happening our of order, but that does not // violate any invariants: deletions only need to be ordered relative to upload of the index // that dereferences the deleted objects, and we are not changing that order. 
*self.config.write().unwrap() = new_conf; if unblocked { // If we may now delete layers, drain any that were blocked in our old // configuration state let mut queue_locked = self.upload_queue.lock().unwrap(); if let Ok(queue) = queue_locked.initialized_mut() { let blocked_deletions = std::mem::take(&mut queue.blocked_deletions); for d in blocked_deletions { if let Err(e) = self.deletion_queue_client.push_layers( self.tenant_shard_id, self.timeline_id, self.generation, d.layers, ) { // This could happen if the pageserver is shut down while a tenant // is transitioning from a deletion-blocked state: we will leak some // S3 objects in this case. warn!("Failed to drain blocked deletions: {}", e); break; } } } } } /// Returns `None` if nothing is yet uplodaded, `Some(disk_consistent_lsn)` otherwise. pub fn remote_consistent_lsn_projected(&self) -> Option<Lsn> { match &mut *self.upload_queue.lock().unwrap() { UploadQueue::Uninitialized => None, UploadQueue::Initialized(q) => q.get_last_remote_consistent_lsn_projected(), UploadQueue::Stopped(UploadQueueStopped::Uninitialized) => None, UploadQueue::Stopped(UploadQueueStopped::Deletable(q)) => q .upload_queue_for_deletion .get_last_remote_consistent_lsn_projected(), } } pub fn remote_consistent_lsn_visible(&self) -> Option<Lsn> { match &mut *self.upload_queue.lock().unwrap() { UploadQueue::Uninitialized => None, UploadQueue::Initialized(q) => Some(q.get_last_remote_consistent_lsn_visible()), UploadQueue::Stopped(UploadQueueStopped::Uninitialized) => None, UploadQueue::Stopped(UploadQueueStopped::Deletable(q)) => Some( q.upload_queue_for_deletion .get_last_remote_consistent_lsn_visible(), ), } } /// Returns true if this timeline was previously detached at this Lsn and the remote timeline /// client is currently initialized. 
pub(crate) fn is_previous_ancestor_lsn(&self, lsn: Lsn) -> bool { self.upload_queue .lock() .unwrap() .initialized_mut() .map(|uq| uq.clean.0.lineage.is_previous_ancestor_lsn(lsn)) .unwrap_or(false) } /// Returns whether the timeline is archived. /// Return None if the remote index_part hasn't been downloaded yet. pub(crate) fn is_archived(&self) -> Option<bool> { self.upload_queue .lock() .unwrap() .initialized_mut() .map(|q| q.clean.0.archived_at.is_some()) .ok() } /// Returns true if the timeline is invisible in synthetic size calculations. pub(crate) fn is_invisible(&self) -> Option<bool> { self.upload_queue .lock() .unwrap() .initialized_mut() .map(|q| q.clean.0.marked_invisible_at.is_some()) .ok() } /// Returns `Ok(Some(timestamp))` if the timeline has been archived, `Ok(None)` if the timeline hasn't been archived. /// /// Return Err(_) if the remote index_part hasn't been downloaded yet, or the timeline hasn't been stopped yet. pub(crate) fn archived_at_stopped_queue( &self, ) -> Result<Option<NaiveDateTime>, UploadQueueNotReadyError> { self.upload_queue .lock() .unwrap() .stopped_mut() .map(|q| q.upload_queue_for_deletion.clean.0.archived_at) .map_err(|_| UploadQueueNotReadyError) } fn update_remote_physical_size_gauge(&self, current_remote_index_part: Option<&IndexPart>) { let size: u64 = if let Some(current_remote_index_part) = current_remote_index_part { current_remote_index_part .layer_metadata .values() .map(|ilmd| ilmd.file_size) .sum() } else { 0 }; self.metrics.remote_physical_size_gauge.set(size); } pub fn get_remote_physical_size(&self) -> u64 { self.metrics.remote_physical_size_gauge.get() } // // Download operations. // // These don't use the per-timeline queue. They do use the global semaphore in // S3Bucket, to limit the total number of concurrent operations, though. 
// /// Download index file pub async fn download_index_file( &self, cancel: &CancellationToken, ) -> Result<MaybeDeletedIndexPart, DownloadError> { let _unfinished_gauge_guard = self.metrics.call_begin( &RemoteOpFileKind::Index, &RemoteOpKind::Download, crate::metrics::RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: "no need for a downloads gauge", }, ); let (index_part, index_generation, index_last_modified) = download::download_index_part( &self.storage_impl, &self.tenant_shard_id, &self.timeline_id, self.generation, cancel, ) .measure_remote_op( Option::<TaskKind>::None, RemoteOpFileKind::Index, RemoteOpKind::Download, Arc::clone(&self.metrics), ) .await?; // Defense in depth: monotonicity of generation numbers is an important correctness guarantee, so when we see a very // old index, we do extra checks in case this is the result of backward time-travel of the generation number (e.g. // in case of a bug in the service that issues generation numbers). Indices are allowed to be old, but we expect that // when we load an old index we are loading the _latest_ index: if we are asked to load an old index and there is // also a newer index available, that is surprising. const INDEX_AGE_CHECKS_THRESHOLD: Duration = Duration::from_secs(14 * 24 * 3600); let index_age = index_last_modified.elapsed().unwrap_or_else(|e| { if e.duration() > Duration::from_secs(5) { // We only warn if the S3 clock and our local clock are >5s out: because this is a low resolution // timestamp, it is common to be out by at least 1 second. tracing::warn!("Index has modification time in the future: {e}"); } Duration::ZERO }); if index_age > INDEX_AGE_CHECKS_THRESHOLD { tracing::info!( ?index_generation, age = index_age.as_secs_f64(), "Loaded an old index, checking for other indices..." 
); // Find the highest-generation index let (_latest_index_part, latest_index_generation, latest_index_mtime) = download::download_index_part( &self.storage_impl, &self.tenant_shard_id, &self.timeline_id, Generation::MAX, cancel, ) .await?; if latest_index_generation > index_generation { // Unexpected! Why are we loading such an old index if a more recent one exists? // We will refuse to proceed, as there is no reasonable scenario where this should happen, but // there _is_ a clear bug/corruption scenario where it would happen (controller sets the generation // backwards). tracing::error!( ?index_generation, ?latest_index_generation, ?latest_index_mtime, "Found a newer index while loading an old one" ); return Err(DownloadError::Fatal( "Index age exceeds threshold and a newer index exists".into(), )); } } if index_part.deleted_at.is_some() { Ok(MaybeDeletedIndexPart::Deleted(index_part)) } else { Ok(MaybeDeletedIndexPart::IndexPart(index_part)) } } /// Download a (layer) file from `path`, into local filesystem. /// /// 'layer_metadata' is the metadata from the remote index file. /// /// On success, returns the size of the downloaded file. pub async fn download_layer_file( &self, layer_file_name: &LayerName, layer_metadata: &LayerFileMetadata, local_path: &Utf8Path, gate: &utils::sync::gate::Gate, cancel: &CancellationToken, ctx: &RequestContext, ) -> Result<u64, DownloadError> { let downloaded_size = { let _unfinished_gauge_guard = self.metrics.call_begin( &RemoteOpFileKind::Layer, &RemoteOpKind::Download, crate::metrics::RemoteTimelineClientMetricsCallTrackSize::DontTrackSize { reason: "no need for a downloads gauge", }, ); download::download_layer_file( self.conf, &self.storage_impl, self.tenant_shard_id, self.timeline_id, layer_file_name, layer_metadata, local_path, gate, cancel, ctx, ) .measure_remote_op( Some(ctx.task_kind()), RemoteOpFileKind::Layer, RemoteOpKind::Download, Arc::clone(&self.metrics), ) .await? 
}; REMOTE_ONDEMAND_DOWNLOADED_LAYERS.inc(); REMOTE_ONDEMAND_DOWNLOADED_BYTES.inc_by(downloaded_size);
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/upload_queue.rs
pageserver/src/tenant/upload_queue.rs
use std::collections::{HashMap, HashSet, VecDeque}; use std::fmt::Debug; use std::sync::Arc; use std::sync::atomic::AtomicU32; use chrono::NaiveDateTime; use once_cell::sync::Lazy; use tracing::info; use utils::generation::Generation; use utils::lsn::{AtomicLsn, Lsn}; use super::remote_timeline_client::is_same_remote_layer_path; use super::storage_layer::{AsLayerDesc as _, LayerName, ResidentLayer}; use crate::tenant::metadata::TimelineMetadata; use crate::tenant::remote_timeline_client::index::{IndexPart, LayerFileMetadata}; /// Kill switch for upload queue reordering in case it causes problems. /// TODO: remove this once we have confidence in it. static DISABLE_UPLOAD_QUEUE_REORDERING: Lazy<bool> = Lazy::new(|| std::env::var("DISABLE_UPLOAD_QUEUE_REORDERING").as_deref() == Ok("true")); /// Kill switch for index upload coalescing in case it causes problems. /// TODO: remove this once we have confidence in it. static DISABLE_UPLOAD_QUEUE_INDEX_COALESCING: Lazy<bool> = Lazy::new(|| std::env::var("DISABLE_UPLOAD_QUEUE_INDEX_COALESCING").as_deref() == Ok("true")); // clippy warns that Uninitialized is much smaller than Initialized, which wastes // memory for Uninitialized variants. Doesn't matter in practice, there are not // that many upload queues in a running pageserver, and most of them are initialized // anyway. #[allow(clippy::large_enum_variant)] pub enum UploadQueue { Uninitialized, Initialized(UploadQueueInitialized), Stopped(UploadQueueStopped), } impl UploadQueue { pub fn as_str(&self) -> &'static str { match self { UploadQueue::Uninitialized => "Uninitialized", UploadQueue::Initialized(_) => "Initialized", UploadQueue::Stopped(_) => "Stopped", } } } #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub enum OpType { MayReorder, FlushDeletion, } /// This keeps track of queued and in-progress tasks. pub struct UploadQueueInitialized { /// Maximum number of inprogress tasks to schedule. 0 is no limit. 
pub(crate) inprogress_limit: usize, /// Counter to assign task IDs pub(crate) task_counter: u64, /// The next uploaded index_part.json; assumed to be dirty. /// /// Should not be read, directly except for layer file updates. Instead you should add a /// projected field. pub(crate) dirty: IndexPart, /// The latest remote persisted IndexPart. /// /// Each completed metadata upload will update this. The second item is the task_id which last /// updated the value, used to ensure we never store an older value over a newer one. pub(crate) clean: (IndexPart, Option<u64>), /// How many file uploads or deletions been scheduled, since the /// last (scheduling of) metadata index upload? pub(crate) latest_files_changes_since_metadata_upload_scheduled: u64, /// The Lsn is only updated after our generation has been validated with /// the control plane (unlesss a timeline's generation is None, in which case /// we skip validation) pub(crate) visible_remote_consistent_lsn: Arc<AtomicLsn>, /// Tasks that are currently in-progress. In-progress means that a tokio Task /// has been launched for it. An in-progress task can be busy uploading, but it can /// also be waiting on the `concurrency_limiter` Semaphore in S3Bucket, or it can /// be waiting for retry in `exponential_backoff`. pub inprogress_tasks: HashMap<u64, Arc<UploadTask>>, /// Queued operations that have not been launched yet. They might depend on previous /// tasks to finish. For example, metadata upload cannot be performed before all /// preceding layer file uploads have completed. pub queued_operations: VecDeque<UploadOp>, /// Files which have been unlinked but not yet had scheduled a deletion for. Only kept around /// for error logging. /// /// Putting this behind a testing feature to catch problems in tests, but assuming we could have a /// bug causing leaks, then it's better to not leave this enabled for production builds. 
#[cfg(feature = "testing")] pub(crate) dangling_files: HashMap<LayerName, Generation>, /// Ensure we order file operations correctly. pub(crate) recently_deleted: HashSet<(LayerName, Generation)>, /// Deletions that are blocked by the tenant configuration pub(crate) blocked_deletions: Vec<Delete>, /// Set to true when we have inserted the `UploadOp::Shutdown` into the `inprogress_tasks`. pub(crate) shutting_down: bool, /// Permitless semaphore on which any number of `RemoteTimelineClient::shutdown` futures can /// wait on until one of them stops the queue. The semaphore is closed when /// `RemoteTimelineClient::launch_queued_tasks` encounters `UploadOp::Shutdown`. pub(crate) shutdown_ready: Arc<tokio::sync::Semaphore>, } impl UploadQueueInitialized { pub(super) fn no_pending_work(&self) -> bool { self.inprogress_tasks.is_empty() && self.queued_operations.is_empty() } pub(super) fn get_last_remote_consistent_lsn_visible(&self) -> Lsn { self.visible_remote_consistent_lsn.load() } pub(super) fn get_last_remote_consistent_lsn_projected(&self) -> Option<Lsn> { let lsn = self.clean.0.metadata.disk_consistent_lsn(); self.clean.1.map(|_| lsn) } /// Returns and removes the next ready operation from the queue, if any. This isn't necessarily /// the first operation in the queue, to avoid head-of-line blocking -- an operation can jump /// the queue if it doesn't conflict with operations ahead of it. /// /// Also returns any operations that were coalesced into this one, e.g. multiple index uploads. /// /// None may be returned even if the queue isn't empty, if no operations are ready yet. /// /// NB: this is quadratic, but queues are expected to be small, and bounded by inprogress_limit. pub fn next_ready(&mut self) -> Option<(UploadOp, Vec<UploadOp>)> { // If inprogress_tasks is already at limit, don't schedule anything more. 
if self.inprogress_limit > 0 && self.inprogress_tasks.len() >= self.inprogress_limit { return None; } for (i, candidate) in self.queued_operations.iter().enumerate() { // If this candidate is ready, go for it. Otherwise, try the next one. if self.is_ready(i) { // Shutdown operations are left at the head of the queue, to prevent further // operations from starting. Signal that we're ready to shut down. if matches!(candidate, UploadOp::Shutdown) { assert!(self.inprogress_tasks.is_empty(), "shutdown with tasks"); assert_eq!(i, 0, "shutdown not at head of queue"); self.shutdown_ready.close(); return None; } let mut op = self.queued_operations.remove(i).expect("i can't disappear"); // Coalesce any back-to-back index uploads by only uploading the newest one that's // ready. This typically happens with layer/index/layer/index/... sequences, where // the layers bypass the indexes, leaving the indexes queued. // // If other operations are interleaved between index uploads we don't try to // coalesce them, since we may as well update the index concurrently with them. // This keeps the index fresh and avoids starvation. // // NB: we assume that all uploaded indexes have the same remote path. This // is true at the time of writing: the path only depends on the tenant, // timeline and generation, all of which are static for a timeline instance. // Otherwise, we must be careful not to coalesce different paths. let mut coalesced_ops = Vec::new(); if matches!(op, UploadOp::UploadMetadata { .. }) { while let Some(UploadOp::UploadMetadata { .. }) = self.queued_operations.get(i) { if *DISABLE_UPLOAD_QUEUE_INDEX_COALESCING { break; } if !self.is_ready(i) { break; } coalesced_ops.push(op); op = self.queued_operations.remove(i).expect("i can't disappear"); } } return Some((op, coalesced_ops)); } // Nothing can bypass a barrier or shutdown. If it wasn't scheduled above, give up. 
if matches!(candidate, UploadOp::Barrier(_) | UploadOp::Shutdown) { return None; } // If upload queue reordering is disabled, bail out after the first operation. if *DISABLE_UPLOAD_QUEUE_REORDERING { return None; } } None } /// Returns true if the queued operation at the given position is ready to be uploaded, i.e. if /// it doesn't conflict with any in-progress or queued operations ahead of it. Operations are /// allowed to skip the queue when it's safe to do so, to increase parallelism. /// /// The position must be valid for the queue size. fn is_ready(&self, pos: usize) -> bool { let candidate = self.queued_operations.get(pos).expect("invalid position"); self // Look at in-progress operations, in random order. .inprogress_tasks .values() .map(|task| &task.op) // Then queued operations ahead of the candidate, front-to-back. .chain(self.queued_operations.iter().take(pos)) // Keep track of the active index ahead of each operation. This is used to ensure that // an upload doesn't skip the queue too far, such that it modifies a layer that's // referenced by an active index. // // It's okay that in-progress operations are emitted in random order above, since at // most one of them can be an index upload (enforced by can_bypass). .scan(&self.clean.0, |next_active_index, op| { let active_index = *next_active_index; if let UploadOp::UploadMetadata { uploaded } = op { *next_active_index = uploaded; // stash index for next operation after this } Some((op, active_index)) }) // Check if the candidate can bypass all of them. .all(|(op, active_index)| candidate.can_bypass(op, active_index)) } /// Returns the number of in-progress deletion operations. #[cfg(test)] pub(crate) fn num_inprogress_deletions(&self) -> usize { self.inprogress_tasks .iter() .filter(|(_, t)| matches!(t.op, UploadOp::Delete(_))) .count() } /// Returns the number of in-progress layer uploads. 
#[cfg(test)] pub(crate) fn num_inprogress_layer_uploads(&self) -> usize { self.inprogress_tasks .iter() .filter(|(_, t)| matches!(t.op, UploadOp::UploadLayer(_, _, _))) .count() } /// Test helper that schedules all ready operations into inprogress_tasks, and returns /// references to them. /// /// TODO: the corresponding production logic should be moved from RemoteTimelineClient into /// UploadQueue, so we can use the same code path. #[cfg(test)] fn schedule_ready(&mut self) -> Vec<Arc<UploadTask>> { let mut tasks = Vec::new(); // NB: schedule operations one by one, to handle conflicts with inprogress_tasks. while let Some((op, coalesced_ops)) = self.next_ready() { self.task_counter += 1; let task = Arc::new(UploadTask { task_id: self.task_counter, op, coalesced_ops, retries: 0.into(), }); self.inprogress_tasks.insert(task.task_id, task.clone()); tasks.push(task); } tasks } /// Test helper that marks an operation as completed, removing it from inprogress_tasks. /// /// TODO: the corresponding production logic should be moved from RemoteTimelineClient into /// UploadQueue, so we can use the same code path. #[cfg(test)] fn complete(&mut self, task_id: u64) { let Some(task) = self.inprogress_tasks.remove(&task_id) else { return; }; // Update the clean index on uploads. 
if let UploadOp::UploadMetadata { ref uploaded } = task.op { if task.task_id > self.clean.1.unwrap_or_default() { self.clean = (*uploaded.clone(), Some(task.task_id)); } } } } #[derive(Clone, Copy)] pub(super) enum SetDeletedFlagProgress { NotRunning, InProgress(NaiveDateTime), Successful(NaiveDateTime), } pub struct UploadQueueStoppedDeletable { pub(super) upload_queue_for_deletion: UploadQueueInitialized, pub(super) deleted_at: SetDeletedFlagProgress, } #[allow(clippy::large_enum_variant, reason = "TODO")] pub enum UploadQueueStopped { Deletable(UploadQueueStoppedDeletable), Uninitialized, } #[derive(thiserror::Error, Debug)] pub enum NotInitialized { #[error("queue is in state Uninitialized")] Uninitialized, #[error("queue is in state Stopped")] Stopped, #[error("queue is shutting down")] ShuttingDown, } impl NotInitialized { pub(crate) fn is_stopping(&self) -> bool { use NotInitialized::*; match self { Uninitialized => false, Stopped => true, ShuttingDown => true, } } } impl UploadQueue { pub fn initialize_empty_remote( &mut self, metadata: &TimelineMetadata, inprogress_limit: usize, ) -> anyhow::Result<&mut UploadQueueInitialized> { match self { UploadQueue::Uninitialized => (), UploadQueue::Initialized(_) | UploadQueue::Stopped(_) => { anyhow::bail!("already initialized, state {}", self.as_str()) } } info!("initializing upload queue for empty remote"); let index_part = IndexPart::empty(metadata.clone()); let state = UploadQueueInitialized { inprogress_limit, dirty: index_part.clone(), clean: (index_part, None), latest_files_changes_since_metadata_upload_scheduled: 0, visible_remote_consistent_lsn: Arc::new(AtomicLsn::new(0)), // what follows are boring default initializations task_counter: 0, inprogress_tasks: HashMap::new(), queued_operations: VecDeque::new(), #[cfg(feature = "testing")] dangling_files: HashMap::new(), recently_deleted: HashSet::new(), blocked_deletions: Vec::new(), shutting_down: false, shutdown_ready: 
Arc::new(tokio::sync::Semaphore::new(0)), }; *self = UploadQueue::Initialized(state); Ok(self.initialized_mut().expect("we just set it")) } pub fn initialize_with_current_remote_index_part( &mut self, index_part: &IndexPart, inprogress_limit: usize, ) -> anyhow::Result<&mut UploadQueueInitialized> { match self { UploadQueue::Uninitialized => (), UploadQueue::Initialized(_) | UploadQueue::Stopped(_) => { anyhow::bail!("already initialized, state {}", self.as_str()) } } info!( "initializing upload queue with remote index_part.disk_consistent_lsn: {}", index_part.metadata.disk_consistent_lsn() ); let state = UploadQueueInitialized { inprogress_limit, dirty: index_part.clone(), clean: (index_part.clone(), None), latest_files_changes_since_metadata_upload_scheduled: 0, visible_remote_consistent_lsn: Arc::new( index_part.metadata.disk_consistent_lsn().into(), ), // what follows are boring default initializations task_counter: 0, inprogress_tasks: HashMap::new(), queued_operations: VecDeque::new(), #[cfg(feature = "testing")] dangling_files: HashMap::new(), recently_deleted: HashSet::new(), blocked_deletions: Vec::new(), shutting_down: false, shutdown_ready: Arc::new(tokio::sync::Semaphore::new(0)), }; *self = UploadQueue::Initialized(state); Ok(self.initialized_mut().expect("we just set it")) } pub fn initialized_mut(&mut self) -> Result<&mut UploadQueueInitialized, NotInitialized> { use UploadQueue::*; match self { Uninitialized => Err(NotInitialized::Uninitialized), Initialized(x) => { if x.shutting_down { Err(NotInitialized::ShuttingDown) } else { Ok(x) } } Stopped(_) => Err(NotInitialized::Stopped), } } pub(crate) fn stopped_mut(&mut self) -> anyhow::Result<&mut UploadQueueStoppedDeletable> { match self { UploadQueue::Initialized(_) | UploadQueue::Uninitialized => { anyhow::bail!("queue is in state {}", self.as_str()) } UploadQueue::Stopped(UploadQueueStopped::Uninitialized) => { anyhow::bail!("queue is in state Stopped(Uninitialized)") } 
UploadQueue::Stopped(UploadQueueStopped::Deletable(deletable)) => Ok(deletable), } } } /// An in-progress upload or delete task. #[derive(Debug)] pub struct UploadTask { /// Unique ID of this task. Used as the key in `inprogress_tasks` above. pub task_id: u64, /// Number of task retries. pub retries: AtomicU32, /// The upload operation. pub op: UploadOp, /// Any upload operations that were coalesced into this operation. This typically happens with /// back-to-back index uploads, see `UploadQueueInitialized::next_ready()`. pub coalesced_ops: Vec<UploadOp>, } /// A deletion of some layers within the lifetime of a timeline. This is not used /// for timeline deletion, which skips this queue and goes directly to DeletionQueue. #[derive(Debug, Clone)] pub struct Delete { pub layers: Vec<(LayerName, LayerFileMetadata)>, } #[derive(Clone, Debug)] pub enum UploadOp { /// Upload a layer file. The last field indicates the last operation for thie file. UploadLayer(ResidentLayer, LayerFileMetadata, Option<OpType>), /// Upload a index_part.json file UploadMetadata { /// The next [`UploadQueueInitialized::clean`] after this upload succeeds. uploaded: Box<IndexPart>, }, /// Delete layer files Delete(Delete), /// Barrier. When the barrier operation is reached, the channel is closed. Barrier(tokio::sync::watch::Sender<()>), /// Shutdown; upon encountering this operation no new operations will be spawned, otherwise /// this is the same as a Barrier. Shutdown, } impl std::fmt::Display for UploadOp { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { UploadOp::UploadLayer(layer, metadata, mode) => { write!( f, "UploadLayer({}, size={:?}, gen={:?}, mode={:?})", layer, metadata.file_size, metadata.generation, mode ) } UploadOp::UploadMetadata { uploaded, .. 
} => { write!( f, "UploadMetadata(lsn: {})", uploaded.metadata.disk_consistent_lsn() ) } UploadOp::Delete(delete) => { write!(f, "Delete({} layers)", delete.layers.len()) } UploadOp::Barrier(_) => write!(f, "Barrier"), UploadOp::Shutdown => write!(f, "Shutdown"), } } } impl UploadOp { /// Returns true if self can bypass other, i.e. if the operations don't conflict. index is the /// active index when other would be uploaded -- if we allow self to bypass other, this would /// be the active index when self is uploaded. pub fn can_bypass(&self, other: &UploadOp, index: &IndexPart) -> bool { match (self, other) { // Nothing can bypass a barrier or shutdown, and it can't bypass anything. (UploadOp::Barrier(_), _) | (_, UploadOp::Barrier(_)) => false, (UploadOp::Shutdown, _) | (_, UploadOp::Shutdown) => false, // Uploads and deletes can bypass each other unless they're for the same file. (UploadOp::UploadLayer(a, ameta, _), UploadOp::UploadLayer(b, bmeta, _)) => { let aname = &a.layer_desc().layer_name(); let bname = &b.layer_desc().layer_name(); !is_same_remote_layer_path(aname, ameta, bname, bmeta) } (UploadOp::UploadLayer(u, umeta, _), UploadOp::Delete(d)) | (UploadOp::Delete(d), UploadOp::UploadLayer(u, umeta, _)) => { d.layers.iter().all(|(dname, dmeta)| { !is_same_remote_layer_path(&u.layer_desc().layer_name(), umeta, dname, dmeta) }) } // Deletes are idempotent and can always bypass each other. (UploadOp::Delete(_), UploadOp::Delete(_)) => true, // Uploads and deletes can bypass an index upload as long as neither the uploaded index // nor the active index below it references the file. A layer can't be modified or // deleted while referenced by an index. // // Similarly, index uploads can bypass uploads and deletes as long as neither the // uploaded index nor the active index references the file (the latter would be // incorrect use by the caller). 
(UploadOp::UploadLayer(u, umeta, _), UploadOp::UploadMetadata { uploaded: i }) | (UploadOp::UploadMetadata { uploaded: i }, UploadOp::UploadLayer(u, umeta, _)) => { let uname = u.layer_desc().layer_name(); !i.references(&uname, umeta) && !index.references(&uname, umeta) } (UploadOp::Delete(d), UploadOp::UploadMetadata { uploaded: i }) | (UploadOp::UploadMetadata { uploaded: i }, UploadOp::Delete(d)) => { d.layers.iter().all(|(dname, dmeta)| { !i.references(dname, dmeta) && !index.references(dname, dmeta) }) } // Indexes can never bypass each other. They can coalesce though, and // `UploadQueue::next_ready()` currently does this when possible. (UploadOp::UploadMetadata { .. }, UploadOp::UploadMetadata { .. }) => false, } } } #[cfg(test)] mod tests { use std::str::FromStr as _; use itertools::Itertools as _; use utils::shard::{ShardCount, ShardIndex, ShardNumber}; use super::*; use crate::DEFAULT_PG_VERSION; use crate::tenant::Timeline; use crate::tenant::harness::{TIMELINE_ID, TenantHarness}; use crate::tenant::storage_layer::Layer; use crate::tenant::storage_layer::layer::local_layer_path; /// Test helper which asserts that two operations are the same, in lieu of UploadOp PartialEq. #[track_caller] fn assert_same_op(a: &UploadOp, b: &UploadOp) { use UploadOp::*; match (a, b) { (UploadLayer(a, ameta, atype), UploadLayer(b, bmeta, btype)) => { assert_eq!(a.layer_desc().layer_name(), b.layer_desc().layer_name()); assert_eq!(ameta, bmeta); assert_eq!(atype, btype); } (Delete(a), Delete(b)) => assert_eq!(a.layers, b.layers), (UploadMetadata { uploaded: a }, UploadMetadata { uploaded: b }) => assert_eq!(a, b), (Barrier(_), Barrier(_)) => {} (Shutdown, Shutdown) => {} (a, b) => panic!("{a:?} != {b:?}"), } } /// Test helper which asserts that two sets of operations are the same. 
#[track_caller] fn assert_same_ops<'a>( a: impl IntoIterator<Item = &'a UploadOp>, b: impl IntoIterator<Item = &'a UploadOp>, ) { a.into_iter() .zip_eq(b) .for_each(|(a, b)| assert_same_op(a, b)) } /// Test helper to construct a test timeline. /// /// TODO: it really shouldn't be necessary to construct an entire tenant and timeline just to /// test the upload queue -- decouple ResidentLayer from Timeline. /// /// TODO: the upload queue uses TimelineMetadata::example() instead, because there's no way to /// obtain a TimelineMetadata from a Timeline. fn make_timeline() -> Arc<Timeline> { // Grab the current test name from the current thread name. // TODO: TenantHarness shouldn't take a &'static str, but just leak the test name for now. let test_name = std::thread::current().name().unwrap().to_string(); let test_name = Box::leak(test_name.into_boxed_str()); let runtime = tokio::runtime::Builder::new_current_thread() .enable_all() .build() .expect("failed to create runtime"); runtime .block_on(async { let harness = TenantHarness::create(test_name).await?; let (tenant, ctx) = harness.load().await; tenant .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx) .await }) .expect("failed to create timeline") } /// Test helper to construct an (empty) resident layer. fn make_layer(timeline: &Arc<Timeline>, name: &str) -> ResidentLayer { make_layer_with_size(timeline, name, 0) } /// Test helper to construct a resident layer with the given size. fn make_layer_with_size(timeline: &Arc<Timeline>, name: &str, size: usize) -> ResidentLayer { let metadata = LayerFileMetadata { generation: timeline.generation, shard: timeline.get_shard_index(), file_size: size as u64, }; make_layer_with_metadata(timeline, name, metadata) } /// Test helper to construct a layer with the given metadata. 
fn make_layer_with_metadata( timeline: &Arc<Timeline>, name: &str, metadata: LayerFileMetadata, ) -> ResidentLayer { let name = LayerName::from_str(name).expect("invalid name"); let local_path = local_layer_path( timeline.conf, &timeline.tenant_shard_id, &timeline.timeline_id, &name, &metadata.generation, ); std::fs::write(&local_path, vec![0; metadata.file_size as usize]) .expect("failed to write file"); Layer::for_resident(timeline.conf, timeline, local_path, name, metadata) } /// Test helper to add a layer to an index and return a new index. fn index_with(index: &IndexPart, layer: &ResidentLayer) -> Box<IndexPart> { let mut index = index.clone(); index .layer_metadata .insert(layer.layer_desc().layer_name(), layer.metadata()); Box::new(index) } /// Test helper to remove a layer from an index and return a new index. fn index_without(index: &IndexPart, layer: &ResidentLayer) -> Box<IndexPart> { let mut index = index.clone(); index .layer_metadata .remove(&layer.layer_desc().layer_name()); Box::new(index) } /// Nothing can bypass a barrier, and it can't bypass inprogress tasks. 
#[test] fn schedule_barrier() -> anyhow::Result<()> { let mut queue = UploadQueue::Uninitialized; let queue = queue.initialize_empty_remote(&TimelineMetadata::example(), 0)?; let tli = make_timeline(); let index = Box::new(queue.clean.0.clone()); // empty, doesn't matter let layer0 = make_layer( &tli, "000000000000000000000000000000000000-100000000000000000000000000000000000__00000000016B59D8-00000000016B5A51", ); let layer1 = make_layer( &tli, "100000000000000000000000000000000000-200000000000000000000000000000000000__00000000016B59D8-00000000016B5A51", ); let layer2 = make_layer( &tli, "200000000000000000000000000000000000-300000000000000000000000000000000000__00000000016B59D8-00000000016B5A51", ); let layer3 = make_layer( &tli, "300000000000000000000000000000000000-400000000000000000000000000000000000__00000000016B59D8-00000000016B5A51", ); let (barrier, _) = tokio::sync::watch::channel(()); // Enqueue non-conflicting upload, delete, and index before and after a barrier. let ops = [ UploadOp::UploadLayer(layer0.clone(), layer0.metadata(), None), UploadOp::Delete(Delete { layers: vec![(layer1.layer_desc().layer_name(), layer1.metadata())], }), UploadOp::UploadMetadata { uploaded: index.clone(), }, UploadOp::Barrier(barrier), UploadOp::UploadLayer(layer2.clone(), layer2.metadata(), None), UploadOp::Delete(Delete { layers: vec![(layer3.layer_desc().layer_name(), layer3.metadata())], }), UploadOp::UploadMetadata { uploaded: index.clone(), }, ]; queue.queued_operations.extend(ops.clone()); // Schedule the initial operations ahead of the barrier. let tasks = queue.schedule_ready(); assert_same_ops(tasks.iter().map(|t| &t.op), &ops[0..3]); assert!(matches!( queue.queued_operations.front(), Some(&UploadOp::Barrier(_)) )); // Complete the initial operations. The barrier isn't scheduled while they're pending. for task in tasks { assert!(queue.schedule_ready().is_empty()); queue.complete(task.task_id); } // Schedule the barrier. 
The later tasks won't schedule until it completes. let tasks = queue.schedule_ready(); assert_eq!(tasks.len(), 1); assert!(matches!(tasks[0].op, UploadOp::Barrier(_))); assert_eq!(queue.queued_operations.len(), 3); // Complete the barrier. The rest of the tasks schedule immediately. queue.complete(tasks[0].task_id); let tasks = queue.schedule_ready(); assert_same_ops(tasks.iter().map(|t| &t.op), &ops[4..]); assert!(queue.queued_operations.is_empty()); Ok(()) } /// Deletes can be scheduled in parallel, even if they're for the same file. #[test] fn schedule_delete_parallel() -> anyhow::Result<()> { let mut queue = UploadQueue::Uninitialized; let queue = queue.initialize_empty_remote(&TimelineMetadata::example(), 0)?; let tli = make_timeline(); // Enqueue a bunch of deletes, some with conflicting names. let layer0 = make_layer( &tli, "000000000000000000000000000000000000-100000000000000000000000000000000000__00000000016B59D8-00000000016B5A51", ); let layer1 = make_layer( &tli, "100000000000000000000000000000000000-200000000000000000000000000000000000__00000000016B59D8-00000000016B5A51", ); let layer2 = make_layer( &tli, "200000000000000000000000000000000000-300000000000000000000000000000000000__00000000016B59D8-00000000016B5A51", ); let layer3 = make_layer( &tli, "300000000000000000000000000000000000-400000000000000000000000000000000000__00000000016B59D8-00000000016B5A51", ); let ops = [ UploadOp::Delete(Delete { layers: vec![(layer0.layer_desc().layer_name(), layer0.metadata())], }), UploadOp::Delete(Delete { layers: vec![(layer1.layer_desc().layer_name(), layer1.metadata())], }), UploadOp::Delete(Delete { layers: vec![
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/disk_btree_test_data.rs
pageserver/src/tenant/disk_btree_test_data.rs
use hex_literal::hex; /// Test data set for the 'particular_data' test in disk_btree.rs /// /// This test contains a particular data set, representing all the keys /// generated by the 'test_random_updates' unit test. I extracted this while /// trying to debug a failure in that test. The bug turned out to be /// elsewhere, and I'm not sure if this is still useful, but keeping it for /// now... Maybe it's a useful data set to show the typical key-values used /// by a delta layer, for evaluating how well the prefix compression works. #[rustfmt::skip] pub static TEST_DATA: [([u8; 26], u64); 2000] = [ (hex!("0100000000333333334444444455000000000000000000000010"), 0x004001), (hex!("0100000000333333334444444455000000000000000000007cb0"), 0x0040a1), (hex!("0100000000333333334444444455000000010000000000000020"), 0x004141), (hex!("0100000000333333334444444455000000020000000000000030"), 0x0041e1), (hex!("01000000003333333344444444550000000200000000000051a0"), 0x004281), (hex!("0100000000333333334444444455000000030000000000000040"), 0x004321), (hex!("0100000000333333334444444455000000030000000000006cf0"), 0x0043c1), (hex!("0100000000333333334444444455000000030000000000007140"), 0x004461), (hex!("0100000000333333334444444455000000040000000000000050"), 0x004501), (hex!("01000000003333333344444444550000000400000000000047f0"), 0x0045a1), (hex!("01000000003333333344444444550000000400000000000072b0"), 0x004641), (hex!("0100000000333333334444444455000000050000000000000060"), 0x0046e1), (hex!("0100000000333333334444444455000000050000000000005550"), 0x004781), (hex!("0100000000333333334444444455000000060000000000000070"), 0x004821), (hex!("01000000003333333344444444550000000600000000000044a0"), 0x0048c1), (hex!("0100000000333333334444444455000000060000000000006870"), 0x004961), (hex!("0100000000333333334444444455000000070000000000000080"), 0x004a01), (hex!("0100000000333333334444444455000000080000000000000090"), 0x004aa1), (hex!("0100000000333333334444444455000000080000000000004150"), 
0x004b41), (hex!("01000000003333333344444444550000000900000000000000a0"), 0x004be1), (hex!("01000000003333333344444444550000000a00000000000000b0"), 0x004c81), (hex!("01000000003333333344444444550000000a0000000000006680"), 0x004d21), (hex!("01000000003333333344444444550000000b00000000000000c0"), 0x004dc1), (hex!("01000000003333333344444444550000000b0000000000006230"), 0x004e61), (hex!("01000000003333333344444444550000000c00000000000000d0"), 0x004f01), (hex!("01000000003333333344444444550000000d00000000000000e0"), 0x004fa1), (hex!("01000000003333333344444444550000000e00000000000000f0"), 0x005041), (hex!("01000000003333333344444444550000000e0000000000006000"), 0x0050e1), (hex!("01000000003333333344444444550000000f0000000000000100"), 0x005181), (hex!("01000000003333333344444444550000000f00000000000053c0"), 0x005221), (hex!("01000000003333333344444444550000000f0000000000006580"), 0x0052c1), (hex!("0100000000333333334444444455000000100000000000000110"), 0x005361), (hex!("01000000003333333344444444550000001000000000000046c0"), 0x005401), (hex!("0100000000333333334444444455000000100000000000004e40"), 0x0054a1), (hex!("0100000000333333334444444455000000110000000000000120"), 0x005541), (hex!("0100000000333333334444444455000000120000000000000130"), 0x0055e1), (hex!("01000000003333333344444444550000001200000000000066d0"), 0x005681), (hex!("0100000000333333334444444455000000130000000000000140"), 0x005721), (hex!("0100000000333333334444444455000000130000000000007710"), 0x0057c1), (hex!("0100000000333333334444444455000000140000000000000150"), 0x005861), (hex!("0100000000333333334444444455000000140000000000006c40"), 0x005901), (hex!("0100000000333333334444444455000000150000000000000160"), 0x0059a1), (hex!("0100000000333333334444444455000000150000000000005990"), 0x005a41), (hex!("0100000000333333334444444455000000160000000000000170"), 0x005ae1), (hex!("0100000000333333334444444455000000160000000000005530"), 0x005b81), (hex!("0100000000333333334444444455000000170000000000000180"), 
0x005c21), (hex!("0100000000333333334444444455000000170000000000004290"), 0x005cc1), (hex!("0100000000333333334444444455000000180000000000000190"), 0x005d61), (hex!("01000000003333333344444444550000001800000000000051c0"), 0x005e01), (hex!("01000000003333333344444444550000001900000000000001a0"), 0x005ea1), (hex!("0100000000333333334444444455000000190000000000005420"), 0x005f41), (hex!("0100000000333333334444444455000000190000000000005770"), 0x005fe1), (hex!("01000000003333333344444444550000001900000000000079d0"), 0x006081), (hex!("01000000003333333344444444550000001a00000000000001b0"), 0x006121), (hex!("01000000003333333344444444550000001a0000000000006f70"), 0x0061c1), (hex!("01000000003333333344444444550000001a0000000000007150"), 0x006261), (hex!("01000000003333333344444444550000001b00000000000001c0"), 0x006301), (hex!("01000000003333333344444444550000001b0000000000005070"), 0x0063a1), (hex!("01000000003333333344444444550000001c00000000000001d0"), 0x006441), (hex!("01000000003333333344444444550000001d00000000000001e0"), 0x0064e1), (hex!("01000000003333333344444444550000001e00000000000001f0"), 0x006581), (hex!("01000000003333333344444444550000001e0000000000005650"), 0x006621), (hex!("01000000003333333344444444550000001f0000000000000200"), 0x0066c1), (hex!("01000000003333333344444444550000001f0000000000006ca0"), 0x006761), (hex!("0100000000333333334444444455000000200000000000000210"), 0x006801), (hex!("0100000000333333334444444455000000200000000000005fc0"), 0x0068a1), (hex!("0100000000333333334444444455000000210000000000000220"), 0x006941), (hex!("0100000000333333334444444455000000210000000000006430"), 0x0069e1), (hex!("0100000000333333334444444455000000220000000000000230"), 0x006a81), (hex!("01000000003333333344444444550000002200000000000040e0"), 0x006b21), (hex!("0100000000333333334444444455000000230000000000000240"), 0x006bc1), (hex!("01000000003333333344444444550000002300000000000042d0"), 0x006c61), (hex!("0100000000333333334444444455000000240000000000000250"), 
0x006d01), (hex!("0100000000333333334444444455000000250000000000000260"), 0x006da1), (hex!("01000000003333333344444444550000002500000000000058c0"), 0x006e41), (hex!("0100000000333333334444444455000000260000000000000270"), 0x006ee1), (hex!("0100000000333333334444444455000000260000000000004020"), 0x006f81), (hex!("0100000000333333334444444455000000270000000000000280"), 0x007021), (hex!("0100000000333333334444444455000000280000000000000290"), 0x0070c1), (hex!("0100000000333333334444444455000000280000000000007c00"), 0x007161), (hex!("01000000003333333344444444550000002900000000000002a0"), 0x007201), (hex!("01000000003333333344444444550000002a00000000000002b0"), 0x0072a1), (hex!("01000000003333333344444444550000002b00000000000002c0"), 0x007341), (hex!("01000000003333333344444444550000002c00000000000002d0"), 0x0073e1), (hex!("01000000003333333344444444550000002c00000000000041b0"), 0x007481), (hex!("01000000003333333344444444550000002c0000000000004c30"), 0x007521), (hex!("01000000003333333344444444550000002d00000000000002e0"), 0x0075c1), (hex!("01000000003333333344444444550000002d0000000000005e40"), 0x007661), (hex!("01000000003333333344444444550000002d0000000000006990"), 0x007701), (hex!("01000000003333333344444444550000002e00000000000002f0"), 0x0077a1), (hex!("01000000003333333344444444550000002f0000000000000300"), 0x007841), (hex!("01000000003333333344444444550000002f0000000000004a70"), 0x0078e1), (hex!("01000000003333333344444444550000002f0000000000006b40"), 0x007981), (hex!("0100000000333333334444444455000000300000000000000310"), 0x007a21), (hex!("0100000000333333334444444455000000310000000000000320"), 0x007ac1), (hex!("0100000000333333334444444455000000320000000000000330"), 0x007b61), (hex!("01000000003333333344444444550000003200000000000041a0"), 0x007c01), (hex!("0100000000333333334444444455000000320000000000007340"), 0x007ca1), (hex!("0100000000333333334444444455000000320000000000007730"), 0x007d41), (hex!("0100000000333333334444444455000000330000000000000340"), 
0x007de1), (hex!("01000000003333333344444444550000003300000000000055a0"), 0x007e81), (hex!("0100000000333333334444444455000000340000000000000350"), 0x007f21), (hex!("0100000000333333334444444455000000350000000000000360"), 0x007fc1), (hex!("01000000003333333344444444550000003500000000000077a0"), 0x008061), (hex!("0100000000333333334444444455000000360000000000000370"), 0x008101), (hex!("0100000000333333334444444455000000370000000000000380"), 0x0081a1), (hex!("0100000000333333334444444455000000380000000000000390"), 0x008241), (hex!("01000000003333333344444444550000003900000000000003a0"), 0x0082e1), (hex!("01000000003333333344444444550000003a00000000000003b0"), 0x008381), (hex!("01000000003333333344444444550000003a00000000000071c0"), 0x008421), (hex!("01000000003333333344444444550000003b00000000000003c0"), 0x0084c1), (hex!("01000000003333333344444444550000003c00000000000003d0"), 0x008561), (hex!("01000000003333333344444444550000003d00000000000003e0"), 0x008601), (hex!("01000000003333333344444444550000003e00000000000003f0"), 0x0086a1), (hex!("01000000003333333344444444550000003e00000000000062e0"), 0x008741), (hex!("01000000003333333344444444550000003f0000000000000400"), 0x0087e1), (hex!("0100000000333333334444444455000000400000000000000410"), 0x008881), (hex!("0100000000333333334444444455000000400000000000004460"), 0x008921), (hex!("0100000000333333334444444455000000400000000000005b90"), 0x0089c1), (hex!("01000000003333333344444444550000004000000000000079b0"), 0x008a61), (hex!("0100000000333333334444444455000000410000000000000420"), 0x008b01), (hex!("0100000000333333334444444455000000420000000000000430"), 0x008ba1), (hex!("0100000000333333334444444455000000420000000000005640"), 0x008c41), (hex!("0100000000333333334444444455000000430000000000000440"), 0x008ce1), (hex!("01000000003333333344444444550000004300000000000072a0"), 0x008d81), (hex!("0100000000333333334444444455000000440000000000000450"), 0x008e21), (hex!("0100000000333333334444444455000000450000000000000460"), 
0x008ec1), (hex!("0100000000333333334444444455000000450000000000005750"), 0x008f61), (hex!("01000000003333333344444444550000004500000000000077b0"), 0x009001), (hex!("0100000000333333334444444455000000460000000000000470"), 0x0090a1), (hex!("0100000000333333334444444455000000470000000000000480"), 0x009141), (hex!("0100000000333333334444444455000000480000000000000490"), 0x0091e1), (hex!("01000000003333333344444444550000004800000000000069e0"), 0x009281), (hex!("01000000003333333344444444550000004900000000000004a0"), 0x009321), (hex!("0100000000333333334444444455000000490000000000007370"), 0x0093c1), (hex!("01000000003333333344444444550000004a00000000000004b0"), 0x009461), (hex!("01000000003333333344444444550000004a0000000000005cb0"), 0x009501), (hex!("01000000003333333344444444550000004b00000000000004c0"), 0x0095a1), (hex!("01000000003333333344444444550000004c00000000000004d0"), 0x009641), (hex!("01000000003333333344444444550000004c0000000000004880"), 0x0096e1), (hex!("01000000003333333344444444550000004c0000000000007a40"), 0x009781), (hex!("01000000003333333344444444550000004d00000000000004e0"), 0x009821), (hex!("01000000003333333344444444550000004d0000000000006390"), 0x0098c1), (hex!("01000000003333333344444444550000004e00000000000004f0"), 0x009961), (hex!("01000000003333333344444444550000004e0000000000004db0"), 0x009a01), (hex!("01000000003333333344444444550000004f0000000000000500"), 0x009aa1), (hex!("0100000000333333334444444455000000500000000000000510"), 0x009b41), (hex!("0100000000333333334444444455000000510000000000000520"), 0x009be1), (hex!("01000000003333333344444444550000005100000000000069c0"), 0x009c81), (hex!("0100000000333333334444444455000000520000000000000530"), 0x009d21), (hex!("0100000000333333334444444455000000520000000000006e60"), 0x009dc1), (hex!("01000000003333333344444444550000005200000000000070c0"), 0x009e61), (hex!("0100000000333333334444444455000000530000000000000540"), 0x009f01), (hex!("0100000000333333334444444455000000530000000000005840"), 
0x009fa1), (hex!("0100000000333333334444444455000000540000000000000550"), 0x00a041), (hex!("01000000003333333344444444550000005400000000000043e0"), 0x00a0e1), (hex!("01000000003333333344444444550000005400000000000074e0"), 0x00a181), (hex!("0100000000333333334444444455000000550000000000000560"), 0x00a221), (hex!("0100000000333333334444444455000000550000000000003ee0"), 0x00a2c1), (hex!("0100000000333333334444444455000000560000000000000570"), 0x00a361), (hex!("0100000000333333334444444455000000570000000000000580"), 0x00a401), (hex!("0100000000333333334444444455000000570000000000007030"), 0x00a4a1), (hex!("0100000000333333334444444455000000580000000000000590"), 0x00a541), (hex!("0100000000333333334444444455000000580000000000005340"), 0x00a5e1), (hex!("01000000003333333344444444550000005800000000000059f0"), 0x00a681), (hex!("0100000000333333334444444455000000580000000000006930"), 0x00a721), (hex!("01000000003333333344444444550000005900000000000005a0"), 0x00a7c1), (hex!("0100000000333333334444444455000000590000000000003f90"), 0x00a861), (hex!("01000000003333333344444444550000005a00000000000005b0"), 0x00a901), (hex!("01000000003333333344444444550000005b00000000000005c0"), 0x00a9a1), (hex!("01000000003333333344444444550000005b00000000000062c0"), 0x00aa41), (hex!("01000000003333333344444444550000005c00000000000005d0"), 0x00aae1), (hex!("01000000003333333344444444550000005c0000000000005a70"), 0x00ab81), (hex!("01000000003333333344444444550000005c0000000000005dd0"), 0x00ac21), (hex!("01000000003333333344444444550000005d00000000000005e0"), 0x00acc1), (hex!("01000000003333333344444444550000005d0000000000005730"), 0x00ad61), (hex!("01000000003333333344444444550000005e00000000000005f0"), 0x00ae01), (hex!("01000000003333333344444444550000005e0000000000004f40"), 0x00aea1), (hex!("01000000003333333344444444550000005f0000000000000600"), 0x00af41), (hex!("0100000000333333334444444455000000600000000000000610"), 0x00afe1), (hex!("0100000000333333334444444455000000600000000000007c40"), 
0x00b081), (hex!("0100000000333333334444444455000000610000000000000620"), 0x00b121), (hex!("0100000000333333334444444455000000610000000000007860"), 0x00b1c1), (hex!("0100000000333333334444444455000000620000000000000630"), 0x00b261), (hex!("0100000000333333334444444455000000620000000000005050"), 0x00b301), (hex!("0100000000333333334444444455000000630000000000000640"), 0x00b3a1), (hex!("0100000000333333334444444455000000640000000000000650"), 0x00b441), (hex!("0100000000333333334444444455000000650000000000000660"), 0x00b4e1), (hex!("0100000000333333334444444455000000650000000000005330"), 0x00b581), (hex!("0100000000333333334444444455000000660000000000000670"), 0x00b621), (hex!("0100000000333333334444444455000000660000000000004e20"), 0x00b6c1), (hex!("0100000000333333334444444455000000660000000000005ee0"), 0x00b761), (hex!("0100000000333333334444444455000000660000000000006360"), 0x00b801), (hex!("0100000000333333334444444455000000670000000000000680"), 0x00b8a1), (hex!("0100000000333333334444444455000000670000000000004040"), 0x00b941), (hex!("0100000000333333334444444455000000680000000000000690"), 0x00b9e1), (hex!("0100000000333333334444444455000000680000000000003f80"), 0x00ba81), (hex!("01000000003333333344444444550000006800000000000041e0"), 0x00bb21), (hex!("01000000003333333344444444550000006900000000000006a0"), 0x00bbc1), (hex!("0100000000333333334444444455000000690000000000006080"), 0x00bc61), (hex!("01000000003333333344444444550000006a00000000000006b0"), 0x00bd01), (hex!("01000000003333333344444444550000006a00000000000042f0"), 0x00bda1), (hex!("01000000003333333344444444550000006b00000000000006c0"), 0x00be41), (hex!("01000000003333333344444444550000006b00000000000052f0"), 0x00bee1), (hex!("01000000003333333344444444550000006b0000000000005980"), 0x00bf81), (hex!("01000000003333333344444444550000006b0000000000006170"), 0x00c021), (hex!("01000000003333333344444444550000006c00000000000006d0"), 0x00c0c1), (hex!("01000000003333333344444444550000006d00000000000006e0"), 
0x00c161), (hex!("01000000003333333344444444550000006d0000000000006fb0"), 0x00c201), (hex!("01000000003333333344444444550000006e00000000000006f0"), 0x00c2a1), (hex!("01000000003333333344444444550000006e00000000000065b0"), 0x00c341), (hex!("01000000003333333344444444550000006e0000000000007970"), 0x00c3e1), (hex!("01000000003333333344444444550000006f0000000000000700"), 0x00c481), (hex!("01000000003333333344444444550000006f0000000000005900"), 0x00c521), (hex!("01000000003333333344444444550000006f0000000000006d90"), 0x00c5c1), (hex!("0100000000333333334444444455000000700000000000000710"), 0x00c661), (hex!("01000000003333333344444444550000007000000000000045c0"), 0x00c701), (hex!("0100000000333333334444444455000000700000000000004d40"), 0x00c7a1), (hex!("0100000000333333334444444455000000710000000000000720"), 0x00c841), (hex!("0100000000333333334444444455000000710000000000004dc0"), 0x00c8e1), (hex!("0100000000333333334444444455000000710000000000007550"), 0x00c981), (hex!("0100000000333333334444444455000000720000000000000730"), 0x00ca21), (hex!("0100000000333333334444444455000000720000000000003ec0"), 0x00cac1), (hex!("01000000003333333344444444550000007200000000000045a0"), 0x00cb61), (hex!("0100000000333333334444444455000000720000000000006770"), 0x00cc01), (hex!("0100000000333333334444444455000000720000000000006bc0"), 0x00cca1), (hex!("0100000000333333334444444455000000730000000000000740"), 0x00cd41), (hex!("0100000000333333334444444455000000730000000000005250"), 0x00cde1), (hex!("01000000003333333344444444550000007300000000000075f0"), 0x00ce81), (hex!("0100000000333333334444444455000000740000000000000750"), 0x00cf21), (hex!("0100000000333333334444444455000000740000000000003ff0"), 0x00cfc1), (hex!("01000000003333333344444444550000007400000000000079e0"), 0x00d061), (hex!("0100000000333333334444444455000000750000000000000760"), 0x00d101), (hex!("0100000000333333334444444455000000750000000000004310"), 0x00d1a1), (hex!("0100000000333333334444444455000000760000000000000770"), 
0x00d241), (hex!("0100000000333333334444444455000000770000000000000780"), 0x00d2e1), (hex!("01000000003333333344444444550000007700000000000062f0"), 0x00d381), (hex!("0100000000333333334444444455000000770000000000006940"), 0x00d421), (hex!("0100000000333333334444444455000000780000000000000790"), 0x00d4c1), (hex!("01000000003333333344444444550000007900000000000007a0"), 0x00d561), (hex!("0100000000333333334444444455000000790000000000007af0"), 0x00d601), (hex!("01000000003333333344444444550000007a00000000000007b0"), 0x00d6a1), (hex!("01000000003333333344444444550000007b00000000000007c0"), 0x00d741), (hex!("01000000003333333344444444550000007b00000000000067e0"), 0x00d7e1), (hex!("01000000003333333344444444550000007b0000000000007890"), 0x00d881), (hex!("01000000003333333344444444550000007c00000000000007d0"), 0x00d921), (hex!("01000000003333333344444444550000007d00000000000007e0"), 0x00d9c1), (hex!("01000000003333333344444444550000007e00000000000007f0"), 0x00da61), (hex!("01000000003333333344444444550000007f0000000000000800"), 0x00db01), (hex!("01000000003333333344444444550000007f0000000000005be0"), 0x00dba1), (hex!("0100000000333333334444444455000000800000000000000810"), 0x00dc41), (hex!("0100000000333333334444444455000000810000000000000820"), 0x00dce1), (hex!("0100000000333333334444444455000000810000000000007190"), 0x00dd81), (hex!("0100000000333333334444444455000000820000000000000830"), 0x00de21), (hex!("0100000000333333334444444455000000820000000000004ab0"), 0x00dec1), (hex!("0100000000333333334444444455000000830000000000000840"), 0x00df61), (hex!("0100000000333333334444444455000000830000000000006720"), 0x00e001), (hex!("0100000000333333334444444455000000840000000000000850"), 0x00e0a1), (hex!("0100000000333333334444444455000000850000000000000860"), 0x00e141), (hex!("01000000003333333344444444550000008500000000000054f0"), 0x00e1e1), (hex!("0100000000333333334444444455000000850000000000007920"), 0x00e281), (hex!("0100000000333333334444444455000000860000000000000870"), 
0x00e321), (hex!("01000000003333333344444444550000008600000000000060e0"), 0x00e3c1), (hex!("0100000000333333334444444455000000860000000000006be0"), 0x00e461), (hex!("0100000000333333334444444455000000870000000000000880"), 0x00e501), (hex!("0100000000333333334444444455000000870000000000006820"), 0x00e5a1), (hex!("0100000000333333334444444455000000880000000000000890"), 0x00e641), (hex!("01000000003333333344444444550000008900000000000008a0"), 0x00e6e1), (hex!("0100000000333333334444444455000000890000000000007c30"), 0x00e781), (hex!("01000000003333333344444444550000008a00000000000008b0"), 0x00e821), (hex!("01000000003333333344444444550000008b00000000000008c0"), 0x00e8c1), (hex!("01000000003333333344444444550000008b0000000000005910"), 0x00e961), (hex!("01000000003333333344444444550000008b0000000000006fe0"), 0x00ea01), (hex!("01000000003333333344444444550000008c00000000000008d0"), 0x00eaa1), (hex!("01000000003333333344444444550000008c0000000000006800"), 0x00eb41), (hex!("01000000003333333344444444550000008d00000000000008e0"), 0x00ebe1), (hex!("01000000003333333344444444550000008d0000000000005810"), 0x00ec81), (hex!("01000000003333333344444444550000008d0000000000007c90"), 0x00ed21), (hex!("01000000003333333344444444550000008e00000000000008f0"), 0x00edc1), (hex!("01000000003333333344444444550000008e00000000000058f0"), 0x00ee61), (hex!("01000000003333333344444444550000008f0000000000000900"), 0x00ef01), (hex!("01000000003333333344444444550000008f0000000000005a30"), 0x00efa1), (hex!("0100000000333333334444444455000000900000000000000910"), 0x00f041), (hex!("0100000000333333334444444455000000900000000000006130"), 0x00f0e1), (hex!("0100000000333333334444444455000000900000000000006550"), 0x00f181), (hex!("0100000000333333334444444455000000910000000000000920"), 0x00f221), (hex!("01000000003333333344444444550000009100000000000079f0"), 0x00f2c1), (hex!("0100000000333333334444444455000000920000000000000930"), 0x00f361), (hex!("0100000000333333334444444455000000920000000000005620"), 
0x00f401), (hex!("0100000000333333334444444455000000920000000000005e90"), 0x00f4a1), (hex!("01000000003333333344444444550000009200000000000063d0"), 0x00f541), (hex!("01000000003333333344444444550000009200000000000076c0"), 0x00f5e1), (hex!("0100000000333333334444444455000000930000000000000940"), 0x00f681), (hex!("01000000003333333344444444550000009300000000000044e0"), 0x00f721), (hex!("0100000000333333334444444455000000940000000000000950"), 0x00f7c1), (hex!("0100000000333333334444444455000000940000000000007a30"), 0x00f861), (hex!("0100000000333333334444444455000000950000000000000960"), 0x00f901), (hex!("0100000000333333334444444455000000950000000000007a70"), 0x00f9a1), (hex!("0100000000333333334444444455000000960000000000000970"), 0x00fa41), (hex!("0100000000333333334444444455000000970000000000000980"), 0x00fae1), (hex!("0100000000333333334444444455000000970000000000007330"), 0x00fb81), (hex!("0100000000333333334444444455000000980000000000000990"), 0x00fc21), (hex!("0100000000333333334444444455000000980000000000005af0"), 0x00fcc1), (hex!("0100000000333333334444444455000000980000000000007ae0"), 0x00fd61), (hex!("01000000003333333344444444550000009900000000000009a0"), 0x00fe01), (hex!("0100000000333333334444444455000000990000000000005160"), 0x00fea1), (hex!("0100000000333333334444444455000000990000000000006850"), 0x00ff41), (hex!("01000000003333333344444444550000009a00000000000009b0"), 0x00ffe1), (hex!("01000000003333333344444444550000009b00000000000009c0"), 0x010081), (hex!("01000000003333333344444444550000009b0000000000005010"), 0x010121), (hex!("01000000003333333344444444550000009c00000000000009d0"), 0x0101c1), (hex!("01000000003333333344444444550000009c00000000000042e0"), 0x010261), (hex!("01000000003333333344444444550000009d00000000000009e0"), 0x010301), (hex!("01000000003333333344444444550000009d00000000000057f0"), 0x0103a1), (hex!("01000000003333333344444444550000009e00000000000009f0"), 0x010441), (hex!("01000000003333333344444444550000009e0000000000004ef0"), 
0x0104e1), (hex!("01000000003333333344444444550000009f0000000000000a00"), 0x010581), (hex!("01000000003333333344444444550000009f0000000000006110"), 0x010621), (hex!("0100000000333333334444444455000000a00000000000000a10"), 0x0106c1), (hex!("0100000000333333334444444455000000a10000000000000a20"), 0x010761), (hex!("0100000000333333334444444455000000a100000000000040d0"), 0x010801), (hex!("0100000000333333334444444455000000a10000000000007670"), 0x0108a1), (hex!("0100000000333333334444444455000000a20000000000000a30"), 0x010941), (hex!("0100000000333333334444444455000000a200000000000074d0"), 0x0109e1), (hex!("0100000000333333334444444455000000a30000000000000a40"), 0x010a81), (hex!("0100000000333333334444444455000000a30000000000004c90"), 0x010b21), (hex!("0100000000333333334444444455000000a40000000000000a50"), 0x010bc1), (hex!("0100000000333333334444444455000000a50000000000000a60"), 0x010c61), (hex!("0100000000333333334444444455000000a60000000000000a70"), 0x010d01), (hex!("0100000000333333334444444455000000a60000000000006d80"), 0x010da1), (hex!("0100000000333333334444444455000000a60000000000007830"), 0x010e41), (hex!("0100000000333333334444444455000000a70000000000000a80"), 0x010ee1), (hex!("0100000000333333334444444455000000a700000000000064f0"), 0x010f81), (hex!("0100000000333333334444444455000000a80000000000000a90"), 0x011021), (hex!("0100000000333333334444444455000000a90000000000000aa0"), 0x0110c1), (hex!("0100000000333333334444444455000000a90000000000005e30"), 0x011161), (hex!("0100000000333333334444444455000000aa0000000000000ab0"), 0x011201), (hex!("0100000000333333334444444455000000ab0000000000000ac0"), 0x0112a1), (hex!("0100000000333333334444444455000000ac0000000000000ad0"), 0x011341), (hex!("0100000000333333334444444455000000ac0000000000006d20"), 0x0113e1), (hex!("0100000000333333334444444455000000ac0000000000007000"), 0x011481), (hex!("0100000000333333334444444455000000ad0000000000000ae0"), 0x011521), (hex!("0100000000333333334444444455000000ae0000000000000af0"), 
0x0115c1), (hex!("0100000000333333334444444455000000ae0000000000004a10"), 0x011661), (hex!("0100000000333333334444444455000000af0000000000000b00"), 0x011701), (hex!("0100000000333333334444444455000000af0000000000004e10"), 0x0117a1), (hex!("0100000000333333334444444455000000b00000000000000b10"), 0x011841), (hex!("0100000000333333334444444455000000b00000000000004280"), 0x0118e1), (hex!("0100000000333333334444444455000000b000000000000077e0"), 0x011981), (hex!("0100000000333333334444444455000000b10000000000000b20"), 0x011a21), (hex!("0100000000333333334444444455000000b20000000000000b30"), 0x011ac1), (hex!("0100000000333333334444444455000000b30000000000000b40"), 0x011b61), (hex!("0100000000333333334444444455000000b30000000000004bc0"), 0x011c01), (hex!("0100000000333333334444444455000000b40000000000000b50"), 0x011ca1), (hex!("0100000000333333334444444455000000b50000000000000b60"), 0x011d41), (hex!("0100000000333333334444444455000000b50000000000004fa0"), 0x011de1), (hex!("0100000000333333334444444455000000b50000000000006a60"), 0x011e81), (hex!("0100000000333333334444444455000000b60000000000000b70"), 0x011f21), (hex!("0100000000333333334444444455000000b60000000000005630"), 0x011fc1), (hex!("0100000000333333334444444455000000b70000000000000b80"), 0x012061), (hex!("0100000000333333334444444455000000b80000000000000b90"), 0x012101), (hex!("0100000000333333334444444455000000b80000000000006f80"), 0x0121a1), (hex!("0100000000333333334444444455000000b90000000000000ba0"), 0x012241), (hex!("0100000000333333334444444455000000ba0000000000000bb0"), 0x0122e1), (hex!("0100000000333333334444444455000000bb0000000000000bc0"), 0x012381), (hex!("0100000000333333334444444455000000bb00000000000047c0"), 0x012421), (hex!("0100000000333333334444444455000000bb0000000000006060"), 0x0124c1), (hex!("0100000000333333334444444455000000bc0000000000000bd0"), 0x012561), (hex!("0100000000333333334444444455000000bd0000000000000be0"), 0x012601), (hex!("0100000000333333334444444455000000bd0000000000004e80"), 
0x0126a1), (hex!("0100000000333333334444444455000000be0000000000000bf0"), 0x012741), (hex!("0100000000333333334444444455000000bf0000000000000c00"), 0x0127e1), (hex!("0100000000333333334444444455000000bf00000000000047a0"), 0x012881), (hex!("0100000000333333334444444455000000bf0000000000006da0"), 0x012921), (hex!("0100000000333333334444444455000000c00000000000000c10"), 0x0129c1), (hex!("0100000000333333334444444455000000c10000000000000c20"), 0x012a61), (hex!("0100000000333333334444444455000000c20000000000000c30"), 0x012b01), (hex!("0100000000333333334444444455000000c20000000000004bd0"), 0x012ba1), (hex!("0100000000333333334444444455000000c20000000000006ac0"), 0x012c41), (hex!("0100000000333333334444444455000000c30000000000000c40"), 0x012ce1), (hex!("0100000000333333334444444455000000c30000000000004660"), 0x012d81), (hex!("0100000000333333334444444455000000c40000000000000c50"), 0x012e21), (hex!("0100000000333333334444444455000000c50000000000000c60"), 0x012ec1), (hex!("0100000000333333334444444455000000c60000000000000c70"), 0x012f61), (hex!("0100000000333333334444444455000000c60000000000005880"), 0x013001), (hex!("0100000000333333334444444455000000c60000000000006b70"), 0x0130a1), (hex!("0100000000333333334444444455000000c70000000000000c80"), 0x013141), (hex!("0100000000333333334444444455000000c80000000000000c90"), 0x0131e1), (hex!("0100000000333333334444444455000000c80000000000005310"), 0x013281), (hex!("0100000000333333334444444455000000c80000000000005db0"), 0x013321), (hex!("0100000000333333334444444455000000c80000000000007040"), 0x0133c1), (hex!("0100000000333333334444444455000000c80000000000007290"), 0x013461), (hex!("0100000000333333334444444455000000c90000000000000ca0"), 0x013501), (hex!("0100000000333333334444444455000000c90000000000004fe0"), 0x0135a1), (hex!("0100000000333333334444444455000000ca0000000000000cb0"), 0x013641), (hex!("0100000000333333334444444455000000ca0000000000006140"), 0x0136e1), (hex!("0100000000333333334444444455000000ca0000000000007700"), 
0x013781), (hex!("0100000000333333334444444455000000cb0000000000000cc0"), 0x013821), (hex!("0100000000333333334444444455000000cc0000000000000cd0"), 0x0138c1), (hex!("0100000000333333334444444455000000cd0000000000000ce0"), 0x013961), (hex!("0100000000333333334444444455000000cd0000000000003f20"), 0x013a01), (hex!("0100000000333333334444444455000000cd00000000000040f0"), 0x013aa1), (hex!("0100000000333333334444444455000000cd0000000000004ec0"), 0x013b41), (hex!("0100000000333333334444444455000000ce0000000000000cf0"), 0x013be1), (hex!("0100000000333333334444444455000000ce0000000000007200"), 0x013c81), (hex!("0100000000333333334444444455000000cf0000000000000d00"), 0x013d21), (hex!("0100000000333333334444444455000000cf00000000000046a0"), 0x013dc1), (hex!("0100000000333333334444444455000000cf0000000000005960"), 0x013e61), (hex!("0100000000333333334444444455000000d00000000000000d10"), 0x013f01), (hex!("0100000000333333334444444455000000d00000000000005f30"), 0x013fa1), (hex!("0100000000333333334444444455000000d10000000000000d20"), 0x014041), (hex!("0100000000333333334444444455000000d10000000000007a00"), 0x0140e1),
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/checks.rs
pageserver/src/tenant/checks.rs
use std::collections::BTreeSet; use itertools::Itertools; use pageserver_compaction::helpers::overlaps_with; use super::storage_layer::LayerName; /// Checks whether a layer map is valid (i.e., is a valid result of the current compaction algorithm if nothing goes wrong). /// /// The function implements a fast path check and a slow path check. /// /// The fast path checks if we can split the LSN range of a delta layer only at the LSNs of the delta layers. For example, /// /// ```plain /// | | | | /// | 1 | | 2 | | 3 | /// | | | | | | /// ``` /// /// This is not a valid layer map because the LSN range of layer 1 intersects with the LSN range of layer 2. 1 and 2 should have /// the same LSN range. /// /// The exception is that when layer 2 only contains a single key, it could be split over the LSN range. For example, /// /// ```plain /// | | | 2 | | | /// | 1 | |-------| | 3 | /// | | | 4 | | | /// /// If layer 2 and 4 contain the same single key, this is also a valid layer map. /// /// However, if a partial compaction is still going on, it is possible that we get a layer map not satisfying the above condition. /// Therefore, we fallback to simply check if any of the two delta layers overlap. (See "A slow path...") pub fn check_valid_layermap(metadata: &[LayerName]) -> Option<String> { let mut lsn_split_point = BTreeSet::new(); // TODO: use a better data structure (range tree / range set?) 
let mut all_delta_layers = Vec::new(); for name in metadata { if let LayerName::Delta(layer) = name { all_delta_layers.push(layer.clone()); } } for layer in &all_delta_layers { if layer.key_range.start.next() != layer.key_range.end { let lsn_range = &layer.lsn_range; lsn_split_point.insert(lsn_range.start); lsn_split_point.insert(lsn_range.end); } } for (idx, layer) in all_delta_layers.iter().enumerate() { if layer.key_range.start.next() == layer.key_range.end { continue; } let lsn_range = layer.lsn_range.clone(); let intersects = lsn_split_point.range(lsn_range).collect_vec(); if intersects.len() > 1 { // A slow path to check if the layer intersects with any other delta layer. for (other_idx, other_layer) in all_delta_layers.iter().enumerate() { if other_idx == idx { // do not check self intersects with self continue; } if overlaps_with(&layer.lsn_range, &other_layer.lsn_range) && overlaps_with(&layer.key_range, &other_layer.key_range) { let err = format!( "layer violates the layer map LSN split assumption: layer {layer} intersects with layer {other_layer}" ); return Some(err); } } } } None }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/debug.rs
pageserver/src/tenant/debug.rs
use std::{ops::Range, str::FromStr, sync::Arc}; use crate::walredo::RedoAttemptType; use base64::{Engine as _, engine::general_purpose::STANDARD}; use bytes::{Bytes, BytesMut}; use camino::Utf8PathBuf; use clap::Parser; use itertools::Itertools; use pageserver_api::{ key::Key, keyspace::KeySpace, shard::{ShardIdentity, ShardStripeSize}, }; use postgres_ffi::PgMajorVersion; use postgres_ffi::{BLCKSZ, page_is_new, page_set_lsn}; use tracing::Instrument; use utils::{ generation::Generation, id::{TenantId, TimelineId}, lsn::Lsn, shard::{ShardCount, ShardIndex, ShardNumber}, }; use wal_decoder::models::record::NeonWalRecord; use crate::{ context::{DownloadBehavior, RequestContext}, task_mgr::TaskKind, tenant::storage_layer::ValueReconstructState, walredo::harness::RedoHarness, }; use super::{ WalRedoManager, WalredoManagerId, harness::TenantHarness, remote_timeline_client::LayerFileMetadata, storage_layer::{AsLayerDesc, IoConcurrency, Layer, LayerName, ValuesReconstructState}, }; fn process_page_image(next_record_lsn: Lsn, is_fpw: bool, img_bytes: Bytes) -> Bytes { // To match the logic in libs/wal_decoder/src/serialized_batch.rs let mut new_image: BytesMut = img_bytes.into(); if is_fpw && !page_is_new(&new_image) { page_set_lsn(&mut new_image, next_record_lsn); } assert_eq!(new_image.len(), BLCKSZ as usize); new_image.freeze() } async fn redo_wals(input: &str, key: Key) -> anyhow::Result<()> { let tenant_id = TenantId::generate(); let timeline_id = TimelineId::generate(); let redo_harness = RedoHarness::new()?; let span = redo_harness.span(); let tenant_conf = pageserver_api::models::TenantConfig { ..Default::default() }; let ctx = RequestContext::new(TaskKind::DebugTool, DownloadBehavior::Error); let tenant = TenantHarness::create_custom( "search_key", tenant_conf, tenant_id, ShardIdentity::unsharded(), Generation::new(1), ) .await? 
.do_try_load_with_redo( Arc::new(WalRedoManager::Prod( WalredoManagerId::next(), redo_harness.manager, )), &ctx, ) .await .unwrap(); let timeline = tenant .create_test_timeline(timeline_id, Lsn(0x10), PgMajorVersion::PG16, &ctx) .await?; let contents = tokio::fs::read_to_string(input) .await .map_err(|e| anyhow::Error::msg(format!("Failed to read input file {input}: {e}"))) .unwrap(); let lines = contents.lines(); let mut last_wal_lsn: Option<Lsn> = None; let state = { let mut state = ValueReconstructState::default(); let mut is_fpw = false; let mut is_first_line = true; for line in lines { if is_first_line { is_first_line = false; if line.trim() == "FPW" { is_fpw = true; } continue; // Skip the first line. } // Each input line is in the "<next_record_lsn>,<base64>" format. let (lsn_str, payload_b64) = line .split_once(',') .expect("Invalid input format: expected '<lsn>,<base64>'"); // Parse the LSN and decode the payload. let lsn = Lsn::from_str(lsn_str.trim()).expect("Invalid LSN format"); let bytes = Bytes::from( STANDARD .decode(payload_b64.trim()) .expect("Invalid base64 payload"), ); // The first line is considered the base image, the rest are WAL records. 
if state.img.is_none() { state.img = Some((lsn, process_page_image(lsn, is_fpw, bytes))); } else { let wal_record = NeonWalRecord::Postgres { will_init: false, rec: bytes, }; state.records.push((lsn, wal_record)); last_wal_lsn.replace(lsn); } } state }; assert!(state.img.is_some(), "No base image found"); assert!(!state.records.is_empty(), "No WAL records found"); let result = timeline .reconstruct_value(key, last_wal_lsn.unwrap(), state, RedoAttemptType::ReadPage) .instrument(span.clone()) .await?; eprintln!("final image: {:?}", STANDARD.encode(result)); Ok(()) } async fn search_key( tenant_id: TenantId, timeline_id: TimelineId, dir: String, key: Key, lsn: Lsn, ) -> anyhow::Result<()> { let shard_index = ShardIndex { shard_number: ShardNumber(0), shard_count: ShardCount(4), }; let redo_harness = RedoHarness::new()?; let span = redo_harness.span(); let tenant_conf = pageserver_api::models::TenantConfig { ..Default::default() }; let ctx = RequestContext::new(TaskKind::DebugTool, DownloadBehavior::Error); let tenant = TenantHarness::create_custom( "search_key", tenant_conf, tenant_id, ShardIdentity::new( shard_index.shard_number, shard_index.shard_count, ShardStripeSize(32768), ) .unwrap(), Generation::new(1), ) .await? 
.do_try_load_with_redo( Arc::new(WalRedoManager::Prod( WalredoManagerId::next(), redo_harness.manager, )), &ctx, ) .await .unwrap(); let timeline = tenant .create_test_timeline(timeline_id, Lsn(0x10), PgMajorVersion::PG16, &ctx) .await?; let mut delta_layers: Vec<Layer> = Vec::new(); let mut img_layer: Option<Layer> = Option::None; let mut dir = tokio::fs::read_dir(dir).await?; loop { let entry = dir.next_entry().await?; if entry.is_none() || !entry.as_ref().unwrap().file_type().await?.is_file() { break; } let path = Utf8PathBuf::from_path_buf(entry.unwrap().path()).unwrap(); let layer_name = match LayerName::from_str(path.file_name().unwrap()) { Ok(name) => name, Err(_) => { eprintln!("Skipped invalid layer: {path}"); continue; } }; let layer = Layer::for_resident( tenant.conf, &timeline, path.clone(), layer_name, LayerFileMetadata::new( tokio::fs::metadata(path.clone()).await?.len(), Generation::new(1), shard_index, ), ); if layer.layer_desc().is_delta() { delta_layers.push(layer.into()); } else if img_layer.is_none() { img_layer = Some(layer.into()); } else { anyhow::bail!("Found multiple image layers"); } } // sort delta layers based on the descending order of LSN delta_layers.sort_by(|a, b| { b.layer_desc() .get_lsn_range() .start .cmp(&a.layer_desc().get_lsn_range().start) }); let mut state = ValuesReconstructState::new(IoConcurrency::Sequential); let key_space = KeySpace::single(Range { start: key, end: key.next(), }); let lsn_range = Range { start: img_layer .as_ref() .map_or(Lsn(0x00), |img| img.layer_desc().image_layer_lsn()), end: lsn, }; for delta_layer in delta_layers.iter() { delta_layer .get_values_reconstruct_data(key_space.clone(), lsn_range.clone(), &mut state, &ctx) .await?; } img_layer .as_ref() .unwrap() .get_values_reconstruct_data(key_space.clone(), lsn_range.clone(), &mut state, &ctx) .await?; for (_key, result) in std::mem::take(&mut state.keys) { let state = result.collect_pending_ios().await?; if state.img.is_some() { eprintln!( "image: 
{}: {:x?}", state.img.as_ref().unwrap().0, STANDARD.encode(state.img.as_ref().unwrap().1.clone()) ); } for delta in state.records.iter() { match &delta.1 { NeonWalRecord::Postgres { will_init, rec } => { eprintln!( "delta: {}: will_init: {}, {:x?}", delta.0, will_init, STANDARD.encode(rec) ); } _ => { eprintln!("delta: {}: {:x?}", delta.0, delta.1); } } } let result = timeline .reconstruct_value(key, lsn_range.end, state, RedoAttemptType::ReadPage) .instrument(span.clone()) .await?; eprintln!("final image: {lsn} : {result:?}"); } Ok(()) } /// Redo all WALs against the base image in the input file. Return the base64 encoded final image. /// Each line in the input file must be in the form "<lsn>,<base64>" where: /// * `<lsn>` is a PostgreSQL LSN in hexadecimal notation, e.g. `0/16ABCDE`. /// * `<base64>` is the base64‐encoded page image (first line) or WAL record (subsequent lines). /// /// The first line provides the base image of a page. The LSN is the LSN of "next record" following /// the record containing the FPI. For example, if the FPI was extracted from a WAL record occuping /// [0/1, 0/200) in the WAL stream, the LSN appearing along side the page image here should be 0/200. /// /// The subsequent lines are WAL records, ordered from the oldest to the newest. The LSN is the /// record LSN of the WAL record, not the "next record" LSN. For example, if the WAL record here /// occupies [0/1, 0/200) in the WAL stream, the LSN appearing along side the WAL record here should /// be 0/1. 
#[derive(Parser)] struct RedoWalsCmd { #[clap(long)] input: String, #[clap(long)] key: String, } #[tokio::test] async fn test_redo_wals() -> anyhow::Result<()> { let args = std::env::args().collect_vec(); let pos = args .iter() .position(|arg| arg == "--") .unwrap_or(args.len()); let slice = &args[pos..args.len()]; let cmd = match RedoWalsCmd::try_parse_from(slice) { Ok(cmd) => cmd, Err(err) => { eprintln!("{err}"); return Ok(()); } }; let key = Key::from_hex(&cmd.key).unwrap(); redo_wals(&cmd.input, key).await?; Ok(()) } /// Search for a page at the given LSN in all layers of the data_dir. /// Return the base64-encoded image and all WAL records, as well as the final reconstructed image. #[derive(Parser)] struct SearchKeyCmd { #[clap(long)] tenant_id: String, #[clap(long)] timeline_id: String, #[clap(long)] data_dir: String, #[clap(long)] key: String, #[clap(long)] lsn: String, } #[tokio::test] async fn test_search_key() -> anyhow::Result<()> { let args = std::env::args().collect_vec(); let pos = args .iter() .position(|arg| arg == "--") .unwrap_or(args.len()); let slice = &args[pos..args.len()]; let cmd = match SearchKeyCmd::try_parse_from(slice) { Ok(cmd) => cmd, Err(err) => { eprintln!("{err}"); return Ok(()); } }; let tenant_id = TenantId::from_str(&cmd.tenant_id).unwrap(); let timeline_id = TimelineId::from_str(&cmd.timeline_id).unwrap(); let key = Key::from_hex(&cmd.key).unwrap(); let lsn = Lsn::from_str(&cmd.lsn).unwrap(); search_key(tenant_id, timeline_id, cmd.data_dir, key, lsn).await?; Ok(()) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/layer_map.rs
pageserver/src/tenant/layer_map.rs
//! //! The layer map tracks what layers exist in a timeline. //! //! When the timeline is first accessed, the server lists of all layer files //! in the timelines/<timeline_id> directory, and populates this map with //! ImageLayer and DeltaLayer structs corresponding to each file. When the first //! new WAL record is received, we create an InMemoryLayer to hold the incoming //! records. Now and then, in the checkpoint() function, the in-memory layer is //! are frozen, and it is split up into new image and delta layers and the //! corresponding files are written to disk. //! //! Design overview: //! //! The `search` method of the layer map is on the read critical path, so we've //! built an efficient data structure for fast reads, stored in `LayerMap::historic`. //! Other read methods are less critical but still impact performance of background tasks. //! //! This data structure relies on a persistent/immutable binary search tree. See the //! following lecture for an introduction <https://www.youtube.com/watch?v=WqCWghETNDc&t=581s> //! Summary: A persistent/immutable BST (and persistent data structures in general) allows //! you to modify the tree in such a way that each modification creates a new "version" //! of the tree. When you modify it, you get a new version, but all previous versions are //! still accessible too. So if someone is still holding a reference to an older version, //! they continue to see the tree as it was then. The persistent BST stores all the //! different versions in an efficient way. //! //! Our persistent BST maintains a map of which layer file "covers" each key. It has only //! one dimension, the key. See `layer_coverage.rs`. We use the persistent/immutable property //! to handle the LSN dimension. //! //! To build the layer map, we insert each layer to the persistent BST in LSN.start order, //! starting from the oldest one. After each insertion, we grab a reference to that "version" //! 
of the tree, and store it in another tree, a BtreeMap keyed by the LSN. See //! `historic_layer_coverage.rs`. //! //! To search for a particular key-LSN pair, you first look up the right "version" in the //! BTreeMap. Then you search that version of the BST with the key. //! //! The persistent BST keeps all the versions, but there is no way to change the old versions //! afterwards. We can add layers as long as they have larger LSNs than any previous layer in //! the map, but if we need to remove a layer, or insert anything with an older LSN, we need //! to throw away most of the persistent BST and build a new one, starting from the oldest //! LSN. See [`LayerMap::flush_updates()`]. //! mod historic_layer_coverage; mod layer_coverage; use std::collections::{BTreeMap, HashMap, VecDeque}; use std::iter::Peekable; use std::ops::Range; use std::sync::Arc; use std::time::Instant; use anyhow::Result; use historic_layer_coverage::BufferedHistoricLayerCoverage; pub use historic_layer_coverage::LayerKey; use pageserver_api::key::Key; use pageserver_api::keyspace::{KeySpace, KeySpaceAccum}; use range_set_blaze::{CheckSortedDisjoint, RangeSetBlaze}; use tokio::sync::watch; use utils::lsn::Lsn; use super::storage_layer::{LayerVisibilityHint, PersistentLayerDesc}; use crate::context::RequestContext; use crate::tenant::storage_layer::{InMemoryLayer, ReadableLayerWeak}; /// /// LayerMap tracks what layers exist on a timeline. /// pub struct LayerMap { // // 'open_layer' holds the current InMemoryLayer that is accepting new // records. If it is None, 'next_open_layer_at' will be set instead, indicating // where the start LSN of the next InMemoryLayer that is to be created. // pub open_layer: Option<Arc<InMemoryLayer>>, pub next_open_layer_at: Option<Lsn>, /// /// Frozen layers, if any. Frozen layers are in-memory layers that /// are no longer added to, but haven't been written out to disk /// yet. 
They contain WAL older than the current 'open_layer' or /// 'next_open_layer_at', but newer than any historic layer. /// The frozen layers are in order from oldest to newest, so that /// the newest one is in the 'back' of the VecDeque, and the oldest /// in the 'front'. /// pub frozen_layers: VecDeque<Arc<InMemoryLayer>>, /// Index of the historic layers optimized for search historic: BufferedHistoricLayerCoverage<Arc<PersistentLayerDesc>>, /// L0 layers have key range Key::MIN..Key::MAX, and locating them using R-Tree search is very inefficient. /// So L0 layers are held in l0_delta_layers vector, in addition to the R-tree. /// /// NB: make sure to notify `watch_l0_deltas` on changes. l0_delta_layers: Vec<Arc<PersistentLayerDesc>>, /// Notifies about L0 delta layer changes, sending the current number of L0 layers. watch_l0_deltas: watch::Sender<usize>, } impl Default for LayerMap { fn default() -> Self { Self { open_layer: Default::default(), next_open_layer_at: Default::default(), frozen_layers: Default::default(), historic: Default::default(), l0_delta_layers: Default::default(), watch_l0_deltas: watch::channel(0).0, } } } /// The primary update API for the layer map. /// /// Batching historic layer insertions and removals is good for /// performance and this struct helps us do that correctly. #[must_use] pub struct BatchedUpdates<'a> { // While we hold this exclusive reference to the layer map the type checker // will prevent us from accidentally reading any unflushed updates. layer_map: &'a mut LayerMap, } /// Provide ability to batch more updates while hiding the read /// API so we don't accidentally read without flushing. impl BatchedUpdates<'_> { /// /// Insert an on-disk layer. /// // TODO remove the `layer` argument when `mapping` is refactored out of `LayerMap` pub fn insert_historic(&mut self, layer_desc: PersistentLayerDesc) { self.layer_map.insert_historic_noflush(layer_desc) } /// /// Remove an on-disk layer from the map. 
/// /// This should be called when the corresponding file on disk has been deleted. /// pub fn remove_historic(&mut self, layer_desc: &PersistentLayerDesc) { self.layer_map.remove_historic_noflush(layer_desc) } // We will flush on drop anyway, but this method makes it // more explicit that there is some work being done. /// Apply all updates pub fn flush(self) { // Flush happens on drop } } // Ideally the flush() method should be called explicitly for more // controlled execution. But if we forget we'd rather flush on drop // than panic later or read without flushing. // // TODO maybe warn if flush hasn't explicitly been called impl Drop for BatchedUpdates<'_> { fn drop(&mut self) { self.layer_map.flush_updates(); } } /// Return value of LayerMap::search #[derive(Eq, PartialEq, Debug, Hash)] pub struct SearchResult { pub layer: ReadableLayerWeak, pub lsn_floor: Lsn, } /// Return value of [`LayerMap::range_search`] /// /// Contains a mapping from a layer description to a keyspace /// accumulator that contains all the keys which intersect the layer /// from the original search space. #[derive(Debug)] pub struct RangeSearchResult { pub found: HashMap<SearchResult, KeySpaceAccum>, } impl RangeSearchResult { fn new() -> Self { Self { found: HashMap::new(), } } fn map_to_in_memory_layer( in_memory_layer: Option<InMemoryLayerDesc>, range: Range<Key>, ) -> RangeSearchResult { match in_memory_layer { Some(inmem) => { let search_result = SearchResult { lsn_floor: inmem.get_lsn_range().start, layer: ReadableLayerWeak::InMemoryLayer(inmem), }; let mut accum = KeySpaceAccum::new(); accum.add_range(range); RangeSearchResult { found: HashMap::from([(search_result, accum)]), } } None => RangeSearchResult::new(), } } } /// Collector for results of range search queries on the LayerMap. /// It should be provided with two iterators for the delta and image coverage /// that contain all the changes for layers which intersect the range. 
struct RangeSearchCollector<Iter> where Iter: Iterator<Item = (i128, Option<Arc<PersistentLayerDesc>>)>, { in_memory_layer: Option<InMemoryLayerDesc>, delta_coverage: Peekable<Iter>, image_coverage: Peekable<Iter>, key_range: Range<Key>, end_lsn: Lsn, current_delta: Option<Arc<PersistentLayerDesc>>, current_image: Option<Arc<PersistentLayerDesc>>, result: RangeSearchResult, } #[derive(Debug)] enum NextLayerType { Delta(i128), Image(i128), Both(i128), } impl NextLayerType { fn next_change_at_key(&self) -> Key { match self { NextLayerType::Delta(at) => Key::from_i128(*at), NextLayerType::Image(at) => Key::from_i128(*at), NextLayerType::Both(at) => Key::from_i128(*at), } } } impl<Iter> RangeSearchCollector<Iter> where Iter: Iterator<Item = (i128, Option<Arc<PersistentLayerDesc>>)>, { fn new( key_range: Range<Key>, end_lsn: Lsn, in_memory_layer: Option<InMemoryLayerDesc>, delta_coverage: Iter, image_coverage: Iter, ) -> Self { Self { in_memory_layer, delta_coverage: delta_coverage.peekable(), image_coverage: image_coverage.peekable(), key_range, end_lsn, current_delta: None, current_image: None, result: RangeSearchResult::new(), } } /// Run the collector. Collection is implemented via a two pointer algorithm. /// One pointer tracks the start of the current range and the other tracks /// the beginning of the next range which will overlap with the next change /// in coverage across both image and delta. fn collect(mut self) -> RangeSearchResult { let next_layer_type = self.choose_next_layer_type(); let mut current_range_start = match next_layer_type { None => { // No changes for the range self.pad_range(self.key_range.clone()); return self.result; } Some(layer_type) if self.key_range.end <= layer_type.next_change_at_key() => { // Changes only after the end of the range self.pad_range(self.key_range.clone()); return self.result; } Some(layer_type) => { // Changes for the range exist. 
let coverage_start = layer_type.next_change_at_key(); let range_before = self.key_range.start..coverage_start; self.pad_range(range_before); self.advance(&layer_type); coverage_start } }; while current_range_start < self.key_range.end { let next_layer_type = self.choose_next_layer_type(); match next_layer_type { Some(t) => { let current_range_end = t.next_change_at_key(); self.add_range(current_range_start..current_range_end); current_range_start = current_range_end; self.advance(&t); } None => { self.add_range(current_range_start..self.key_range.end); current_range_start = self.key_range.end; } } } self.result } /// Map a range which does not intersect any persistent layers to /// the in-memory layer candidate. fn pad_range(&mut self, key_range: Range<Key>) { if !key_range.is_empty() { if let Some(ref inmem) = self.in_memory_layer { let search_result = SearchResult { layer: ReadableLayerWeak::InMemoryLayer(inmem.clone()), lsn_floor: inmem.get_lsn_range().start, }; self.result .found .entry(search_result) .or_default() .add_range(key_range); } } } /// Select the appropiate layer for the given range and update /// the collector. fn add_range(&mut self, covered_range: Range<Key>) { let selected = LayerMap::select_layer( self.current_delta.clone(), self.current_image.clone(), self.in_memory_layer.clone(), self.end_lsn, ); match selected { Some(search_result) => self .result .found .entry(search_result) .or_default() .add_range(covered_range), None => self.pad_range(covered_range), } } /// Move to the next coverage change. 
fn advance(&mut self, layer_type: &NextLayerType) { match layer_type { NextLayerType::Delta(_) => { let (_, layer) = self.delta_coverage.next().unwrap(); self.current_delta = layer; } NextLayerType::Image(_) => { let (_, layer) = self.image_coverage.next().unwrap(); self.current_image = layer; } NextLayerType::Both(_) => { let (_, image_layer) = self.image_coverage.next().unwrap(); let (_, delta_layer) = self.delta_coverage.next().unwrap(); self.current_image = image_layer; self.current_delta = delta_layer; } } } /// Pick the next coverage change: the one at the lesser key or both if they're alligned. fn choose_next_layer_type(&mut self) -> Option<NextLayerType> { let next_delta_at = self.delta_coverage.peek().map(|(key, _)| key); let next_image_at = self.image_coverage.peek().map(|(key, _)| key); match (next_delta_at, next_image_at) { (None, None) => None, (Some(next_delta_at), None) => Some(NextLayerType::Delta(*next_delta_at)), (None, Some(next_image_at)) => Some(NextLayerType::Image(*next_image_at)), (Some(next_delta_at), Some(next_image_at)) if next_image_at < next_delta_at => { Some(NextLayerType::Image(*next_image_at)) } (Some(next_delta_at), Some(next_image_at)) if next_delta_at < next_image_at => { Some(NextLayerType::Delta(*next_delta_at)) } (Some(next_delta_at), Some(_)) => Some(NextLayerType::Both(*next_delta_at)), } } } #[derive(Debug, PartialEq, Eq, Clone, Hash)] pub struct InMemoryLayerDesc { handle: InMemoryLayerHandle, lsn_range: Range<Lsn>, } impl InMemoryLayerDesc { pub(crate) fn get_lsn_range(&self) -> Range<Lsn> { self.lsn_range.clone() } } #[derive(Debug, PartialEq, Eq, Clone, Hash)] enum InMemoryLayerHandle { Open, Frozen(usize), } impl LayerMap { /// /// Find the latest layer (by lsn.end) that covers the given /// 'key', with lsn.start < 'end_lsn'. /// /// The caller of this function is the page reconstruction /// algorithm looking for the next relevant delta layer, or /// the terminal image layer. 
The caller will pass the lsn_floor /// value as end_lsn in the next call to search. /// /// If there's an image layer exactly below the given end_lsn, /// search should return that layer regardless if there are /// overlapping deltas. /// /// If the latest layer is a delta and there is an overlapping /// image with it below, the lsn_floor returned should be right /// above that image so we don't skip it in the search. Otherwise /// the lsn_floor returned should be the bottom of the delta layer /// because we should make as much progress down the lsn axis /// as possible. It's fine if this way we skip some overlapping /// deltas, because the delta we returned would contain the same /// wal content. /// /// TODO: This API is convoluted and inefficient. If the caller /// makes N search calls, we'll end up finding the same latest /// image layer N times. We should either cache the latest image /// layer result, or simplify the api to `get_latest_image` and /// `get_latest_delta`, and only call `get_latest_image` once. /// pub fn search(&self, key: Key, end_lsn: Lsn) -> Option<SearchResult> { let in_memory_layer = self.search_in_memory_layer(end_lsn); let version = match self.historic.get().unwrap().get_version(end_lsn.0 - 1) { Some(version) => version, None => { return in_memory_layer.map(|desc| SearchResult { lsn_floor: desc.get_lsn_range().start, layer: ReadableLayerWeak::InMemoryLayer(desc), }); } }; let latest_delta = version.delta_coverage.query(key.to_i128()); let latest_image = version.image_coverage.query(key.to_i128()); Self::select_layer(latest_delta, latest_image, in_memory_layer, end_lsn) } /// Select a layer from three potential candidates (in-memory, delta and image layer). /// The candidates represent the first layer of each type which intersect a key range. /// /// Layer types have an in implicit priority (image > delta > in-memory). 
For instance, /// if we have the option of reading an LSN range from both an image and a delta, we /// should read from the image. fn select_layer( delta_layer: Option<Arc<PersistentLayerDesc>>, image_layer: Option<Arc<PersistentLayerDesc>>, in_memory_layer: Option<InMemoryLayerDesc>, end_lsn: Lsn, ) -> Option<SearchResult> { assert!(delta_layer.as_ref().is_none_or(|l| l.is_delta())); assert!(image_layer.as_ref().is_none_or(|l| !l.is_delta())); match (delta_layer, image_layer, in_memory_layer) { (None, None, None) => None, (None, Some(image), None) => { let lsn_floor = image.get_lsn_range().start; Some(SearchResult { layer: ReadableLayerWeak::PersistentLayer(image), lsn_floor, }) } (Some(delta), None, None) => { let lsn_floor = delta.get_lsn_range().start; Some(SearchResult { layer: ReadableLayerWeak::PersistentLayer(delta), lsn_floor, }) } (Some(delta), Some(image), None) => { let img_lsn = image.get_lsn_range().start; let image_is_newer = image.get_lsn_range().end >= delta.get_lsn_range().end; let image_exact_match = img_lsn + 1 == end_lsn; if image_is_newer || image_exact_match { Some(SearchResult { layer: ReadableLayerWeak::PersistentLayer(image), lsn_floor: img_lsn, }) } else { // If the delta overlaps with the image in the LSN dimension, do a partial // up to the image layer. let lsn_floor = std::cmp::max(delta.get_lsn_range().start, image.get_lsn_range().start + 1); Some(SearchResult { layer: ReadableLayerWeak::PersistentLayer(delta), lsn_floor, }) } } (None, None, Some(inmem)) => { let lsn_floor = inmem.get_lsn_range().start; Some(SearchResult { layer: ReadableLayerWeak::InMemoryLayer(inmem), lsn_floor, }) } (None, Some(image), Some(inmem)) => { // If the in-memory layer overlaps with the image in the LSN dimension, do a partial // up to the image layer. 
let img_lsn = image.get_lsn_range().start; let image_is_newer = image.get_lsn_range().end >= inmem.get_lsn_range().end; let image_exact_match = img_lsn + 1 == end_lsn; if image_is_newer || image_exact_match { Some(SearchResult { layer: ReadableLayerWeak::PersistentLayer(image), lsn_floor: img_lsn, }) } else { let lsn_floor = std::cmp::max(inmem.get_lsn_range().start, image.get_lsn_range().start + 1); Some(SearchResult { layer: ReadableLayerWeak::InMemoryLayer(inmem), lsn_floor, }) } } (Some(delta), None, Some(inmem)) => { // Overlaps between delta and in-memory layers are not a valid // state, but we handle them here for completeness. let delta_end = delta.get_lsn_range().end; let delta_is_newer = delta_end >= inmem.get_lsn_range().end; let delta_exact_match = delta_end == end_lsn; if delta_is_newer || delta_exact_match { Some(SearchResult { lsn_floor: delta.get_lsn_range().start, layer: ReadableLayerWeak::PersistentLayer(delta), }) } else { // If the in-memory layer overlaps with the delta in the LSN dimension, do a partial // up to the delta layer. let lsn_floor = std::cmp::max(inmem.get_lsn_range().start, delta.get_lsn_range().end); Some(SearchResult { layer: ReadableLayerWeak::InMemoryLayer(inmem), lsn_floor, }) } } (Some(delta), Some(image), Some(inmem)) => { // Determine the preferred persistent layer without taking the in-memory layer // into consideration. let persistent_res = Self::select_layer(Some(delta.clone()), Some(image.clone()), None, end_lsn) .unwrap(); let persistent_l = match persistent_res.layer { ReadableLayerWeak::PersistentLayer(l) => l, ReadableLayerWeak::InMemoryLayer(_) => unreachable!(), }; // Now handle the in-memory layer overlaps. 
let inmem_res = if persistent_l.is_delta() { Self::select_layer(Some(persistent_l), None, Some(inmem.clone()), end_lsn) .unwrap() } else { Self::select_layer(None, Some(persistent_l), Some(inmem.clone()), end_lsn) .unwrap() }; Some(SearchResult { layer: inmem_res.layer, // Use the more restrictive LSN floor lsn_floor: std::cmp::max(persistent_res.lsn_floor, inmem_res.lsn_floor), }) } } } pub fn range_search(&self, key_range: Range<Key>, end_lsn: Lsn) -> RangeSearchResult { let in_memory_layer = self.search_in_memory_layer(end_lsn); let version = match self.historic.get().unwrap().get_version(end_lsn.0 - 1) { Some(version) => version, None => { return RangeSearchResult::map_to_in_memory_layer(in_memory_layer, key_range); } }; let raw_range = key_range.start.to_i128()..key_range.end.to_i128(); let delta_changes = version.delta_coverage.range_overlaps(&raw_range); let image_changes = version.image_coverage.range_overlaps(&raw_range); let collector = RangeSearchCollector::new( key_range, end_lsn, in_memory_layer, delta_changes, image_changes, ); collector.collect() } /// Start a batch of updates, applied on drop pub fn batch_update(&mut self) -> BatchedUpdates<'_> { BatchedUpdates { layer_map: self } } /// /// Insert an on-disk layer /// /// Helper function for BatchedUpdates::insert_historic /// /// TODO(chi): remove L generic so that we do not need to pass layer object. pub(self) fn insert_historic_noflush(&mut self, layer_desc: PersistentLayerDesc) { // TODO: See #3869, resulting #4088, attempted fix and repro #4094 if Self::is_l0(&layer_desc.key_range, layer_desc.is_delta) { self.l0_delta_layers.push(layer_desc.clone().into()); self.watch_l0_deltas .send_replace(self.l0_delta_layers.len()); } self.historic.insert( historic_layer_coverage::LayerKey::from(&layer_desc), layer_desc.into(), ); } /// /// Remove an on-disk layer from the map. 
/// /// Helper function for BatchedUpdates::remove_historic /// pub fn remove_historic_noflush(&mut self, layer_desc: &PersistentLayerDesc) { self.historic .remove(historic_layer_coverage::LayerKey::from(layer_desc)); let layer_key = layer_desc.key(); if Self::is_l0(&layer_desc.key_range, layer_desc.is_delta) { let len_before = self.l0_delta_layers.len(); let mut l0_delta_layers = std::mem::take(&mut self.l0_delta_layers); l0_delta_layers.retain(|other| other.key() != layer_key); self.l0_delta_layers = l0_delta_layers; self.watch_l0_deltas .send_replace(self.l0_delta_layers.len()); // this assertion is related to use of Arc::ptr_eq in Self::compare_arced_layers, // there's a chance that the comparison fails at runtime due to it comparing (pointer, // vtable) pairs. assert_eq!( self.l0_delta_layers.len(), len_before - 1, "failed to locate removed historic layer from l0_delta_layers" ); } } /// Helper function for BatchedUpdates::drop. pub(self) fn flush_updates(&mut self) { self.historic.rebuild(); } /// Is there a newer image layer for given key- and LSN-range? Or a set /// of image layers within the specified lsn range that cover the entire /// specified key range? /// /// This is used for garbage collection, to determine if an old layer can /// be deleted. pub fn image_layer_exists(&self, key: &Range<Key>, lsn: &Range<Lsn>) -> bool { if key.is_empty() { // Vacuously true. There's a newer image for all 0 of the kerys in the range. 
return true; } let version = match self.historic.get().unwrap().get_version(lsn.end.0 - 1) { Some(v) => v, None => return false, }; let start = key.start.to_i128(); let end = key.end.to_i128(); let layer_covers = |layer: Option<Arc<PersistentLayerDesc>>| match layer { Some(layer) => layer.get_lsn_range().start >= lsn.start, None => false, }; // Check the start is covered if !layer_covers(version.image_coverage.query(start)) { return false; } // Check after all changes of coverage for (_, change_val) in version.image_coverage.range(start..end) { if !layer_covers(change_val) { return false; } } true } pub fn iter_historic_layers(&self) -> impl ExactSizeIterator<Item = Arc<PersistentLayerDesc>> { self.historic.iter() } /// Get a ref counted pointer for the first in memory layer that matches the provided predicate. pub(crate) fn search_in_memory_layer(&self, below: Lsn) -> Option<InMemoryLayerDesc> { let is_below = |l: &Arc<InMemoryLayer>| { let start_lsn = l.get_lsn_range().start; below > start_lsn }; if let Some(open) = &self.open_layer { if is_below(open) { return Some(InMemoryLayerDesc { handle: InMemoryLayerHandle::Open, lsn_range: open.get_lsn_range(), }); } } self.frozen_layers .iter() .enumerate() .rfind(|(_idx, l)| is_below(l)) .map(|(idx, l)| InMemoryLayerDesc { handle: InMemoryLayerHandle::Frozen(idx), lsn_range: l.get_lsn_range(), }) } pub(crate) fn in_memory_layer(&self, desc: &InMemoryLayerDesc) -> Arc<InMemoryLayer> { match desc.handle { InMemoryLayerHandle::Open => self.open_layer.as_ref().unwrap().clone(), InMemoryLayerHandle::Frozen(idx) => self.frozen_layers[idx].clone(), } } /// /// Divide the whole given range of keys into sub-ranges based on the latest /// image layer that covers each range at the specified lsn (inclusive). /// This is used when creating new image layers. 
pub fn image_coverage( &self, key_range: &Range<Key>, lsn: Lsn, ) -> Vec<(Range<Key>, Option<Arc<PersistentLayerDesc>>)> { let version = match self.historic.get().unwrap().get_version(lsn.0) { Some(v) => v, None => return vec![], }; let start = key_range.start.to_i128(); let end = key_range.end.to_i128(); // Initialize loop variables let mut coverage: Vec<(Range<Key>, Option<Arc<PersistentLayerDesc>>)> = vec![]; let mut current_key = start; let mut current_val = version.image_coverage.query(start); // Loop through the change events and push intervals for (change_key, change_val) in version.image_coverage.range(start..end) { let kr = Key::from_i128(current_key)..Key::from_i128(change_key); coverage.push((kr, current_val.take())); current_key = change_key; current_val.clone_from(&change_val); } // Add the final interval let kr = Key::from_i128(current_key)..Key::from_i128(end); coverage.push((kr, current_val.take())); coverage } /// Check if the key range resembles that of an L0 layer. pub fn is_l0(key_range: &Range<Key>, is_delta_layer: bool) -> bool { is_delta_layer && key_range == &(Key::MIN..Key::MAX) } /// This function determines which layers are counted in `count_deltas`: /// layers that should count towards deciding whether or not to reimage /// a certain partition range. /// /// There are two kinds of layers we currently consider reimage-worthy: /// /// Case 1: Non-L0 layers are currently reimage-worthy by default. /// TODO Some of these layers are very sparse and cover the entire key /// range. Replacing 256MB of data (or less!) with terabytes of /// images doesn't seem wise. 
We need a better heuristic, possibly /// based on some of these factors: /// a) whether this layer has any wal in this partition range /// b) the size of the layer /// c) the number of images needed to cover it /// d) the estimated time until we'll have to reimage over it for GC /// /// Case 2: Since L0 layers by definition cover the entire key space, we consider /// them reimage-worthy only when the entire key space can be covered by very few /// images (currently 1). /// TODO The optimal number should probably be slightly higher than 1, but to /// implement that we need to plumb a lot more context into this function /// than just the current partition_range. pub fn is_reimage_worthy(layer: &PersistentLayerDesc, partition_range: &Range<Key>) -> bool { // Case 1 if !Self::is_l0(&layer.key_range, layer.is_delta) { return true; } // Case 2 if partition_range == &(Key::MIN..Key::MAX) { return true; } false } /// Count the height of the tallest stack of reimage-worthy deltas /// in this 2d region. ///
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline.rs
pageserver/src/tenant/timeline.rs
pub(crate) mod analysis; pub(crate) mod compaction; pub mod delete; pub(crate) mod detach_ancestor; mod eviction_task; pub(crate) mod handle; mod heatmap_layers_downloader; pub(crate) mod import_pgdata; mod init; pub mod layer_manager; pub(crate) mod logical_size; pub mod offload; pub mod span; pub mod uninit; mod walreceiver; use hashlink::LruCache; use std::array; use std::cmp::{max, min}; use std::collections::btree_map::Entry; use std::collections::{BTreeMap, HashMap, HashSet}; use std::ops::{ControlFlow, Deref, Range}; use std::sync::atomic::{AtomicBool, AtomicU64, Ordering as AtomicOrdering}; use std::sync::{Arc, Mutex, OnceLock, RwLock, Weak}; use std::time::{Duration, Instant, SystemTime}; use anyhow::{Context, Result, anyhow, bail, ensure}; use arc_swap::{ArcSwap, ArcSwapOption}; use bytes::Bytes; use camino::Utf8Path; use chrono::{DateTime, Utc}; use compaction::{CompactionOutcome, GcCompactionCombinedSettings}; use enumset::EnumSet; use fail::fail_point; use futures::stream::FuturesUnordered; use futures::{FutureExt, StreamExt}; use handle::ShardTimelineId; use layer_manager::{ LayerManagerLockHolder, LayerManagerReadGuard, LayerManagerWriteGuard, LockedLayerManager, Shutdown, }; use once_cell::sync::Lazy; use pageserver_api::config::tenant_conf_defaults::DEFAULT_PITR_INTERVAL; use pageserver_api::key::{ KEY_SIZE, Key, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX, NON_INHERITED_RANGE, SPARSE_RANGE, }; use pageserver_api::keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning}; use pageserver_api::models::{ CompactKeyRange, CompactLsnRange, CompactionAlgorithm, CompactionAlgorithmSettings, DetachBehavior, DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy, InMemoryLayerInfo, LayerMapInfo, LsnLease, PageTraceEvent, RelSizeMigration, TimelineState, }; use pageserver_api::reltag::{BlockNumber, RelTag}; use pageserver_api::shard::{ShardIdentity, ShardIndex, ShardNumber, TenantShardId}; use 
postgres_connection::PgConnectionConfig; use postgres_ffi::v14::xlog_utils; use postgres_ffi::{PgMajorVersion, WAL_SEGMENT_SIZE, to_pg_timestamp}; use rand::Rng; use remote_storage::DownloadError; use serde_with::serde_as; use storage_broker::BrokerClientChannel; use tokio::runtime::Handle; use tokio::sync::mpsc::Sender; use tokio::sync::{Notify, oneshot, watch}; use tokio_util::sync::CancellationToken; use tracing::*; use utils::generation::Generation; use utils::guard_arc_swap::GuardArcSwap; use utils::id::TimelineId; use utils::logging::{MonitorSlowFutureCallback, log_slow, monitor_slow_future}; use utils::lsn::{AtomicLsn, Lsn, RecordLsn}; use utils::postgres_client::PostgresClientProtocol; use utils::rate_limit::RateLimit; use utils::seqwait::SeqWait; use utils::simple_rcu::{Rcu, RcuReadGuard}; use utils::sync::gate::{Gate, GateGuard}; use utils::{completion, critical_timeline, fs_ext, pausable_failpoint}; #[cfg(test)] use wal_decoder::models::value::Value; use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta}; use self::delete::DeleteTimelineFlow; pub(super) use self::eviction_task::EvictionTaskTenantState; use self::eviction_task::EvictionTaskTimelineState; use self::logical_size::LogicalSize; use self::walreceiver::{WalReceiver, WalReceiverConf}; use super::remote_timeline_client::RemoteTimelineClient; use super::remote_timeline_client::index::{GcCompactionState, IndexPart}; use super::secondary::heatmap::HeatMapLayer; use super::storage_layer::{LayerFringe, LayerVisibilityHint, ReadableLayer}; use super::tasks::log_compaction_error; use super::upload_queue::NotInitialized; use super::{ AttachedTenantConf, GcError, HeatMapTimeline, MaybeOffloaded, debug_assert_current_span_has_tenant_and_timeline_id, }; use crate::PERF_TRACE_TARGET; use crate::aux_file::AuxFileSizeEstimator; use crate::basebackup_cache::BasebackupCache; use crate::config::PageServerConf; use crate::context::{ DownloadBehavior, PerfInstrumentFutureExt, RequestContext, 
RequestContextBuilder, }; use crate::disk_usage_eviction_task::{DiskUsageEvictionInfo, EvictionCandidate, finite_f32}; use crate::feature_resolver::TenantFeatureResolver; use crate::keyspace::{KeyPartitioning, KeySpace}; use crate::l0_flush::{self, L0FlushGlobalState}; use crate::metrics::{ DELTAS_PER_READ_GLOBAL, LAYERS_PER_READ_AMORTIZED_GLOBAL, LAYERS_PER_READ_BATCH_GLOBAL, LAYERS_PER_READ_GLOBAL, ScanLatencyOngoingRecording, TimelineMetrics, }; use crate::page_service::TenantManagerTypes; use crate::pgdatadir_mapping::{ CalculateLogicalSizeError, CollectKeySpaceError, DirectoryKind, LsnForTimestamp, MAX_AUX_FILE_V2_DELTAS, MetricsUpdate, }; use crate::task_mgr::TaskKind; use crate::tenant::gc_result::GcResult; use crate::tenant::layer_map::LayerMap; use crate::tenant::metadata::TimelineMetadata; use crate::tenant::storage_layer::delta_layer::DeltaEntry; use crate::tenant::storage_layer::inmemory_layer::IndexEntry; use crate::tenant::storage_layer::{ AsLayerDesc, BatchLayerWriter, DeltaLayerWriter, EvictionError, ImageLayerName, ImageLayerWriter, InMemoryLayer, IoConcurrency, Layer, LayerAccessStatsReset, LayerName, PersistentLayerDesc, PersistentLayerKey, ResidentLayer, ValueReconstructSituation, ValueReconstructState, ValuesReconstructState, }; use crate::tenant::tasks::BackgroundLoopKind; use crate::tenant::timeline::logical_size::CurrentLogicalSize; use crate::virtual_file::{MaybeFatalIo, VirtualFile}; use crate::walingest::WalLagCooldown; use crate::walredo::RedoAttemptType; use crate::{ZERO_PAGE, task_mgr, walredo}; #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub(crate) enum FlushLoopState { NotStarted, Running { #[cfg(test)] expect_initdb_optimization: bool, #[cfg(test)] initdb_optimization_count: usize, }, Exited, } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum ImageLayerCreationMode { /// Try to create image layers based on `time_for_new_image_layer`. Used in compaction code path. Try, /// Force creating the image layers if possible. 
For now, no image layers will be created /// for metadata keys. Used in compaction code path with force flag enabled. Force, /// Initial ingestion of the data, and no data should be dropped in this function. This /// means that no metadata keys should be included in the partitions. Used in flush frozen layer /// code path. Initial, } #[derive(Clone, Debug, Default)] pub enum LastImageLayerCreationStatus { Incomplete { /// The last key of the partition (exclusive) that was processed in the last /// image layer creation attempt. We will continue from this key in the next /// attempt. last_key: Key, }, Complete, #[default] Initial, } impl std::fmt::Display for ImageLayerCreationMode { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{self:?}") } } /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things. /// Can be removed after all refactors are done. fn drop_layer_manager_rlock(rlock: LayerManagerReadGuard<'_>) { drop(rlock) } /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things. /// Can be removed after all refactors are done. 
fn drop_layer_manager_wlock(rlock: LayerManagerWriteGuard<'_>) { drop(rlock) } /// The outward-facing resources required to build a Timeline pub struct TimelineResources { pub remote_client: RemoteTimelineClient, pub pagestream_throttle: Arc<crate::tenant::throttle::Throttle>, pub pagestream_throttle_metrics: Arc<crate::metrics::tenant_throttling::Pagestream>, pub l0_compaction_trigger: Arc<Notify>, pub l0_flush_global_state: l0_flush::L0FlushGlobalState, pub basebackup_cache: Arc<BasebackupCache>, pub feature_resolver: Arc<TenantFeatureResolver>, } pub struct Timeline { pub(crate) conf: &'static PageServerConf, tenant_conf: Arc<ArcSwap<AttachedTenantConf>>, myself: Weak<Self>, pub(crate) tenant_shard_id: TenantShardId, pub timeline_id: TimelineId, /// The generation of the tenant that instantiated us: this is used for safety when writing remote objects. /// Never changes for the lifetime of this [`Timeline`] object. /// /// This duplicates the generation stored in LocationConf, but that structure is mutable: /// this copy enforces the invariant that generatio doesn't change during a Tenant's lifetime. pub(crate) generation: Generation, /// The detailed sharding information from our parent Tenant. This enables us to map keys /// to shards, and is constant through the lifetime of this Timeline. shard_identity: ShardIdentity, pub pg_version: PgMajorVersion, /// The tuple has two elements. /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote). /// 2. `LayerMap`, the acceleration data structure for `get_reconstruct_data`. /// /// `LayerMap` maps out the `(PAGE,LSN) / (KEY,LSN)` space, which is composed of `(KeyRange, LsnRange)` rectangles. /// We describe these rectangles through the `PersistentLayerDesc` struct. /// /// When we want to reconstruct a page, we first find the `PersistentLayerDesc`'s that we need for page reconstruction, /// using `LayerMap`. 
Then, we use `LayerFileManager` to get the `PersistentLayer`'s that correspond to the /// `PersistentLayerDesc`'s. /// /// Hence, it's important to keep things coherent. The `LayerFileManager` must always have an entry for all /// `PersistentLayerDesc`'s in the `LayerMap`. If it doesn't, `LayerFileManager::get_from_desc` will panic at /// runtime, e.g., during page reconstruction. /// /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`, /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`. pub(crate) layers: LockedLayerManager, last_freeze_at: AtomicLsn, // Atomic would be more appropriate here. last_freeze_ts: RwLock<Instant>, pub(crate) standby_horizon: AtomicLsn, // WAL redo manager. `None` only for broken tenants. walredo_mgr: Option<Arc<super::WalRedoManager>>, /// Remote storage client. /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details. pub(crate) remote_client: Arc<RemoteTimelineClient>, // What page versions do we hold in the repository? If we get a // request > last_record_lsn, we need to wait until we receive all // the WAL up to the request. The SeqWait provides functions for // that. TODO: If we get a request for an old LSN, such that the // versions have already been garbage collected away, we should // throw an error, but we don't track that currently. // // last_record_lsn.load().last points to the end of last processed WAL record. // // We also remember the starting point of the previous record in // 'last_record_lsn.load().prev'. It's used to set the xl_prev pointer of the // first WAL record when the node is started up. But here, we just // keep track of it. last_record_lsn: SeqWait<RecordLsn, Lsn>, // All WAL records have been processed and stored durably on files on // local disk, up to this LSN. On crash and restart, we need to re-process // the WAL starting from this point. 
// // Some later WAL records might have been processed and also flushed to disk // already, so don't be surprised to see some, but there's no guarantee on // them yet. disk_consistent_lsn: AtomicLsn, // Parent timeline that this timeline was branched from, and the LSN // of the branch point. ancestor_timeline: Option<Arc<Timeline>>, ancestor_lsn: Lsn, // The LSN of gc-compaction that was last applied to this timeline. gc_compaction_state: ArcSwapOption<GcCompactionState>, pub(crate) metrics: Arc<TimelineMetrics>, // `Timeline` doesn't write these metrics itself, but it manages the lifetime. Code // in `crate::page_service` writes these metrics. pub(crate) query_metrics: crate::metrics::SmgrQueryTimePerTimeline, directory_metrics_inited: [AtomicBool; DirectoryKind::KINDS_NUM], directory_metrics: [AtomicU64; DirectoryKind::KINDS_NUM], /// Ensures layers aren't frozen by checkpointer between /// [`Timeline::get_layer_for_write`] and layer reads. /// Locked automatically by [`TimelineWriter`] and checkpointer. /// Must always be acquired before the layer map/individual layer lock /// to avoid deadlock. /// /// The state is cleared upon freezing. write_lock: tokio::sync::Mutex<Option<TimelineWriterState>>, /// Used to avoid multiple `flush_loop` tasks running pub(super) flush_loop_state: Mutex<FlushLoopState>, /// layer_flush_start_tx can be used to wake up the layer-flushing task. /// - The u64 value is a counter, incremented every time a new flush cycle is requested. /// The flush cycle counter is sent back on the layer_flush_done channel when /// the flush finishes. You can use that to wait for the flush to finish. 
/// - The LSN is updated to max() of its current value and the latest disk_consistent_lsn /// read by whoever sends an update layer_flush_start_tx: tokio::sync::watch::Sender<(u64, Lsn)>, /// to be notified when layer flushing has finished, subscribe to the layer_flush_done channel layer_flush_done_tx: tokio::sync::watch::Sender<(u64, Result<(), FlushLayerError>)>, // The LSN at which we have executed GC: whereas [`Self::gc_info`] records the LSN at which // we _intend_ to GC (i.e. the PITR cutoff), this LSN records where we actually last did it. // Because PITR interval is mutable, it's possible for this LSN to be earlier or later than // the planned GC cutoff. pub applied_gc_cutoff_lsn: Rcu<Lsn>, pub(crate) gc_compaction_layer_update_lock: tokio::sync::RwLock<()>, // List of child timelines and their branch points. This is needed to avoid // garbage collecting data that is still needed by the child timelines. pub(crate) gc_info: std::sync::RwLock<GcInfo>, pub(crate) last_image_layer_creation_status: ArcSwap<LastImageLayerCreationStatus>, // It may change across major versions so for simplicity // keep it after running initdb for a timeline. // It is needed in checks when we want to error on some operations // when they are requested for pre-initdb lsn. // It can be unified with latest_gc_cutoff_lsn under some "first_valid_lsn", // though let's keep them both for better error visibility. pub initdb_lsn: Lsn, /// The repartitioning result. Allows a single writer and multiple readers. pub(crate) partitioning: GuardArcSwap<((KeyPartitioning, SparseKeyPartitioning), Lsn)>, /// Configuration: how often should the partitioning be recalculated. repartition_threshold: u64, last_image_layer_creation_check_at: AtomicLsn, last_image_layer_creation_check_instant: std::sync::Mutex<Option<Instant>>, /// Current logical size of the "datadir", at the last LSN. 
current_logical_size: LogicalSize, /// Information about the last processed message by the WAL receiver, /// or None if WAL receiver has not received anything for this timeline /// yet. pub last_received_wal: Mutex<Option<WalReceiverInfo>>, pub walreceiver: Mutex<Option<WalReceiver>>, /// Relation size cache pub(crate) rel_size_latest_cache: RwLock<HashMap<RelTag, (Lsn, BlockNumber)>>, pub(crate) rel_size_snapshot_cache: Mutex<LruCache<(Lsn, RelTag), BlockNumber>>, download_all_remote_layers_task_info: RwLock<Option<DownloadRemoteLayersTaskInfo>>, state: watch::Sender<TimelineState>, /// Prevent two tasks from deleting the timeline at the same time. If held, the /// timeline is being deleted. If 'true', the timeline has already been deleted. pub delete_progress: TimelineDeleteProgress, eviction_task_timeline_state: tokio::sync::Mutex<EvictionTaskTimelineState>, /// Load or creation time information about the disk_consistent_lsn and when the loading /// happened. Used for consumption metrics. pub(crate) loaded_at: (Lsn, SystemTime), /// Gate to prevent shutdown completing while I/O is still happening to this timeline's data pub(crate) gate: Gate, /// Cancellation token scoped to this timeline: anything doing long-running work relating /// to the timeline should drop out when this token fires. pub(crate) cancel: CancellationToken, /// Make sure we only have one running compaction at a time in tests. /// /// Must only be taken in two places: /// - [`Timeline::compact`] (this file) /// - [`delete::delete_local_timeline_directory`] /// /// Timeline deletion will acquire both compaction and gc locks in whatever order. compaction_lock: tokio::sync::Mutex<()>, /// If true, the last compaction failed. compaction_failed: AtomicBool, /// Begin Hadron: If true, the pageserver has likely detected data corruption in the timeline. /// We need to feed this information back to the Safekeeper and postgres for them to take the /// appropriate action. 
corruption_detected: AtomicBool, /// Notifies the tenant compaction loop that there is pending L0 compaction work. l0_compaction_trigger: Arc<Notify>, /// Make sure we only have one running gc at a time. /// /// Must only be taken in two places: /// - [`Timeline::gc`] (this file) /// - [`delete::delete_local_timeline_directory`] /// /// Timeline deletion will acquire both compaction and gc locks in whatever order. gc_lock: tokio::sync::Mutex<()>, /// Cloned from [`super::TenantShard::pagestream_throttle`] on construction. pub(crate) pagestream_throttle: Arc<crate::tenant::throttle::Throttle>, /// Size estimator for aux file v2 pub(crate) aux_file_size_estimator: AuxFileSizeEstimator, /// Some test cases directly place keys into the timeline without actually modifying the directory /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and /// in the future, add `extra_test_sparse_keyspace` if necessary. #[cfg(test)] pub(crate) extra_test_dense_keyspace: ArcSwap<KeySpace>, pub(crate) l0_flush_global_state: L0FlushGlobalState, pub(crate) handles: handle::PerTimelineState<TenantManagerTypes>, pub(crate) attach_wal_lag_cooldown: Arc<OnceLock<WalLagCooldown>>, /// Cf. [`crate::tenant::CreateTimelineIdempotency`]. pub(crate) create_idempotency: crate::tenant::CreateTimelineIdempotency, /// If Some, collects GetPage metadata for an ongoing PageTrace. pub(crate) page_trace: ArcSwapOption<Sender<PageTraceEvent>>, pub(super) previous_heatmap: ArcSwapOption<PreviousHeatmap>, /// May host a background Tokio task which downloads all the layers from the current /// heatmap on demand. 
heatmap_layers_downloader: Mutex<Option<heatmap_layers_downloader::HeatmapLayersDownloader>>, pub(crate) rel_size_v2_status: ArcSwap<(Option<RelSizeMigration>, Option<Lsn>)>, wait_lsn_log_slow: tokio::sync::Semaphore, /// A channel to send async requests to prepare a basebackup for the basebackup cache. basebackup_cache: Arc<BasebackupCache>, #[expect(dead_code)] feature_resolver: Arc<TenantFeatureResolver>, /// Basebackup will collect the count and store it here. Used for reldirv2 rollout. pub(crate) db_rel_count: ArcSwapOption<(usize, usize)>, } pub(crate) enum PreviousHeatmap { Active { heatmap: HeatMapTimeline, read_at: std::time::Instant, // End LSN covered by the heatmap if known end_lsn: Option<Lsn>, }, Obsolete, } pub type TimelineDeleteProgress = Arc<tokio::sync::Mutex<DeleteTimelineFlow>>; pub struct WalReceiverInfo { pub wal_source_connconf: PgConnectionConfig, pub last_received_msg_lsn: Lsn, pub last_received_msg_ts: u128, } /// Information about how much history needs to be retained, needed by /// Garbage Collection. #[derive(Default)] pub(crate) struct GcInfo { /// Specific LSNs that are needed. /// /// Currently, this includes all points where child branches have /// been forked off from. In the future, could also include /// explicit user-defined snapshot points. pub(crate) retain_lsns: Vec<(Lsn, TimelineId, MaybeOffloaded)>, /// The cutoff coordinates, which are combined by selecting the minimum. pub(crate) cutoffs: GcCutoffs, /// Leases granted to particular LSNs. 
pub(crate) leases: BTreeMap<Lsn, LsnLease>, /// Whether our branch point is within our ancestor's PITR interval (for cost estimation) pub(crate) within_ancestor_pitr: bool, } impl GcInfo { pub(crate) fn min_cutoff(&self) -> Lsn { self.cutoffs.select_min() } pub(super) fn insert_child( &mut self, child_id: TimelineId, child_lsn: Lsn, is_offloaded: MaybeOffloaded, ) { self.retain_lsns.push((child_lsn, child_id, is_offloaded)); self.retain_lsns.sort_by_key(|i| i.0); } pub(super) fn remove_child_maybe_offloaded( &mut self, child_id: TimelineId, maybe_offloaded: MaybeOffloaded, ) -> bool { // Remove at most one element. Needed for correctness if there is two live `Timeline` objects referencing // the same timeline. Shouldn't but maybe can occur when Arc's live longer than intended. let mut removed = false; self.retain_lsns.retain(|i| { if removed { return true; } let remove = i.1 == child_id && i.2 == maybe_offloaded; removed |= remove; !remove }); removed } pub(super) fn remove_child_not_offloaded(&mut self, child_id: TimelineId) -> bool { self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::No) } pub(super) fn remove_child_offloaded(&mut self, child_id: TimelineId) -> bool { self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::Yes) } pub(crate) fn lsn_covered_by_lease(&self, lsn: Lsn) -> bool { self.leases.contains_key(&lsn) } } /// The `GcInfo` component describing which Lsns need to be retained. Functionally, this /// is a single number (the oldest LSN which we must retain), but it internally distinguishes /// between time-based and space-based retention for observability and consumption metrics purposes. #[derive(Clone, Debug, Default)] pub(crate) struct GcCutoffs { /// Calculated from the [`pageserver_api::models::TenantConfig::gc_horizon`], this LSN indicates how much /// history we must keep to retain a specified number of bytes of WAL. 
pub(crate) space: Lsn, /// Calculated from [`pageserver_api::models::TenantConfig::pitr_interval`], this LSN indicates /// how much history we must keep to enable reading back at least the PITR interval duration. /// /// None indicates that the PITR cutoff has not been computed. A PITR interval of 0 will yield /// Some(last_record_lsn). pub(crate) time: Option<Lsn>, } impl GcCutoffs { fn select_min(&self) -> Lsn { // NB: if we haven't computed the PITR cutoff yet, we can't GC anything. self.space.min(self.time.unwrap_or_default()) } } pub(crate) struct TimelineVisitOutcome { completed_keyspace: KeySpace, image_covered_keyspace: KeySpace, } /// An error happened in a get() operation. #[derive(thiserror::Error, Debug)] pub(crate) enum PageReconstructError { #[error(transparent)] Other(anyhow::Error), #[error("Ancestor LSN wait error: {0}")] AncestorLsnTimeout(WaitLsnError), #[error("timeline shutting down")] Cancelled, /// An error happened replaying WAL records #[error(transparent)] WalRedo(anyhow::Error), #[error("{0}")] MissingKey(Box<MissingKeyError>), } impl PageReconstructError { pub(crate) fn is_cancel(&self) -> bool { match self { PageReconstructError::Other(_) => false, PageReconstructError::AncestorLsnTimeout(e) => e.is_cancel(), PageReconstructError::Cancelled => true, PageReconstructError::WalRedo(_) => false, PageReconstructError::MissingKey(_) => false, } } #[allow(dead_code)] // we use the is_cancel + into_anyhow pattern in quite a few places, this one will follow soon enough pub(crate) fn into_anyhow(self) -> anyhow::Error { match self { PageReconstructError::Other(e) => e, PageReconstructError::AncestorLsnTimeout(e) => e.into_anyhow(), PageReconstructError::Cancelled => anyhow::Error::new(self), PageReconstructError::WalRedo(e) => e, PageReconstructError::MissingKey(_) => anyhow::Error::new(self), } } } impl From<anyhow::Error> for PageReconstructError { fn from(value: anyhow::Error) -> Self { // with walingest.rs many PageReconstructError are 
wrapped in as anyhow::Error match value.downcast::<PageReconstructError>() { Ok(pre) => pre, Err(other) => PageReconstructError::Other(other), } } } impl From<utils::bin_ser::DeserializeError> for PageReconstructError { fn from(value: utils::bin_ser::DeserializeError) -> Self { PageReconstructError::Other(anyhow::Error::new(value).context("deserialization failure")) } } impl From<layer_manager::Shutdown> for PageReconstructError { fn from(_: layer_manager::Shutdown) -> Self { PageReconstructError::Cancelled } } impl GetVectoredError { #[cfg(test)] pub(crate) fn is_missing_key_error(&self) -> bool { matches!(self, Self::MissingKey(_)) } } impl From<layer_manager::Shutdown> for GetVectoredError { fn from(_: layer_manager::Shutdown) -> Self { GetVectoredError::Cancelled } } /// A layer identifier when used in the [`ReadPath`] structure. This enum is for observability purposes /// only and not used by the "real read path". pub enum ReadPathLayerId { PersistentLayer(PersistentLayerKey), InMemoryLayer(Range<Lsn>), } impl std::fmt::Display for ReadPathLayerId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { ReadPathLayerId::PersistentLayer(key) => write!(f, "{key}"), ReadPathLayerId::InMemoryLayer(range) => { write!(f, "in-mem {}..{}", range.start, range.end) } } } } pub struct ReadPath { keyspace: KeySpace, lsn: Lsn, path: Vec<(ReadPathLayerId, KeySpace, Range<Lsn>)>, } impl ReadPath { pub fn new(keyspace: KeySpace, lsn: Lsn) -> Self { Self { keyspace, lsn, path: Vec::new(), } } pub fn record_layer_visit( &mut self, layer_to_read: &ReadableLayer, keyspace_to_read: &KeySpace, lsn_range: &Range<Lsn>, ) { let id = match layer_to_read { ReadableLayer::PersistentLayer(layer) => { ReadPathLayerId::PersistentLayer(layer.layer_desc().key()) } ReadableLayer::InMemoryLayer(layer) => { ReadPathLayerId::InMemoryLayer(layer.get_lsn_range()) } }; self.path .push((id, keyspace_to_read.clone(), lsn_range.clone())); } } impl std::fmt::Display for 
ReadPath { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!(f, "Read path for {} at lsn {}:", self.keyspace, self.lsn)?; for (idx, (layer_id, keyspace, lsn_range)) in self.path.iter().enumerate() { writeln!( f, "{}: {} {}..{} {}", idx, layer_id, lsn_range.start, lsn_range.end, keyspace )?; } Ok(()) } } #[derive(thiserror::Error)] pub struct MissingKeyError { keyspace: KeySpace, shard: ShardNumber, query: Option<VersionedKeySpaceQuery>, // This is largest request LSN from the get page request batch original_hwm_lsn: Lsn, ancestor_lsn: Option<Lsn>, /// Debug information about the read path if there's an error read_path: Option<ReadPath>, backtrace: Option<std::backtrace::Backtrace>, } impl MissingKeyError { fn enrich(&mut self, query: VersionedKeySpaceQuery) { self.query = Some(query); } } impl std::fmt::Debug for MissingKeyError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{self}") } } impl std::fmt::Display for MissingKeyError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "could not find data for key {} (shard {:?}), original HWM LSN {}", self.keyspace, self.shard, self.original_hwm_lsn )?; if let Some(ref ancestor_lsn) = self.ancestor_lsn { write!(f, ", ancestor {ancestor_lsn}")?; } if let Some(ref query) = self.query { write!(f, ", query {query}")?; } if let Some(ref read_path) = self.read_path { write!(f, "\n{read_path}")?; } if let Some(ref backtrace) = self.backtrace { write!(f, "\n{backtrace}")?; } Ok(()) } } #[derive(thiserror::Error, Debug)] pub(crate) enum CreateImageLayersError { #[error("timeline shutting down")] Cancelled, #[error("read failed")] GetVectoredError(#[source] GetVectoredError), #[error("reconstruction failed")] PageReconstructError(#[source] PageReconstructError), #[error(transparent)] Other(anyhow::Error), } impl From<layer_manager::Shutdown> for CreateImageLayersError { fn from(_: layer_manager::Shutdown) -> Self { 
CreateImageLayersError::Cancelled } } #[derive(thiserror::Error, Debug, Clone)] pub(crate) enum FlushLayerError { /// Timeline cancellation token was cancelled #[error("timeline shutting down")] Cancelled, /// We tried to flush a layer while the Timeline is in an unexpected state #[error("cannot flush frozen layers when flush_loop is not running, state is {0:?}")] NotRunning(FlushLoopState), // Arc<> the following non-clonable error types: we must be Clone-able because the flush error is propagated from the flush // loop via a watch channel, where we can only borrow it. #[error("create image layers (shared)")] CreateImageLayersError(Arc<CreateImageLayersError>), #[error("other (shared)")] Other(#[from] Arc<anyhow::Error>), } impl FlushLayerError { // When crossing from generic anyhow errors to this error type, we explicitly check // for timeline cancellation to avoid logging inoffensive shutdown errors as warn/err. fn from_anyhow(timeline: &Timeline, err: anyhow::Error) -> Self { let cancelled = timeline.cancel.is_cancelled() // The upload queue might have been shut down before the official cancellation of the timeline. 
|| err .downcast_ref::<NotInitialized>() .map(NotInitialized::is_stopping) .unwrap_or_default(); if cancelled { Self::Cancelled } else { Self::Other(Arc::new(err)) } } } impl From<layer_manager::Shutdown> for FlushLayerError { fn from(_: layer_manager::Shutdown) -> Self { FlushLayerError::Cancelled } } #[derive(thiserror::Error, Debug)] pub enum GetVectoredError { #[error("timeline shutting down")] Cancelled, #[error("requested too many keys: {0} > {1}")] Oversized(u64, u64), #[error("requested at invalid LSN: {0}")] InvalidLsn(Lsn), #[error("requested key not found: {0}")] MissingKey(Box<MissingKeyError>), #[error("ancestry walk")] GetReadyAncestorError(#[source] GetReadyAncestorError), #[error(transparent)] Other(#[from] anyhow::Error), } impl From<GetReadyAncestorError> for GetVectoredError { fn from(value: GetReadyAncestorError) -> Self { use GetReadyAncestorError::*;
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/block_io.rs
pageserver/src/tenant/block_io.rs
//! //! Low-level Block-oriented I/O functions //! use std::ops::Deref; use super::storage_layer::delta_layer::{Adapter, DeltaLayerInner}; use crate::context::RequestContext; use crate::page_cache::{self, FileId, PAGE_SZ, PageReadGuard, PageWriteGuard, ReadBufResult}; #[cfg(test)] use crate::virtual_file::IoBufferMut; use crate::virtual_file::{IoBuffer, VirtualFile}; /// This is implemented by anything that can read 8 kB (PAGE_SZ) /// blocks, using the page cache /// /// There are currently two implementations: EphemeralFile, and FileBlockReader /// below. pub trait BlockReader { /// /// Create a new "cursor" for reading from this reader. /// /// A cursor caches the last accessed page, allowing for faster /// access if the same block is accessed repeatedly. fn block_cursor(&self) -> BlockCursor<'_>; } impl<B> BlockReader for &B where B: BlockReader, { fn block_cursor(&self) -> BlockCursor<'_> { (*self).block_cursor() } } /// Reference to an in-memory copy of an immutable on-disk block. pub enum BlockLease<'a> { PageReadGuard(PageReadGuard<'static>), EphemeralFileMutableTail(&'a [u8; PAGE_SZ]), Slice(&'a [u8; PAGE_SZ]), #[cfg(test)] Arc(std::sync::Arc<[u8; PAGE_SZ]>), #[cfg(test)] IoBufferMut(IoBufferMut), } impl From<PageReadGuard<'static>> for BlockLease<'static> { fn from(value: PageReadGuard<'static>) -> BlockLease<'static> { BlockLease::PageReadGuard(value) } } #[cfg(test)] impl From<std::sync::Arc<[u8; PAGE_SZ]>> for BlockLease<'_> { fn from(value: std::sync::Arc<[u8; PAGE_SZ]>) -> Self { BlockLease::Arc(value) } } impl Deref for BlockLease<'_> { type Target = [u8; PAGE_SZ]; fn deref(&self) -> &Self::Target { match self { BlockLease::PageReadGuard(v) => v.deref(), BlockLease::EphemeralFileMutableTail(v) => v, BlockLease::Slice(v) => v, #[cfg(test)] BlockLease::Arc(v) => v.deref(), #[cfg(test)] BlockLease::IoBufferMut(v) => { TryFrom::try_from(&v[..]).expect("caller must ensure that v has PAGE_SZ") } } } } /// Provides the ability to read blocks from different 
sources, /// similar to using traits for this purpose. /// /// Unlike traits, we also support the read function to be async though. pub(crate) enum BlockReaderRef<'a> { FileBlockReader(&'a FileBlockReader<'a>), Adapter(Adapter<&'a DeltaLayerInner>), #[cfg(test)] TestDisk(&'a super::disk_btree::tests::TestDisk), #[cfg(test)] VirtualFile(&'a VirtualFile), } impl BlockReaderRef<'_> { #[inline(always)] async fn read_blk( &self, blknum: u32, ctx: &RequestContext, ) -> Result<BlockLease, std::io::Error> { use BlockReaderRef::*; match self { FileBlockReader(r) => r.read_blk(blknum, ctx).await, Adapter(r) => r.read_blk(blknum, ctx).await, #[cfg(test)] TestDisk(r) => r.read_blk(blknum), #[cfg(test)] VirtualFile(r) => r.read_blk(blknum, ctx).await, } } } /// /// A "cursor" for efficiently reading multiple pages from a BlockReader /// /// You can access the last page with `*cursor`. 'read_blk' returns 'self', so /// that in many cases you can use a BlockCursor as a drop-in replacement for /// the underlying BlockReader. 
For example: /// /// ```no_run /// # use pageserver::tenant::block_io::{BlockReader, FileBlockReader}; /// # use pageserver::context::RequestContext; /// # let reader: FileBlockReader = unimplemented!("stub"); /// # let ctx: RequestContext = unimplemented!("stub"); /// let cursor = reader.block_cursor(); /// let buf = cursor.read_blk(1, &ctx); /// // do stuff with 'buf' /// let buf = cursor.read_blk(2, &ctx); /// // do stuff with 'buf' /// ``` /// pub struct BlockCursor<'a> { pub(super) read_compressed: bool, reader: BlockReaderRef<'a>, } impl<'a> BlockCursor<'a> { pub(crate) fn new(reader: BlockReaderRef<'a>) -> Self { Self::new_with_compression(reader, false) } pub(crate) fn new_with_compression(reader: BlockReaderRef<'a>, read_compressed: bool) -> Self { BlockCursor { read_compressed, reader, } } // Needed by cli pub fn new_fileblockreader(reader: &'a FileBlockReader) -> Self { BlockCursor { read_compressed: false, reader: BlockReaderRef::FileBlockReader(reader), } } /// Read a block. /// /// Returns a "lease" object that can be used to /// access to the contents of the page. (For the page cache, the /// lease object represents a lock on the buffer.) #[inline(always)] pub async fn read_blk( &self, blknum: u32, ctx: &RequestContext, ) -> Result<BlockLease, std::io::Error> { self.reader.read_blk(blknum, ctx).await } } /// An adapter for reading a (virtual) file using the page cache. /// /// The file is assumed to be immutable. This doesn't provide any functions /// for modifying the file, nor for invalidating the cache if it is modified. #[derive(Clone)] pub struct FileBlockReader<'a> { pub file: &'a VirtualFile, /// Unique ID of this file, used as key in the page cache. file_id: page_cache::FileId, compressed_reads: bool, } impl<'a> FileBlockReader<'a> { pub fn new(file: &'a VirtualFile, file_id: FileId) -> Self { FileBlockReader { file_id, file, compressed_reads: true, } } /// Read a page from the underlying file into given buffer. 
async fn fill_buffer( &self, buf: PageWriteGuard<'static>, blkno: u32, ctx: &RequestContext, ) -> Result<PageWriteGuard<'static>, std::io::Error> { assert!(buf.len() == PAGE_SZ); self.file .read_exact_at_page(buf, blkno as u64 * PAGE_SZ as u64, ctx) .await } /// Read a block. /// /// Returns a "lease" object that can be used to /// access to the contents of the page. (For the page cache, the /// lease object represents a lock on the buffer.) pub async fn read_blk<'b>( &self, blknum: u32, ctx: &RequestContext, ) -> Result<BlockLease<'b>, std::io::Error> { let cache = page_cache::get(); match cache .read_immutable_buf(self.file_id, blknum, ctx) .await .map_err(|e| std::io::Error::other(format!("Failed to read immutable buf: {e:#}")))? { ReadBufResult::Found(guard) => Ok(guard.into()), ReadBufResult::NotFound(write_guard) => { // Read the page from disk into the buffer let write_guard = self.fill_buffer(write_guard, blknum, ctx).await?; Ok(write_guard.mark_valid().into()) } } } } impl BlockReader for FileBlockReader<'_> { fn block_cursor(&self) -> BlockCursor<'_> { BlockCursor::new_with_compression( BlockReaderRef::FileBlockReader(self), self.compressed_reads, ) } } /// /// Trait for block-oriented output /// pub trait BlockWriter { /// /// Write a page to the underlying storage. /// /// 'buf' must be of size PAGE_SZ. Returns the block number the page was /// written to. /// fn write_blk(&mut self, buf: IoBuffer) -> Result<u32, std::io::Error>; } /// /// A simple in-memory buffer of blocks. /// pub struct BlockBuf { pub blocks: Vec<IoBuffer>, } impl BlockWriter for BlockBuf { fn write_blk(&mut self, buf: IoBuffer) -> Result<u32, std::io::Error> { assert!(buf.len() == PAGE_SZ); let blknum = self.blocks.len(); self.blocks.push(buf); Ok(blknum as u32) } } impl BlockBuf { pub fn new() -> Self { BlockBuf { blocks: Vec::new() } } pub fn size(&self) -> u64 { (self.blocks.len() * PAGE_SZ) as u64 } } impl Default for BlockBuf { fn default() -> Self { Self::new() } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/blob_io.rs
pageserver/src/tenant/blob_io.rs
//! //! Functions for reading and writing variable-sized "blobs". //! //! Each blob begins with a 1- or 4-byte length field, followed by the //! actual data. If the length is smaller than 128 bytes, the length //! is written as a one byte. If it's larger than that, the length //! is written as a four-byte integer, in big-endian, with the high //! bit set. This way, we can detect whether it's 1- or 4-byte header //! by peeking at the first byte. For blobs larger than 128 bits, //! we also specify three reserved bits, only one of the three bit //! patterns is currently in use (0b011) and signifies compression //! with zstd. //! //! len < 128: 0XXXXXXX //! len >= 128: 1CCCXXXX XXXXXXXX XXXXXXXX XXXXXXXX //! use std::cmp::min; use anyhow::Context; use async_compression::Level; use bytes::{BufMut, BytesMut}; use pageserver_api::models::ImageCompressionAlgorithm; use tokio::io::AsyncWriteExt; use tokio_epoll_uring::IoBuf; use tokio_util::sync::CancellationToken; use tracing::warn; use crate::context::RequestContext; use crate::page_cache::PAGE_SZ; use crate::tenant::block_io::BlockCursor; use crate::virtual_file::IoBufferMut; use crate::virtual_file::owned_buffers_io::io_buf_ext::{FullSlice, IoBufExt}; use crate::virtual_file::owned_buffers_io::write::{BufferedWriter, FlushTaskError}; use crate::virtual_file::owned_buffers_io::write::{BufferedWriterShutdownMode, OwnedAsyncWriter}; #[derive(Copy, Clone, Debug)] pub struct CompressionInfo { pub written_compressed: bool, pub compressed_size: Option<usize>, } /// A blob header, with header+data length and compression info. /// /// TODO: use this more widely, and add an encode() method too. /// TODO: document the header format. #[derive(Clone, Copy, Default)] pub struct Header { pub header_len: usize, pub data_len: usize, pub compression_bits: u8, } impl Header { /// Decodes a header from a byte slice. 
pub fn decode(bytes: &[u8]) -> anyhow::Result<Self> { let Some(&first_header_byte) = bytes.first() else { anyhow::bail!("zero-length blob header"); }; // If the first bit is 0, this is just a 1-byte length prefix up to 128 bytes. if first_header_byte < 0x80 { return Ok(Self { header_len: 1, // by definition data_len: first_header_byte as usize, compression_bits: BYTE_UNCOMPRESSED, }); } // Otherwise, this is a 4-byte header containing compression information and length. const HEADER_LEN: usize = 4; let mut header_buf: [u8; HEADER_LEN] = bytes[0..HEADER_LEN] .try_into() .map_err(|_| anyhow::anyhow!("blob header too short: {bytes:?}"))?; // TODO: verify the compression bits and convert to an enum. let compression_bits = header_buf[0] & LEN_COMPRESSION_BIT_MASK; header_buf[0] &= !LEN_COMPRESSION_BIT_MASK; let data_len = u32::from_be_bytes(header_buf) as usize; Ok(Self { header_len: HEADER_LEN, data_len, compression_bits, }) } /// Returns the total header+data length. pub fn total_len(&self) -> usize { self.header_len + self.data_len } } #[derive(Debug, thiserror::Error)] pub enum WriteBlobError { #[error(transparent)] Flush(FlushTaskError), #[error(transparent)] Other(anyhow::Error), } impl WriteBlobError { pub fn is_cancel(&self) -> bool { match self { WriteBlobError::Flush(e) => e.is_cancel(), WriteBlobError::Other(_) => false, } } pub fn into_anyhow(self) -> anyhow::Error { match self { WriteBlobError::Flush(e) => e.into_anyhow(), WriteBlobError::Other(e) => e, } } } impl BlockCursor<'_> { /// Read a blob into a new buffer. pub async fn read_blob( &self, offset: u64, ctx: &RequestContext, ) -> Result<Vec<u8>, std::io::Error> { let mut buf = Vec::new(); self.read_blob_into_buf(offset, &mut buf, ctx).await?; Ok(buf) } /// Read blob into the given buffer. Any previous contents in the buffer /// are overwritten. 
pub async fn read_blob_into_buf( &self, offset: u64, dstbuf: &mut Vec<u8>, ctx: &RequestContext, ) -> Result<(), std::io::Error> { let mut blknum = (offset / PAGE_SZ as u64) as u32; let mut off = (offset % PAGE_SZ as u64) as usize; let mut buf = self.read_blk(blknum, ctx).await?; // peek at the first byte, to determine if it's a 1- or 4-byte length let first_len_byte = buf[off]; let len: usize = if first_len_byte < 0x80 { // 1-byte length header off += 1; first_len_byte as usize } else { // 4-byte length header let mut len_buf = [0u8; 4]; let thislen = PAGE_SZ - off; if thislen < 4 { // it is split across two pages len_buf[..thislen].copy_from_slice(&buf[off..PAGE_SZ]); blknum += 1; buf = self.read_blk(blknum, ctx).await?; len_buf[thislen..].copy_from_slice(&buf[0..4 - thislen]); off = 4 - thislen; } else { len_buf.copy_from_slice(&buf[off..off + 4]); off += 4; } let bit_mask = if self.read_compressed { !LEN_COMPRESSION_BIT_MASK } else { 0x7f }; len_buf[0] &= bit_mask; u32::from_be_bytes(len_buf) as usize }; let compression_bits = first_len_byte & LEN_COMPRESSION_BIT_MASK; let mut tmp_buf = Vec::new(); let buf_to_write; let compression = if compression_bits <= BYTE_UNCOMPRESSED || !self.read_compressed { if compression_bits > BYTE_UNCOMPRESSED { warn!("reading key above future limit ({len} bytes)"); } buf_to_write = dstbuf; None } else if compression_bits == BYTE_ZSTD { buf_to_write = &mut tmp_buf; Some(dstbuf) } else { let error = std::io::Error::new( std::io::ErrorKind::InvalidData, format!("invalid compression byte {compression_bits:x}"), ); return Err(error); }; buf_to_write.clear(); buf_to_write.reserve(len); // Read the payload let mut remain = len; while remain > 0 { let mut page_remain = PAGE_SZ - off; if page_remain == 0 { // continue on next page blknum += 1; buf = self.read_blk(blknum, ctx).await?; off = 0; page_remain = PAGE_SZ; } let this_blk_len = min(remain, page_remain); buf_to_write.extend_from_slice(&buf[off..off + this_blk_len]); remain -= 
this_blk_len; off += this_blk_len; } if let Some(dstbuf) = compression { if compression_bits == BYTE_ZSTD { let mut decoder = async_compression::tokio::write::ZstdDecoder::new(dstbuf); decoder.write_all(buf_to_write).await?; decoder.flush().await?; } else { unreachable!("already checked above") } } Ok(()) } } /// Reserved bits for length and compression pub(super) const LEN_COMPRESSION_BIT_MASK: u8 = 0xf0; /// The maximum size of blobs we support. The highest few bits /// are reserved for compression and other further uses. pub(crate) const MAX_SUPPORTED_BLOB_LEN: usize = 0x0fff_ffff; pub(super) const BYTE_UNCOMPRESSED: u8 = 0x80; pub(super) const BYTE_ZSTD: u8 = BYTE_UNCOMPRESSED | 0x10; /// A wrapper of `VirtualFile` that allows users to write blobs. pub struct BlobWriter<W> { /// We do tiny writes for the length headers; they need to be in an owned buffer; io_buf: Option<BytesMut>, writer: BufferedWriter<IoBufferMut, W>, offset: u64, } impl<W> BlobWriter<W> where W: OwnedAsyncWriter + std::fmt::Debug + Send + Sync + 'static, { /// See [`BufferedWriter`] struct-level doc comment for semantics of `start_offset`. pub fn new( file: W, start_offset: u64, gate: &utils::sync::gate::Gate, cancel: CancellationToken, ctx: &RequestContext, flush_task_span: tracing::Span, ) -> anyhow::Result<Self> { Ok(Self { io_buf: Some(BytesMut::new()), writer: BufferedWriter::new( file, start_offset, || IoBufferMut::with_capacity(Self::CAPACITY), gate.enter()?, cancel, ctx, flush_task_span, ), offset: start_offset, }) } pub fn size(&self) -> u64 { self.offset } const CAPACITY: usize = 64 * 1024; /// Writes `src_buf` to the file at the current offset. async fn write_all<Buf: IoBuf + Send>( &mut self, src_buf: FullSlice<Buf>, ctx: &RequestContext, ) -> (FullSlice<Buf>, Result<(), FlushTaskError>) { let res = self .writer // TODO: why are we taking a FullSlice if we're going to pass a borrow downstack? 
// Can remove all the complexity around owned buffers upstack .write_buffered_borrowed(&src_buf, ctx) .await .map(|len| { self.offset += len as u64; }); (src_buf, res) } /// Write a blob of data. Returns the offset that it was written to, /// which can be used to retrieve the data later. pub async fn write_blob<Buf: IoBuf + Send>( &mut self, srcbuf: FullSlice<Buf>, ctx: &RequestContext, ) -> (FullSlice<Buf>, Result<u64, WriteBlobError>) { let (buf, res) = self .write_blob_maybe_compressed(srcbuf, ctx, ImageCompressionAlgorithm::Disabled) .await; (buf, res.map(|(off, _compression_info)| off)) } /// Write a blob of data. Returns the offset that it was written to, /// which can be used to retrieve the data later. pub(crate) async fn write_blob_maybe_compressed<Buf: IoBuf + Send>( &mut self, srcbuf: FullSlice<Buf>, ctx: &RequestContext, algorithm: ImageCompressionAlgorithm, ) -> ( FullSlice<Buf>, Result<(u64, CompressionInfo), WriteBlobError>, ) { let offset = self.offset; let mut compression_info = CompressionInfo { written_compressed: false, compressed_size: None, }; let len = srcbuf.len(); let mut io_buf = self.io_buf.take().expect("we always put it back below"); io_buf.clear(); let mut compressed_buf = None; let ((io_buf_slice, hdr_res), srcbuf) = async { if len < 128 { // Short blob. 
Write a 1-byte length header io_buf.put_u8(len as u8); let (slice, res) = self.write_all(io_buf.slice_len(), ctx).await; let res = res.map_err(WriteBlobError::Flush); ((slice, res), srcbuf) } else { // Write a 4-byte length header if len > MAX_SUPPORTED_BLOB_LEN { return ( ( io_buf.slice_len(), Err(WriteBlobError::Other(anyhow::anyhow!( "blob too large ({len} bytes)" ))), ), srcbuf, ); } let (high_bit_mask, len_written, srcbuf) = match algorithm { ImageCompressionAlgorithm::Zstd { level } => { let mut encoder = if let Some(level) = level { async_compression::tokio::write::ZstdEncoder::with_quality( Vec::new(), Level::Precise(level.into()), ) } else { async_compression::tokio::write::ZstdEncoder::new(Vec::new()) }; encoder.write_all(&srcbuf[..]).await.unwrap(); encoder.shutdown().await.unwrap(); let compressed = encoder.into_inner(); compression_info.compressed_size = Some(compressed.len()); if compressed.len() < len { compression_info.written_compressed = true; let compressed_len = compressed.len(); compressed_buf = Some(compressed); (BYTE_ZSTD, compressed_len, srcbuf) } else { (BYTE_UNCOMPRESSED, len, srcbuf) } } ImageCompressionAlgorithm::Disabled => (BYTE_UNCOMPRESSED, len, srcbuf), }; let mut len_buf = (len_written as u32).to_be_bytes(); assert_eq!(len_buf[0] & 0xf0, 0); len_buf[0] |= high_bit_mask; io_buf.extend_from_slice(&len_buf[..]); let (slice, res) = self.write_all(io_buf.slice_len(), ctx).await; let res = res.map_err(WriteBlobError::Flush); ((slice, res), srcbuf) } } .await; self.io_buf = Some(io_buf_slice.into_raw_slice().into_inner()); match hdr_res { Ok(_) => (), Err(e) => return (srcbuf, Err(e)), } let (srcbuf, res) = if let Some(compressed_buf) = compressed_buf { let (_buf, res) = self.write_all(compressed_buf.slice_len(), ctx).await; (srcbuf, res) } else { self.write_all(srcbuf, ctx).await }; let res = res.map_err(WriteBlobError::Flush); (srcbuf, res.map(|_| (offset, compression_info))) } /// Writes a raw blob containing both header and data, 
returning its offset. pub(crate) async fn write_blob_raw<Buf: IoBuf + Send>( &mut self, raw_with_header: FullSlice<Buf>, ctx: &RequestContext, ) -> (FullSlice<Buf>, Result<u64, WriteBlobError>) { // Verify the header, to ensure we don't write invalid/corrupt data. let header = match Header::decode(&raw_with_header) .context("decoding blob header") .map_err(WriteBlobError::Other) { Ok(header) => header, Err(err) => return (raw_with_header, Err(err)), }; if raw_with_header.len() != header.total_len() { let header_total_len = header.total_len(); let raw_len = raw_with_header.len(); return ( raw_with_header, Err(WriteBlobError::Other(anyhow::anyhow!( "header length mismatch: {header_total_len} != {raw_len}" ))), ); } let offset = self.offset; let (raw_with_header, result) = self.write_all(raw_with_header, ctx).await; let result = result.map_err(WriteBlobError::Flush); (raw_with_header, result.map(|_| offset)) } /// Finish this blob writer and return the underlying `W`. pub async fn shutdown( self, mode: BufferedWriterShutdownMode, ctx: &RequestContext, ) -> Result<W, FlushTaskError> { let (_, file) = self.writer.shutdown(mode, ctx).await?; Ok(file) } } #[cfg(test)] pub(crate) mod tests { use camino::Utf8PathBuf; use camino_tempfile::Utf8TempDir; use rand::{Rng, SeedableRng}; use tracing::info_span; use super::*; use crate::context::DownloadBehavior; use crate::task_mgr::TaskKind; use crate::tenant::block_io::BlockReaderRef; use crate::virtual_file; use crate::virtual_file::TempVirtualFile; use crate::virtual_file::VirtualFile; async fn round_trip_test(blobs: &[Vec<u8>]) -> anyhow::Result<()> { round_trip_test_compressed(blobs, false).await } pub(crate) async fn write_maybe_compressed( blobs: &[Vec<u8>], compression: bool, ctx: &RequestContext, ) -> anyhow::Result<(Utf8TempDir, Utf8PathBuf, Vec<u64>)> { let temp_dir = camino_tempfile::tempdir()?; let pathbuf = temp_dir.path().join("file"); let gate = utils::sync::gate::Gate::default(); let cancel = 
CancellationToken::new(); // Write part (in block to drop the file) let mut offsets = Vec::new(); { let file = TempVirtualFile::new( VirtualFile::open_with_options_v2( pathbuf.as_path(), virtual_file::OpenOptions::new() .create_new(true) .write(true), ctx, ) .await?, gate.enter()?, ); let mut wtr = BlobWriter::new(file, 0, &gate, cancel.clone(), ctx, info_span!("test")).unwrap(); for blob in blobs.iter() { let (_, res) = if compression { let res = wtr .write_blob_maybe_compressed( blob.clone().slice_len(), ctx, ImageCompressionAlgorithm::Zstd { level: Some(1) }, ) .await; (res.0, res.1.map(|(off, _)| off)) } else { wtr.write_blob(blob.clone().slice_len(), ctx).await }; let offs = res?; offsets.push(offs); } let file = wtr .shutdown( BufferedWriterShutdownMode::ZeroPadToNextMultiple(PAGE_SZ), ctx, ) .await?; file.disarm_into_inner() }; Ok((temp_dir, pathbuf, offsets)) } async fn round_trip_test_compressed( blobs: &[Vec<u8>], compression: bool, ) -> anyhow::Result<()> { let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error).with_scope_unit_test(); let (_temp_dir, pathbuf, offsets) = write_maybe_compressed(blobs, compression, &ctx).await?; println!("Done writing!"); let file = VirtualFile::open_v2(pathbuf, &ctx).await?; let rdr = BlockReaderRef::VirtualFile(&file); let rdr = BlockCursor::new_with_compression(rdr, compression); for (idx, (blob, offset)) in blobs.iter().zip(offsets.iter()).enumerate() { let blob_read = rdr.read_blob(*offset, &ctx).await?; assert_eq!( blob, &blob_read, "mismatch for idx={idx} at offset={offset}" ); } Ok(()) } pub(crate) fn random_array(len: usize) -> Vec<u8> { let mut rng = rand::rng(); (0..len).map(|_| rng.random()).collect::<_>() } #[tokio::test] async fn test_one() -> anyhow::Result<()> { let blobs = &[vec![12, 21, 22]]; round_trip_test(blobs).await?; Ok(()) } #[tokio::test] async fn test_hello_simple() -> anyhow::Result<()> { let blobs = &[ vec![0, 1, 2, 3], b"Hello, World!".to_vec(), Vec::new(), 
b"foobar".to_vec(), ]; round_trip_test(blobs).await?; round_trip_test_compressed(blobs, true).await?; Ok(()) } #[tokio::test] async fn test_really_big_array() -> anyhow::Result<()> { let blobs = &[ b"test".to_vec(), random_array(10 * PAGE_SZ), b"hello".to_vec(), random_array(66 * PAGE_SZ), vec![0xf3; 24 * PAGE_SZ], b"foobar".to_vec(), ]; round_trip_test(blobs).await?; round_trip_test_compressed(blobs, true).await?; Ok(()) } #[tokio::test] async fn test_arrays_inc() -> anyhow::Result<()> { let blobs = (0..PAGE_SZ / 8) .map(|v| random_array(v * 16)) .collect::<Vec<_>>(); round_trip_test(&blobs).await?; Ok(()) } #[tokio::test] async fn test_arrays_random_size() -> anyhow::Result<()> { let mut rng = rand::rngs::StdRng::seed_from_u64(42); let blobs = (0..1024) .map(|_| { let mut sz: u16 = rng.random(); // Make 50% of the arrays small if rng.random() { sz &= 63; } random_array(sz.into()) }) .collect::<Vec<_>>(); round_trip_test(&blobs).await?; Ok(()) } #[tokio::test] async fn test_arrays_page_boundary() -> anyhow::Result<()> { let blobs = &[ random_array(PAGE_SZ - 4), random_array(PAGE_SZ - 4), random_array(PAGE_SZ - 4), ]; round_trip_test(blobs).await?; Ok(()) } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/ephemeral_file.rs
pageserver/src/tenant/ephemeral_file.rs
//! Implementation of append-only file data structure //! used to keep in-memory layers spilled on disk. use std::io; use std::sync::Arc; use std::sync::atomic::{AtomicU64, Ordering}; use camino::Utf8PathBuf; use num_traits::Num; use pageserver_api::shard::TenantShardId; use tokio_epoll_uring::{BoundedBuf, Slice}; use tokio_util::sync::CancellationToken; use tracing::{error, info_span}; use utils::id::TimelineId; use utils::sync::gate::GateGuard; use crate::assert_u64_eq_usize::{U64IsUsize, UsizeIsU64}; use crate::config::PageServerConf; use crate::context::RequestContext; use crate::page_cache; use crate::tenant::storage_layer::inmemory_layer::GlobalResourceUnits; use crate::tenant::storage_layer::inmemory_layer::vectored_dio_read::File; use crate::virtual_file::owned_buffers_io::io_buf_aligned::IoBufAlignedMut; use crate::virtual_file::owned_buffers_io::slice::SliceMutExt; use crate::virtual_file::owned_buffers_io::write::{Buffer, FlushTaskError}; use crate::virtual_file::{self, IoBufferMut, TempVirtualFile, VirtualFile, owned_buffers_io}; use self::owned_buffers_io::write::OwnedAsyncWriter; pub struct EphemeralFile { _tenant_shard_id: TenantShardId, _timeline_id: TimelineId, page_cache_file_id: page_cache::FileId, file: TempVirtualFileCoOwnedByEphemeralFileAndBufferedWriter, buffered_writer: tokio::sync::RwLock<BufferedWriter>, bytes_written: AtomicU64, resource_units: std::sync::Mutex<GlobalResourceUnits>, } type BufferedWriter = owned_buffers_io::write::BufferedWriter< IoBufferMut, TempVirtualFileCoOwnedByEphemeralFileAndBufferedWriter, >; /// A TempVirtualFile that is co-owned by the [`EphemeralFile`]` and [`BufferedWriter`]. /// /// (Actually [`BufferedWriter`] internally is just a client to a background flush task. /// The co-ownership is between [`EphemeralFile`] and that flush task.) /// /// Co-ownership allows us to serve reads for data that has already been flushed by the [`BufferedWriter`]. 
#[derive(Debug, Clone)] struct TempVirtualFileCoOwnedByEphemeralFileAndBufferedWriter { inner: Arc<TempVirtualFile>, } const TAIL_SZ: usize = 64 * 1024; impl EphemeralFile { pub async fn create( conf: &PageServerConf, tenant_shard_id: TenantShardId, timeline_id: TimelineId, gate: &utils::sync::gate::Gate, cancel: &CancellationToken, ctx: &RequestContext, ) -> anyhow::Result<EphemeralFile> { // TempVirtualFile requires us to never reuse a filename while an old // instance of TempVirtualFile created with that filename is not done dropping yet. // So, we use a monotonic counter to disambiguate the filenames. static NEXT_TEMP_DISAMBIGUATOR: AtomicU64 = AtomicU64::new(1); let filename_disambiguator = NEXT_TEMP_DISAMBIGUATOR.fetch_add(1, std::sync::atomic::Ordering::Relaxed); let filename = conf .timeline_path(&tenant_shard_id, &timeline_id) .join(Utf8PathBuf::from(format!( "ephemeral-{filename_disambiguator}" ))); let file = TempVirtualFileCoOwnedByEphemeralFileAndBufferedWriter::new( VirtualFile::open_with_options_v2( &filename, virtual_file::OpenOptions::new() .create_new(true) .read(true) .write(true), ctx, ) .await?, gate.enter()?, ); let page_cache_file_id = page_cache::next_file_id(); // XXX get rid, we're not page-caching anymore Ok(EphemeralFile { _tenant_shard_id: tenant_shard_id, _timeline_id: timeline_id, page_cache_file_id, file: file.clone(), buffered_writer: tokio::sync::RwLock::new(BufferedWriter::new( file, 0, || IoBufferMut::with_capacity(TAIL_SZ), gate.enter()?, cancel.child_token(), ctx, info_span!(parent: None, "ephemeral_file_buffered_writer", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), timeline_id=%timeline_id, path = %filename), )), bytes_written: AtomicU64::new(0), resource_units: std::sync::Mutex::new(GlobalResourceUnits::new()), }) } } impl TempVirtualFileCoOwnedByEphemeralFileAndBufferedWriter { fn new(file: VirtualFile, gate_guard: GateGuard) -> Self { Self { inner: Arc::new(TempVirtualFile::new(file, 
gate_guard)), } } } impl OwnedAsyncWriter for TempVirtualFileCoOwnedByEphemeralFileAndBufferedWriter { fn write_all_at<Buf: owned_buffers_io::io_buf_aligned::IoBufAligned + Send>( &self, buf: owned_buffers_io::io_buf_ext::FullSlice<Buf>, offset: u64, ctx: &RequestContext, ) -> impl std::future::Future< Output = ( owned_buffers_io::io_buf_ext::FullSlice<Buf>, std::io::Result<()>, ), > + Send { self.inner.write_all_at(buf, offset, ctx) } fn set_len( &self, len: u64, ctx: &RequestContext, ) -> impl Future<Output = std::io::Result<()>> + Send { self.inner.set_len(len, ctx) } } impl std::ops::Deref for TempVirtualFileCoOwnedByEphemeralFileAndBufferedWriter { type Target = VirtualFile; fn deref(&self) -> &Self::Target { &self.inner } } #[derive(Debug, thiserror::Error)] pub(crate) enum EphemeralFileWriteError { #[error("cancelled")] Cancelled, } impl EphemeralFile { pub(crate) fn len(&self) -> u64 { // TODO(vlad): The value returned here is not always correct if // we have more than one concurrent writer. Writes are always // sequenced, but we could grab the buffered writer lock if we wanted // to. self.bytes_written.load(Ordering::Acquire) } pub(crate) fn page_cache_file_id(&self) -> page_cache::FileId { self.page_cache_file_id } pub(crate) async fn load_to_io_buf( &self, ctx: &RequestContext, ) -> Result<IoBufferMut, io::Error> { let size = self.len().into_usize(); let buf = IoBufferMut::with_capacity(size); let (slice, nread) = self.read_exact_at_eof_ok(0, buf.slice_full(), ctx).await?; assert_eq!(nread, size); let buf = slice.into_inner(); assert_eq!(buf.len(), nread); assert_eq!(buf.capacity(), size, "we shouldn't be reallocating"); Ok(buf) } /// Returns the offset at which the first byte of the input was written, for use /// in constructing indices over the written value. /// /// Panics if the write is short because there's no way we can recover from that. /// TODO: make upstack handle this as an error. 
pub(crate) async fn write_raw( &self, srcbuf: &[u8], ctx: &RequestContext, ) -> Result<u64, EphemeralFileWriteError> { let (pos, control) = self.write_raw_controlled(srcbuf, ctx).await?; if let Some(control) = control { control.release().await; } Ok(pos) } async fn write_raw_controlled( &self, srcbuf: &[u8], ctx: &RequestContext, ) -> Result<(u64, Option<owned_buffers_io::write::FlushControl>), EphemeralFileWriteError> { let mut writer = self.buffered_writer.write().await; let (nwritten, control) = writer .write_buffered_borrowed_controlled(srcbuf, ctx) .await .map_err(|e| match e { FlushTaskError::Cancelled => EphemeralFileWriteError::Cancelled, })?; assert_eq!( nwritten, srcbuf.len(), "buffered writer has no short writes" ); // There's no realistic risk of overflow here. We won't have exabytes sized files on disk. let pos = self .bytes_written .fetch_add(srcbuf.len().into_u64(), Ordering::AcqRel); let mut resource_units = self.resource_units.lock().unwrap(); resource_units.maybe_publish_size(self.bytes_written.load(Ordering::Relaxed)); Ok((pos, control)) } pub(crate) fn tick(&self) -> Option<u64> { let mut resource_units = self.resource_units.lock().unwrap(); let len = self.bytes_written.load(Ordering::Relaxed); resource_units.publish_size(len) } } impl super::storage_layer::inmemory_layer::vectored_dio_read::File for EphemeralFile { async fn read_exact_at_eof_ok<B: IoBufAlignedMut + Send>( &self, start: u64, mut dst: tokio_epoll_uring::Slice<B>, ctx: &RequestContext, ) -> std::io::Result<(tokio_epoll_uring::Slice<B>, usize)> { // We will fill the slice in back to front. Hence, we need // the slice to be fully initialized. // TODO(vlad): Is there a nicer way of doing this? dst.as_mut_rust_slice_full_zeroed(); let writer = self.buffered_writer.read().await; // Read bytes written while under lock. This is a hack to deal with concurrent // writes updating the number of bytes written. `bytes_written` is not DIO alligned // but we may end the read there. 
// // TODO(vlad): Feels like there's a nicer path where we align the end if it // shoots over the end of the file. let bytes_written = self.bytes_written.load(Ordering::Acquire); let dst_cap = dst.bytes_total().into_u64(); let end = { // saturating_add is correct here because the max file size is u64::MAX, so, // if start + dst.len() > u64::MAX, then we know it will be a short read let mut end: u64 = start.saturating_add(dst_cap); if end > bytes_written { end = bytes_written; } end }; let submitted_offset = writer.bytes_submitted(); let maybe_flushed = writer.inspect_maybe_flushed(); let mutable = match writer.inspect_mutable() { Some(mutable) => &mutable[0..mutable.pending()], None => { // Timeline::cancel and hence buffered writer flush was cancelled. // Remain read-available while timeline is shutting down. &[] } }; // inclusive, exclusive #[derive(Debug)] struct Range<N>(N, N); impl<N: Num + Clone + Copy + PartialOrd + Ord> Range<N> { fn len(&self) -> N { if self.0 > self.1 { N::zero() } else { self.1 - self.0 } } } let (written_range, maybe_flushed_range) = { if maybe_flushed.is_some() { // [ written ][ maybe_flushed ][ mutable ] // ^ // `submitted_offset` // <++++++ on disk +++++++????????????????> ( Range( start, std::cmp::min(end, submitted_offset.saturating_sub(TAIL_SZ as u64)), ), Range( std::cmp::max(start, submitted_offset.saturating_sub(TAIL_SZ as u64)), std::cmp::min(end, submitted_offset), ), ) } else { // [ written ][ mutable ] // ^ // `submitted_offset` // <++++++ on disk +++++++++++++++++++++++> ( Range(start, std::cmp::min(end, submitted_offset)), // zero len Range(submitted_offset, u64::MIN), ) } }; let mutable_range = Range(std::cmp::max(start, submitted_offset), end); // There are three sources from which we might have to read data: // 1. The file itself // 2. The buffer which contains changes currently being flushed // 3. 
The buffer which contains chnages yet to be flushed // // For better concurrency, we do them in reverse order: perform the in-memory // reads while holding the writer lock, drop the writer lock and read from the // file if required. let dst = if mutable_range.len() > 0 { let offset_in_buffer = mutable_range .0 .checked_sub(submitted_offset) .unwrap() .into_usize(); let to_copy = &mutable[offset_in_buffer..(offset_in_buffer + mutable_range.len().into_usize())]; let bounds = dst.bounds(); let mut view = dst.slice({ let start = written_range.len().into_usize() + maybe_flushed_range.len().into_usize(); let end = start.checked_add(mutable_range.len().into_usize()).unwrap(); start..end }); view.as_mut_rust_slice_full_zeroed() .copy_from_slice(to_copy); Slice::from_buf_bounds(Slice::into_inner(view), bounds) } else { dst }; let dst = if maybe_flushed_range.len() > 0 { let offset_in_buffer = maybe_flushed_range .0 .checked_sub(submitted_offset.saturating_sub(TAIL_SZ as u64)) .unwrap() .into_usize(); // Checked previously the buffer is Some. let maybe_flushed = maybe_flushed.unwrap(); let to_copy = &maybe_flushed [offset_in_buffer..(offset_in_buffer + maybe_flushed_range.len().into_usize())]; let bounds = dst.bounds(); let mut view = dst.slice({ let start = written_range.len().into_usize(); let end = start .checked_add(maybe_flushed_range.len().into_usize()) .unwrap(); start..end }); view.as_mut_rust_slice_full_zeroed() .copy_from_slice(to_copy); Slice::from_buf_bounds(Slice::into_inner(view), bounds) } else { dst }; drop(writer); let dst = if written_range.len() > 0 { let bounds = dst.bounds(); let slice = self .file .read_exact_at(dst.slice(0..written_range.len().into_usize()), start, ctx) .await?; Slice::from_buf_bounds(Slice::into_inner(slice), bounds) } else { dst }; // TODO: in debug mode, randomize the remaining bytes in `dst` to catch bugs Ok((dst, (end - start).into_usize())) } } /// Does the given filename look like an ephemeral file? 
pub fn is_ephemeral_file(filename: &str) -> bool { if let Some(rest) = filename.strip_prefix("ephemeral-") { rest.parse::<u32>().is_ok() } else { false } } #[cfg(test)] mod tests { use std::fs; use std::str::FromStr; use rand::Rng; use super::*; use crate::context::DownloadBehavior; use crate::task_mgr::TaskKind; fn harness( test_name: &str, ) -> Result< ( &'static PageServerConf, TenantShardId, TimelineId, RequestContext, ), io::Error, > { let repo_dir = PageServerConf::test_repo_dir(test_name); let _ = fs::remove_dir_all(&repo_dir); let conf = PageServerConf::dummy_conf(repo_dir); // Make a static copy of the config. This can never be free'd, but that's // OK in a test. let conf: &'static PageServerConf = Box::leak(Box::new(conf)); let tenant_shard_id = TenantShardId::from_str("11000000000000000000000000000000").unwrap(); let timeline_id = TimelineId::from_str("22000000000000000000000000000000").unwrap(); fs::create_dir_all(conf.timeline_path(&tenant_shard_id, &timeline_id))?; let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error).with_scope_unit_test(); Ok((conf, tenant_shard_id, timeline_id, ctx)) } #[tokio::test] async fn ephemeral_file_holds_gate_open() { const FOREVER: std::time::Duration = std::time::Duration::from_secs(5); let (conf, tenant_id, timeline_id, ctx) = harness("ephemeral_file_holds_gate_open").unwrap(); let gate = utils::sync::gate::Gate::default(); let cancel = CancellationToken::new(); let file = EphemeralFile::create(conf, tenant_id, timeline_id, &gate, &cancel, &ctx) .await .unwrap(); let mut closing = tokio::task::spawn(async move { gate.close().await; }); // gate is entered until the ephemeral file is dropped // do not start paused tokio-epoll-uring has a sleep loop tokio::time::pause(); tokio::time::timeout(FOREVER, &mut closing) .await .expect_err("closing cannot complete before dropping"); // this is a requirement of the reset_tenant functionality: we have to be able to restart a // tenant fast, and for that, we 
need all tenant_dir operations be guarded by entering a gate drop(file); tokio::time::timeout(FOREVER, &mut closing) .await .expect("closing completes right away") .expect("closing does not panic"); } #[tokio::test] async fn test_ephemeral_file_basics() { let (conf, tenant_id, timeline_id, ctx) = harness("test_ephemeral_file_basics").unwrap(); let gate = utils::sync::gate::Gate::default(); let cancel = CancellationToken::new(); let file = EphemeralFile::create(conf, tenant_id, timeline_id, &gate, &cancel, &ctx) .await .unwrap(); let writer = file.buffered_writer.read().await; let mutable = writer.mutable(); let cap = mutable.capacity(); let align = mutable.align(); drop(writer); let write_nbytes = cap * 2 + cap / 2; let content: Vec<u8> = rand::rng() .sample_iter(rand::distr::StandardUniform) .take(write_nbytes) .collect(); let mut value_offsets = Vec::new(); for range in (0..write_nbytes) .step_by(align) .map(|start| start..(start + align).min(write_nbytes)) { let off = file.write_raw(&content[range], &ctx).await.unwrap(); value_offsets.push(off); } assert_eq!(file.len() as usize, write_nbytes); for (i, range) in (0..write_nbytes) .step_by(align) .map(|start| start..(start + align).min(write_nbytes)) .enumerate() { assert_eq!(value_offsets[i], range.start.into_u64()); let buf = IoBufferMut::with_capacity(range.len()); let (buf_slice, nread) = file .read_exact_at_eof_ok(range.start.into_u64(), buf.slice_full(), &ctx) .await .unwrap(); let buf = buf_slice.into_inner(); assert_eq!(nread, range.len()); assert_eq!(&buf, &content[range]); } let file_contents = std::fs::read(file.file.path()).unwrap(); assert!(file_contents == content[0..cap * 2]); let writer = file.buffered_writer.read().await; let maybe_flushed_buffer_contents = writer.inspect_maybe_flushed().unwrap(); assert_eq!(&maybe_flushed_buffer_contents[..], &content[cap..cap * 2]); let mutable_buffer_contents = writer.mutable(); assert_eq!(mutable_buffer_contents, &content[cap * 2..write_nbytes]); } 
#[tokio::test] async fn test_flushes_do_happen() { let (conf, tenant_id, timeline_id, ctx) = harness("test_flushes_do_happen").unwrap(); let gate = utils::sync::gate::Gate::default(); let cancel = CancellationToken::new(); let file = EphemeralFile::create(conf, tenant_id, timeline_id, &gate, &cancel, &ctx) .await .unwrap(); // mutable buffer and maybe_flushed buffer each has `cap` bytes. let writer = file.buffered_writer.read().await; let cap = writer.mutable().capacity(); drop(writer); let content: Vec<u8> = rand::rng() .sample_iter(rand::distr::StandardUniform) .take(cap * 2 + cap / 2) .collect(); file.write_raw(&content, &ctx).await.unwrap(); // assert the state is as this test expects it to be let load_io_buf_res = file.load_to_io_buf(&ctx).await.unwrap(); assert_eq!(&load_io_buf_res[..], &content[0..cap * 2 + cap / 2]); let md = file.file.path().metadata().unwrap(); assert_eq!( md.len(), 2 * cap.into_u64(), "buffered writer requires one write to be flushed if we write 2.5x buffer capacity" ); let writer = file.buffered_writer.read().await; assert_eq!( &writer.inspect_maybe_flushed().unwrap()[0..cap], &content[cap..cap * 2] ); assert_eq!( &writer.mutable()[0..cap / 2], &content[cap * 2..cap * 2 + cap / 2] ); } #[tokio::test] async fn test_read_split_across_file_and_buffer() { // This test exercises the logic on the read path that splits the logical read // into a read from the flushed part (= the file) and a copy from the buffered writer's buffer. 
// // This test build on the assertions in test_flushes_do_happen let (conf, tenant_id, timeline_id, ctx) = harness("test_read_split_across_file_and_buffer").unwrap(); let gate = utils::sync::gate::Gate::default(); let cancel = CancellationToken::new(); let file = EphemeralFile::create(conf, tenant_id, timeline_id, &gate, &cancel, &ctx) .await .unwrap(); let writer = file.buffered_writer.read().await; let mutable = writer.mutable(); let cap = mutable.capacity(); let align = mutable.align(); drop(writer); let content: Vec<u8> = rand::rng() .sample_iter(rand::distr::StandardUniform) .take(cap * 2 + cap / 2) .collect(); let (_, control) = file.write_raw_controlled(&content, &ctx).await.unwrap(); let test_read = |start: usize, len: usize| { let file = &file; let ctx = &ctx; let content = &content; async move { let (buf, nread) = file .read_exact_at_eof_ok( start.into_u64(), IoBufferMut::with_capacity(len).slice_full(), ctx, ) .await .unwrap(); assert_eq!(nread, len); assert_eq!(&buf.into_inner(), &content[start..(start + len)]); } }; let test_read_all_offset_combinations = || { async move { test_read(align, align).await; // border onto edge of file test_read(cap - align, align).await; // read across file and buffer test_read(cap - align, 2 * align).await; // stay from start of maybe flushed buffer test_read(cap, align).await; // completely within maybe flushed buffer test_read(cap + align, align).await; // border onto edge of maybe flushed buffer. test_read(cap * 2 - align, align).await; // read across maybe flushed and mutable buffer test_read(cap * 2 - align, 2 * align).await; // read across three segments test_read(cap - align, cap + 2 * align).await; // completely within mutable buffer test_read(cap * 2 + align, align).await; } }; // completely within the file range assert!(align < cap, "test assumption"); assert!(cap % align == 0); // test reads at different flush stages. 
let not_started = control.unwrap().into_not_started(); test_read_all_offset_combinations().await; let in_progress = not_started.ready_to_flush(); test_read_all_offset_combinations().await; in_progress.wait_until_flush_is_done().await; test_read_all_offset_combinations().await; } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/disk_btree.rs
pageserver/src/tenant/disk_btree.rs
//! //! Simple on-disk B-tree implementation //! //! This is used as the index structure within image and delta layers //! //! Features: //! - Fixed-width keys //! - Fixed-width values (VALUE_SZ) //! - The tree is created in a bulk operation. Insert/deletion after creation //! is not supported //! - page-oriented //! //! TODO: //! - maybe something like an Adaptive Radix Tree would be more efficient? //! - the values stored by image and delta layers are offsets into the file, //! and they are in monotonically increasing order. Prefix compression would //! be very useful for them, too. //! - An Iterator interface would be more convenient for the callers than the //! 'visit' function //! use std::cmp::Ordering; use std::iter::Rev; use std::ops::{Range, RangeInclusive}; use std::{io, result}; use async_stream::try_stream; use byteorder::{BE, ReadBytesExt}; use bytes::BufMut; use either::Either; use futures::{Stream, StreamExt}; use hex; use thiserror::Error; use tracing::error; use crate::context::RequestContext; use crate::tenant::block_io::{BlockReader, BlockWriter}; use crate::virtual_file::{IoBuffer, IoBufferMut, owned_buffers_io::write::Buffer}; // The maximum size of a value stored in the B-tree. 5 bytes is enough currently. 
pub const VALUE_SZ: usize = 5; pub const MAX_VALUE: u64 = 0x007f_ffff_ffff; pub const PAGE_SZ: usize = 8192; #[derive(Clone, Copy, Debug)] struct Value([u8; VALUE_SZ]); impl Value { fn from_slice(slice: &[u8]) -> Value { let mut b = [0u8; VALUE_SZ]; b.copy_from_slice(slice); Value(b) } fn from_u64(x: u64) -> Value { assert!(x <= 0x007f_ffff_ffff); Value([ (x >> 32) as u8, (x >> 24) as u8, (x >> 16) as u8, (x >> 8) as u8, x as u8, ]) } fn from_blknum(x: u32) -> Value { Value([ 0x80, (x >> 24) as u8, (x >> 16) as u8, (x >> 8) as u8, x as u8, ]) } #[allow(dead_code)] fn is_offset(self) -> bool { self.0[0] & 0x80 != 0 } fn to_u64(self) -> u64 { let b = &self.0; ((b[0] as u64) << 32) | ((b[1] as u64) << 24) | ((b[2] as u64) << 16) | ((b[3] as u64) << 8) | b[4] as u64 } fn to_blknum(self) -> u32 { let b = &self.0; assert!(b[0] == 0x80); ((b[1] as u32) << 24) | ((b[2] as u32) << 16) | ((b[3] as u32) << 8) | b[4] as u32 } } #[derive(Error, Debug)] pub enum DiskBtreeError { #[error("Attempt to append a value that is too large {0} > {}", MAX_VALUE)] AppendOverflow(u64), #[error("Unsorted input: key {key:?} is <= last_key {last_key:?}")] UnsortedInput { key: Box<[u8]>, last_key: Box<[u8]> }, #[error("Could not push to new leaf node")] FailedToPushToNewLeafNode, #[error("IoError: {0}")] Io(#[from] io::Error), } pub type Result<T> = result::Result<T, DiskBtreeError>; /// This is the on-disk representation. struct OnDiskNode<'a, const L: usize> { // Fixed-width fields num_children: u16, level: u8, prefix_len: u8, suffix_len: u8, // Variable-length fields. These are stored on-disk after the fixed-width // fields, in this order. In the in-memory representation, these point to // the right parts in the page buffer. prefix: &'a [u8], keys: &'a [u8], values: &'a [u8], } impl<const L: usize> OnDiskNode<'_, L> { /// /// Interpret a PAGE_SZ page as a node. 
/// fn deparse(buf: &[u8]) -> Result<OnDiskNode<L>> { let mut cursor = std::io::Cursor::new(buf); let num_children = cursor.read_u16::<BE>()?; let level = cursor.read_u8()?; let prefix_len = cursor.read_u8()?; let suffix_len = cursor.read_u8()?; let mut off = cursor.position(); let prefix_off = off as usize; off += prefix_len as u64; let keys_off = off as usize; let keys_len = num_children as usize * suffix_len as usize; off += keys_len as u64; let values_off = off as usize; let values_len = num_children as usize * VALUE_SZ; //off += values_len as u64; let prefix = &buf[prefix_off..prefix_off + prefix_len as usize]; let keys = &buf[keys_off..keys_off + keys_len]; let values = &buf[values_off..values_off + values_len]; Ok(OnDiskNode { num_children, level, prefix_len, suffix_len, prefix, keys, values, }) } /// /// Read a value at 'idx' /// fn value(&self, idx: usize) -> Value { let value_off = idx * VALUE_SZ; let value_slice = &self.values[value_off..value_off + VALUE_SZ]; Value::from_slice(value_slice) } fn binary_search( &self, search_key: &[u8; L], keybuf: &mut [u8], ) -> result::Result<usize, usize> { let mut size = self.num_children as usize; let mut low = 0; let mut high = size; while low < high { let mid = low + size / 2; let key_off = mid * self.suffix_len as usize; let suffix = &self.keys[key_off..key_off + self.suffix_len as usize]; // Does this match? keybuf[self.prefix_len as usize..].copy_from_slice(suffix); let cmp = keybuf[..].cmp(search_key); if cmp == Ordering::Less { low = mid + 1; } else if cmp == Ordering::Greater { high = mid; } else { return Ok(mid); } size = high - low; } Err(low) } } /// /// Public reader object, to search the tree. 
///
#[derive(Clone)]
pub struct DiskBtreeReader<R, const L: usize>
where
    R: BlockReader,
{
    // Block number of the first page of this tree within `reader`; all
    // in-tree block numbers are relative to this.
    start_blk: u32,
    // Tree-relative block number of the root node.
    root_blk: u32,
    reader: R,
}

/// Direction of a [`DiskBtreeReader::visit`] scan.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum VisitDirection {
    Forwards,
    Backwards,
}

impl<R, const L: usize> DiskBtreeReader<R, L>
where
    R: BlockReader,
{
    /// Create a reader over an already-written tree. `start_blk` and
    /// `root_blk` are the values that `DiskBtreeBuilder::finish` handed back.
    pub fn new(start_blk: u32, root_blk: u32, reader: R) -> Self {
        DiskBtreeReader {
            start_blk,
            root_blk,
            reader,
        }
    }

    ///
    /// Read the value for given key. Returns the value, or None if it doesn't exist.
    ///
    pub async fn get(&self, search_key: &[u8; L], ctx: &RequestContext) -> Result<Option<u64>> {
        let mut result: Option<u64> = None;
        // Scan forwards from the key; the first visited entry either equals
        // the key (hit) or is greater (miss). Returning false stops the scan
        // after one entry either way.
        self.visit(
            search_key,
            VisitDirection::Forwards,
            |key, value| {
                if key == search_key {
                    result = Some(value);
                }
                false
            },
            ctx,
        )
        .await?;
        Ok(result)
    }

    /// Convenience wrapper around [`Self::into_stream`] that pins the stream
    /// into a [`DiskBtreeIterator`]. Consumes the reader (see the note on
    /// `into_stream` about lifetimes).
    pub fn iter<'a>(self, start_key: &'a [u8; L], ctx: &'a RequestContext) -> DiskBtreeIterator<'a>
    where
        R: 'a + Send,
    {
        DiskBtreeIterator {
            stream: Box::pin(self.into_stream(start_key, ctx)),
        }
    }

    /// Return a stream which yields all key, value pairs from the index
    /// starting from the first key greater or equal to `start_key`.
    ///
    /// Note 1: that this is a copy of [`Self::visit`].
    /// TODO: Once the sequential read path is removed this will become
    /// the only index traversal method.
    ///
    /// Note 2: this function used to take `&self` but it now consumes `self`. This is due to
    /// the lifetime constraints of the reader and the stream / iterator it creates. Using `&self`
    /// requires the reader to be present when the stream is used, and this creates a lifetime
    /// dependency between the reader and the stream. Now if we want to create an iterator that
    /// holds the stream, someone will need to keep a reference to the reader, which is inconvenient
    /// to use from the image/delta layer APIs.
    ///
    /// Feel free to add the `&self` variant back if it's necessary.
    pub fn into_stream<'a>(
        self,
        start_key: &'a [u8; L],
        ctx: &'a RequestContext,
    ) -> impl Stream<Item = std::result::Result<(Vec<u8>, u64), DiskBtreeError>> + 'a
    where
        R: 'a,
    {
        try_stream! {
            // Depth-first descent. Each stack entry is (block number, optional
            // saved position iterator); a `None` iterator means the node has
            // not been positioned yet and needs a binary search first.
            let mut stack = Vec::new();
            stack.push((self.root_blk, None));
            let block_cursor = self.reader.block_cursor();
            let mut node_buf = [0_u8; PAGE_SZ];
            while let Some((node_blknum, opt_iter)) = stack.pop() {
                // Read the node, through the PS PageCache, into local variable `node_buf`.
                // We could keep the page cache read guard alive, but, at the time of writing,
                // we run quite small PS PageCache s => can't risk running out of
                // PageCache space because this stream isn't consumed fast enough.
                let page_read_guard = block_cursor
                    .read_blk(self.start_blk + node_blknum, ctx)
                    .await?;
                node_buf.copy_from_slice(page_read_guard.as_ref());
                drop(page_read_guard); // drop page cache read guard early

                let node = OnDiskNode::deparse(&node_buf)?;
                let prefix_len = node.prefix_len as usize;
                let suffix_len = node.suffix_len as usize;

                assert!(node.num_children > 0);

                // Scratch buffer holding prefix + current suffix, i.e. a full key.
                let mut keybuf = Vec::new();
                keybuf.extend(node.prefix);
                keybuf.resize(prefix_len + suffix_len, 0);

                let mut iter: Either<Range<usize>, Rev<RangeInclusive<usize>>> = if let Some(iter) = opt_iter {
                    iter
                } else {
                    // Locate the first match
                    let idx = match node.binary_search(start_key, keybuf.as_mut_slice()) {
                        Ok(idx) => idx,
                        Err(idx) => {
                            if node.level == 0 {
                                // Imagine that the node contains the following keys:
                                //
                                // 1
                                // 3 <-- idx
                                // 5
                                //
                                // If the search key is '2' and there is exact match,
                                // the binary search would return the index of key
                                // '3'. That's cool, '3' is the first key to return.
                                idx
                            } else {
                                // This is an internal page, so each key represents a lower
                                // bound for what's in the child page. If there is no exact
                                // match, we have to return the *previous* entry.
                                //
                                // 1 <-- return this
                                // 3 <-- idx
                                // 5
                                idx.saturating_sub(1)
                            }
                        }
                    };
                    Either::Left(idx..node.num_children.into())
                };

                // idx points to the first match now. Keep going from there
                while let Some(idx) = iter.next() {
                    let key_off = idx * suffix_len;
                    let suffix = &node.keys[key_off..key_off + suffix_len];
                    keybuf[prefix_len..].copy_from_slice(suffix);
                    let value = node.value(idx);
                    #[allow(clippy::collapsible_if)]
                    if node.level == 0 {
                        // leaf
                        yield (keybuf.clone(), value.to_u64());
                    } else {
                        // Internal node: remember where we were in this node,
                        // then descend into the child before resuming.
                        stack.push((node_blknum, Some(iter)));
                        stack.push((value.to_blknum(), None));
                        break;
                    }
                }
            }
        }
    }

    ///
    /// Scan the tree, starting from 'search_key', in the given direction. 'visitor'
    /// will be called for every key >= 'search_key' (or <= 'search_key', if scanning
    /// backwards)
    ///
    pub async fn visit<V>(
        &self,
        search_key: &[u8; L],
        dir: VisitDirection,
        mut visitor: V,
        ctx: &RequestContext,
    ) -> Result<bool>
    where
        V: FnMut(&[u8], u64) -> bool,
    {
        // Same stack-of-resumable-iterators walk as `into_stream`; see the
        // comments there. Returns Ok(false) if the visitor stopped the scan,
        // Ok(true) if the scan ran off the end of the tree.
        let mut stack = Vec::new();
        stack.push((self.root_blk, None));
        let block_cursor = self.reader.block_cursor();
        while let Some((node_blknum, opt_iter)) = stack.pop() {
            // Locate the node.
            let node_buf = block_cursor
                .read_blk(self.start_blk + node_blknum, ctx)
                .await?;

            let node = OnDiskNode::deparse(node_buf.as_ref())?;
            let prefix_len = node.prefix_len as usize;
            let suffix_len = node.suffix_len as usize;

            assert!(node.num_children > 0);

            let mut keybuf = Vec::new();
            keybuf.extend(node.prefix);
            keybuf.resize(prefix_len + suffix_len, 0);

            let mut iter = if let Some(iter) = opt_iter {
                iter
            } else if dir == VisitDirection::Forwards {
                // Locate the first match
                let idx = match node.binary_search(search_key, keybuf.as_mut_slice()) {
                    Ok(idx) => idx,
                    Err(idx) => {
                        if node.level == 0 {
                            // Imagine that the node contains the following keys:
                            //
                            // 1
                            // 3 <-- idx
                            // 5
                            //
                            // If the search key is '2' and there is exact match,
                            // the binary search would return the index of key
                            // '3'. That's cool, '3' is the first key to return.
                            idx
                        } else {
                            // This is an internal page, so each key represents a lower
                            // bound for what's in the child page. If there is no exact
                            // match, we have to return the *previous* entry.
                            //
                            // 1 <-- return this
                            // 3 <-- idx
                            // 5
                            idx.saturating_sub(1)
                        }
                    }
                };
                Either::Left(idx..node.num_children.into())
            } else {
                let idx = match node.binary_search(search_key, keybuf.as_mut_slice()) {
                    Ok(idx) => {
                        // Exact match. That's the first entry to return, and walk
                        // backwards from there.
                        idx
                    }
                    Err(idx) => {
                        // No exact match. The binary search returned the index of the
                        // first key that's > search_key. Back off by one, and walk
                        // backwards from there.
                        if let Some(idx) = idx.checked_sub(1) {
                            idx
                        } else {
                            return Ok(false);
                        }
                    }
                };
                Either::Right((0..=idx).rev())
            };

            // idx points to the first match now. Keep going from there
            while let Some(idx) = iter.next() {
                let key_off = idx * suffix_len;
                let suffix = &node.keys[key_off..key_off + suffix_len];
                keybuf[prefix_len..].copy_from_slice(suffix);
                let value = node.value(idx);
                #[allow(clippy::collapsible_if)]
                if node.level == 0 {
                    // leaf
                    if !visitor(&keybuf, value.to_u64()) {
                        return Ok(false);
                    }
                } else {
                    stack.push((node_blknum, Some(iter)));
                    stack.push((value.to_blknum(), None));
                    break;
                }
            }
        }
        Ok(true)
    }

    /// Debugging aid: print the whole tree (node headers plus every
    /// key/value) to stdout.
    #[allow(dead_code)]
    pub async fn dump(&self, ctx: &RequestContext) -> Result<()> {
        // Stack entries: (block, hex path from root, depth, child index within
        // the node, byte offset of that child's key in the node's key array).
        let mut stack = Vec::new();

        stack.push((self.root_blk, String::new(), 0, 0, 0));

        let block_cursor = self.reader.block_cursor();

        while let Some((blknum, path, depth, child_idx, key_off)) = stack.pop() {
            let blk = block_cursor.read_blk(self.start_blk + blknum, ctx).await?;
            let buf: &[u8] = blk.as_ref();
            let node = OnDiskNode::<L>::deparse(buf)?;

            if child_idx == 0 {
                print!("{:indent$}", "", indent = depth * 2);
                let path_prefix = stack
                    .iter()
                    .map(|(_blknum, path, ..)| path.as_str())
                    .collect::<String>();
                println!(
                    "blk #{blknum}: path {path_prefix}{path}: prefix {}, suffix_len {}",
                    hex::encode(node.prefix),
                    node.suffix_len
                );
            }

            if child_idx + 1 < node.num_children {
                // Re-push this node so its next child is printed after the
                // current child's subtree.
                let key_off = key_off + node.suffix_len as usize;
                stack.push((blknum, path.clone(), depth, child_idx + 1, key_off));
            }
            let key = &node.keys[key_off..key_off + node.suffix_len as usize];
            let val = node.value(child_idx as usize);

            print!("{:indent$}", "", indent = depth * 2 + 2);
            println!("{}: {}", hex::encode(key), hex::encode(val.0));

            if node.level > 0 {
                stack.push((val.to_blknum(), hex::encode(node.prefix), depth + 1, 0, 0));
            }
        }
        Ok(())
    }
}

/// Owned, pinned wrapper around the stream produced by
/// [`DiskBtreeReader::into_stream`]; created via [`DiskBtreeReader::iter`].
pub struct DiskBtreeIterator<'a> {
    #[allow(clippy::type_complexity)]
    stream: std::pin::Pin<
        Box<dyn Stream<Item = std::result::Result<(Vec<u8>, u64), DiskBtreeError>> + 'a + Send>,
    >,
}

impl DiskBtreeIterator<'_> {
    /// Yield the next (key, value) pair, or None when the scan is exhausted.
    pub async fn next(&mut self) -> Option<std::result::Result<(Vec<u8>, u64), DiskBtreeError>> {
        self.stream.next().await
    }
}

///
/// Public builder object, for creating a new tree.
///
/// Usage: Create a builder object by calling 'new', load all the data into the
/// tree by calling 'append' for each key-value pair, and then call 'finish'
///
/// 'L' is the key length in bytes
pub struct DiskBtreeBuilder<W, const L: usize>
where
    W: BlockWriter,
{
    writer: W,

    ///
    /// `stack[0]` is the current root page, `stack.last()` is the leaf.
    ///
    /// We maintain the length of the stack to be always greater than zero.
    /// Two exceptions are:
    /// 1. `Self::flush_node`. The method will push the new node if it extracted the last one.
    ///    So because other methods cannot see the intermediate state invariant still holds.
    /// 2. `Self::finish`. It consumes self and does not return it back,
    ///    which means that this is where the structure is destroyed.
    ///    Thus stack of zero length cannot be observed by other methods.
    stack: Vec<BuildNode<L>>,

    /// Last key that was appended to the tree. Used to sanity check that append
    /// is called in increasing key order.
    last_key: Option<[u8; L]>,
}

impl<W, const L: usize> DiskBtreeBuilder<W, L>
where
    W: BlockWriter,
{
    /// Start building a new, empty tree on top of `writer`. The stack starts
    /// with a single empty leaf (level 0), upholding the non-empty-stack
    /// invariant documented on the struct.
    pub fn new(writer: W) -> Self {
        DiskBtreeBuilder {
            writer,
            last_key: None,
            stack: vec![BuildNode::new(0)],
        }
    }

    /// Append one key/value pair. Keys must be appended in strictly
    /// increasing order, and `value` must fit in 40 bits (<= MAX_VALUE);
    /// otherwise returns `UnsortedInput` / `AppendOverflow` respectively.
    pub fn append(&mut self, key: &[u8; L], value: u64) -> Result<()> {
        if value > MAX_VALUE {
            return Err(DiskBtreeError::AppendOverflow(value));
        }
        if let Some(last_key) = &self.last_key {
            if key <= last_key {
                return Err(DiskBtreeError::UnsortedInput {
                    key: key.as_slice().into(),
                    last_key: last_key.as_slice().into(),
                });
            }
        }
        self.last_key = Some(*key);

        self.append_internal(key, Value::from_u64(value))
    }

    /// Append an already-encoded Value to the current bottommost node,
    /// spilling full nodes to disk as needed. Also used by `flush_node` to
    /// append downlinks to parent nodes.
    fn append_internal(&mut self, key: &[u8; L], value: Value) -> Result<()> {
        // Try to append to the current leaf buffer
        let last = self
            .stack
            .last_mut()
            .expect("should always have at least one item");
        let level = last.level;
        if last.push(key, value) {
            return Ok(());
        }

        // It did not fit. Try to compress, and if it succeeds to make
        // some room on the node, try appending to it again.
        #[allow(clippy::collapsible_if)]
        if last.compress() {
            if last.push(key, value) {
                return Ok(());
            }
        }

        // Could not append to the current leaf. Flush it and create a new one.
        self.flush_node()?;

        // Replace the node we flushed with an empty one and append the new
        // key to it.
        let mut last = BuildNode::new(level);
        if !last.push(key, value) {
            return Err(DiskBtreeError::FailedToPushToNewLeafNode);
        }

        self.stack.push(last);

        Ok(())
    }

    /// Flush the bottommost node in the stack to disk. Appends a downlink to its parent,
    /// and recursively flushes the parent too, if it becomes full. If the root page becomes full,
    /// creates a new root page, increasing the height of the tree.
    fn flush_node(&mut self) -> Result<()> {
        // Get the current bottommost node in the stack and flush it to disk.
        let last = self
            .stack
            .pop()
            .expect("should always have at least one item");
        let buf = last.pack();
        let downlink_key = last.first_key();
        let downlink_ptr = self.writer.write_blk(buf)?;

        // Append the downlink to the parent. If there is no parent, ie. this was the root page,
        // create a new root page, increasing the height of the tree.
        if self.stack.is_empty() {
            self.stack.push(BuildNode::new(last.level + 1));
        }
        // NOTE: recursion happens through append_internal -> flush_node if
        // the parent is full too.
        self.append_internal(&downlink_key, Value::from_blknum(downlink_ptr))
    }

    ///
    /// Flushes everything to disk, and returns the block number of the root page.
    /// The caller must store the root block number "out-of-band", and pass it
    /// to the DiskBtreeReader::new() when you want to read the tree again.
    /// (In the image and delta layers, it is stored in the beginning of the file,
    /// in the summary header)
    ///
    pub fn finish(mut self) -> Result<(u32, W)> {
        // flush all levels, except the root.
        while self.stack.len() > 1 {
            self.flush_node()?;
        }

        let root = self
            .stack
            .first()
            .expect("by the check above we left one item there");
        let buf = root.pack();
        let root_blknum = self.writer.write_blk(buf)?;

        Ok((root_blknum, self.writer))
    }

    /// Access the underlying block writer without consuming the builder.
    pub fn borrow_writer(&self) -> &W {
        &self.writer
    }
}

///
/// BuildNode represents an incomplete page that we are appending to.
///
#[derive(Clone, Debug)]
struct BuildNode<const L: usize> {
    num_children: u16,
    level: u8,
    prefix: Vec<u8>,
    suffix_len: usize,

    keys: Vec<u8>,
    values: Vec<u8>,

    size: usize, // physical size of this node, if it was written to disk like this
}

const NODE_SIZE: usize = PAGE_SZ;

// num_children (u16) + level + prefix_len + suffix_len (u8 each); must match
// the header layout read back by OnDiskNode::deparse.
const NODE_HDR_SIZE: usize = 2 + 1 + 1 + 1;

impl<const L: usize> BuildNode<L> {
    /// Fresh, empty node at the given tree level (0 = leaf).
    fn new(level: u8) -> Self {
        BuildNode {
            num_children: 0,
            level,
            prefix: Vec::new(),
            suffix_len: 0,
            keys: Vec::new(),
            values: Vec::new(),
            size: NODE_HDR_SIZE,
        }
    }

    /// Try to append a key-value pair to this node. Returns 'true' on
    /// success, 'false' if the page was full or the key was
    /// incompatible with the prefix of the existing keys.
    fn push(&mut self, key: &[u8; L], value: Value) -> bool {
        // If we have already performed prefix-compression on the page,
        // check that the incoming key has the same prefix.
        if self.num_children > 0 {
            // does the prefix allow it?
            if !key.starts_with(&self.prefix) {
                return false;
            }
        } else {
            self.suffix_len = key.len();
        }

        // Is the node too full?
        if self.size + self.suffix_len + VALUE_SZ >= NODE_SIZE {
            return false;
        }

        // All clear
        self.num_children += 1;
        self.keys.extend(&key[self.prefix.len()..]);
        self.values.extend(value.0);

        assert!(self.keys.len() == self.num_children as usize * self.suffix_len);
        assert!(self.values.len() == self.num_children as usize * VALUE_SZ);

        self.size += self.suffix_len + VALUE_SZ;

        true
    }

    ///
    /// Perform prefix-compression.
    ///
    /// Returns 'true' on success, 'false' if no compression was possible.
    ///
    fn compress(&mut self) -> bool {
        let first_suffix = self.first_suffix();
        let last_suffix = self.last_suffix();

        // Find the common prefix among all keys
        // (keys are sorted, so comparing the first and last suffix suffices).
        let mut prefix_len = 0;
        while prefix_len < self.suffix_len {
            if first_suffix[prefix_len] != last_suffix[prefix_len] {
                break;
            }
            prefix_len += 1;
        }
        if prefix_len == 0 {
            return false;
        }

        // Can compress. Rewrite the keys without the common prefix.
        self.prefix.extend(&self.keys[..prefix_len]);

        let mut new_keys = Vec::new();
        let mut key_off = 0;
        while key_off < self.keys.len() {
            let next_key_off = key_off + self.suffix_len;
            new_keys.extend(&self.keys[key_off + prefix_len..next_key_off]);
            key_off = next_key_off;
        }
        self.keys = new_keys;
        self.suffix_len -= prefix_len;

        // The prefix bytes are now stored once instead of once per child.
        self.size -= prefix_len * self.num_children as usize;
        self.size += prefix_len;

        assert!(self.keys.len() == self.num_children as usize * self.suffix_len);
        assert!(self.values.len() == self.num_children as usize * VALUE_SZ);

        true
    }

    ///
    /// Serialize the node to on-disk format.
    ///
    fn pack(&self) -> IoBuffer {
        assert!(self.keys.len() == self.num_children as usize * self.suffix_len);
        assert!(self.values.len() == self.num_children as usize * VALUE_SZ);
        assert!(self.num_children > 0);
        let mut buf = IoBufferMut::with_capacity(PAGE_SZ);

        // Layout must match what OnDiskNode::deparse reads back:
        // fixed header, then prefix, keys, values.
        buf.put_u16(self.num_children);
        buf.put_u8(self.level);
        buf.put_u8(self.prefix.len() as u8);
        buf.put_u8(self.suffix_len as u8);
        buf.put(&self.prefix[..]);
        buf.put(&self.keys[..]);
        buf.put(&self.values[..]);

        assert!(buf.len() == self.size);

        assert!(buf.len() <= PAGE_SZ);
        // Zero-pad to a full page.
        buf.extend_with(0, PAGE_SZ - buf.len());
        buf.freeze()
    }

    /// Suffix of the first (smallest) key stored on this page.
    fn first_suffix(&self) -> &[u8] {
        &self.keys[..self.suffix_len]
    }
    /// Suffix of the last (largest) key stored on this page.
    fn last_suffix(&self) -> &[u8] {
        &self.keys[self.keys.len() - self.suffix_len..]
    }

    /// Return the full first key of the page, including the prefix
    fn first_key(&self) -> [u8; L] {
        let mut key = [0u8; L];
        key[..self.prefix.len()].copy_from_slice(&self.prefix);
        key[self.prefix.len()..].copy_from_slice(self.first_suffix());
        key
    }
}

#[cfg(test)]
pub(crate) mod tests {
    use std::collections::BTreeMap;
    use std::sync::atomic::{AtomicUsize, Ordering};

    use rand::Rng;

    use super::*;
    use crate::context::DownloadBehavior;
    use crate::task_mgr::TaskKind;
    use crate::tenant::block_io::{BlockCursor, BlockLease, BlockReaderRef};

    /// In-memory "disk" backing the tests: each written block is kept in a Vec.
    #[derive(Clone, Default)]
    pub(crate) struct TestDisk {
        blocks: Vec<IoBuffer>,
    }
    impl TestDisk {
        fn new() -> Self {
            Self::default()
        }
        pub(crate) fn read_blk(&self, blknum: u32) -> io::Result<BlockLease> {
            let mut buf = [0u8; PAGE_SZ];
            buf.copy_from_slice(&self.blocks[blknum as usize]);
            Ok(std::sync::Arc::new(buf).into())
        }
    }
    impl BlockReader for TestDisk {
        fn block_cursor(&self) -> BlockCursor<'_> {
            BlockCursor::new(BlockReaderRef::TestDisk(self))
        }
    }
    impl BlockWriter for &mut TestDisk {
        fn write_blk(&mut self, buf: IoBuffer) -> io::Result<u32> {
            let blknum = self.blocks.len();
            self.blocks.push(buf);
            Ok(blknum as u32)
        }
    }

    #[tokio::test]
    async fn basic() -> Result<()> {
        let mut disk = TestDisk::new();
        let mut writer =
DiskBtreeBuilder::<_, 6>::new(&mut disk); let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error).with_scope_unit_test(); let all_keys: Vec<&[u8; 6]> = vec![ b"xaaaaa", b"xaaaba", b"xaaaca", b"xabaaa", b"xababa", b"xabaca", b"xabada", b"xabadb", ]; let all_data: Vec<(&[u8; 6], u64)> = all_keys .iter() .enumerate() .map(|(idx, key)| (*key, idx as u64)) .collect(); for (key, val) in all_data.iter() { writer.append(key, *val)?; } let (root_offset, _writer) = writer.finish()?; let reader = DiskBtreeReader::new(0, root_offset, disk); reader.dump(&ctx).await?; // Test the `get` function on all the keys. for (key, val) in all_data.iter() { assert_eq!(reader.get(key, &ctx).await?, Some(*val)); } // And on some keys that don't exist assert_eq!(reader.get(b"aaaaaa", &ctx).await?, None); assert_eq!(reader.get(b"zzzzzz", &ctx).await?, None); assert_eq!(reader.get(b"xaaabx", &ctx).await?, None); // Test search with `visit` function let search_key = b"xabaaa"; let expected: Vec<(Vec<u8>, u64)> = all_data .iter() .filter(|(key, _value)| key[..] >= search_key[..]) .map(|(key, value)| (key.to_vec(), *value)) .collect(); let mut data = Vec::new(); reader .visit( search_key, VisitDirection::Forwards, |key, value| { data.push((key.to_vec(), value)); true }, &ctx, ) .await?; assert_eq!(data, expected); // Test a backwards scan let mut expected: Vec<(Vec<u8>, u64)> = all_data .iter() .filter(|(key, _value)| key[..] 
<= search_key[..]) .map(|(key, value)| (key.to_vec(), *value)) .collect(); expected.reverse(); let mut data = Vec::new(); reader .visit( search_key, VisitDirection::Backwards, |key, value| { data.push((key.to_vec(), value)); true }, &ctx, ) .await?; assert_eq!(data, expected); // Backward scan where nothing matches reader .visit( b"aaaaaa", VisitDirection::Backwards, |key, value| { panic!("found unexpected key {}: {}", hex::encode(key), value); }, &ctx, ) .await?; // Full scan let expected: Vec<(Vec<u8>, u64)> = all_data .iter() .map(|(key, value)| (key.to_vec(), *value)) .collect(); let mut data = Vec::new(); reader .visit( &[0u8; 6], VisitDirection::Forwards, |key, value| { data.push((key.to_vec(), value)); true }, &ctx, ) .await?; assert_eq!(data, expected); Ok(()) } #[tokio::test] async fn lots_of_keys() -> Result<()> { let mut disk = TestDisk::new(); let mut writer = DiskBtreeBuilder::<_, 8>::new(&mut disk); let ctx =
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/metadata.rs
pageserver/src/tenant/metadata.rs
//! Describes the legacy now hopefully no longer modified per-timeline metadata.
//!
//! It is stored in `index_part.json` managed by [`remote_timeline_client`]. For many tenants and
//! their timelines, this struct and its original serialization format is still needed because
//! they were written a long time ago.
//!
//! Instead of changing and adding versioning to this, just change [`IndexPart`] with soft json
//! versioning.
//!
//! To clean up this module we need to migrate all index_part.json files to a later version.
//! While doing this, we need to be mindful about s3 based recovery as well, so it might take
//! however long we keep the old versions to be able to delete the old code. After that, we can
//! remove everything else than [`TimelineMetadataBodyV2`], rename it as `TimelineMetadata` and
//! move it to `index.rs`. Before doing all of this, we need to keep the structures for backwards
//! compatibility.
//!
//! [`remote_timeline_client`]: super::remote_timeline_client
//! [`IndexPart`]: super::remote_timeline_client::index::IndexPart

use anyhow::ensure;
use postgres_ffi::PgMajorVersion;
use serde::{Deserialize, Serialize};
use utils::bin_ser::{BeSer, SerializeError};
use utils::id::TimelineId;
use utils::lsn::Lsn;

/// Use special format number to enable backward compatibility.
const METADATA_FORMAT_VERSION: u16 = 4;

/// Previous supported format versions.
///
/// In practice, none of these should remain, all are [`METADATA_FORMAT_VERSION`], but confirming
/// that requires a scrubber run which is yet to be done.
const METADATA_OLD_FORMAT_VERSION: u16 = 3;

/// When the file existed on disk we assumed that a write of up to METADATA_MAX_SIZE bytes is atomic.
///
/// This is the same assumption that PostgreSQL makes with the control file,
///
/// see PG_CONTROL_MAX_SAFE_SIZE
const METADATA_MAX_SIZE: usize = 512;

/// Legacy metadata stored as a component of `index_part.json` per timeline.
///
/// Do not make new changes to this type or the module. In production, we have two different kinds
/// of serializations of this type: bincode and json. Bincode version reflects what used to be
/// stored on disk in earlier versions and does internal crc32 checksumming.
///
/// This type should not implement `serde::Serialize` or `serde::Deserialize` because there would
/// be a confusion whether you want the old version ([`TimelineMetadata::from_bytes`]) or the modern
/// as-exists in `index_part.json` ([`self::modern_serde`]).
///
/// ```compile_fail
/// #[derive(serde::Serialize)]
/// struct DoNotDoThis(pageserver::tenant::metadata::TimelineMetadata);
/// ```
///
/// ```compile_fail
/// #[derive(serde::Deserialize)]
/// struct NeitherDoThis(pageserver::tenant::metadata::TimelineMetadata);
/// ```
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TimelineMetadata {
    hdr: TimelineMetadataHeader,
    body: TimelineMetadataBodyV2,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
struct TimelineMetadataHeader {
    checksum: u32,       // CRC of serialized metadata body
    size: u16,           // size of serialized metadata
    format_version: u16, // metadata format version (used for compatibility checks)
}

impl TryFrom<&TimelineMetadataBodyV2> for TimelineMetadataHeader {
    type Error = Crc32CalculationFailed;

    fn try_from(value: &TimelineMetadataBodyV2) -> Result<Self, Self::Error> {
        // Streaming crc32 sink: lets us checksum the bincode serialization
        // without materializing the byte buffer.
        #[derive(Default)]
        struct Crc32Sink {
            crc: u32,
            count: usize,
        }

        impl std::io::Write for Crc32Sink {
            fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
                self.crc = crc32c::crc32c_append(self.crc, buf);
                self.count += buf.len();
                Ok(buf.len())
            }

            fn flush(&mut self) -> std::io::Result<()> {
                Ok(())
            }
        }

        // jump through hoops to calculate the crc32 so that TimelineMetadata::ne works
        // across serialization versions
        let mut sink = Crc32Sink::default();
        <TimelineMetadataBodyV2 as utils::bin_ser::BeSer>::ser_into(value, &mut sink)
            .map_err(Crc32CalculationFailed)?;

        let size = METADATA_HDR_SIZE + sink.count;

        Ok(TimelineMetadataHeader {
            checksum: sink.crc,
            size: size as u16,
            format_version: METADATA_FORMAT_VERSION,
        })
    }
}

#[derive(thiserror::Error, Debug)]
#[error("re-serializing for crc32 failed")]
struct Crc32CalculationFailed(#[source] utils::bin_ser::SerializeError);

// NOTE(review): this relies on the in-memory layout of the header matching
// its serialized size (u32 + u16 + u16 = 8 bytes) — confirm if the header
// fields ever change.
const METADATA_HDR_SIZE: usize = size_of::<TimelineMetadataHeader>();

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
struct TimelineMetadataBodyV2 {
    disk_consistent_lsn: Lsn,

    // This is only set if we know it. We track it in memory when the page
    // server is running, but we only track the value corresponding to
    // 'last_record_lsn', not 'disk_consistent_lsn' which can lag behind by a
    // lot. We only store it in the metadata file when we flush *all* the
    // in-memory data so that 'last_record_lsn' is the same as
    // 'disk_consistent_lsn'. That's OK, because after page server restart, as
    // soon as we reprocess at least one record, we will have a valid
    // 'prev_record_lsn' value in memory again. This is only really needed when
    // doing a clean shutdown, so that there is no more WAL beyond
    // 'disk_consistent_lsn'
    prev_record_lsn: Option<Lsn>,

    ancestor_timeline: Option<TimelineId>,
    ancestor_lsn: Lsn,

    // The LSN at which GC was last executed. Synonym of [`Timeline::applied_gc_cutoff_lsn`].
    latest_gc_cutoff_lsn: Lsn,

    initdb_lsn: Lsn,
    pg_version: PgMajorVersion,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
struct TimelineMetadataBodyV1 {
    disk_consistent_lsn: Lsn,

    // This is only set if we know it. We track it in memory when the page
    // server is running, but we only track the value corresponding to
    // 'last_record_lsn', not 'disk_consistent_lsn' which can lag behind by a
    // lot. We only store it in the metadata file when we flush *all* the
    // in-memory data so that 'last_record_lsn' is the same as
    // 'disk_consistent_lsn'. That's OK, because after page server restart, as
    // soon as we reprocess at least one record, we will have a valid
    // 'prev_record_lsn' value in memory again.
    // This is only really needed when
    // doing a clean shutdown, so that there is no more WAL beyond
    // 'disk_consistent_lsn'
    prev_record_lsn: Option<Lsn>,

    ancestor_timeline: Option<TimelineId>,
    ancestor_lsn: Lsn,
    latest_gc_cutoff_lsn: Lsn,
    initdb_lsn: Lsn,
}

impl TimelineMetadata {
    /// Build a new in-memory instance. The header's checksum and size are
    /// left zeroed here; they are computed when serializing (`to_bytes`).
    pub fn new(
        disk_consistent_lsn: Lsn,
        prev_record_lsn: Option<Lsn>,
        ancestor_timeline: Option<TimelineId>,
        ancestor_lsn: Lsn,
        latest_gc_cutoff_lsn: Lsn,
        initdb_lsn: Lsn,
        pg_version: PgMajorVersion,
    ) -> Self {
        Self {
            hdr: TimelineMetadataHeader {
                checksum: 0,
                size: 0,
                format_version: METADATA_FORMAT_VERSION,
            },
            body: TimelineMetadataBodyV2 {
                disk_consistent_lsn,
                prev_record_lsn,
                ancestor_timeline,
                ancestor_lsn,
                latest_gc_cutoff_lsn,
                initdb_lsn,
                pg_version,
            },
        }
    }

    #[cfg(test)]
    pub(crate) fn with_recalculated_checksum(mut self) -> anyhow::Result<Self> {
        self.hdr = TimelineMetadataHeader::try_from(&self.body)?;
        Ok(self)
    }

    /// Upgrade a V3-format bincode blob (body V1, no pg_version) to the
    /// current in-memory representation. Called by `from_bytes` when the
    /// stored format version is older than METADATA_FORMAT_VERSION.
    fn upgrade_timeline_metadata(metadata_bytes: &[u8]) -> anyhow::Result<Self> {
        let mut hdr = TimelineMetadataHeader::des(&metadata_bytes[0..METADATA_HDR_SIZE])?;

        // backward compatible only up to this version
        ensure!(
            hdr.format_version == METADATA_OLD_FORMAT_VERSION,
            "unsupported metadata format version {}",
            hdr.format_version
        );

        let metadata_size = hdr.size as usize;

        let body: TimelineMetadataBodyV1 =
            TimelineMetadataBodyV1::des(&metadata_bytes[METADATA_HDR_SIZE..metadata_size])?;

        let body = TimelineMetadataBodyV2 {
            disk_consistent_lsn: body.disk_consistent_lsn,
            prev_record_lsn: body.prev_record_lsn,
            ancestor_timeline: body.ancestor_timeline,
            ancestor_lsn: body.ancestor_lsn,
            latest_gc_cutoff_lsn: body.latest_gc_cutoff_lsn,
            initdb_lsn: body.initdb_lsn,
            pg_version: PgMajorVersion::PG14, // All timelines created before this version had pg_version 14
        };

        hdr.format_version = METADATA_FORMAT_VERSION;

        Ok(Self { hdr, body })
    }

    /// Deserialize the legacy fixed-size (METADATA_MAX_SIZE) bincode blob,
    /// verifying the crc32 checksum and upgrading old format versions.
    pub fn from_bytes(metadata_bytes: &[u8]) -> anyhow::Result<Self> {
        ensure!(
            metadata_bytes.len() == METADATA_MAX_SIZE,
            "metadata bytes size is wrong"
        );
        let hdr = TimelineMetadataHeader::des(&metadata_bytes[0..METADATA_HDR_SIZE])?;

        let metadata_size = hdr.size as usize;
        ensure!(
            metadata_size <= METADATA_MAX_SIZE,
            "corrupted metadata file"
        );
        let calculated_checksum = crc32c::crc32c(&metadata_bytes[METADATA_HDR_SIZE..metadata_size]);
        ensure!(
            hdr.checksum == calculated_checksum,
            "metadata checksum mismatch"
        );

        if hdr.format_version != METADATA_FORMAT_VERSION {
            // If metadata has the old format,
            // upgrade it and return the result
            TimelineMetadata::upgrade_timeline_metadata(metadata_bytes)
        } else {
            let body =
                TimelineMetadataBodyV2::des(&metadata_bytes[METADATA_HDR_SIZE..metadata_size])?;
            ensure!(
                body.disk_consistent_lsn.is_aligned(),
                "disk_consistent_lsn is not aligned"
            );
            Ok(TimelineMetadata { hdr, body })
        }
    }

    /// Serialize to the legacy fixed-size bincode format: header (with freshly
    /// computed size and crc32 of the body) followed by the body, zero-padded
    /// to METADATA_MAX_SIZE bytes.
    pub fn to_bytes(&self) -> Result<Vec<u8>, SerializeError> {
        let body_bytes = self.body.ser()?;
        let metadata_size = METADATA_HDR_SIZE + body_bytes.len();
        let hdr = TimelineMetadataHeader {
            size: metadata_size as u16,
            format_version: METADATA_FORMAT_VERSION,
            checksum: crc32c::crc32c(&body_bytes),
        };
        let hdr_bytes = hdr.ser()?;
        let mut metadata_bytes = vec![0u8; METADATA_MAX_SIZE];
        metadata_bytes[0..METADATA_HDR_SIZE].copy_from_slice(&hdr_bytes);
        metadata_bytes[METADATA_HDR_SIZE..metadata_size].copy_from_slice(&body_bytes);
        Ok(metadata_bytes)
    }

    /// [`Lsn`] that corresponds to the corresponding timeline directory
    /// contents, stored locally in the pageserver workdir.
    pub fn disk_consistent_lsn(&self) -> Lsn {
        self.body.disk_consistent_lsn
    }

    pub fn prev_record_lsn(&self) -> Option<Lsn> {
        self.body.prev_record_lsn
    }

    pub fn ancestor_timeline(&self) -> Option<TimelineId> {
        self.body.ancestor_timeline
    }

    pub fn ancestor_lsn(&self) -> Lsn {
        self.body.ancestor_lsn
    }

    /// When reparenting, the `ancestor_lsn` does not change.
    ///
    /// Idempotent: repeating the call is allowed (only the ancestor timeline
    /// id is overwritten). Despite the historical wording of this doc, the
    /// method returns nothing.
    pub fn reparent(&mut self, timeline: &TimelineId) {
        assert!(self.body.ancestor_timeline.is_some());
        // no assertion for redoing this: it's fine, we may have to repeat this multiple times over
        self.body.ancestor_timeline = Some(*timeline);
    }

    /// Clears the ancestor timeline and resets `ancestor_lsn` to 0.
    /// (Historically documented as "Returns true if anything was changed",
    /// but the method returns nothing and always overwrites.)
    pub fn detach_from_ancestor(&mut self, branchpoint: &(TimelineId, Lsn)) {
        // Detaching from ancestor now doesn't always detach directly to the direct ancestor, but we
        // ensure the LSN is the same. So we don't check the timeline ID.
        if self.body.ancestor_lsn != Lsn(0) {
            assert_eq!(self.body.ancestor_lsn, branchpoint.1);
        }
        self.body.ancestor_timeline = None;
        self.body.ancestor_lsn = Lsn(0);
    }

    pub fn latest_gc_cutoff_lsn(&self) -> Lsn {
        self.body.latest_gc_cutoff_lsn
    }

    pub fn initdb_lsn(&self) -> Lsn {
        self.body.initdb_lsn
    }

    pub fn pg_version(&self) -> PgMajorVersion {
        self.body.pg_version
    }

    // Checksums make it awkward to build a valid instance by hand. This helper
    // provides a TimelineMetadata with a valid checksum in its header.
    pub fn example() -> Self {
        let instance = Self::new(
            "0/16960E8".parse::<Lsn>().unwrap(),
            None,
            None,
            Lsn::from_hex("00000000").unwrap(),
            Lsn::from_hex("00000000").unwrap(),
            Lsn::from_hex("00000000").unwrap(),
            PgMajorVersion::PG14,
        );

        // Round-trip through bytes so hdr.checksum/size get filled in.
        let bytes = instance.to_bytes().unwrap();

        Self::from_bytes(&bytes).unwrap()
    }

    /// Apply a regular metadata update (see `MetadataUpdate`); ancestor and
    /// initdb fields are deliberately not touched here.
    pub(crate) fn apply(&mut self, update: &MetadataUpdate) {
        self.body.disk_consistent_lsn = update.disk_consistent_lsn;
        self.body.prev_record_lsn = update.prev_record_lsn;
        self.body.latest_gc_cutoff_lsn = update.latest_gc_cutoff_lsn;
    }
}

/// serde adapters for embedding `TimelineMetadata` in `index_part.json`:
/// reads both the legacy byte-array form and the modern json object form,
/// but always writes the modern form.
pub(crate) mod modern_serde {
    use serde::{Deserialize, Serialize};

    use super::{TimelineMetadata, TimelineMetadataBodyV2, TimelineMetadataHeader};

    pub(crate) fn deserialize<'de, D>(deserializer: D) -> Result<TimelineMetadata, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        // for legacy reasons versions 1-5 had TimelineMetadata serialized as a Vec<u8> field with
        // BeSer.
struct Visitor; impl<'d> serde::de::Visitor<'d> for Visitor { type Value = TimelineMetadata; fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { f.write_str("BeSer bytes or json structure") } fn visit_seq<A>(self, seq: A) -> Result<Self::Value, A::Error> where A: serde::de::SeqAccess<'d>, { use serde::de::Error; let de = serde::de::value::SeqAccessDeserializer::new(seq); Vec::<u8>::deserialize(de) .map(|v| TimelineMetadata::from_bytes(&v).map_err(A::Error::custom))? } fn visit_map<A>(self, map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'d>, { use serde::de::Error; let de = serde::de::value::MapAccessDeserializer::new(map); let body = TimelineMetadataBodyV2::deserialize(de)?; let hdr = TimelineMetadataHeader::try_from(&body).map_err(A::Error::custom)?; Ok(TimelineMetadata { hdr, body }) } } deserializer.deserialize_any(Visitor) } pub(crate) fn serialize<S>( metadata: &TimelineMetadata, serializer: S, ) -> Result<S::Ok, S::Error> where S: serde::Serializer, { // header is not needed, upon reading we've upgraded all v1 to v2 metadata.body.serialize(serializer) } #[test] fn deserializes_bytes_as_well_as_equivalent_body_v2() { #[derive(serde::Deserialize, serde::Serialize)] struct Wrapper( #[serde(deserialize_with = "deserialize", serialize_with = "serialize")] TimelineMetadata, ); let too_many_bytes = 
"[216,111,252,208,0,54,0,4,0,0,0,0,1,73,253,144,1,0,0,0,0,1,73,253,24,0,0,0,0,0,0,0,0,0,0,0,0,0,1,73,253,24,0,0,0,0,1,73,253,24,0,0,0,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]"; let wrapper_from_bytes = serde_json::from_str::<Wrapper>(too_many_bytes).unwrap(); let serialized = serde_json::to_value(&wrapper_from_bytes).unwrap(); assert_eq!( serialized, serde_json::json! {{ "disk_consistent_lsn": "0/149FD90", "prev_record_lsn": "0/149FD18", "ancestor_timeline": null, "ancestor_lsn": "0/0", "latest_gc_cutoff_lsn": "0/149FD18", "initdb_lsn": "0/149FD18", "pg_version": 15 }} ); let wrapper_from_json = serde_json::value::from_value::<Wrapper>(serialized).unwrap(); assert_eq!(wrapper_from_bytes.0, wrapper_from_json.0); } } /// Parts of the metadata which are regularly modified. 
pub(crate) struct MetadataUpdate { disk_consistent_lsn: Lsn, prev_record_lsn: Option<Lsn>, latest_gc_cutoff_lsn: Lsn, } impl MetadataUpdate { pub(crate) fn new( disk_consistent_lsn: Lsn, prev_record_lsn: Option<Lsn>, latest_gc_cutoff_lsn: Lsn, ) -> Self { Self { disk_consistent_lsn, prev_record_lsn, latest_gc_cutoff_lsn, } } } #[cfg(test)] mod tests { use super::*; use crate::tenant::harness::TIMELINE_ID; #[test] fn metadata_serializes_correctly() { let original_metadata = TimelineMetadata::new( Lsn(0x200), Some(Lsn(0x100)), Some(TIMELINE_ID), Lsn(0), Lsn(0), Lsn(0), // Any version will do here, so use the default crate::DEFAULT_PG_VERSION, ); let metadata_bytes = original_metadata .to_bytes() .expect("Should serialize correct metadata to bytes"); let deserialized_metadata = TimelineMetadata::from_bytes(&metadata_bytes) .expect("Should deserialize its own bytes"); assert_eq!( deserialized_metadata.body, original_metadata.body, "Metadata that was serialized to bytes and deserialized back should not change" ); } // Generate old version metadata and read it with current code. 
// Ensure that it is upgraded correctly #[test] fn test_metadata_upgrade() { #[derive(Debug, Clone, PartialEq, Eq)] struct TimelineMetadataV1 { hdr: TimelineMetadataHeader, body: TimelineMetadataBodyV1, } let metadata_v1 = TimelineMetadataV1 { hdr: TimelineMetadataHeader { checksum: 0, size: 0, format_version: METADATA_OLD_FORMAT_VERSION, }, body: TimelineMetadataBodyV1 { disk_consistent_lsn: Lsn(0x200), prev_record_lsn: Some(Lsn(0x100)), ancestor_timeline: Some(TIMELINE_ID), ancestor_lsn: Lsn(0), latest_gc_cutoff_lsn: Lsn(0), initdb_lsn: Lsn(0), }, }; impl TimelineMetadataV1 { pub fn to_bytes(&self) -> anyhow::Result<Vec<u8>> { let body_bytes = self.body.ser()?; let metadata_size = METADATA_HDR_SIZE + body_bytes.len(); let hdr = TimelineMetadataHeader { size: metadata_size as u16, format_version: METADATA_OLD_FORMAT_VERSION, checksum: crc32c::crc32c(&body_bytes), }; let hdr_bytes = hdr.ser()?; let mut metadata_bytes = vec![0u8; METADATA_MAX_SIZE]; metadata_bytes[0..METADATA_HDR_SIZE].copy_from_slice(&hdr_bytes); metadata_bytes[METADATA_HDR_SIZE..metadata_size].copy_from_slice(&body_bytes); Ok(metadata_bytes) } } let metadata_bytes = metadata_v1 .to_bytes() .expect("Should serialize correct metadata to bytes"); // This should deserialize to the latest version format let deserialized_metadata = TimelineMetadata::from_bytes(&metadata_bytes) .expect("Should deserialize its own bytes"); let expected_metadata = TimelineMetadata::new( Lsn(0x200), Some(Lsn(0x100)), Some(TIMELINE_ID), Lsn(0), Lsn(0), Lsn(0), PgMajorVersion::PG14, // All timelines created before this version had pg_version 14 ); assert_eq!( deserialized_metadata.body, expected_metadata.body, "Metadata of the old version {METADATA_OLD_FORMAT_VERSION} should be upgraded to the latest version {METADATA_FORMAT_VERSION}" ); } #[test] fn test_metadata_bincode_serde_ensure_roundtrip() { let original_metadata = TimelineMetadata::new( Lsn(0x200), Some(Lsn(0x100)), Some(TIMELINE_ID), Lsn(0), Lsn(0), Lsn(0), // 
Updating this version to 17 will cause the test to fail at the // next assert_eq!(). PgMajorVersion::PG16, ); let expected_bytes = vec![ /* TimelineMetadataHeader */ 74, 104, 158, 105, 0, 70, 0, 4, // checksum, size, format_version (4 + 2 + 2) /* TimelineMetadataBodyV2 */ 0, 0, 0, 0, 0, 0, 2, 0, // disk_consistent_lsn (8 bytes) 1, 0, 0, 0, 0, 0, 0, 1, 0, // prev_record_lsn (9 bytes) 1, 17, 34, 51, 68, 85, 102, 119, 136, 17, 34, 51, 68, 85, 102, 119, 136, // ancestor_timeline (17 bytes) 0, 0, 0, 0, 0, 0, 0, 0, // ancestor_lsn (8 bytes) 0, 0, 0, 0, 0, 0, 0, 0, // latest_gc_cutoff_lsn (8 bytes) 0, 0, 0, 0, 0, 0, 0, 0, // initdb_lsn (8 bytes) 0, 0, 0, 16, // pg_version (4 bytes) /* padding bytes */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; let metadata_ser_bytes = original_metadata.to_bytes().unwrap(); assert_eq!(metadata_ser_bytes, expected_bytes); let expected_metadata = { let mut temp_metadata = original_metadata; let body_bytes = temp_metadata .body .ser() .expect("Cannot serialize the metadata body"); let metadata_size = METADATA_HDR_SIZE + body_bytes.len(); let hdr = TimelineMetadataHeader { size: metadata_size as u16, format_version: METADATA_FORMAT_VERSION, checksum: crc32c::crc32c(&body_bytes), }; temp_metadata.hdr = hdr; temp_metadata }; let des_metadata = TimelineMetadata::from_bytes(&metadata_ser_bytes).unwrap(); assert_eq!(des_metadata, expected_metadata); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/vectored_blob_io.rs
pageserver/src/tenant/vectored_blob_io.rs
//! //! Utilities for vectored reading of variable-sized "blobs". //! //! The "blob" api is an abstraction on top of the "block" api, //! with the main difference being that blobs do not have a fixed //! size (each blob is prefixed with 1 or 4 byte length field) //! //! The vectored apis provided in this module allow for planning //! and executing disk IO which covers multiple blobs. //! //! Reads are planned with [`VectoredReadPlanner`] which will coalesce //! adjacent blocks into a single disk IO request and exectuted by //! [`VectoredBlobReader`] which does all the required offset juggling //! and returns a buffer housing all the blobs and a list of offsets. //! //! Note that the vectored blob api does *not* go through the page cache. use std::collections::BTreeMap; use std::ops::Deref; use bytes::Bytes; use pageserver_api::key::Key; use tokio::io::AsyncWriteExt; use tokio_epoll_uring::BoundedBuf; use utils::lsn::Lsn; use utils::vec_map::VecMap; use crate::context::RequestContext; use crate::tenant::blob_io::{BYTE_UNCOMPRESSED, BYTE_ZSTD, Header}; use crate::virtual_file::{self, IoBufferMut, VirtualFile}; /// Metadata bundled with the start and end offset of a blob. #[derive(Copy, Clone, Debug)] pub struct BlobMeta { pub key: Key, pub lsn: Lsn, pub will_init: bool, } /// A view into the vectored blobs read buffer. #[derive(Clone, Debug)] pub(crate) enum BufView<'a> { Slice(&'a [u8]), Bytes(bytes::Bytes), } impl<'a> BufView<'a> { /// Creates a new slice-based view on the blob. pub fn new_slice(slice: &'a [u8]) -> Self { Self::Slice(slice) } /// Creates a new [`bytes::Bytes`]-based view on the blob. pub fn new_bytes(bytes: bytes::Bytes) -> Self { Self::Bytes(bytes) } /// Convert the view into `Bytes`. /// /// If using slice as the underlying storage, the copy will be an O(n) operation. 
pub fn into_bytes(self) -> Bytes { match self { BufView::Slice(slice) => Bytes::copy_from_slice(slice), BufView::Bytes(bytes) => bytes, } } /// Creates a sub-view of the blob based on the range. fn view(&self, range: std::ops::Range<usize>) -> Self { match self { BufView::Slice(slice) => BufView::Slice(&slice[range]), BufView::Bytes(bytes) => BufView::Bytes(bytes.slice(range)), } } } impl Deref for BufView<'_> { type Target = [u8]; fn deref(&self) -> &Self::Target { match self { BufView::Slice(slice) => slice, BufView::Bytes(bytes) => bytes, } } } impl AsRef<[u8]> for BufView<'_> { fn as_ref(&self) -> &[u8] { match self { BufView::Slice(slice) => slice, BufView::Bytes(bytes) => bytes.as_ref(), } } } impl<'a> From<&'a [u8]> for BufView<'a> { fn from(value: &'a [u8]) -> Self { Self::new_slice(value) } } impl From<Bytes> for BufView<'_> { fn from(value: Bytes) -> Self { Self::new_bytes(value) } } /// Blob offsets into [`VectoredBlobsBuf::buf`]. The byte ranges is potentially compressed, /// subject to [`VectoredBlob::compression_bits`]. pub struct VectoredBlob { /// Blob metadata. pub meta: BlobMeta, /// Header start offset. header_start: usize, /// Data start offset. data_start: usize, /// End offset. end: usize, /// Compression used on the data, extracted from the header. compression_bits: u8, } impl VectoredBlob { /// Reads a decompressed view of the blob. 
pub(crate) async fn read<'a>(&self, buf: &BufView<'a>) -> Result<BufView<'a>, std::io::Error> { let view = buf.view(self.data_start..self.end); match self.compression_bits { BYTE_UNCOMPRESSED => Ok(view), BYTE_ZSTD => { let mut decompressed_vec = Vec::new(); let mut decoder = async_compression::tokio::write::ZstdDecoder::new(&mut decompressed_vec); decoder.write_all(&view).await?; decoder.flush().await?; // Zero-copy conversion from `Vec` to `Bytes` Ok(BufView::new_bytes(Bytes::from(decompressed_vec))) } bits => { let error = std::io::Error::new( std::io::ErrorKind::InvalidData, format!( "Failed to decompress blob for {}@{}, {}..{}: invalid compression byte {bits:x}", self.meta.key, self.meta.lsn, self.data_start, self.end ), ); Err(error) } } } /// Returns the raw blob including header. pub(crate) fn raw_with_header<'a>(&self, buf: &BufView<'a>) -> BufView<'a> { buf.view(self.header_start..self.end) } } impl std::fmt::Display for VectoredBlob { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "{}@{}, {}..{}", self.meta.key, self.meta.lsn, self.data_start, self.end ) } } /// Return type of [`VectoredBlobReader::read_blobs`] pub struct VectoredBlobsBuf { /// Buffer for all blobs in this read pub buf: IoBufferMut, /// Offsets into the buffer and metadata for all blobs in this read pub blobs: Vec<VectoredBlob>, } /// Description of one disk read for multiple blobs. /// Used as the argument form [`VectoredBlobReader::read_blobs`] #[derive(Debug)] pub struct VectoredRead { pub start: u64, pub end: u64, /// Start offset and metadata for each blob in this read pub blobs_at: VecMap<u64, BlobMeta>, } impl VectoredRead { pub(crate) fn size(&self) -> usize { (self.end - self.start) as usize } } #[derive(Eq, PartialEq, Debug)] pub(crate) enum VectoredReadExtended { Yes, No, } /// A vectored read builder that tries to coalesce all reads that fits in a chunk. 
pub(crate) struct ChunkedVectoredReadBuilder { /// Start block number start_blk_no: usize, /// End block number (exclusive). end_blk_no: usize, /// Start offset and metadata for each blob in this read blobs_at: VecMap<u64, BlobMeta>, max_read_size: Option<usize>, } impl ChunkedVectoredReadBuilder { const CHUNK_SIZE: usize = virtual_file::get_io_buffer_alignment(); /// Start building a new vectored read. /// /// Note that by design, this does not check against reading more than `max_read_size` to /// support reading larger blobs than the configuration value. The builder will be single use /// however after that. fn new_impl( start_offset: u64, end_offset: u64, meta: BlobMeta, max_read_size: Option<usize>, ) -> Self { let mut blobs_at = VecMap::default(); blobs_at .append(start_offset, meta) .expect("First insertion always succeeds"); let start_blk_no = start_offset as usize / Self::CHUNK_SIZE; let end_blk_no = (end_offset as usize).div_ceil(Self::CHUNK_SIZE); Self { start_blk_no, end_blk_no, blobs_at, max_read_size, } } pub(crate) fn new( start_offset: u64, end_offset: u64, meta: BlobMeta, max_read_size: usize, ) -> Self { Self::new_impl(start_offset, end_offset, meta, Some(max_read_size)) } pub(crate) fn new_streaming(start_offset: u64, end_offset: u64, meta: BlobMeta) -> Self { Self::new_impl(start_offset, end_offset, meta, None) } /// Attempts to extend the current read with a new blob if the new blob resides in the same or the immediate next chunk. /// /// The resulting size also must be below the max read size. 
pub(crate) fn extend(&mut self, start: u64, end: u64, meta: BlobMeta) -> VectoredReadExtended { tracing::trace!(start, end, "trying to extend"); let start_blk_no = start as usize / Self::CHUNK_SIZE; let end_blk_no = (end as usize).div_ceil(Self::CHUNK_SIZE); let not_limited_by_max_read_size = { if let Some(max_read_size) = self.max_read_size { let coalesced_size = (end_blk_no - self.start_blk_no) * Self::CHUNK_SIZE; coalesced_size <= max_read_size } else { true } }; // True if the second block starts in the same block or the immediate next block where the first block ended. // // Note: This automatically handles the case where two blocks are adjacent to each other, // whether they starts on chunk size boundary or not. let is_adjacent_chunk_read = { // 1. first.end & second.start are in the same block self.end_blk_no == start_blk_no + 1 || // 2. first.end ends one block before second.start self.end_blk_no == start_blk_no }; if is_adjacent_chunk_read && not_limited_by_max_read_size { self.end_blk_no = end_blk_no; self.blobs_at .append(start, meta) .expect("LSNs are ordered within vectored reads"); return VectoredReadExtended::Yes; } VectoredReadExtended::No } pub(crate) fn size(&self) -> usize { (self.end_blk_no - self.start_blk_no) * Self::CHUNK_SIZE } pub(crate) fn build(self) -> VectoredRead { let start = (self.start_blk_no * Self::CHUNK_SIZE) as u64; let end = (self.end_blk_no * Self::CHUNK_SIZE) as u64; VectoredRead { start, end, blobs_at: self.blobs_at, } } } #[derive(Copy, Clone, Debug)] pub enum BlobFlag { None, Ignore, ReplaceAll, } /// Planner for vectored blob reads. /// /// Blob offsets are received via [`VectoredReadPlanner::handle`] /// and coalesced into disk reads. /// /// The implementation is very simple: /// * Collect all blob offsets in an ordered structure /// * Iterate over the collected blobs and coalesce them into reads at the end pub struct VectoredReadPlanner { // Track all the blob offsets. Start offsets must be ordered. 
// Values in the value tuples are: // ( // lsn of the blob, // start offset of the blob in the underlying file, // end offset of the blob in the underlying file, // whether the blob initializes the page image or not // see [`pageserver_api::record::NeonWalRecord::will_init`] // ) blobs: BTreeMap<Key, Vec<(Lsn, u64, u64, bool)>>, // Arguments for previous blob passed into [`VectoredReadPlanner::handle`] prev: Option<(Key, Lsn, u64, BlobFlag)>, max_read_size: usize, } impl VectoredReadPlanner { pub fn new(max_read_size: usize) -> Self { Self { blobs: BTreeMap::new(), prev: None, max_read_size, } } /// Include a new blob in the read plan. /// /// This function is called from a B-Tree index visitor (see `DeltaLayerInner::plan_reads` /// and `ImageLayerInner::plan_reads`). Said visitor wants to collect blob offsets for all /// keys in a given keyspace. This function must be called for each key in the desired /// keyspace (monotonically continuous). [`Self::handle_range_end`] must /// be called after every range in the offset. /// /// In the event that keys are skipped, the behaviour is undefined and can lead to an /// incorrect read plan. We can end up asserting, erroring in wal redo or returning /// incorrect data to the user. /// /// The `flag` argument has two interesting values: /// * [`BlobFlag::ReplaceAll`]: The blob for this key should replace all existing blobs. /// This is used for WAL records that `will_init`. /// * [`BlobFlag::Ignore`]: This blob should not be included in the read. This happens /// if the blob is cached. 
pub fn handle(&mut self, key: Key, lsn: Lsn, offset: u64, flag: BlobFlag) { // Implementation note: internally lag behind by one blob such that // we have a start and end offset when initialising [`VectoredRead`] let (prev_key, prev_lsn, prev_offset, prev_flag) = match self.prev { None => { self.prev = Some((key, lsn, offset, flag)); return; } Some(prev) => prev, }; self.add_blob(prev_key, prev_lsn, prev_offset, offset, prev_flag); self.prev = Some((key, lsn, offset, flag)); } pub fn handle_range_end(&mut self, offset: u64) { if let Some((prev_key, prev_lsn, prev_offset, prev_flag)) = self.prev { self.add_blob(prev_key, prev_lsn, prev_offset, offset, prev_flag); } self.prev = None; } fn add_blob(&mut self, key: Key, lsn: Lsn, start_offset: u64, end_offset: u64, flag: BlobFlag) { match flag { BlobFlag::None => { let blobs_for_key = self.blobs.entry(key).or_default(); blobs_for_key.push((lsn, start_offset, end_offset, false)); } BlobFlag::ReplaceAll => { let blobs_for_key = self.blobs.entry(key).or_default(); blobs_for_key.clear(); blobs_for_key.push((lsn, start_offset, end_offset, true)); } BlobFlag::Ignore => {} } } pub fn finish(self) -> Vec<VectoredRead> { let mut current_read_builder: Option<ChunkedVectoredReadBuilder> = None; let mut reads = Vec::new(); for (key, blobs_for_key) in self.blobs { for (lsn, start_offset, end_offset, will_init) in blobs_for_key { let extended = match &mut current_read_builder { Some(read_builder) => read_builder.extend( start_offset, end_offset, BlobMeta { key, lsn, will_init, }, ), None => VectoredReadExtended::No, }; if extended == VectoredReadExtended::No { let next_read_builder = ChunkedVectoredReadBuilder::new( start_offset, end_offset, BlobMeta { key, lsn, will_init, }, self.max_read_size, ); let prev_read_builder = current_read_builder.replace(next_read_builder); // `current_read_builder` is None in the first iteration of the outer loop if let Some(read_builder) = prev_read_builder { reads.push(read_builder.build()); } } } } 
if let Some(read_builder) = current_read_builder { reads.push(read_builder.build()); } reads } } /// Disk reader for vectored blob spans (does not go through the page cache) pub struct VectoredBlobReader<'a> { file: &'a VirtualFile, } impl<'a> VectoredBlobReader<'a> { pub fn new(file: &'a VirtualFile) -> Self { Self { file } } /// Read the requested blobs into the buffer. /// /// We have to deal with the fact that blobs are not fixed size. /// Each blob is prefixed by a size header. /// /// The success return value is a struct which contains the buffer /// filled from disk and a list of offsets at which each blob lies /// in the buffer. pub async fn read_blobs( &self, read: &VectoredRead, buf: IoBufferMut, ctx: &RequestContext, ) -> Result<VectoredBlobsBuf, std::io::Error> { assert!(read.size() > 0); assert!( read.size() <= buf.capacity(), "{} > {}", read.size(), buf.capacity() ); if cfg!(debug_assertions) { const ALIGN: u64 = virtual_file::get_io_buffer_alignment() as u64; debug_assert_eq!( read.start % ALIGN, 0, "Read start at {} does not satisfy the required io buffer alignment ({} bytes)", read.start, ALIGN ); } let buf = self .file .read_exact_at(buf.slice(0..read.size()), read.start, ctx) .await? .into_inner(); let blobs_at = read.blobs_at.as_slice(); let mut blobs = Vec::with_capacity(blobs_at.len()); // Blobs in `read` only provide their starting offset. The end offset // of a blob is implicit: the start of the next blob if one exists // or the end of the read. 
for (blob_start, meta) in blobs_at.iter().copied() { let header_start = (blob_start - read.start) as usize; let header = Header::decode(&buf[header_start..]).map_err(|anyhow_err| { std::io::Error::new(std::io::ErrorKind::InvalidData, anyhow_err) })?; let data_start = header_start + header.header_len; let end = data_start + header.data_len; let compression_bits = header.compression_bits; blobs.push(VectoredBlob { header_start, data_start, end, meta, compression_bits, }); } Ok(VectoredBlobsBuf { buf, blobs }) } } /// Read planner used in [`crate::tenant::storage_layer::image_layer::ImageLayerIterator`]. /// /// It provides a streaming API for getting read blobs. It returns a batch when /// `handle` gets called and when the current key would just exceed the read_size and /// max_cnt constraints. pub struct StreamingVectoredReadPlanner { read_builder: Option<ChunkedVectoredReadBuilder>, // Arguments for previous blob passed into [`StreamingVectoredReadPlanner::handle`] prev: Option<(Key, Lsn, u64, bool)>, /// Max read size per batch. This is not a strict limit. If there are [0, 100) and [100, 200), while the `max_read_size` is 150, /// we will produce a single batch instead of split them. 
max_read_size: u64, /// Max item count per batch max_cnt: usize, /// Size of the current batch cnt: usize, } impl StreamingVectoredReadPlanner { pub fn new(max_read_size: u64, max_cnt: usize) -> Self { assert!(max_cnt > 0); assert!(max_read_size > 0); Self { read_builder: None, prev: None, max_cnt, max_read_size, cnt: 0, } } pub fn handle( &mut self, key: Key, lsn: Lsn, offset: u64, will_init: bool, ) -> Option<VectoredRead> { // Implementation note: internally lag behind by one blob such that // we have a start and end offset when initialising [`VectoredRead`] let (prev_key, prev_lsn, prev_offset, prev_will_init) = match self.prev { None => { self.prev = Some((key, lsn, offset, will_init)); return None; } Some(prev) => prev, }; let res = self.add_blob( prev_key, prev_lsn, prev_offset, offset, false, prev_will_init, ); self.prev = Some((key, lsn, offset, will_init)); res } pub fn handle_range_end(&mut self, offset: u64) -> Option<VectoredRead> { let res = if let Some((prev_key, prev_lsn, prev_offset, prev_will_init)) = self.prev { self.add_blob( prev_key, prev_lsn, prev_offset, offset, true, prev_will_init, ) } else { None }; self.prev = None; res } fn add_blob( &mut self, key: Key, lsn: Lsn, start_offset: u64, end_offset: u64, is_last_blob_in_read: bool, will_init: bool, ) -> Option<VectoredRead> { match &mut self.read_builder { Some(read_builder) => { let extended = read_builder.extend( start_offset, end_offset, BlobMeta { key, lsn, will_init, }, ); assert_eq!(extended, VectoredReadExtended::Yes); } None => { self.read_builder = { Some(ChunkedVectoredReadBuilder::new_streaming( start_offset, end_offset, BlobMeta { key, lsn, will_init, }, )) }; } } let read_builder = self.read_builder.as_mut().unwrap(); self.cnt += 1; if is_last_blob_in_read || read_builder.size() >= self.max_read_size as usize || self.cnt >= self.max_cnt { let prev_read_builder = self.read_builder.take(); self.cnt = 0; // `current_read_builder` is None in the first iteration if let 
Some(read_builder) = prev_read_builder { return Some(read_builder.build()); } } None } } #[cfg(test)] mod tests { use super::super::blob_io::tests::{random_array, write_maybe_compressed}; use super::*; use crate::context::DownloadBehavior; use crate::page_cache::PAGE_SZ; use crate::task_mgr::TaskKind; fn validate_read(read: &VectoredRead, offset_range: &[(Key, Lsn, u64, BlobFlag)]) { const ALIGN: u64 = virtual_file::get_io_buffer_alignment() as u64; assert_eq!(read.start % ALIGN, 0); assert_eq!(read.start / ALIGN, offset_range.first().unwrap().2 / ALIGN); let expected_offsets_in_read: Vec<_> = offset_range.iter().map(|o| o.2).collect(); let offsets_in_read: Vec<_> = read .blobs_at .as_slice() .iter() .map(|(offset, _)| *offset) .collect(); assert_eq!(expected_offsets_in_read, offsets_in_read); } #[test] fn planner_chunked_coalesce_all_test() { use crate::virtual_file; const CHUNK_SIZE: u64 = virtual_file::get_io_buffer_alignment() as u64; let max_read_size = CHUNK_SIZE as usize * 8; let key = Key::MIN; let lsn = Lsn(0); let blob_descriptions = [ (key, lsn, CHUNK_SIZE / 8, BlobFlag::None), // Read 1 BEGIN (key, lsn, CHUNK_SIZE / 4, BlobFlag::Ignore), // Gap (key, lsn, CHUNK_SIZE / 2, BlobFlag::None), (key, lsn, CHUNK_SIZE - 2, BlobFlag::Ignore), // Gap (key, lsn, CHUNK_SIZE, BlobFlag::None), (key, lsn, CHUNK_SIZE * 2 - 1, BlobFlag::None), (key, lsn, CHUNK_SIZE * 2 + 1, BlobFlag::Ignore), // Gap (key, lsn, CHUNK_SIZE * 3 + 1, BlobFlag::None), (key, lsn, CHUNK_SIZE * 5 + 1, BlobFlag::None), (key, lsn, CHUNK_SIZE * 6 + 1, BlobFlag::Ignore), // skipped chunk size, but not a chunk: should coalesce. 
(key, lsn, CHUNK_SIZE * 7 + 1, BlobFlag::None), (key, lsn, CHUNK_SIZE * 8, BlobFlag::None), // Read 2 BEGIN (b/c max_read_size) (key, lsn, CHUNK_SIZE * 9, BlobFlag::Ignore), // ==== skipped a chunk (key, lsn, CHUNK_SIZE * 10, BlobFlag::None), // Read 3 BEGIN (cannot coalesce) ]; let ranges = [ &[ blob_descriptions[0], blob_descriptions[2], blob_descriptions[4], blob_descriptions[5], blob_descriptions[7], blob_descriptions[8], blob_descriptions[10], ], &blob_descriptions[11..12], &blob_descriptions[13..], ]; let mut planner = VectoredReadPlanner::new(max_read_size); for (key, lsn, offset, flag) in blob_descriptions { planner.handle(key, lsn, offset, flag); } planner.handle_range_end(652 * 1024); let reads = planner.finish(); assert_eq!(reads.len(), ranges.len()); for (idx, read) in reads.iter().enumerate() { validate_read(read, ranges[idx]); } } #[test] fn planner_max_read_size_test() { let max_read_size = 128 * 1024; let key = Key::MIN; let lsn = Lsn(0); let blob_descriptions = vec![ (key, lsn, 0, BlobFlag::None), (key, lsn, 32 * 1024, BlobFlag::None), (key, lsn, 96 * 1024, BlobFlag::None), // Last in read 1 (key, lsn, 128 * 1024, BlobFlag::None), // Last in read 2 (key, lsn, 198 * 1024, BlobFlag::None), // Last in read 3 (key, lsn, 268 * 1024, BlobFlag::None), // Last in read 4 (key, lsn, 396 * 1024, BlobFlag::None), // Last in read 5 (key, lsn, 652 * 1024, BlobFlag::None), // Last in read 6 ]; let ranges = [ &blob_descriptions[0..3], &blob_descriptions[3..4], &blob_descriptions[4..5], &blob_descriptions[5..6], &blob_descriptions[6..7], &blob_descriptions[7..], ]; let mut planner = VectoredReadPlanner::new(max_read_size); for (key, lsn, offset, flag) in blob_descriptions.clone() { planner.handle(key, lsn, offset, flag); } planner.handle_range_end(652 * 1024); let reads = planner.finish(); assert_eq!(reads.len(), 6); // TODO: could remove zero reads to produce 5 reads here for (idx, read) in reads.iter().enumerate() { validate_read(read, ranges[idx]); } } #[test] 
fn planner_replacement_test() { const CHUNK_SIZE: u64 = virtual_file::get_io_buffer_alignment() as u64; let max_read_size = 128 * CHUNK_SIZE as usize; let first_key = Key::MIN; let second_key = first_key.next(); let lsn = Lsn(0); let blob_descriptions = vec![ (first_key, lsn, 0, BlobFlag::None), // First in read 1 (first_key, lsn, CHUNK_SIZE, BlobFlag::None), // Last in read 1 (second_key, lsn, 2 * CHUNK_SIZE, BlobFlag::ReplaceAll), (second_key, lsn, 3 * CHUNK_SIZE, BlobFlag::None), (second_key, lsn, 4 * CHUNK_SIZE, BlobFlag::ReplaceAll), // First in read 2 (second_key, lsn, 5 * CHUNK_SIZE, BlobFlag::None), // Last in read 2 ]; let ranges = [&blob_descriptions[0..2], &blob_descriptions[4..]]; let mut planner = VectoredReadPlanner::new(max_read_size); for (key, lsn, offset, flag) in blob_descriptions.clone() { planner.handle(key, lsn, offset, flag); } planner.handle_range_end(6 * CHUNK_SIZE); let reads = planner.finish(); assert_eq!(reads.len(), 2); for (idx, read) in reads.iter().enumerate() { validate_read(read, ranges[idx]); } } #[test] fn streaming_planner_max_read_size_test() { let max_read_size = 128 * 1024; let key = Key::MIN; let lsn = Lsn(0); let blob_descriptions = vec![ (key, lsn, 0, BlobFlag::None), (key, lsn, 32 * 1024, BlobFlag::None), (key, lsn, 96 * 1024, BlobFlag::None), (key, lsn, 128 * 1024, BlobFlag::None), (key, lsn, 198 * 1024, BlobFlag::None), (key, lsn, 268 * 1024, BlobFlag::None), (key, lsn, 396 * 1024, BlobFlag::None), (key, lsn, 652 * 1024, BlobFlag::None), ]; let ranges = [ &blob_descriptions[0..3], &blob_descriptions[3..5], &blob_descriptions[5..6], &blob_descriptions[6..7], &blob_descriptions[7..], ]; let mut planner = StreamingVectoredReadPlanner::new(max_read_size, 1000); let mut reads = Vec::new(); for (key, lsn, offset, _) in blob_descriptions.clone() { reads.extend(planner.handle(key, lsn, offset, false)); } reads.extend(planner.handle_range_end(652 * 1024)); assert_eq!(reads.len(), ranges.len()); for (idx, read) in 
reads.iter().enumerate() { validate_read(read, ranges[idx]); } } #[test] fn streaming_planner_max_cnt_test() { let max_read_size = 1024 * 1024; let key = Key::MIN; let lsn = Lsn(0); let blob_descriptions = vec![ (key, lsn, 0, BlobFlag::None), (key, lsn, 32 * 1024, BlobFlag::None), (key, lsn, 96 * 1024, BlobFlag::None), (key, lsn, 128 * 1024, BlobFlag::None), (key, lsn, 198 * 1024, BlobFlag::None), (key, lsn, 268 * 1024, BlobFlag::None), (key, lsn, 396 * 1024, BlobFlag::None), (key, lsn, 652 * 1024, BlobFlag::None), ]; let ranges = [ &blob_descriptions[0..2], &blob_descriptions[2..4], &blob_descriptions[4..6], &blob_descriptions[6..], ]; let mut planner = StreamingVectoredReadPlanner::new(max_read_size, 2); let mut reads = Vec::new(); for (key, lsn, offset, _) in blob_descriptions.clone() { reads.extend(planner.handle(key, lsn, offset, false)); } reads.extend(planner.handle_range_end(652 * 1024)); assert_eq!(reads.len(), ranges.len()); for (idx, read) in reads.iter().enumerate() { validate_read(read, ranges[idx]); } } #[test] fn streaming_planner_edge_test() { let max_read_size = 1024 * 1024; let key = Key::MIN; let lsn = Lsn(0); { let mut planner = StreamingVectoredReadPlanner::new(max_read_size, 1); let mut reads = Vec::new(); reads.extend(planner.handle_range_end(652 * 1024)); assert!(reads.is_empty()); } { let mut planner = StreamingVectoredReadPlanner::new(max_read_size, 1); let mut reads = Vec::new(); reads.extend(planner.handle(key, lsn, 0, false)); reads.extend(planner.handle_range_end(652 * 1024)); assert_eq!(reads.len(), 1); validate_read(&reads[0], &[(key, lsn, 0, BlobFlag::None)]); } { let mut planner = StreamingVectoredReadPlanner::new(max_read_size, 1); let mut reads = Vec::new(); reads.extend(planner.handle(key, lsn, 0, false)); reads.extend(planner.handle(key, lsn, 128 * 1024, false)); reads.extend(planner.handle_range_end(652 * 1024)); assert_eq!(reads.len(), 2); validate_read(&reads[0], &[(key, lsn, 0, BlobFlag::None)]); validate_read(&reads[1], 
&[(key, lsn, 128 * 1024, BlobFlag::None)]); } { let mut planner = StreamingVectoredReadPlanner::new(max_read_size, 2); let mut reads = Vec::new(); reads.extend(planner.handle(key, lsn, 0, false)); reads.extend(planner.handle(key, lsn, 128 * 1024, false)); reads.extend(planner.handle_range_end(652 * 1024)); assert_eq!(reads.len(), 1); validate_read( &reads[0], &[ (key, lsn, 0, BlobFlag::None), (key, lsn, 128 * 1024, BlobFlag::None), ], ); } } async fn round_trip_test_compressed( blobs: &[Vec<u8>], compression: bool, ) -> anyhow::Result<()> { let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error).with_scope_unit_test(); let (_temp_dir, pathbuf, offsets) = write_maybe_compressed(blobs, compression, &ctx).await?; let file = VirtualFile::open_v2(&pathbuf, &ctx).await?; let file_len = std::fs::metadata(&pathbuf)?.len(); // Multiply by two (compressed data might need more space), and add a few bytes for the header
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/secondary.rs
pageserver/src/tenant/secondary.rs
mod downloader; pub mod heatmap; mod heatmap_uploader; mod scheduler; use std::sync::Arc; use std::time::SystemTime; use metrics::UIntGauge; use pageserver_api::models; use pageserver_api::shard::{ShardIdentity, TenantShardId}; use remote_storage::GenericRemoteStorage; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use tracing::instrument; use utils::completion::Barrier; use utils::id::TimelineId; use utils::sync::gate::Gate; use self::downloader::{SecondaryDetail, downloader_task}; use self::heatmap_uploader::heatmap_uploader_task; use super::GetTenantError; use super::config::SecondaryLocationConfig; use super::mgr::TenantManager; use super::span::debug_assert_current_span_has_tenant_id; use super::storage_layer::LayerName; use crate::context::RequestContext; use crate::disk_usage_eviction_task::DiskUsageEvictionInfo; use crate::metrics::{SECONDARY_HEATMAP_TOTAL_SIZE, SECONDARY_RESIDENT_PHYSICAL_SIZE}; use crate::task_mgr::{self, BACKGROUND_RUNTIME, TaskKind}; enum DownloadCommand { Download(TenantShardId), } enum UploadCommand { Upload(TenantShardId), } impl UploadCommand { fn get_tenant_shard_id(&self) -> &TenantShardId { match self { Self::Upload(id) => id, } } } impl DownloadCommand { fn get_tenant_shard_id(&self) -> &TenantShardId { match self { Self::Download(id) => id, } } } struct CommandRequest<T> { payload: T, response_tx: tokio::sync::oneshot::Sender<CommandResponse>, } struct CommandResponse { result: Result<(), SecondaryTenantError>, } #[derive(thiserror::Error, Debug)] pub(crate) enum SecondaryTenantError { #[error("{0}")] GetTenant(GetTenantError), #[error("shutting down")] ShuttingDown, } impl From<GetTenantError> for SecondaryTenantError { fn from(gte: GetTenantError) -> Self { Self::GetTenant(gte) } } // Whereas [`Tenant`] represents an attached tenant, this type represents the work // we do for secondary tenant locations: where we are not serving clients or // ingesting WAL, but we are maintaining a warm cache of layer 
files. // // This type is all about the _download_ path for secondary mode. The upload path // runs separately (see [`heatmap_uploader`]) while a regular attached `Tenant` exists. // // This structure coordinates TenantManager and SecondaryDownloader, // so that the downloader can indicate which tenants it is currently // operating on, and the manager can indicate when a particular // secondary tenant should cancel any work in flight. #[derive(Debug)] pub(crate) struct SecondaryTenant { /// Carrying a tenant shard ID simplifies callers such as the downloader /// which need to organize many of these objects by ID. tenant_shard_id: TenantShardId, /// Cancellation token indicates to SecondaryDownloader that it should stop doing /// any work for this tenant at the next opportunity. pub(crate) cancel: CancellationToken, pub(crate) gate: Gate, // Secondary mode does not need the full shard identity or the pageserver_api::models::TenantConfig. However, // storing these enables us to report our full LocationConf, enabling convenient reconciliation // by the control plane (see [`Self::get_location_conf`]) pub(crate) shard_identity: ShardIdentity, tenant_conf: std::sync::Mutex<pageserver_api::models::TenantConfig>, // Internal state used by the Downloader. 
detail: std::sync::Mutex<SecondaryDetail>, // Public state indicating overall progress of downloads relative to the last heatmap seen pub(crate) progress: std::sync::Mutex<models::SecondaryProgress>, // Sum of layer sizes on local disk pub(super) resident_size_metric: UIntGauge, // Sum of layer sizes in the most recently downloaded heatmap pub(super) heatmap_total_size_metric: UIntGauge, } impl SecondaryTenant { pub(crate) fn new( tenant_shard_id: TenantShardId, shard_identity: ShardIdentity, tenant_conf: pageserver_api::models::TenantConfig, config: &SecondaryLocationConfig, ) -> Arc<Self> { let tenant_id = tenant_shard_id.tenant_id.to_string(); let shard_id = format!("{}", tenant_shard_id.shard_slug()); let resident_size_metric = SECONDARY_RESIDENT_PHYSICAL_SIZE .get_metric_with_label_values(&[&tenant_id, &shard_id]) .unwrap(); let heatmap_total_size_metric = SECONDARY_HEATMAP_TOTAL_SIZE .get_metric_with_label_values(&[&tenant_id, &shard_id]) .unwrap(); Arc::new(Self { tenant_shard_id, // todo: shall we make this a descendent of the // main cancellation token, or is it sufficient that // on shutdown we walk the tenants and fire their // individual cancellations? cancel: CancellationToken::new(), gate: Gate::default(), shard_identity, tenant_conf: std::sync::Mutex::new(tenant_conf), detail: std::sync::Mutex::new(SecondaryDetail::new(config.clone())), progress: std::sync::Mutex::default(), resident_size_metric, heatmap_total_size_metric, }) } pub(crate) fn tenant_shard_id(&self) -> TenantShardId { self.tenant_shard_id } pub(crate) async fn shutdown(&self) { self.cancel.cancel(); // Wait for any secondary downloader work to complete self.gate.close().await; self.validate_metrics(); // Metrics are subtracted from and/or removed eagerly. // Deletions are done in the background via [`BackgroundPurges::spawn`]. 
let tenant_id = self.tenant_shard_id.tenant_id.to_string(); let shard_id = format!("{}", self.tenant_shard_id.shard_slug()); let _ = SECONDARY_RESIDENT_PHYSICAL_SIZE.remove_label_values(&[&tenant_id, &shard_id]); let _ = SECONDARY_HEATMAP_TOTAL_SIZE.remove_label_values(&[&tenant_id, &shard_id]); self.detail .lock() .unwrap() .drain_timelines(&self.tenant_shard_id, &self.resident_size_metric); } pub(crate) fn set_config(&self, config: &SecondaryLocationConfig) { self.detail.lock().unwrap().config = config.clone(); } pub(crate) fn set_tenant_conf(&self, config: &pageserver_api::models::TenantConfig) { *(self.tenant_conf.lock().unwrap()) = config.clone(); } /// For API access: generate a LocationConfig equivalent to the one that would be used to /// create a Tenant in the same state. Do not use this in hot paths: it's for relatively /// rare external API calls, like a reconciliation at startup. pub(crate) fn get_location_conf(&self) -> models::LocationConfig { let conf = self.detail.lock().unwrap().config.clone(); let conf = models::LocationConfigSecondary { warm: conf.warm }; let tenant_conf = self.tenant_conf.lock().unwrap().clone(); models::LocationConfig { mode: models::LocationConfigMode::Secondary, generation: None, secondary_conf: Some(conf), shard_number: self.tenant_shard_id.shard_number.0, shard_count: self.tenant_shard_id.shard_count.literal(), shard_stripe_size: self.shard_identity.stripe_size.0, tenant_conf, } } pub(crate) fn get_tenant_shard_id(&self) -> &TenantShardId { &self.tenant_shard_id } pub(crate) fn get_layers_for_eviction(self: &Arc<Self>) -> (DiskUsageEvictionInfo, usize) { self.detail.lock().unwrap().get_layers_for_eviction(self) } /// Cancellation safe, but on cancellation the eviction will go through #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%timeline_id, name=%name))] pub(crate) async fn evict_layer(self: &Arc<Self>, timeline_id: TimelineId, name: 
LayerName) { debug_assert_current_span_has_tenant_id(); let guard = match self.gate.enter() { Ok(g) => g, Err(_) => { tracing::debug!("Dropping layer evictions, secondary tenant shutting down",); return; } }; let now = SystemTime::now(); tracing::info!("Evicting secondary layer"); let this = self.clone(); // spawn it to be cancellation safe tokio::task::spawn_blocking(move || { let _guard = guard; // Update the timeline's state. This does not have to be synchronized with // the download process, because: // - If downloader is racing with us to remove a file (e.g. because it is // removed from heatmap), then our mutual .remove() operations will both // succeed. // - If downloader is racing with us to download the object (this would require // multiple eviction iterations to race with multiple download iterations), then // if we remove it from the state, the worst that happens is the downloader // downloads it again before re-inserting, or we delete the file but it remains // in the state map (in which case it will be downloaded if this secondary // tenant transitions to attached and tries to access it) // // The important assumption here is that the secondary timeline state does not // have to 100% match what is on disk, because it's a best-effort warming // of the cache. let mut detail = this.detail.lock().unwrap(); if let Some(removed) = detail.evict_layer(name, &timeline_id, now, &this.resident_size_metric) { // We might race with removal of the same layer during downloads, so finding the layer we // were trying to remove is optional. Only issue the disk I/O to remove it if we found it. removed.remove_blocking(); } }) .await .expect("secondary eviction should not have panicked"); } /// Exhaustive check that incrementally updated metrics match the actual state. 
#[cfg(feature = "testing")] fn validate_metrics(&self) { let detail = self.detail.lock().unwrap(); let resident_size = detail.total_resident_size(); assert_eq!(resident_size, self.resident_size_metric.get()); } #[cfg(not(feature = "testing"))] fn validate_metrics(&self) { // No-op in non-testing builds } } /// The SecondaryController is a pseudo-rpc client for administrative control of secondary mode downloads, /// and heatmap uploads. This is not a hot data path: it's used for: /// - Live migrations, where we want to ensure a migration destination has the freshest possible /// content before trying to cut over. /// - Tests, where we want to immediately upload/download for a particular tenant. /// /// In normal operations, outside of migrations, uploads & downloads are autonomous and not driven by this interface. pub struct SecondaryController { upload_req_tx: tokio::sync::mpsc::Sender<CommandRequest<UploadCommand>>, download_req_tx: tokio::sync::mpsc::Sender<CommandRequest<DownloadCommand>>, } impl SecondaryController { async fn dispatch<T>( &self, queue: &tokio::sync::mpsc::Sender<CommandRequest<T>>, payload: T, ) -> Result<(), SecondaryTenantError> { let (response_tx, response_rx) = tokio::sync::oneshot::channel(); queue .send(CommandRequest { payload, response_tx, }) .await .map_err(|_| SecondaryTenantError::ShuttingDown)?; let response = response_rx .await .map_err(|_| SecondaryTenantError::ShuttingDown)?; response.result } pub(crate) async fn upload_tenant( &self, tenant_shard_id: TenantShardId, ) -> Result<(), SecondaryTenantError> { self.dispatch(&self.upload_req_tx, UploadCommand::Upload(tenant_shard_id)) .await } pub(crate) async fn download_tenant( &self, tenant_shard_id: TenantShardId, ) -> Result<(), SecondaryTenantError> { self.dispatch( &self.download_req_tx, DownloadCommand::Download(tenant_shard_id), ) .await } } pub struct GlobalTasks { cancel: CancellationToken, uploader: JoinHandle<()>, downloader: JoinHandle<()>, } impl GlobalTasks { /// Caller 
is responsible for requesting shutdown via the cancellation token that was /// passed to [`spawn_tasks`]. /// /// # Panics /// /// This method panics if that token is not cancelled. /// This is low-risk because we're calling this during process shutdown, so, a panic /// will be informative but not cause undue downtime. pub async fn wait(self) { let Self { cancel, uploader, downloader, } = self; assert!( cancel.is_cancelled(), "must cancel cancellation token, otherwise the tasks will not shut down" ); let (uploader, downloader) = futures::future::join(uploader, downloader).await; uploader.expect( "unreachable: exit_on_panic_or_error would catch the panic and exit the process", ); downloader.expect( "unreachable: exit_on_panic_or_error would catch the panic and exit the process", ); } } pub fn spawn_tasks( tenant_manager: Arc<TenantManager>, remote_storage: GenericRemoteStorage, background_jobs_can_start: Barrier, cancel: CancellationToken, ) -> (SecondaryController, GlobalTasks) { let mgr_clone = tenant_manager.clone(); let storage_clone = remote_storage.clone(); let bg_jobs_clone = background_jobs_can_start.clone(); let (download_req_tx, download_req_rx) = tokio::sync::mpsc::channel::<CommandRequest<DownloadCommand>>(16); let (upload_req_tx, upload_req_rx) = tokio::sync::mpsc::channel::<CommandRequest<UploadCommand>>(16); let cancel_clone = cancel.clone(); let downloader = BACKGROUND_RUNTIME.spawn(task_mgr::exit_on_panic_or_error( "secondary tenant downloads", async move { downloader_task( mgr_clone, storage_clone, download_req_rx, bg_jobs_clone, cancel_clone, RequestContext::new( TaskKind::SecondaryDownloads, crate::context::DownloadBehavior::Download, ), ) .await; anyhow::Ok(()) }, )); let cancel_clone = cancel.clone(); let uploader = BACKGROUND_RUNTIME.spawn(task_mgr::exit_on_panic_or_error( "heatmap uploads", async move { heatmap_uploader_task( tenant_manager, remote_storage, upload_req_rx, background_jobs_can_start, cancel_clone, ) .await; anyhow::Ok(()) }, 
)); ( SecondaryController { upload_req_tx, download_req_tx, }, GlobalTasks { cancel, uploader, downloader, }, ) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/gc_block.rs
pageserver/src/tenant/gc_block.rs
use std::collections::HashMap; use std::sync::Arc; use utils::id::TimelineId; use super::remote_timeline_client::index::GcBlockingReason; type Storage = HashMap<TimelineId, enumset::EnumSet<GcBlockingReason>>; /// GcBlock provides persistent (per-timeline) gc blocking. #[derive(Default)] pub(crate) struct GcBlock { /// The timelines which have current reasons to block gc. /// /// LOCK ORDER: this is held locked while scheduling the next index_part update. This is done /// to keep the this field up to date with RemoteTimelineClient `upload_queue.dirty`. reasons: std::sync::Mutex<Storage>, /// GC background task or manually run `Tenant::gc_iteration` holds a lock on this. /// /// Do not add any more features taking and forbidding taking this lock. It should be /// `tokio::sync::Notify`, but that is rarely used. On the other side, [`GcBlock::insert`] /// synchronizes with gc attempts by locking and unlocking this mutex. blocking: Arc<tokio::sync::Mutex<()>>, } impl GcBlock { /// Start another gc iteration. /// /// Returns a guard to be held for the duration of gc iteration to allow synchronizing with /// it's ending, or if not currently possible, a value describing the reasons why not. /// /// Cancellation safe. pub(super) async fn start(&self) -> Result<Guard, BlockingReasons> { let reasons = { let g = self.reasons.lock().unwrap(); // TODO: the assumption is that this method gets called periodically. in prod, we use 1h, in // tests, we use everything. we should warn if the gc has been consecutively blocked // for more than 1h (within single tenant session?). BlockingReasons::clean_and_summarize(g) }; if let Some(reasons) = reasons { Err(reasons) } else { Ok(Guard { _inner: self.blocking.clone().lock_owned().await, }) } } /// Describe the current gc blocking reasons. /// /// TODO: make this json serializable. 
pub(crate) fn summary(&self) -> Option<BlockingReasons> { let g = self.reasons.lock().unwrap(); BlockingReasons::summarize(&g) } /// Start blocking gc for this one timeline for the given reason. /// /// This is not a guard based API but instead it mimics set API. The returned future will not /// resolve until an existing gc round has completed. /// /// Returns true if this block was new, false if gc was already blocked for this reason. /// /// Cancellation safe: cancelling after first poll will keep the reason to block gc, but will /// keep the gc blocking reason. pub(crate) async fn insert( &self, timeline: &super::Timeline, reason: GcBlockingReason, ) -> anyhow::Result<bool> { let (added, uploaded) = { let mut g = self.reasons.lock().unwrap(); let set = g.entry(timeline.timeline_id).or_default(); let added = set.insert(reason); // LOCK ORDER: intentionally hold the lock, see self.reasons. let uploaded = timeline .remote_client .schedule_insert_gc_block_reason(reason)?; (added, uploaded) }; uploaded.await?; // ensure that any ongoing gc iteration has completed drop(self.blocking.lock().await); Ok(added) } /// Remove blocking gc for this one timeline and the given reason. 
pub(crate) async fn remove( &self, timeline: &super::Timeline, reason: GcBlockingReason, ) -> anyhow::Result<()> { use std::collections::hash_map::Entry; super::span::debug_assert_current_span_has_tenant_and_timeline_id(); let (remaining_blocks, uploaded) = { let mut g = self.reasons.lock().unwrap(); match g.entry(timeline.timeline_id) { Entry::Occupied(mut oe) => { let set = oe.get_mut(); set.remove(reason); if set.is_empty() { oe.remove(); } } Entry::Vacant(_) => { // we must still do the index_part.json update regardless, in case we had earlier // been cancelled } } let remaining_blocks = g.len(); // LOCK ORDER: intentionally hold the lock while scheduling; see self.reasons let uploaded = timeline .remote_client .schedule_remove_gc_block_reason(reason)?; (remaining_blocks, uploaded) }; uploaded.await?; // no need to synchronize with gc iteration again if remaining_blocks > 0 { tracing::info!(remaining_blocks, removed=?reason, "gc blocking removed, but gc remains blocked"); } else { tracing::info!("gc is now unblocked for the tenant"); } Ok(()) } pub(crate) fn before_delete(&self, timeline_id: &super::TimelineId) { let unblocked = { let mut g = self.reasons.lock().unwrap(); if g.is_empty() { return; } g.remove(timeline_id); BlockingReasons::clean_and_summarize(g).is_none() }; if unblocked { tracing::info!("gc is now unblocked following deletion"); } } /// Initialize with the non-deleted timelines of this tenant. 
pub(crate) fn set_scanned(&self, scanned: Storage) { let mut g = self.reasons.lock().unwrap(); assert!(g.is_empty()); g.extend(scanned.into_iter().filter(|(_, v)| !v.is_empty())); if let Some(reasons) = BlockingReasons::clean_and_summarize(g) { tracing::info!(summary=?reasons, "initialized with gc blocked"); } } } pub(crate) struct Guard { _inner: tokio::sync::OwnedMutexGuard<()>, } #[derive(Debug)] pub(crate) struct BlockingReasons { timelines: usize, reasons: enumset::EnumSet<GcBlockingReason>, } impl std::fmt::Display for BlockingReasons { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "{} timelines block for {:?}", self.timelines, self.reasons ) } } impl BlockingReasons { fn clean_and_summarize(mut g: std::sync::MutexGuard<'_, Storage>) -> Option<Self> { let mut reasons = enumset::EnumSet::empty(); g.retain(|_key, value| { reasons = reasons.union(*value); !value.is_empty() }); if !g.is_empty() { Some(BlockingReasons { timelines: g.len(), reasons, }) } else { None } } fn summarize(g: &std::sync::MutexGuard<'_, Storage>) -> Option<Self> { if g.is_empty() { None } else { let reasons = g .values() .fold(enumset::EnumSet::empty(), |acc, next| acc.union(*next)); Some(BlockingReasons { timelines: g.len(), reasons, }) } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/size.rs
pageserver/src/tenant/size.rs
use std::cmp; use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::sync::Arc; use tenant_size_model::svg::SvgBranchKind; use tenant_size_model::{Segment, StorageModel}; use tokio::sync::Semaphore; use tokio::sync::oneshot::error::RecvError; use tokio_util::sync::CancellationToken; use tracing::*; use utils::id::TimelineId; use utils::lsn::Lsn; use super::{GcError, LogicalSizeCalculationCause, TenantShard}; use crate::context::RequestContext; use crate::pgdatadir_mapping::CalculateLogicalSizeError; use crate::tenant::{MaybeOffloaded, Timeline}; /// Inputs to the actual tenant sizing model /// /// Implements [`serde::Serialize`] but is not meant to be part of the public API, instead meant to /// be a transferrable format between execution environments and developer. /// /// This tracks more information than the actual StorageModel that calculation /// needs. We will convert this into a StorageModel when it's time to perform /// the calculation. /// #[derive(Debug, serde::Serialize, serde::Deserialize)] pub struct ModelInputs { pub segments: Vec<SegmentMeta>, pub timeline_inputs: Vec<TimelineInputs>, } /// A [`Segment`], with some extra information for display purposes #[derive(Debug, serde::Serialize, serde::Deserialize, PartialEq, Eq)] pub struct SegmentMeta { pub segment: Segment, pub timeline_id: TimelineId, pub kind: LsnKind, } #[derive(thiserror::Error, Debug)] pub(crate) enum CalculateSyntheticSizeError { /// Something went wrong internally to the calculation of logical size at a particular branch point #[error("Failed to calculated logical size on timeline {timeline_id} at {lsn}: {error}")] LogicalSize { timeline_id: TimelineId, lsn: Lsn, error: CalculateLogicalSizeError, }, /// Something went wrong internally when calculating GC parameters at start of size calculation #[error(transparent)] GcInfo(GcError), /// Totally unexpected errors, like panics joining a task #[error(transparent)] Fatal(anyhow::Error), /// Tenant shut 
down while calculating size #[error("Cancelled")] Cancelled, } impl From<GcError> for CalculateSyntheticSizeError { fn from(value: GcError) -> Self { match value { GcError::TenantCancelled | GcError::TimelineCancelled => { CalculateSyntheticSizeError::Cancelled } other => CalculateSyntheticSizeError::GcInfo(other), } } } impl SegmentMeta { fn size_needed(&self) -> bool { match self.kind { LsnKind::BranchStart => { // If we don't have a later GcCutoff point on this branch, and // no ancestor, calculate size for the branch start point. self.segment.needed && self.segment.parent.is_none() } LsnKind::BranchPoint => true, LsnKind::GcCutOff => true, LsnKind::BranchEnd => false, LsnKind::LeasePoint => true, LsnKind::LeaseStart => false, LsnKind::LeaseEnd => false, } } } #[derive( Debug, Clone, Copy, Eq, Ord, PartialEq, PartialOrd, serde::Serialize, serde::Deserialize, )] pub enum LsnKind { /// A timeline starting here BranchStart, /// A child timeline branches off from here BranchPoint, /// GC cutoff point GcCutOff, /// Last record LSN BranchEnd, /// A LSN lease is granted here. LeasePoint, /// A lease starts from here. LeaseStart, /// Last record LSN for the lease (should have the same LSN as the previous [`LsnKind::LeaseStart`]). LeaseEnd, } impl From<LsnKind> for SvgBranchKind { fn from(kind: LsnKind) -> Self { match kind { LsnKind::LeasePoint | LsnKind::LeaseStart | LsnKind::LeaseEnd => SvgBranchKind::Lease, _ => SvgBranchKind::Timeline, } } } /// Collect all relevant LSNs to the inputs. These will only be helpful in the serialized form as /// part of [`ModelInputs`] from the HTTP api, explaining the inputs. 
#[derive(Debug, serde::Serialize, serde::Deserialize)] pub struct TimelineInputs { pub timeline_id: TimelineId, pub ancestor_id: Option<TimelineId>, ancestor_lsn: Lsn, last_record: Lsn, latest_gc_cutoff: Lsn, /// Cutoff point based on GC settings next_pitr_cutoff: Lsn, /// Cutoff point calculated from the user-supplied 'max_retention_period' retention_param_cutoff: Option<Lsn>, /// Lease points on the timeline lease_points: Vec<Lsn>, } /// Gathers the inputs for the tenant sizing model. /// /// Tenant size does not consider the latest state, but only the state until next_pitr_cutoff, which /// is updated on-demand, during the start of this calculation and separate from the /// [`TimelineInputs::latest_gc_cutoff`]. /// /// For timelines in general: /// /// ```text /// 0-----|---------|----|------------| · · · · · |·> lsn /// initdb_lsn branchpoints* next_pitr_cutoff latest /// ``` pub(super) async fn gather_inputs( tenant: &TenantShard, limit: &Arc<Semaphore>, max_retention_period: Option<u64>, logical_size_cache: &mut HashMap<(TimelineId, Lsn), u64>, cause: LogicalSizeCalculationCause, cancel: &CancellationToken, ctx: &RequestContext, ) -> Result<ModelInputs, CalculateSyntheticSizeError> { // refresh is needed to update [`timeline::GcCutoffs`] tenant.refresh_gc_info(cancel, ctx).await?; // Collect information about all the timelines let mut timelines = tenant.list_timelines(); if timelines.is_empty() { // perhaps the tenant has just been created, and as such doesn't have any data yet return Ok(ModelInputs { segments: vec![], timeline_inputs: Vec::new(), }); } // Filter out timelines that are not active // // There may be a race when a timeline is dropped, // but it is unlikely to cause any issues. In the worst case, // the calculation will error out. timelines.retain(|t| t.is_active()); // Also filter out archived timelines. timelines.retain(|t| t.is_archived() != Some(true)); // Build a map of branch points. 
let mut branchpoints: HashMap<TimelineId, HashSet<Lsn>> = HashMap::new(); for timeline in timelines.iter() { if let Some(ancestor_id) = timeline.get_ancestor_timeline_id() { branchpoints .entry(ancestor_id) .or_default() .insert(timeline.get_ancestor_lsn()); } } // These become the final result. let mut timeline_inputs = Vec::with_capacity(timelines.len()); let mut segments: Vec<SegmentMeta> = Vec::new(); // // Build Segments representing each timeline. As we do that, also remember // the branchpoints and branch startpoints in 'branchpoint_segments' and // 'branchstart_segments' // // BranchPoint segments of each timeline // (timeline, branchpoint LSN) -> segment_id let mut branchpoint_segments: HashMap<(TimelineId, Lsn), usize> = HashMap::new(); // timeline, Branchpoint seg id, (ancestor, ancestor LSN) type BranchStartSegment = (TimelineId, usize, Option<(TimelineId, Lsn)>); let mut branchstart_segments: Vec<BranchStartSegment> = Vec::new(); for timeline in timelines.iter() { let timeline_id = timeline.timeline_id; let last_record_lsn = timeline.get_last_record_lsn(); let ancestor_lsn = timeline.get_ancestor_lsn(); // there's a race between the update (holding tenant.gc_lock) and this read but it // might not be an issue, because it's not for Timeline::gc let gc_info = timeline.gc_info.read().unwrap(); // similar to gc, but Timeline::get_latest_gc_cutoff_lsn() will not be updated before a // new gc run, which we have no control over. however differently from `Timeline::gc` // we don't consider the `Timeline::disk_consistent_lsn` at all, because we are not // actually removing files. // // We only consider [`timeline::GcCutoffs::time`], and not [`timeline::GcCutoffs::space`], because from // a user's perspective they have only requested retention up to the time bound (pitr_cutoff), rather // than our internal space cutoff. 
This means that if someone drops a database and waits for their // PITR interval, they will see synthetic size decrease, even if we are still storing data inside // the space cutoff. let mut next_pitr_cutoff = gc_info.cutoffs.time.unwrap_or_default(); // TODO: handle None // If the caller provided a shorter retention period, use that instead of the GC cutoff. let retention_param_cutoff = if let Some(max_retention_period) = max_retention_period { let param_cutoff = Lsn(last_record_lsn.0.saturating_sub(max_retention_period)); if next_pitr_cutoff < param_cutoff { next_pitr_cutoff = param_cutoff; } Some(param_cutoff) } else { None }; let branch_is_invisible = timeline.is_invisible() == Some(true); let lease_points = gc_info .leases .keys() .filter(|&&lsn| lsn > ancestor_lsn) .copied() .collect::<Vec<_>>(); // next_pitr_cutoff in parent branch are not of interest (right now at least), nor do we // want to query any logical size before initdb_lsn. let branch_start_lsn = cmp::max(ancestor_lsn, timeline.initdb_lsn); // Build "interesting LSNs" on this timeline let mut lsns: Vec<(Lsn, LsnKind)> = gc_info .retain_lsns .iter() .filter(|(lsn, _child_id, is_offloaded)| { lsn > &ancestor_lsn && *is_offloaded == MaybeOffloaded::No }) .copied() // this assumes there are no other retain_lsns than the branchpoints .map(|(lsn, _child_id, _is_offloaded)| (lsn, LsnKind::BranchPoint)) .collect::<Vec<_>>(); if !branch_is_invisible { // Do not count lease points for invisible branches. lsns.extend(lease_points.iter().map(|&lsn| (lsn, LsnKind::LeasePoint))); } drop(gc_info); // Add branch points we collected earlier, just in case there were any that were // not present in retain_lsns. We will remove any duplicates below later. 
if let Some(this_branchpoints) = branchpoints.get(&timeline_id) { lsns.extend( this_branchpoints .iter() .map(|lsn| (*lsn, LsnKind::BranchPoint)), ) } // Add a point for the PITR cutoff let branch_start_needed = next_pitr_cutoff <= branch_start_lsn; if !branch_start_needed && !branch_is_invisible { // Only add the GcCutOff point when the timeline is visible; otherwise, do not compute the size for the LSN // range from the last branch point to the latest data. lsns.push((next_pitr_cutoff, LsnKind::GcCutOff)); } lsns.sort_unstable(); lsns.dedup(); // // Create Segments for the interesting points. // // Timeline start point let ancestor = timeline .get_ancestor_timeline_id() .map(|ancestor_id| (ancestor_id, ancestor_lsn)); branchstart_segments.push((timeline_id, segments.len(), ancestor)); segments.push(SegmentMeta { segment: Segment { parent: None, // filled in later lsn: branch_start_lsn.0, size: None, // filled in later needed: branch_start_needed, }, timeline_id: timeline.timeline_id, kind: LsnKind::BranchStart, }); // GC cutoff point, and any branch points, i.e. points where // other timelines branch off from this timeline. let mut parent = segments.len() - 1; for (lsn, kind) in lsns { if kind == LsnKind::BranchPoint { branchpoint_segments.insert((timeline_id, lsn), segments.len()); } segments.push(SegmentMeta { segment: Segment { parent: Some(parent), lsn: lsn.0, size: None, needed: lsn > next_pitr_cutoff, }, timeline_id: timeline.timeline_id, kind, }); parent = segments.len() - 1; if kind == LsnKind::LeasePoint { // Needs `LeaseStart` and `LeaseEnd` as well to model lease as a read-only branch that never writes data // (i.e. it's lsn has not advanced from ancestor_lsn), and therefore the three segments have the same LSN // value. Without the other two segments, the calculation code would not count the leased LSN as a point // to be retained. // Did not use `BranchStart` or `BranchEnd` so we can differentiate branches and leases during debug. 
// // Alt Design: rewrite the entire calculation code to be independent of timeline id. Both leases and // branch points can be given a synthetic id so we can unite them. let mut lease_parent = parent; // Start of a lease. segments.push(SegmentMeta { segment: Segment { parent: Some(lease_parent), lsn: lsn.0, size: None, // Filled in later, if necessary needed: lsn > next_pitr_cutoff, // only needed if the point is within rentention. }, timeline_id: timeline.timeline_id, kind: LsnKind::LeaseStart, }); lease_parent += 1; // End of the lease. segments.push(SegmentMeta { segment: Segment { parent: Some(lease_parent), lsn: lsn.0, size: None, // Filled in later, if necessary needed: true, // everything at the lease LSN must be readable => is needed }, timeline_id: timeline.timeline_id, kind: LsnKind::LeaseEnd, }); } } let branch_end_lsn = if branch_is_invisible { // If the branch is invisible, the branch end is the last requested LSN (likely a branch cutoff point). segments.last().unwrap().segment.lsn } else { // Otherwise, the branch end is the last record LSN. last_record_lsn.0 }; // Current end of the timeline segments.push(SegmentMeta { segment: Segment { parent: Some(parent), lsn: branch_end_lsn, size: None, // Filled in later, if necessary needed: true, }, timeline_id: timeline.timeline_id, kind: LsnKind::BranchEnd, }); timeline_inputs.push(TimelineInputs { timeline_id: timeline.timeline_id, ancestor_id: timeline.get_ancestor_timeline_id(), ancestor_lsn, last_record: last_record_lsn, // this is not used above, because it might not have updated recently enough latest_gc_cutoff: *timeline.get_applied_gc_cutoff_lsn(), next_pitr_cutoff, retention_param_cutoff, lease_points, }); } // We now have all segments from the timelines in 'segments'. The timelines // haven't been linked to each other yet, though. Do that. 
for (_timeline_id, seg_id, ancestor) in branchstart_segments { // Look up the branch point if let Some(ancestor) = ancestor { let parent_id = *branchpoint_segments.get(&ancestor).unwrap(); segments[seg_id].segment.parent = Some(parent_id); } } // We left the 'size' field empty in all of the Segments so far. // Now find logical sizes for all of the points that might need or benefit from them. fill_logical_sizes( &timelines, &mut segments, limit, logical_size_cache, cause, ctx, ) .await?; if tenant.cancel.is_cancelled() { // If we're shutting down, return an error rather than a sparse result that might include some // timelines from before we started shutting down return Err(CalculateSyntheticSizeError::Cancelled); } Ok(ModelInputs { segments, timeline_inputs, }) } /// Augment 'segments' with logical sizes /// /// This will leave segments' sizes as None if the Timeline associated with the segment is deleted concurrently /// (i.e. we cannot read its logical size at a particular LSN). async fn fill_logical_sizes( timelines: &[Arc<Timeline>], segments: &mut [SegmentMeta], limit: &Arc<Semaphore>, logical_size_cache: &mut HashMap<(TimelineId, Lsn), u64>, cause: LogicalSizeCalculationCause, ctx: &RequestContext, ) -> Result<(), CalculateSyntheticSizeError> { let timeline_hash: HashMap<TimelineId, Arc<Timeline>> = HashMap::from_iter( timelines .iter() .map(|timeline| (timeline.timeline_id, Arc::clone(timeline))), ); // record the used/inserted cache keys here, to remove extras not to start leaking // after initial run the cache should be quite stable, but live timelines will eventually // require new lsns to be inspected. let mut sizes_needed = HashMap::<(TimelineId, Lsn), Option<u64>>::new(); // with joinset, on drop, all of the tasks will just be de-scheduled, which we can use to // our advantage with `?` error handling. 
let mut joinset = tokio::task::JoinSet::new(); // For each point that would benefit from having a logical size available, // spawn a Task to fetch it, unless we have it cached already. for seg in segments.iter() { if !seg.size_needed() { continue; } let timeline_id = seg.timeline_id; let lsn = Lsn(seg.segment.lsn); if let Entry::Vacant(e) = sizes_needed.entry((timeline_id, lsn)) { let cached_size = logical_size_cache.get(&(timeline_id, lsn)).cloned(); if cached_size.is_none() { let timeline = Arc::clone(timeline_hash.get(&timeline_id).unwrap()); let parallel_size_calcs = Arc::clone(limit); let ctx = ctx.attached_child().with_scope_timeline(&timeline); joinset.spawn( calculate_logical_size(parallel_size_calcs, timeline, lsn, cause, ctx) .in_current_span(), ); } e.insert(cached_size); } } // Perform the size lookups let mut have_any_error = None; while let Some(res) = joinset.join_next().await { // each of these come with Result<anyhow::Result<_>, JoinError> // because of spawn + spawn_blocking match res { Err(join_error) if join_error.is_cancelled() => { unreachable!("we are not cancelling any of the futures, nor should be"); } Err(join_error) => { // cannot really do anything, as this panic is likely a bug error!( "task that calls spawn_ondemand_logical_size_calculation panicked: {join_error:#}" ); have_any_error = Some(CalculateSyntheticSizeError::Fatal( anyhow::anyhow!(join_error) .context("task that calls spawn_ondemand_logical_size_calculation"), )); } Ok(Err(recv_result_error)) => { // cannot really do anything, as this panic is likely a bug error!("failed to receive logical size query result: {recv_result_error:#}"); have_any_error = Some(CalculateSyntheticSizeError::Fatal( anyhow::anyhow!(recv_result_error) .context("Receiving logical size query result"), )); } Ok(Ok(TimelineAtLsnSizeResult(timeline, lsn, Err(error)))) => { if matches!(error, CalculateLogicalSizeError::Cancelled) { // Skip this: it's okay if one timeline among many is shutting down while we 
// calculate inputs for the overall tenant. continue; } else { warn!( timeline_id=%timeline.timeline_id, "failed to calculate logical size at {lsn}: {error:#}" ); have_any_error = Some(CalculateSyntheticSizeError::LogicalSize { timeline_id: timeline.timeline_id, lsn, error, }); } } Ok(Ok(TimelineAtLsnSizeResult(timeline, lsn, Ok(size)))) => { debug!(timeline_id=%timeline.timeline_id, %lsn, size, "size calculated"); logical_size_cache.insert((timeline.timeline_id, lsn), size); sizes_needed.insert((timeline.timeline_id, lsn), Some(size)); } } } // prune any keys not needed anymore; we record every used key and added key. logical_size_cache.retain(|key, _| sizes_needed.contains_key(key)); if let Some(error) = have_any_error { // we cannot complete this round, because we are missing data. // we have however cached all we were able to request calculation on. return Err(error); } // Insert the looked up sizes to the Segments for seg in segments.iter_mut() { if !seg.size_needed() { continue; } let timeline_id = seg.timeline_id; let lsn = Lsn(seg.segment.lsn); if let Some(Some(size)) = sizes_needed.get(&(timeline_id, lsn)) { seg.segment.size = Some(*size); } } Ok(()) } impl ModelInputs { pub fn calculate_model(&self) -> tenant_size_model::StorageModel { // Convert SegmentMetas into plain Segments StorageModel { segments: self .segments .iter() .map(|seg| seg.segment.clone()) .collect(), } } // calculate total project size pub fn calculate(&self) -> u64 { let storage = self.calculate_model(); let sizes = storage.calculate(); sizes.total_size } } /// Newtype around the tuple that carries the timeline at lsn logical size calculation. 
struct TimelineAtLsnSizeResult( Arc<crate::tenant::Timeline>, utils::lsn::Lsn, Result<u64, CalculateLogicalSizeError>, ); #[instrument(skip_all, fields(timeline_id=%timeline.timeline_id, lsn=%lsn))] async fn calculate_logical_size( limit: Arc<tokio::sync::Semaphore>, timeline: Arc<crate::tenant::Timeline>, lsn: utils::lsn::Lsn, cause: LogicalSizeCalculationCause, ctx: RequestContext, ) -> Result<TimelineAtLsnSizeResult, RecvError> { let _permit = tokio::sync::Semaphore::acquire_owned(limit) .await .expect("global semaphore should not had been closed"); let size_res = timeline .spawn_ondemand_logical_size_calculation(lsn, cause, ctx) .instrument(info_span!("spawn_ondemand_logical_size_calculation")) .await?; Ok(TimelineAtLsnSizeResult(timeline, lsn, size_res)) } #[cfg(test)] #[test] fn verify_size_for_multiple_branches() { // this is generated from integration test test_tenant_size_with_multiple_branches, but this way // it has the stable lsn's // // The timeline_inputs don't participate in the size calculation, and are here just to explain // the inputs. 
let doc = r#" { "segments": [ { "segment": { "parent": 9, "lsn": 26033560, "size": null, "needed": false }, "timeline_id": "20b129c9b50cff7213e6503a31b2a5ce", "kind": "BranchStart" }, { "segment": { "parent": 0, "lsn": 35720400, "size": 25206784, "needed": false }, "timeline_id": "20b129c9b50cff7213e6503a31b2a5ce", "kind": "GcCutOff" }, { "segment": { "parent": 1, "lsn": 35851472, "size": null, "needed": true }, "timeline_id": "20b129c9b50cff7213e6503a31b2a5ce", "kind": "BranchEnd" }, { "segment": { "parent": 7, "lsn": 24566168, "size": null, "needed": false }, "timeline_id": "454626700469f0a9914949b9d018e876", "kind": "BranchStart" }, { "segment": { "parent": 3, "lsn": 25261936, "size": 26050560, "needed": false }, "timeline_id": "454626700469f0a9914949b9d018e876", "kind": "GcCutOff" }, { "segment": { "parent": 4, "lsn": 25393008, "size": null, "needed": true }, "timeline_id": "454626700469f0a9914949b9d018e876", "kind": "BranchEnd" }, { "segment": { "parent": null, "lsn": 23694408, "size": null, "needed": false }, "timeline_id": "cb5e3cbe60a4afc00d01880e1a37047f", "kind": "BranchStart" }, { "segment": { "parent": 6, "lsn": 24566168, "size": 25739264, "needed": false }, "timeline_id": "cb5e3cbe60a4afc00d01880e1a37047f", "kind": "BranchPoint" }, { "segment": { "parent": 7, "lsn": 25902488, "size": 26402816, "needed": false }, "timeline_id": "cb5e3cbe60a4afc00d01880e1a37047f", "kind": "GcCutOff" }, { "segment": { "parent": 8, "lsn": 26033560, "size": 26468352, "needed": true }, "timeline_id": "cb5e3cbe60a4afc00d01880e1a37047f", "kind": "BranchPoint" }, { "segment": { "parent": 9, "lsn": 26033560, "size": null, "needed": true }, "timeline_id": "cb5e3cbe60a4afc00d01880e1a37047f", "kind": "BranchEnd" } ], "timeline_inputs": [ { "timeline_id": "20b129c9b50cff7213e6503a31b2a5ce", "ancestor_lsn": "0/18D3D98", "last_record": "0/2230CD0", "latest_gc_cutoff": "0/1698C48", "next_pitr_cutoff": "0/2210CD0", "retention_param_cutoff": null, "lease_points": [] }, { "timeline_id": 
"454626700469f0a9914949b9d018e876", "ancestor_lsn": "0/176D998", "last_record": "0/1837770", "latest_gc_cutoff": "0/1698C48", "next_pitr_cutoff": "0/1817770", "retention_param_cutoff": null, "lease_points": [] }, { "timeline_id": "cb5e3cbe60a4afc00d01880e1a37047f", "ancestor_lsn": "0/0", "last_record": "0/18D3D98", "latest_gc_cutoff": "0/1698C48", "next_pitr_cutoff": "0/18B3D98", "retention_param_cutoff": null, "lease_points": [] } ] } "#; let inputs: ModelInputs = serde_json::from_str(doc).unwrap(); assert_eq!(inputs.calculate(), 37_851_408); } #[cfg(test)] #[test] fn verify_size_for_one_branch() { let doc = r#" { "segments": [ { "segment": { "parent": null, "lsn": 0, "size": null, "needed": false }, "timeline_id": "f15ae0cf21cce2ba27e4d80c6709a6cd", "kind": "BranchStart" }, { "segment": { "parent": 0, "lsn": 305547335776, "size": 220054675456, "needed": false }, "timeline_id": "f15ae0cf21cce2ba27e4d80c6709a6cd", "kind": "GcCutOff" }, { "segment": { "parent": 1, "lsn": 305614444640, "size": null, "needed": true }, "timeline_id": "f15ae0cf21cce2ba27e4d80c6709a6cd", "kind": "BranchEnd" } ], "timeline_inputs": [ { "timeline_id": "f15ae0cf21cce2ba27e4d80c6709a6cd", "ancestor_lsn": "0/0", "last_record": "47/280A5860", "latest_gc_cutoff": "47/240A5860", "next_pitr_cutoff": "47/240A5860", "retention_param_cutoff": "0/0", "lease_points": [] } ] }"#; let model: ModelInputs = serde_json::from_str(doc).unwrap(); let res = model.calculate_model().calculate(); println!("calculated synthetic size: {}", res.total_size); println!("result: {:?}", serde_json::to_string(&res.segments)); use utils::lsn::Lsn; let latest_gc_cutoff_lsn: Lsn = "47/240A5860".parse().unwrap(); let last_lsn: Lsn = "47/280A5860".parse().unwrap(); println!( "latest_gc_cutoff lsn 47/240A5860 is {}, last_lsn lsn 47/280A5860 is {}", u64::from(latest_gc_cutoff_lsn), u64::from(last_lsn) ); assert_eq!(res.total_size, 220121784320); }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/secondary/heatmap_uploader.rs
pageserver/src/tenant/secondary/heatmap_uploader.rs
use std::collections::HashMap; use std::pin::Pin; use std::sync::{Arc, Weak}; use std::time::{Duration, Instant}; use futures::Future; use pageserver_api::shard::TenantShardId; use remote_storage::{GenericRemoteStorage, TimeoutOrCancel}; use tokio_util::sync::CancellationToken; use tracing::{Instrument, info_span, instrument}; use utils::backoff; use utils::completion::Barrier; use utils::crashsafe::path_with_suffix_extension; use utils::yielding_loop::yielding_loop; use super::heatmap::HeatMapTenant; use super::scheduler::{ self, JobGenerator, RunningJob, SchedulingResult, TenantBackgroundJobs, period_jitter, period_warmup, }; use super::{CommandRequest, SecondaryTenantError, UploadCommand}; use crate::TEMP_FILE_SUFFIX; use crate::metrics::SECONDARY_MODE; use crate::tenant::TenantShard; use crate::tenant::config::AttachmentMode; use crate::tenant::mgr::{GetTenantError, TenantManager}; use crate::tenant::remote_timeline_client::remote_heatmap_path; use crate::tenant::span::debug_assert_current_span_has_tenant_id; use crate::tenant::tasks::{BackgroundLoopKind, warn_when_period_overrun}; use crate::virtual_file::VirtualFile; pub(super) async fn heatmap_uploader_task( tenant_manager: Arc<TenantManager>, remote_storage: GenericRemoteStorage, command_queue: tokio::sync::mpsc::Receiver<CommandRequest<UploadCommand>>, background_jobs_can_start: Barrier, cancel: CancellationToken, ) { let concurrency = tenant_manager.get_conf().heatmap_upload_concurrency; let generator = HeatmapUploader { tenant_manager, remote_storage, cancel: cancel.clone(), tenants: HashMap::new(), }; let mut scheduler = Scheduler::new(generator, concurrency); scheduler .run(command_queue, background_jobs_can_start, cancel) .instrument(info_span!("heatmap_upload_scheduler")) .await } /// This type is owned by a single task ([`heatmap_uploader_task`]) which runs an event /// handling loop and mutates it as needed: there are no locks here, because that event loop /// can hold &mut references to this type 
throughout. struct HeatmapUploader { tenant_manager: Arc<TenantManager>, remote_storage: GenericRemoteStorage, cancel: CancellationToken, tenants: HashMap<TenantShardId, UploaderTenantState>, } struct WriteInProgress { barrier: Barrier, } impl RunningJob for WriteInProgress { fn get_barrier(&self) -> Barrier { self.barrier.clone() } } struct UploadPending { tenant: Arc<TenantShard>, last_upload: Option<LastUploadState>, target_time: Option<Instant>, period: Option<Duration>, } impl scheduler::PendingJob for UploadPending { fn get_tenant_shard_id(&self) -> &TenantShardId { self.tenant.get_tenant_shard_id() } } struct WriteComplete { tenant_shard_id: TenantShardId, completed_at: Instant, uploaded: Option<LastUploadState>, next_upload: Option<Instant>, } impl scheduler::Completion for WriteComplete { fn get_tenant_shard_id(&self) -> &TenantShardId { &self.tenant_shard_id } } /// The heatmap uploader keeps a little bit of per-tenant state, mainly to remember /// when we last did a write. We only populate this after doing at least one /// write for a tenant -- this avoids holding state for tenants that have /// uploads disabled. struct UploaderTenantState { // This Weak only exists to enable culling idle instances of this type // when the Tenant has been deallocated. tenant: Weak<TenantShard>, /// Digest of the serialized heatmap that we last successfully uploaded last_upload_state: Option<LastUploadState>, /// When the last upload attempt completed (may have been successful or failed) last_upload: Option<Instant>, /// When should we next do an upload? None means never. 
next_upload: Option<Instant>, } type Scheduler = TenantBackgroundJobs< HeatmapUploader, UploadPending, WriteInProgress, WriteComplete, UploadCommand, >; impl JobGenerator<UploadPending, WriteInProgress, WriteComplete, UploadCommand> for HeatmapUploader { async fn schedule(&mut self) -> SchedulingResult<UploadPending> { // Cull any entries in self.tenants whose Arc<Tenant> is gone self.tenants .retain(|_k, v| v.tenant.upgrade().is_some() && v.next_upload.is_some()); let now = Instant::now(); let mut result = SchedulingResult { jobs: Vec::new(), want_interval: None, }; let tenants = self.tenant_manager.get_attached_active_tenant_shards(); yielding_loop(1000, &self.cancel, tenants.into_iter(), |tenant| { let period = match tenant.get_heatmap_period() { None => { // Heatmaps are disabled for this tenant return; } Some(period) => { // If any tenant has asked for uploads more frequent than our scheduling interval, // reduce it to match so that we can keep up. This is mainly useful in testing, where // we may set rather short intervals. result.want_interval = match result.want_interval { None => Some(period), Some(existing) => Some(std::cmp::min(period, existing)), }; period } }; // Stale attachments do not upload anything: if we are in this state, there is probably some // other attachment in mode Single or Multi running on another pageserver, and we don't // want to thrash and overwrite their heatmap uploads. if tenant.get_attach_mode() == AttachmentMode::Stale { return; } // Create an entry in self.tenants if one doesn't already exist: this will later be updated // with the completion time in on_completion. 
let state = self .tenants .entry(*tenant.get_tenant_shard_id()) .or_insert_with(|| UploaderTenantState { tenant: Arc::downgrade(&tenant), last_upload: None, next_upload: Some(now.checked_add(period_warmup(period)).unwrap_or(now)), last_upload_state: None, }); // Decline to do the upload if insufficient time has passed if state.next_upload.map(|nu| nu > now).unwrap_or(false) { return; } let last_upload = state.last_upload_state.clone(); result.jobs.push(UploadPending { tenant, last_upload, target_time: state.next_upload, period: Some(period), }); }) .await .ok(); result } fn spawn( &mut self, job: UploadPending, ) -> ( WriteInProgress, Pin<Box<dyn Future<Output = WriteComplete> + Send>>, ) { let UploadPending { tenant, last_upload, target_time, period, } = job; let remote_storage = self.remote_storage.clone(); let (completion, barrier) = utils::completion::channel(); let tenant_shard_id = *tenant.get_tenant_shard_id(); (WriteInProgress { barrier }, Box::pin(async move { // Guard for the barrier in [`WriteInProgress`] let _completion = completion; let started_at = Instant::now(); let uploaded = match upload_tenant_heatmap(remote_storage, &tenant, last_upload.clone()).await { Ok(UploadHeatmapOutcome::Uploaded(uploaded)) => { let duration = Instant::now().duration_since(started_at); SECONDARY_MODE .upload_heatmap_duration .observe(duration.as_secs_f64()); SECONDARY_MODE.upload_heatmap.inc(); Some(uploaded) } Ok(UploadHeatmapOutcome::NoChange | UploadHeatmapOutcome::Skipped) => last_upload, Err(UploadHeatmapError::Upload(e)) => { tracing::warn!( "Failed to upload heatmap for tenant {}: {e:#}", tenant.get_tenant_shard_id(), ); let duration = Instant::now().duration_since(started_at); SECONDARY_MODE .upload_heatmap_duration .observe(duration.as_secs_f64()); SECONDARY_MODE.upload_heatmap_errors.inc(); last_upload } Err(UploadHeatmapError::Cancelled) => { tracing::info!("Cancelled heatmap upload, shutting down"); last_upload } }; let now = Instant::now(); // If the job had 
a target execution time, we may check our final execution // time against that for observability purposes. if let (Some(target_time), Some(period)) = (target_time, period) { // Elapsed time includes any scheduling lag as well as the execution of the job let elapsed = now.duration_since(target_time); warn_when_period_overrun(elapsed, period, BackgroundLoopKind::HeatmapUpload); } let next_upload = tenant .get_heatmap_period() .and_then(|period| now.checked_add(period_jitter(period, 5))); WriteComplete { tenant_shard_id: *tenant.get_tenant_shard_id(), completed_at: now, uploaded, next_upload, } }.instrument(info_span!(parent: None, "heatmap_upload", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug())))) } fn on_command( &mut self, command: UploadCommand, ) -> Result<UploadPending, SecondaryTenantError> { let tenant_shard_id = command.get_tenant_shard_id(); tracing::info!( tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Starting heatmap write on command"); let tenant = self .tenant_manager .get_attached_tenant_shard(*tenant_shard_id)?; if !tenant.is_active() { return Err(GetTenantError::NotActive(*tenant_shard_id).into()); } Ok(UploadPending { // Ignore our state for last digest: this forces an upload even if nothing has changed last_upload: None, tenant, target_time: None, period: None, }) } #[instrument(skip_all, fields(tenant_id=%completion.tenant_shard_id.tenant_id, shard_id=%completion.tenant_shard_id.shard_slug()))] fn on_completion(&mut self, completion: WriteComplete) { tracing::debug!("Heatmap upload completed"); let WriteComplete { tenant_shard_id, completed_at, uploaded, next_upload, } = completion; use std::collections::hash_map::Entry; match self.tenants.entry(tenant_shard_id) { Entry::Vacant(_) => { // Tenant state was dropped, nothing to update. 
} Entry::Occupied(mut entry) => { entry.get_mut().last_upload = Some(completed_at); entry.get_mut().last_upload_state = uploaded; entry.get_mut().next_upload = next_upload } } } } enum UploadHeatmapOutcome { /// We successfully wrote to remote storage, with this digest. Uploaded(LastUploadState), /// We did not upload because the heatmap digest was unchanged since the last upload NoChange, /// We skipped the upload for some reason, such as tenant/timeline not ready Skipped, } #[derive(thiserror::Error, Debug)] enum UploadHeatmapError { #[error("Cancelled")] Cancelled, #[error(transparent)] Upload(#[from] anyhow::Error), } /// Digests describing the heatmap we most recently uploaded successfully. /// /// md5 is generally a bad hash. We use it because it's convenient for interop with AWS S3's ETag, /// which is also an md5sum. #[derive(Clone)] struct LastUploadState { // Digest of json-encoded HeatMapTenant uploaded_digest: md5::Digest, // Digest without atimes set. layers_only_digest: md5::Digest, } /// The inner upload operation. This will skip if `last_digest` is Some and matches the digest /// of the object we would have uploaded. 
async fn upload_tenant_heatmap( remote_storage: GenericRemoteStorage, tenant: &Arc<TenantShard>, last_upload: Option<LastUploadState>, ) -> Result<UploadHeatmapOutcome, UploadHeatmapError> { debug_assert_current_span_has_tenant_id(); let generation = tenant.get_generation(); debug_assert!(!generation.is_none()); if generation.is_none() { // We do not expect this: None generations should only appear in historic layer metadata, not in running Tenants tracing::warn!("Skipping heatmap upload for tenant with generation==None"); return Ok(UploadHeatmapOutcome::Skipped); } let mut heatmap = HeatMapTenant { timelines: Vec::new(), generation, upload_period_ms: tenant.get_heatmap_period().map(|p| p.as_millis()), }; let timelines = tenant.timelines.lock().unwrap().clone(); // Ensure that Tenant::shutdown waits for any upload in flight: this is needed because otherwise // when we delete a tenant, we might race with an upload in flight and end up leaving a heatmap behind // in remote storage. let Ok(_guard) = tenant.gate.enter() else { tracing::info!("Skipping heatmap upload for tenant which is shutting down"); return Err(UploadHeatmapError::Cancelled); }; for (timeline_id, timeline) in timelines { let heatmap_timeline = timeline.generate_heatmap().await; match heatmap_timeline { None => { tracing::debug!( "Skipping heatmap upload because timeline {timeline_id} is not ready" ); return Ok(UploadHeatmapOutcome::Skipped); } Some(heatmap_timeline) => { heatmap.timelines.push(heatmap_timeline); } } } // Serialize the heatmap let bytes = serde_json::to_vec(&heatmap).map_err(|e| anyhow::anyhow!(e))?; // Drop out early if nothing changed since our last upload let digest = md5::compute(&bytes); if Some(&digest) == last_upload.as_ref().map(|d| &d.uploaded_digest) { return Ok(UploadHeatmapOutcome::NoChange); } // Calculate a digest that omits atimes, so that we can distinguish actual changes in // layers from changes only in atimes. 
let heatmap_size_bytes = heatmap.get_stats().bytes; let layers_only_bytes = serde_json::to_vec(&heatmap.strip_atimes()).map_err(|e| anyhow::anyhow!(e))?; let layers_only_digest = md5::compute(&layers_only_bytes); if heatmap_size_bytes < tenant.get_checkpoint_distance() { // For small tenants, skip upload if only atimes changed. This avoids doing frequent // uploads from long-idle tenants whose atimes are just incremented by periodic // size calculations. if Some(&layers_only_digest) == last_upload.as_ref().map(|d| &d.layers_only_digest) { return Ok(UploadHeatmapOutcome::NoChange); } } let bytes = bytes::Bytes::from(bytes); let size = bytes.len(); let path = remote_heatmap_path(tenant.get_tenant_shard_id()); let cancel = &tenant.cancel; tracing::debug!("Uploading {size} byte heatmap to {path}"); if let Err(e) = backoff::retry( || async { let bytes = futures::stream::once(futures::future::ready(Ok(bytes.clone()))); remote_storage .upload_storage_object(bytes, size, &path, cancel) .await }, TimeoutOrCancel::caused_by_cancel, 3, u32::MAX, "Uploading heatmap", cancel, ) .await .ok_or_else(|| anyhow::anyhow!("Shutting down")) .and_then(|x| x) { if cancel.is_cancelled() { return Err(UploadHeatmapError::Cancelled); } else { return Err(e.into()); } } // After a successful upload persist the fresh heatmap to disk. // When restarting, the tenant will read the heatmap from disk // and additively generate a new heatmap (see [`Timeline::generate_heatmap`]). // If the heatmap is stale, the additive generation can lead to keeping previously // evicted timelines on the secondarie's disk. 
let tenant_shard_id = tenant.get_tenant_shard_id(); let heatmap_path = tenant.conf.tenant_heatmap_path(tenant_shard_id); let temp_path = path_with_suffix_extension(&heatmap_path, TEMP_FILE_SUFFIX); if let Err(err) = VirtualFile::crashsafe_overwrite(heatmap_path, temp_path, bytes).await { tracing::warn!("Non fatal IO error writing to disk after heatmap upload: {err}"); } tracing::info!("Successfully uploaded {size} byte heatmap to {path}"); Ok(UploadHeatmapOutcome::Uploaded(LastUploadState { uploaded_digest: digest, layers_only_digest, })) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/secondary/downloader.rs
pageserver/src/tenant/secondary/downloader.rs
use std::collections::{HashMap, HashSet}; use std::pin::Pin; use std::str::FromStr; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime}; use crate::metrics::{STORAGE_IO_SIZE, StorageIoSizeOperation}; use camino::Utf8PathBuf; use chrono::format::{DelayedFormat, StrftimeItems}; use futures::Future; use metrics::UIntGauge; use pageserver_api::models::SecondaryProgress; use pageserver_api::shard::TenantShardId; use remote_storage::{DownloadError, DownloadKind, DownloadOpts, Etag, GenericRemoteStorage}; use tokio_util::sync::CancellationToken; use tracing::{Instrument, info_span, instrument, warn}; use utils::completion::Barrier; use utils::crashsafe::path_with_suffix_extension; use utils::id::TimelineId; use utils::{backoff, failpoint_support, fs_ext, pausable_failpoint, serde_system_time}; use super::heatmap::{HeatMapLayer, HeatMapTenant, HeatMapTimeline}; use super::scheduler::{ self, Completion, JobGenerator, SchedulingResult, TenantBackgroundJobs, period_jitter, period_warmup, }; use super::{ CommandRequest, DownloadCommand, GetTenantError, SecondaryTenant, SecondaryTenantError, }; use crate::TEMP_FILE_SUFFIX; use crate::config::PageServerConf; use crate::context::RequestContext; use crate::disk_usage_eviction_task::{ DiskUsageEvictionInfo, EvictionCandidate, EvictionLayer, EvictionSecondaryLayer, finite_f32, }; use crate::metrics::SECONDARY_MODE; use crate::tenant::config::SecondaryLocationConfig; use crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id; use crate::tenant::ephemeral_file::is_ephemeral_file; use crate::tenant::mgr::TenantManager; use crate::tenant::remote_timeline_client::download::download_layer_file; use crate::tenant::remote_timeline_client::index::LayerFileMetadata; use crate::tenant::remote_timeline_client::{ FAILED_DOWNLOAD_WARN_THRESHOLD, FAILED_REMOTE_OP_RETRIES, is_temp_download_file, remote_heatmap_path, }; use crate::tenant::span::debug_assert_current_span_has_tenant_id; use 
crate::tenant::storage_layer::layer::local_layer_path; use crate::tenant::storage_layer::{LayerName, LayerVisibilityHint}; use crate::tenant::tasks::{BackgroundLoopKind, warn_when_period_overrun}; use crate::virtual_file::{MaybeFatalIo, VirtualFile, on_fatal_io_error}; /// For each tenant, default period for how long must have passed since the last download_tenant call before /// calling it again. This default is replaced with the value of [`HeatMapTenant::upload_period_ms`] after first /// download, if the uploader populated it. const DEFAULT_DOWNLOAD_INTERVAL: Duration = Duration::from_millis(60000); pub(super) async fn downloader_task( tenant_manager: Arc<TenantManager>, remote_storage: GenericRemoteStorage, command_queue: tokio::sync::mpsc::Receiver<CommandRequest<DownloadCommand>>, background_jobs_can_start: Barrier, cancel: CancellationToken, root_ctx: RequestContext, ) { let concurrency = tenant_manager.get_conf().secondary_download_concurrency; let generator = SecondaryDownloader { tenant_manager, remote_storage, root_ctx, }; let mut scheduler = Scheduler::new(generator, concurrency); scheduler .run(command_queue, background_jobs_can_start, cancel) .instrument(info_span!("secondary_download_scheduler")) .await } struct SecondaryDownloader { tenant_manager: Arc<TenantManager>, remote_storage: GenericRemoteStorage, root_ctx: RequestContext, } #[derive(Debug, Clone)] pub(super) struct OnDiskState { metadata: LayerFileMetadata, access_time: SystemTime, local_path: Utf8PathBuf, } impl OnDiskState { fn new( _conf: &'static PageServerConf, _tenant_shard_id: &TenantShardId, _imeline_id: &TimelineId, _ame: LayerName, metadata: LayerFileMetadata, access_time: SystemTime, local_path: Utf8PathBuf, ) -> Self { Self { metadata, access_time, local_path, } } // This is infallible, because all errors are either acceptable (ENOENT), or totally // unexpected (fatal). 
pub(super) fn remove_blocking(&self) { // We tolerate ENOENT, because between planning eviction and executing // it, the secondary downloader could have seen an updated heatmap that // resulted in a layer being deleted. // Other local I/O errors are process-fatal: these should never happen. std::fs::remove_file(&self.local_path) .or_else(fs_ext::ignore_not_found) .fatal_err("Deleting secondary layer") } pub(crate) fn file_size(&self) -> u64 { self.metadata.file_size } } pub(super) struct SecondaryDetailTimeline { on_disk_layers: HashMap<LayerName, OnDiskState>, /// We remember when layers were evicted, to prevent re-downloading them. pub(super) evicted_at: HashMap<LayerName, SystemTime>, ctx: RequestContext, } impl Clone for SecondaryDetailTimeline { fn clone(&self) -> Self { Self { on_disk_layers: self.on_disk_layers.clone(), evicted_at: self.evicted_at.clone(), // This is a bit awkward. The downloader code operates on a snapshot // of the secondary list to avoid locking it for extended periods of time. // No particularly strong reason to chose [`RequestContext::detached_child`], // but makes more sense than [`RequestContext::attached_child`]. 
ctx: self .ctx .detached_child(self.ctx.task_kind(), self.ctx.download_behavior()), } } } impl std::fmt::Debug for SecondaryDetailTimeline { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("SecondaryDetailTimeline") .field("on_disk_layers", &self.on_disk_layers) .field("evicted_at", &self.evicted_at) .finish() } } impl SecondaryDetailTimeline { pub(super) fn empty(ctx: RequestContext) -> Self { SecondaryDetailTimeline { on_disk_layers: Default::default(), evicted_at: Default::default(), ctx, } } pub(super) fn context(&self) -> &RequestContext { &self.ctx } pub(super) fn remove_layer( &mut self, name: &LayerName, resident_metric: &UIntGauge, ) -> Option<OnDiskState> { let removed = self.on_disk_layers.remove(name); if let Some(removed) = &removed { resident_metric.sub(removed.file_size()); } removed } /// `local_path` fn touch_layer<F>( &mut self, conf: &'static PageServerConf, tenant_shard_id: &TenantShardId, timeline_id: &TimelineId, touched: &HeatMapLayer, resident_metric: &UIntGauge, local_path: F, ) where F: FnOnce() -> Utf8PathBuf, { use std::collections::hash_map::Entry; match self.on_disk_layers.entry(touched.name.clone()) { Entry::Occupied(mut v) => { v.get_mut().access_time = touched.access_time; } Entry::Vacant(e) => { e.insert(OnDiskState::new( conf, tenant_shard_id, timeline_id, touched.name.clone(), touched.metadata.clone(), touched.access_time, local_path(), )); resident_metric.add(touched.metadata.file_size); } } } } // Aspects of a heatmap that we remember after downloading it #[derive(Clone, Debug)] struct DownloadSummary { etag: Etag, #[allow(unused)] mtime: SystemTime, upload_period: Duration, } /// This state is written by the secondary downloader, it is opaque /// to TenantManager #[derive(Debug)] pub(super) struct SecondaryDetail { pub(super) config: SecondaryLocationConfig, last_download: Option<DownloadSummary>, next_download: Option<Instant>, timelines: HashMap<TimelineId, SecondaryDetailTimeline>, } /// 
Helper for logging SystemTime fn strftime(t: &'_ SystemTime) -> DelayedFormat<StrftimeItems<'_>> { let datetime: chrono::DateTime<chrono::Utc> = (*t).into(); datetime.format("%d/%m/%Y %T") } /// Information returned from download function when it detects the heatmap has changed struct HeatMapModified { etag: Etag, last_modified: SystemTime, bytes: Vec<u8>, } enum HeatMapDownload { // The heatmap's etag has changed: return the new etag, mtime and the body bytes Modified(HeatMapModified), // The heatmap's etag is unchanged Unmodified, } impl SecondaryDetail { pub(super) fn new(config: SecondaryLocationConfig) -> Self { Self { config, last_download: None, next_download: None, timelines: HashMap::new(), } } #[cfg(feature = "testing")] pub(crate) fn total_resident_size(&self) -> u64 { self.timelines .values() .map(|tl| { tl.on_disk_layers .values() .map(|v| v.metadata.file_size) .sum::<u64>() }) .sum::<u64>() } pub(super) fn evict_layer( &mut self, name: LayerName, timeline_id: &TimelineId, now: SystemTime, resident_metric: &UIntGauge, ) -> Option<OnDiskState> { let timeline = self.timelines.get_mut(timeline_id)?; let removed = timeline.remove_layer(&name, resident_metric); if removed.is_some() { timeline.evicted_at.insert(name, now); } removed } pub(super) fn remove_timeline( &mut self, tenant_shard_id: &TenantShardId, timeline_id: &TimelineId, resident_metric: &UIntGauge, ) { let removed = self.timelines.remove(timeline_id); if let Some(removed) = removed { Self::clear_timeline_metrics(tenant_shard_id, timeline_id, removed, resident_metric); } } pub(super) fn drain_timelines( &mut self, tenant_shard_id: &TenantShardId, resident_metric: &UIntGauge, ) { for (timeline_id, removed) in self.timelines.drain() { Self::clear_timeline_metrics(tenant_shard_id, &timeline_id, removed, resident_metric); } } fn clear_timeline_metrics( tenant_shard_id: &TenantShardId, timeline_id: &TimelineId, detail: SecondaryDetailTimeline, resident_metric: &UIntGauge, ) { resident_metric.sub( 
detail .on_disk_layers .values() .map(|l| l.metadata.file_size) .sum(), ); let shard_id = format!("{}", tenant_shard_id.shard_slug()); let tenant_id = tenant_shard_id.tenant_id.to_string(); let timeline_id = timeline_id.to_string(); for op in StorageIoSizeOperation::VARIANTS { let _ = STORAGE_IO_SIZE.remove_label_values(&[ op, tenant_id.as_str(), shard_id.as_str(), timeline_id.as_str(), ]); } } /// Additionally returns the total number of layers, used for more stable relative access time /// based eviction. pub(super) fn get_layers_for_eviction( &self, parent: &Arc<SecondaryTenant>, ) -> (DiskUsageEvictionInfo, usize) { let mut result = DiskUsageEvictionInfo::default(); let mut total_layers = 0; for (timeline_id, timeline_detail) in &self.timelines { result .resident_layers .extend(timeline_detail.on_disk_layers.iter().map(|(name, ods)| { EvictionCandidate { layer: EvictionLayer::Secondary(EvictionSecondaryLayer { secondary_tenant: parent.clone(), timeline_id: *timeline_id, name: name.clone(), metadata: ods.metadata.clone(), }), last_activity_ts: ods.access_time, relative_last_activity: finite_f32::FiniteF32::ZERO, // Secondary location layers are presumed visible, because Covered layers // are excluded from the heatmap visibility: LayerVisibilityHint::Visible, } })); // total might be missing currently downloading layers, but as a lower than actual // value it is good enough approximation. 
total_layers += timeline_detail.on_disk_layers.len() + timeline_detail.evicted_at.len(); } result.max_layer_size = result .resident_layers .iter() .map(|l| l.layer.get_file_size()) .max(); tracing::debug!( "eviction: secondary tenant {} found {} timelines, {} layers", parent.get_tenant_shard_id(), self.timelines.len(), result.resident_layers.len() ); (result, total_layers) } } struct PendingDownload { secondary_state: Arc<SecondaryTenant>, last_download: Option<DownloadSummary>, target_time: Option<Instant>, } impl scheduler::PendingJob for PendingDownload { fn get_tenant_shard_id(&self) -> &TenantShardId { self.secondary_state.get_tenant_shard_id() } } struct RunningDownload { barrier: Barrier, } impl scheduler::RunningJob for RunningDownload { fn get_barrier(&self) -> Barrier { self.barrier.clone() } } struct CompleteDownload { secondary_state: Arc<SecondaryTenant>, completed_at: Instant, result: Result<(), UpdateError>, } impl scheduler::Completion for CompleteDownload { fn get_tenant_shard_id(&self) -> &TenantShardId { self.secondary_state.get_tenant_shard_id() } } type Scheduler = TenantBackgroundJobs< SecondaryDownloader, PendingDownload, RunningDownload, CompleteDownload, DownloadCommand, >; impl JobGenerator<PendingDownload, RunningDownload, CompleteDownload, DownloadCommand> for SecondaryDownloader { #[instrument(skip_all, fields(tenant_id=%completion.get_tenant_shard_id().tenant_id, shard_id=%completion.get_tenant_shard_id().shard_slug()))] fn on_completion(&mut self, completion: CompleteDownload) { let CompleteDownload { secondary_state, completed_at: _completed_at, result, } = completion; tracing::debug!("Secondary tenant download completed"); let mut detail = secondary_state.detail.lock().unwrap(); match result { Err(UpdateError::Restart) => { // Start downloading again as soon as we can. This will involve waiting for the scheduler's // scheduling interval. 
This slightly reduces the peak download speed of tenants that hit their // deadline and keep restarting, but that also helps give other tenants a chance to execute rather // that letting one big tenant dominate for a long time. detail.next_download = Some(Instant::now()); } _ => { let period = detail .last_download .as_ref() .map(|d| d.upload_period) .unwrap_or(DEFAULT_DOWNLOAD_INTERVAL); // We advance next_download irrespective of errors: we don't want error cases to result in // expensive busy-polling. detail.next_download = Some(Instant::now() + period_jitter(period, 5)); } } } async fn schedule(&mut self) -> SchedulingResult<PendingDownload> { let mut result = SchedulingResult { jobs: Vec::new(), want_interval: None, }; // Step 1: identify some tenants that we may work on let mut tenants: Vec<Arc<SecondaryTenant>> = Vec::new(); self.tenant_manager .foreach_secondary_tenants(|_id, secondary_state| { tenants.push(secondary_state.clone()); }); // Step 2: filter out tenants which are not yet elegible to run let now = Instant::now(); result.jobs = tenants .into_iter() .filter_map(|secondary_tenant| { let (last_download, next_download) = { let mut detail = secondary_tenant.detail.lock().unwrap(); if !detail.config.warm { // Downloads are disabled for this tenant detail.next_download = None; return None; } if detail.next_download.is_none() { // Initialize randomly in the range from 0 to our interval: this uniformly spreads the start times. Subsequent // rounds will use a smaller jitter to avoid accidentally synchronizing later. 
detail.next_download = Some(now.checked_add(period_warmup(DEFAULT_DOWNLOAD_INTERVAL)).expect( "Using our constant, which is known to be small compared with clock range", )); } (detail.last_download.clone(), detail.next_download.unwrap()) }; if now > next_download { Some(PendingDownload { secondary_state: secondary_tenant, last_download, target_time: Some(next_download), }) } else { None } }) .collect(); // Step 3: sort by target execution time to run most urgent first. result.jobs.sort_by_key(|j| j.target_time); result } fn on_command( &mut self, command: DownloadCommand, ) -> Result<PendingDownload, SecondaryTenantError> { let tenant_shard_id = command.get_tenant_shard_id(); let tenant = self .tenant_manager .get_secondary_tenant_shard(*tenant_shard_id) .ok_or(GetTenantError::ShardNotFound(*tenant_shard_id))?; Ok(PendingDownload { target_time: None, last_download: None, secondary_state: tenant, }) } fn spawn( &mut self, job: PendingDownload, ) -> ( RunningDownload, Pin<Box<dyn Future<Output = CompleteDownload> + Send>>, ) { let PendingDownload { secondary_state, last_download, target_time, } = job; let (completion, barrier) = utils::completion::channel(); let remote_storage = self.remote_storage.clone(); let conf = self.tenant_manager.get_conf(); let tenant_shard_id = *secondary_state.get_tenant_shard_id(); let download_ctx = self .root_ctx .attached_child() .with_scope_secondary_tenant(&tenant_shard_id); (RunningDownload { barrier }, Box::pin(async move { let _completion = completion; let result = TenantDownloader::new(conf, &remote_storage, &secondary_state) .download(&download_ctx) .await; match &result { Err(UpdateError::NoData) => { tracing::info!("No heatmap found for tenant. This is fine if it is new."); }, Err(UpdateError::NoSpace) => { tracing::warn!("Insufficient space while downloading. 
Will retry later."); } Err(UpdateError::Cancelled) => { tracing::info!("Shut down while downloading"); }, Err(UpdateError::Deserialize(e)) => { tracing::error!("Corrupt content while downloading tenant: {e}"); }, Err(e @ (UpdateError::DownloadError(_) | UpdateError::Other(_))) => { tracing::error!("Error while downloading tenant: {e}"); }, Err(UpdateError::Restart) => { tracing::info!("Download reached deadline & will restart to update heatmap") } Ok(()) => {} }; // Irrespective of the result, we will reschedule ourselves to run after our usual period. // If the job had a target execution time, we may check our final execution // time against that for observability purposes. if let (Some(target_time), Some(last_download)) = (target_time, last_download) { // Elapsed time includes any scheduling lag as well as the execution of the job let elapsed = Instant::now().duration_since(target_time); warn_when_period_overrun( elapsed, last_download.upload_period, BackgroundLoopKind::SecondaryDownload, ); } CompleteDownload { secondary_state, completed_at: Instant::now(), result } }.instrument(info_span!(parent: None, "secondary_download", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug())))) } } enum LayerAction { Download, NoAction, Skip, Touch, } /// This type is a convenience to group together the various functions involved in /// freshening a secondary tenant. 
struct TenantDownloader<'a> { conf: &'static PageServerConf, remote_storage: &'a GenericRemoteStorage, secondary_state: &'a SecondaryTenant, } /// Errors that may be encountered while updating a tenant #[derive(thiserror::Error, Debug)] enum UpdateError { /// This is not a true failure, but it's how a download indicates that it would like to be restarted by /// the scheduler, to pick up the latest heatmap #[error("Reached deadline, restarting downloads")] Restart, #[error("No remote data found")] NoData, #[error("Insufficient local storage space")] NoSpace, #[error("Failed to download: {0}")] DownloadError(DownloadError), #[error(transparent)] Deserialize(#[from] serde_json::Error), #[error("Cancelled")] Cancelled, #[error(transparent)] Other(#[from] anyhow::Error), } impl From<DownloadError> for UpdateError { fn from(value: DownloadError) -> Self { match &value { DownloadError::Cancelled => Self::Cancelled, DownloadError::NotFound => Self::NoData, _ => Self::DownloadError(value), } } } impl From<std::io::Error> for UpdateError { fn from(value: std::io::Error) -> Self { if let Some(nix::errno::Errno::ENOSPC) = value.raw_os_error().map(nix::errno::Errno::from_raw) { UpdateError::NoSpace } else if value .get_ref() .and_then(|x| x.downcast_ref::<DownloadError>()) .is_some() { UpdateError::from(DownloadError::from(value)) } else { // An I/O error from e.g. tokio::io::copy_buf is most likely a remote storage issue UpdateError::Other(anyhow::anyhow!(value)) } } } impl<'a> TenantDownloader<'a> { fn new( conf: &'static PageServerConf, remote_storage: &'a GenericRemoteStorage, secondary_state: &'a SecondaryTenant, ) -> Self { Self { conf, remote_storage, secondary_state, } } async fn download(&self, ctx: &RequestContext) -> Result<(), UpdateError> { debug_assert_current_span_has_tenant_id(); // For the duration of a download, we must hold the SecondaryTenant::gate, to ensure // cover our access to local storage. 
let Ok(_guard) = self.secondary_state.gate.enter() else { // Shutting down return Err(UpdateError::Cancelled); }; let tenant_shard_id = self.secondary_state.get_tenant_shard_id(); // We will use the etag from last successful download to make the download conditional on changes let last_download = self .secondary_state .detail .lock() .unwrap() .last_download .clone(); // Download the tenant's heatmap let HeatMapModified { last_modified: heatmap_mtime, etag: heatmap_etag, bytes: heatmap_bytes, } = match tokio::select!( bytes = self.download_heatmap(last_download.as_ref().map(|d| &d.etag)) => {bytes?}, _ = self.secondary_state.cancel.cancelled() => return Ok(()) ) { HeatMapDownload::Unmodified => { tracing::info!("Heatmap unchanged since last successful download"); return Ok(()); } HeatMapDownload::Modified(m) => m, }; // Heatmap storage location let heatmap_path = self.conf.tenant_heatmap_path(tenant_shard_id); let last_heatmap = if last_download.is_none() { match load_heatmap(&heatmap_path, ctx).await { Ok(htm) => htm, Err(e) => { tracing::warn!("Couldn't load heatmap from {heatmap_path}: {e:?}"); None } } } else { None }; let last_heatmap_timelines = last_heatmap.as_ref().map(|htm| { htm.timelines .iter() .map(|tl| (tl.timeline_id, tl)) .collect::<HashMap<_, _>>() }); let heatmap = serde_json::from_slice::<HeatMapTenant>(&heatmap_bytes)?; let temp_path = path_with_suffix_extension(&heatmap_path, TEMP_FILE_SUFFIX); let context_msg = format!("write tenant {tenant_shard_id} heatmap to {heatmap_path}"); let heatmap_path_bg = heatmap_path.clone(); VirtualFile::crashsafe_overwrite(heatmap_path_bg, temp_path, heatmap_bytes) .await .maybe_fatal_err(&context_msg)?; tracing::debug!( "Wrote local heatmap to {}, with {} timelines", heatmap_path, heatmap.timelines.len() ); // Get or initialize the local disk state for the timelines we will update let mut timeline_states = HashMap::new(); for timeline in &heatmap.timelines { let timeline_state = self .secondary_state .detail 
.lock() .unwrap() .timelines .get(&timeline.timeline_id) .cloned(); let timeline_state = match timeline_state { Some(t) => t, None => { let last_heatmap = last_heatmap_timelines .as_ref() .and_then(|last_heatmap_timelines| { last_heatmap_timelines.get(&timeline.timeline_id).copied() }); // We have no existing state: need to scan local disk for layers first. let timeline_state = init_timeline_state( self.conf, tenant_shard_id, last_heatmap, timeline, &self.secondary_state.resident_size_metric, ctx, ) .await; // Re-acquire detail lock now that we're done with async load from local FS self.secondary_state .detail .lock() .unwrap() .timelines .insert(timeline.timeline_id, timeline_state.clone()); timeline_state } }; timeline_states.insert(timeline.timeline_id, timeline_state); } // Clean up any local layers that aren't in the heatmap. We do this first for all timelines, on the general // principle that deletions should be done before writes wherever possible, and so that we can use this // phase to initialize our SecondaryProgress. { *self.secondary_state.progress.lock().unwrap() = self.prepare_timelines(&heatmap, heatmap_mtime).await?; } // Calculate a deadline for downloads: if downloading takes longer than this, it is useful to drop out and start again, // so that we are always using reasonably a fresh heatmap. Otherwise, if we had really huge content to download, we might // spend 10s of minutes downloading layers we don't need. // (see https://github.com/neondatabase/neon/issues/8182) let deadline = { let period = self .secondary_state .detail .lock() .unwrap() .last_download .as_ref() .map(|d| d.upload_period) .unwrap_or(DEFAULT_DOWNLOAD_INTERVAL); // Use double the period: we are not promising to complete within the period, this is just a heuristic // to keep using a "reasonably fresh" heatmap. 
Instant::now() + period * 2 }; // Download the layers in the heatmap for timeline in heatmap.timelines { let timeline_state = timeline_states .remove(&timeline.timeline_id) .expect("Just populated above"); if self.secondary_state.cancel.is_cancelled() { tracing::debug!( "Cancelled before downloading timeline {}", timeline.timeline_id ); return Ok(()); } let timeline_id = timeline.timeline_id; self.download_timeline(timeline, timeline_state, deadline, ctx) .instrument(tracing::info_span!( "secondary_download_timeline", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id )) .await?; } // Metrics consistency check in testing builds self.secondary_state.validate_metrics(); // Only update last_etag after a full successful download: this way will not skip // the next download, even if the heatmap's actual etag is unchanged. self.secondary_state.detail.lock().unwrap().last_download = Some(DownloadSummary { etag: heatmap_etag, mtime: heatmap_mtime, upload_period: heatmap .upload_period_ms .map(|ms| Duration::from_millis(ms as u64)) .unwrap_or(DEFAULT_DOWNLOAD_INTERVAL), }); // Robustness: we should have updated progress properly, but in case we didn't, make sure // we don't leave the tenant in a state where we claim to have successfully downloaded // everything, but our progress is incomplete. The invariant here should be that if // we have set `last_download` to this heatmap's etag, then the next time we see that // etag we can safely do no work (i.e. we must be complete). 
let mut progress = self.secondary_state.progress.lock().unwrap(); debug_assert!(progress.layers_downloaded == progress.layers_total); debug_assert!(progress.bytes_downloaded == progress.bytes_total); if progress.layers_downloaded != progress.layers_total || progress.bytes_downloaded != progress.bytes_total { tracing::warn!("Correcting drift in progress stats ({progress:?})"); progress.layers_downloaded = progress.layers_total; progress.bytes_downloaded = progress.bytes_total; } Ok(()) } /// Do any fast local cleanup that comes before the much slower process of downloading /// layers from remote storage. In the process, initialize the SecondaryProgress object /// that will later be updated incrementally as we download layers. async fn prepare_timelines(
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/secondary/heatmap.rs
pageserver/src/tenant/secondary/heatmap.rs
use std::collections::HashMap; use std::time::SystemTime; use serde::{Deserialize, Serialize}; use serde_with::{DisplayFromStr, TimestampSeconds, serde_as}; use utils::generation::Generation; use utils::id::TimelineId; use crate::tenant::remote_timeline_client::index::LayerFileMetadata; use crate::tenant::storage_layer::LayerName; #[derive(Serialize, Deserialize)] pub(crate) struct HeatMapTenant { /// Generation of the attached location that uploaded the heatmap: this is not required /// for correctness, but acts as a hint to secondary locations in order to detect thrashing /// in the unlikely event that two attached locations are both uploading conflicting heatmaps. pub(super) generation: Generation, pub(super) timelines: Vec<HeatMapTimeline>, /// Uploaders provide their own upload period in the heatmap, as a hint to downloaders /// of how frequently it is worthwhile to check for updates. /// /// This is optional for backward compat, and because we sometimes might upload /// a heatmap explicitly via API for a tenant that has no periodic upload configured. #[serde(default)] pub(super) upload_period_ms: Option<u128>, } impl HeatMapTenant { pub(crate) fn into_timelines_index(self) -> HashMap<TimelineId, HeatMapTimeline> { self.timelines .into_iter() .map(|htl| (htl.timeline_id, htl)) .collect() } } #[serde_as] #[derive(Serialize, Deserialize, Clone)] pub(crate) struct HeatMapTimeline { #[serde_as(as = "DisplayFromStr")] pub(crate) timeline_id: TimelineId, layers: Vec<HeatMapLayer>, } #[serde_as] #[derive(Serialize, Deserialize, Clone)] pub(crate) struct HeatMapLayer { pub(crate) name: LayerName, pub(crate) metadata: LayerFileMetadata, #[serde_as(as = "TimestampSeconds<i64>")] pub(crate) access_time: SystemTime, #[serde(default)] pub(crate) cold: bool, // TODO: an actual 'heat' score that would let secondary locations prioritize downloading // the hottest layers, rather than trying to simply mirror whatever layers are on-disk on the primary. 
} impl HeatMapLayer { pub(crate) fn new( name: LayerName, metadata: LayerFileMetadata, access_time: SystemTime, cold: bool, ) -> Self { Self { name, metadata, access_time, cold, } } } impl HeatMapTimeline { pub(crate) fn new(timeline_id: TimelineId, layers: Vec<HeatMapLayer>) -> Self { Self { timeline_id, layers, } } pub(crate) fn into_hot_layers(self) -> impl Iterator<Item = HeatMapLayer> { self.layers.into_iter().filter(|l| !l.cold) } pub(crate) fn hot_layers(&self) -> impl Iterator<Item = &HeatMapLayer> { self.layers.iter().filter(|l| !l.cold) } pub(crate) fn all_layers(&self) -> impl Iterator<Item = &HeatMapLayer> { self.layers.iter() } } pub(crate) struct HeatMapStats { pub(crate) bytes: u64, pub(crate) layers: usize, } impl HeatMapTenant { pub(crate) fn get_stats(&self) -> HeatMapStats { let mut stats = HeatMapStats { bytes: 0, layers: 0, }; for timeline in &self.timelines { for layer in timeline.hot_layers() { stats.layers += 1; stats.bytes += layer.metadata.file_size; } } stats } pub(crate) fn strip_atimes(self) -> Self { Self { timelines: self .timelines .into_iter() .map(|mut tl| { for layer in &mut tl.layers { layer.access_time = SystemTime::UNIX_EPOCH; } tl }) .collect(), generation: self.generation, upload_period_ms: self.upload_period_ms, } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/secondary/scheduler.rs
pageserver/src/tenant/secondary/scheduler.rs
use std::collections::HashMap; use std::marker::PhantomData; use std::pin::Pin; use std::time::{Duration, Instant}; use futures::Future; use pageserver_api::shard::TenantShardId; use rand::Rng; use tokio::task::JoinSet; use tokio_util::sync::CancellationToken; use utils::completion::Barrier; use utils::yielding_loop::yielding_loop; use super::{CommandRequest, CommandResponse, SecondaryTenantError}; /// Scheduling interval is the time between calls to JobGenerator::schedule. /// When we schedule jobs, the job generator may provide a hint of its preferred /// interval, which we will respect within these intervals. const MAX_SCHEDULING_INTERVAL: Duration = Duration::from_secs(10); const MIN_SCHEDULING_INTERVAL: Duration = Duration::from_secs(1); /// Jitter a Duration by an integer percentage. Returned values are uniform /// in the range 100-pct..100+pct (i.e. a 5% jitter is 5% either way: a ~10% range) pub(super) fn period_jitter(d: Duration, pct: u32) -> Duration { if d == Duration::ZERO { d } else { rand::rng().random_range((d * (100 - pct)) / 100..(d * (100 + pct)) / 100) } } /// When a periodic task first starts, it should wait for some time in the range 0..period, so /// that starting many such tasks at the same time spreads them across the time range. pub(super) fn period_warmup(period: Duration) -> Duration { if period == Duration::ZERO { period } else { rand::rng().random_range(Duration::ZERO..period) } } /// Scheduling helper for background work across many tenants. /// /// Systems that need to run background work across many tenants may use this type /// to schedule jobs within a concurrency limit, along with their own [`JobGenerator`] /// implementation to provide the work to execute. This is a simple scheduler that just /// polls the generator for outstanding work, replacing its queue of pending work with /// what the generator yields on each call: the job generator can change its mind about /// the order of jobs between calls. 
The job generator is notified when jobs complete, /// and additionally may expose a command hook to generate jobs on-demand (e.g. to implement /// admin APIs). /// /// For an example see [`crate::tenant::secondary::heatmap_uploader`] /// /// G: A JobGenerator that this scheduler will poll to find pending jobs /// PJ: 'Pending Job': type for job descriptors that are ready to run /// RJ: 'Running Job' type' for jobs that have been spawned /// C : 'Completion' type that spawned jobs will send when they finish /// CMD: 'Command' type that the job generator will accept to create jobs on-demand pub(super) struct TenantBackgroundJobs<G, PJ, RJ, C, CMD> where G: JobGenerator<PJ, RJ, C, CMD>, C: Completion, PJ: PendingJob, RJ: RunningJob, { generator: G, /// Ready to run. Will progress to `running` once concurrent limit is satisfied, or /// be removed on next scheduling pass. pending: std::collections::VecDeque<PJ>, /// Tasks currently running in Self::tasks for these tenants. Check this map /// before pushing more work into pending for the same tenant. running: HashMap<TenantShardId, RJ>, tasks: JoinSet<C>, concurrency: usize, /// How often we would like schedule_interval to be called. pub(super) scheduling_interval: Duration, _phantom: PhantomData<(PJ, RJ, C, CMD)>, } pub(crate) trait JobGenerator<PJ, RJ, C, CMD> where C: Completion, PJ: PendingJob, RJ: RunningJob, { /// Called at each scheduling interval. Return a list of jobs to run, most urgent first. /// /// This function may be expensive (e.g. walk all tenants), but should not do any I/O. /// Implementations should take care to yield the executor periodically if running /// very long loops. /// /// Yielding a job here does _not_ guarantee that it will run: if the queue of pending /// jobs is not drained by the next scheduling interval, pending jobs will be cleared /// and re-generated. async fn schedule(&mut self) -> SchedulingResult<PJ>; /// Called when a pending job is ready to be run. 
/// /// The job generation provides a future, and a RJ (Running Job) descriptor that tracks it. fn spawn(&mut self, pending_job: PJ) -> (RJ, Pin<Box<dyn Future<Output = C> + Send>>); /// Called when a job previously spawned with spawn() transmits its completion fn on_completion(&mut self, completion: C); /// Called when a command is received. A job will be spawned immediately if the return /// value is Some, ignoring concurrency limits and the pending queue. fn on_command(&mut self, cmd: CMD) -> Result<PJ, SecondaryTenantError>; } /// [`JobGenerator`] returns this to provide pending jobs, and hints about scheduling pub(super) struct SchedulingResult<PJ> { pub(super) jobs: Vec<PJ>, /// The job generator would like to be called again this soon pub(super) want_interval: Option<Duration>, } /// See [`TenantBackgroundJobs`]. pub(super) trait PendingJob { fn get_tenant_shard_id(&self) -> &TenantShardId; } /// See [`TenantBackgroundJobs`]. pub(super) trait Completion: Send + 'static { fn get_tenant_shard_id(&self) -> &TenantShardId; } /// See [`TenantBackgroundJobs`]. 
pub(super) trait RunningJob {
    /// A [`Barrier`] that is released when the job finishes; used by
    /// [`TenantBackgroundJobs::handle_command`] to wait on an in-flight job.
    fn get_barrier(&self) -> Barrier;
}

impl<G, PJ, RJ, C, CMD> TenantBackgroundJobs<G, PJ, RJ, C, CMD>
where
    C: Completion,
    PJ: PendingJob,
    RJ: RunningJob,
    G: JobGenerator<PJ, RJ, C, CMD>,
{
    /// Construct an idle scheduler wrapping `generator`, permitting at most
    /// `concurrency` jobs to run at once.  Starts at the maximum scheduling
    /// interval; the generator may ask for a shorter one later.
    pub(super) fn new(generator: G, concurrency: usize) -> Self {
        Self {
            generator,
            pending: std::collections::VecDeque::new(),
            running: HashMap::new(),
            tasks: JoinSet::new(),
            concurrency,
            scheduling_interval: MAX_SCHEDULING_INTERVAL,
            _phantom: PhantomData,
        }
    }

    /// Main event loop: wait for `background_jobs_can_start`, then alternate
    /// scheduling passes with waiting on task completions, admin commands and
    /// cancellation.  Returns only on cancellation (after joining all tasks)
    /// or when the command queue is destroyed.
    pub(super) async fn run(
        &mut self,
        mut command_queue: tokio::sync::mpsc::Receiver<CommandRequest<CMD>>,
        background_jobs_can_start: Barrier,
        cancel: CancellationToken,
    ) {
        tracing::info!("Waiting for background_jobs_can start...");
        background_jobs_can_start.wait().await;
        tracing::info!("background_jobs_can is ready, proceeding.");

        while !cancel.is_cancelled() {
            // Look for new work: this is relatively expensive because we have to go acquire the lock on
            // the tenant manager to retrieve tenants, and then iterate over them to figure out which ones
            // require an upload.
            self.schedule_iteration(&cancel).await;

            if cancel.is_cancelled() {
                return;
            }

            // Schedule some work, if concurrency limit permits it
            self.spawn_pending();

            // This message is printed every scheduling iteration as proof of liveness when looking at logs
            tracing::info!(
                "Status: {} tasks running, {} pending",
                self.running.len(),
                self.pending.len()
            );

            // Between scheduling iterations, we will:
            //  - Drain any complete tasks and spawn pending tasks
            //  - Handle incoming administrative commands
            //  - Check our cancellation token
            let next_scheduling_iteration = Instant::now()
                .checked_add(self.scheduling_interval)
                .unwrap_or_else(|| {
                    tracing::warn!(
                        "Scheduling interval invalid ({}s)",
                        self.scheduling_interval.as_secs_f64()
                    );
                    // unwrap(): this constant is small, cannot fail to add to time unless
                    // we are close to the end of the universe.
                    Instant::now().checked_add(MIN_SCHEDULING_INTERVAL).unwrap()
                });
            loop {
                tokio::select! {
                    _ = cancel.cancelled() => {
                        tracing::info!("joining tasks");
                        // We do not simply drop the JoinSet, in order to have an orderly shutdown without cancellation.
                        // It is the caller's responsibility to make sure that the tasks they scheduled
                        // respect an appropriate cancellation token, to shut down promptly.  It is only
                        // safe to wait on joining these tasks because we can see the cancellation token
                        // has been set.
                        while let Some(_r) = self.tasks.join_next().await {}
                        tracing::info!("terminating on cancellation token.");
                        break;
                    },
                    _ = tokio::time::sleep(next_scheduling_iteration.duration_since(Instant::now())) => {
                        tracing::debug!("woke for scheduling interval");
                        break;
                    },
                    cmd = command_queue.recv() => {
                        tracing::debug!("woke for command queue");
                        let cmd = match cmd {
                            Some(c) => c,
                            None => {
                                // SecondaryController was destroyed, and this has raced with
                                // our CancellationToken
                                tracing::info!("terminating on command queue destruction");
                                cancel.cancel();
                                break;
                            }
                        };

                        let CommandRequest{
                            response_tx,
                            payload
                        } = cmd;
                        self.handle_command(payload, response_tx);
                    },
                    _ = async {
                        let completion = self.process_next_completion().await;
                        match completion {
                            Some(c) => {
                                self.generator.on_completion(c);
                                if !cancel.is_cancelled() {
                                    // A slot freed up: promote pending work immediately rather
                                    // than waiting for the next scheduling interval.
                                    self.spawn_pending();
                                }
                            },
                            None => {
                                // Nothing is running, so just wait: expect that this future
                                // will be dropped when something in the outer select! fires.
                                cancel.cancelled().await;
                            }
                        }
                    } => {}
                }
            }
        }
    }

    /// Hand `job` to the generator's [`JobGenerator::spawn`], run the returned
    /// future in our JoinSet and record it in `running`.  Does not check the
    /// concurrency limit: callers do that.
    fn do_spawn(&mut self, job: PJ) {
        let tenant_shard_id = *job.get_tenant_shard_id();
        let (in_progress, fut) = self.generator.spawn(job);

        self.tasks.spawn(fut);

        let replaced = self.running.insert(tenant_shard_id, in_progress);
        debug_assert!(replaced.is_none());
        if replaced.is_some() {
            tracing::warn!(%tenant_shard_id, "Unexpectedly spawned a task when one was already running")
        }
    }

    /// For all pending tenants that are eligible for execution, spawn their task.
    ///
    /// Caller provides the spawn operation, we track the resulting execution.
    fn spawn_pending(&mut self) {
        while !self.pending.is_empty() && self.running.len() < self.concurrency {
            // unwrap: loop condition includes !is_empty()
            let pending = self.pending.pop_front().unwrap();
            if !self.running.contains_key(pending.get_tenant_shard_id()) {
                self.do_spawn(pending);
            }
        }
    }

    /// For administrative commands: skip the pending queue, ignore concurrency limits
    fn spawn_now(&mut self, job: PJ) -> &RJ {
        let tenant_shard_id = *job.get_tenant_shard_id();
        self.do_spawn(job);
        self.running
            .get(&tenant_shard_id)
            .expect("We just inserted this")
    }

    /// Wait until the next task completes, and handle its completion
    ///
    /// Cancellation: this method is cancel-safe.
    async fn process_next_completion(&mut self) -> Option<C> {
        match self.tasks.join_next().await {
            Some(r) => {
                // Completions are delivered by draining the JoinSet directly: every
                // task spawned in do_spawn() resolves to a C, which also tells us
                // which `running` entry to clear.
                let completion = r.expect("Panic in background task");

                self.running.remove(completion.get_tenant_shard_id());
                Some(completion)
            }
            None => {
                // Nothing is running, so we have nothing to wait for.  We may drop out: the
                // main even loop will call us again after the next time it has run something.
                None
            }
        }
    }

    /// Convert the command into a pending job, spawn it, and when the spawned
    /// job completes, send the result down `response_tx`.
    fn handle_command(
        &mut self,
        cmd: CMD,
        response_tx: tokio::sync::oneshot::Sender<CommandResponse>,
    ) {
        let job = match self.generator.on_command(cmd) {
            Ok(j) => j,
            Err(e) => {
                // Generator rejected the command; report the error and bail.
                response_tx.send(CommandResponse { result: Err(e) }).ok();
                return;
            }
        };

        let tenant_shard_id = job.get_tenant_shard_id();
        let barrier = if let Some(barrier) = self.get_running(tenant_shard_id) {
            tracing::info!(
                tenant_id=%tenant_shard_id.tenant_id,
                shard_id=%tenant_shard_id.shard_slug(),
                "Command already running, waiting for it"
            );
            barrier
        } else {
            let running = self.spawn_now(job);
            running.get_barrier().clone()
        };

        // This task does no I/O: it only listens for a barrier's completion and then
        // sends to the command response channel.  It is therefore safe to spawn this without
        // any gates/task_mgr hooks.
        tokio::task::spawn(async move {
            barrier.wait().await;

            response_tx.send(CommandResponse { result: Ok(()) }).ok();
        });
    }

    /// Barrier for the job currently running for `tenant_shard_id`, if any.
    fn get_running(&self, tenant_shard_id: &TenantShardId) -> Option<Barrier> {
        self.running.get(tenant_shard_id).map(|r| r.get_barrier())
    }

    /// Periodic execution phase: inspect all attached tenants and schedule any work they require.
    ///
    /// The type in `tenants` should be a tenant-like structure, e.g. [`crate::tenant::TenantShard`] or [`crate::tenant::secondary::SecondaryTenant`]
    ///
    /// This function resets the pending list: it is assumed that the caller may change their mind about
    /// which tenants need work between calls to schedule_iteration.
    async fn schedule_iteration(&mut self, cancel: &CancellationToken) {
        let SchedulingResult {
            jobs,
            want_interval,
        } = self.generator.schedule().await;

        // Adjust interval based on feedback from the job generator
        if let Some(want_interval) = want_interval {
            // Calculation uses second granularity: this scheduler is not intended for high frequency tasks
            self.scheduling_interval = Duration::from_secs(std::cmp::min(
                std::cmp::max(MIN_SCHEDULING_INTERVAL.as_secs(), want_interval.as_secs()),
                MAX_SCHEDULING_INTERVAL.as_secs(),
            ));
        }

        // The priority order of previously scheduled work may be invalidated by current state: drop
        // all pending work (it will be re-scheduled if still needed)
        self.pending.clear();

        // While iterating over the potentially-long list of tenants, we will periodically yield
        // to avoid blocking executor.
        yielding_loop(1000, cancel, jobs.into_iter(), |job| {
            // Skip tenants that already have a write in flight
            if !self.running.contains_key(job.get_tenant_shard_id()) {
                self.pending.push_back(job);
            }
        })
        .await
        .ok();
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/storage_layer/errors.rs
pageserver/src/tenant/storage_layer/errors.rs
use crate::tenant::blob_io::WriteBlobError; #[derive(Debug, thiserror::Error)] pub enum PutError { #[error(transparent)] WriteBlob(WriteBlobError), #[error(transparent)] Other(anyhow::Error), } impl PutError { pub fn is_cancel(&self) -> bool { match self { PutError::WriteBlob(e) => e.is_cancel(), PutError::Other(_) => false, } } pub fn into_anyhow(self) -> anyhow::Error { match self { PutError::WriteBlob(e) => e.into_anyhow(), PutError::Other(e) => e, } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/storage_layer/delta_layer.rs
pageserver/src/tenant/storage_layer/delta_layer.rs
//! A DeltaLayer represents a collection of WAL records or page images in a range of //! LSNs, and in a range of Keys. It is stored on a file on disk. //! //! Usually a delta layer only contains differences, in the form of WAL records //! against a base LSN. However, if a relation extended or a whole new relation //! is created, there would be no base for the new pages. The entries for them //! must be page images or WAL records with the 'will_init' flag set, so that //! they can be replayed without referring to an older page version. //! //! The delta files are stored in `timelines/<timeline_id>` directory. Currently, //! there are no subdirectories, and each delta file is named like this: //! //! ```text //! <key start>-<key end>__<start LSN>-<end LSN> //! ``` //! //! For example: //! //! ```text //! 000000067F000032BE0000400000000020B6-000000067F000032BE0000400000000030B6__000000578C6B29-0000000057A50051 //! ``` //! //! Every delta file consists of three parts: "summary", "values", and //! "index". The summary is a fixed size header at the beginning of the file, //! and it contains basic information about the layer, and offsets to the other //! parts. The "index" is a B-tree, mapping from Key and LSN to an offset in the //! "values" part. The actual page images and WAL records are stored in the //! "values" part. //! 
use std::collections::{HashMap, VecDeque};
use std::fs::File;
use std::ops::Range;
use std::os::unix::fs::FileExt;
use std::str::FromStr;
use std::sync::Arc;
use std::sync::atomic::AtomicU64;

use anyhow::{Context, Result, bail, ensure};
use camino::{Utf8Path, Utf8PathBuf};
use futures::StreamExt;
use itertools::Itertools;
use pageserver_api::config::MaxVectoredReadBytes;
use pageserver_api::key::{DBDIR_KEY, KEY_SIZE, Key};
use pageserver_api::keyspace::KeySpace;
use pageserver_api::models::ImageCompressionAlgorithm;
use pageserver_api::shard::TenantShardId;
use serde::{Deserialize, Serialize};
use tokio::sync::OnceCell;
use tokio_epoll_uring::IoBuf;
use tokio_util::sync::CancellationToken;
use tracing::*;
use utils::bin_ser::BeSer;
use utils::bin_ser::SerializeError;
use utils::id::{TenantId, TimelineId};
use utils::lsn::Lsn;
use wal_decoder::models::value::Value;

use super::errors::PutError;
use super::{
    AsLayerDesc, LayerName, OnDiskValue, OnDiskValueIo, PersistentLayerDesc, ResidentLayer,
    ValuesReconstructState,
};
use crate::config::PageServerConf;
use crate::context::{PageContentKind, RequestContext, RequestContextBuilder};
use crate::page_cache::{self, FileId, PAGE_SZ};
use crate::tenant::blob_io::BlobWriter;
use crate::tenant::block_io::{BlockBuf, BlockCursor, BlockLease, BlockReader, FileBlockReader};
use crate::tenant::disk_btree::{
    DiskBtreeBuilder, DiskBtreeIterator, DiskBtreeReader, VisitDirection,
};
use crate::tenant::storage_layer::layer::S3_UPLOAD_LIMIT;
use crate::tenant::timeline::GetVectoredError;
use crate::tenant::vectored_blob_io::{
    BlobFlag, BufView, StreamingVectoredReadPlanner, VectoredBlobReader, VectoredRead,
    VectoredReadPlanner,
};
use crate::virtual_file::TempVirtualFile;
use crate::virtual_file::owned_buffers_io::io_buf_ext::{FullSlice, IoBufExt};
use crate::virtual_file::owned_buffers_io::write::{Buffer, BufferedWriterShutdownMode};
use crate::virtual_file::{self, IoBuffer, IoBufferMut, MaybeFatalIo, VirtualFile};
use crate::{DELTA_FILE_MAGIC, STORAGE_FORMAT_VERSION, TEMP_FILE_SUFFIX};

///
/// Header stored in the beginning of the file
///
/// After this comes the 'values' part, starting on block 1.  After that,
/// the 'index' starts at the block indicated by 'index_start_blk'
///
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct Summary {
    /// Magic value to identify this as a neon delta file.  Always DELTA_FILE_MAGIC.
    pub magic: u16,
    pub format_version: u16,

    pub tenant_id: TenantId,
    pub timeline_id: TimelineId,
    pub key_range: Range<Key>,
    pub lsn_range: Range<Lsn>,

    /// Block number where the 'index' part of the file begins.
    pub index_start_blk: u32,
    /// Block within the 'index', where the B-tree root page is stored
    pub index_root_blk: u32,
}

impl From<&DeltaLayer> for Summary {
    fn from(layer: &DeltaLayer) -> Self {
        Self::expected(
            layer.desc.tenant_shard_id.tenant_id,
            layer.desc.timeline_id,
            layer.desc.key_range.clone(),
            layer.desc.lsn_range.clone(),
        )
    }
}

impl Summary {
    /// Serializes the summary header into an aligned buffer of length `PAGE_SZ`.
    pub fn ser_into_page(&self) -> Result<IoBuffer, SerializeError> {
        let mut buf = IoBufferMut::with_capacity(PAGE_SZ);
        Self::ser_into(self, &mut buf)?;
        // Pad zeroes to the buffer so the length is a multiple of the alignment.
        buf.extend_with(0, buf.capacity() - buf.len());
        Ok(buf.freeze())
    }

    /// The summary a layer with these identifiers is expected to carry; the
    /// index block fields are zeroed because they are only known after load.
    pub(super) fn expected(
        tenant_id: TenantId,
        timeline_id: TimelineId,
        keys: Range<Key>,
        lsns: Range<Lsn>,
    ) -> Self {
        Self {
            magic: DELTA_FILE_MAGIC,
            format_version: STORAGE_FORMAT_VERSION,

            tenant_id,
            timeline_id,
            key_range: keys,
            lsn_range: lsns,

            index_start_blk: 0,
            index_root_blk: 0,
        }
    }
}

// Flag indicating that this version initialize the page
const WILL_INIT: u64 = 1;

/// Struct representing reference to BLOB in layers.
///
/// Reference contains BLOB offset, and for WAL records it also contains
/// `will_init` flag.  The flag helps to determine the range of records
/// that needs to be applied, without reading/deserializing records themselves.
#[derive(Debug, Serialize, Deserialize, Copy, Clone)]
pub struct BlobRef(pub u64);

impl BlobRef {
    /// Whether the referenced record fully initializes its page (bit 0).
    pub fn will_init(&self) -> bool {
        (self.0 & WILL_INIT) != 0
    }

    /// Byte offset of the blob within the 'values' part of the file.
    pub fn pos(&self) -> u64 {
        self.0 >> 1
    }

    /// Pack offset and `will_init` flag into a single u64 (flag in bit 0).
    pub fn new(pos: u64, will_init: bool) -> BlobRef {
        let mut blob_ref = pos << 1;
        if will_init {
            blob_ref |= WILL_INIT;
        }
        BlobRef(blob_ref)
    }
}

pub const DELTA_KEY_SIZE: usize = KEY_SIZE + 8;

/// This is the key of the B-tree index stored in the delta layer.  It consists
/// of the serialized representation of a Key and LSN.
struct DeltaKey([u8; DELTA_KEY_SIZE]);

impl DeltaKey {
    /// Reconstruct a DeltaKey from a serialized slice; panics if the slice
    /// is not exactly DELTA_KEY_SIZE bytes.
    fn from_slice(buf: &[u8]) -> Self {
        let mut bytes: [u8; DELTA_KEY_SIZE] = [0u8; DELTA_KEY_SIZE];
        bytes.copy_from_slice(buf);
        DeltaKey(bytes)
    }

    /// Serialize (key, lsn): the Key bytes first, then the LSN big-endian,
    /// so that index order is (key, lsn) lexicographic order.
    fn from_key_lsn(key: &Key, lsn: Lsn) -> Self {
        let mut bytes: [u8; DELTA_KEY_SIZE] = [0u8; DELTA_KEY_SIZE];
        key.write_to_byte_slice(&mut bytes[0..KEY_SIZE]);
        bytes[KEY_SIZE..].copy_from_slice(&u64::to_be_bytes(lsn.0));
        DeltaKey(bytes)
    }

    fn key(&self) -> Key {
        Key::from_slice(&self.0)
    }

    fn lsn(&self) -> Lsn {
        Lsn(u64::from_be_bytes(self.0[KEY_SIZE..].try_into().unwrap()))
    }

    /// Extract only the LSN portion from a serialized delta key slice.
    fn extract_lsn_from_buf(buf: &[u8]) -> Lsn {
        let mut lsn_buf = [0u8; 8];
        lsn_buf.copy_from_slice(&buf[KEY_SIZE..]);
        Lsn(u64::from_be_bytes(lsn_buf))
    }
}

/// This is used only from `pagectl`.  Within pageserver, all layers are
/// [`crate::tenant::storage_layer::Layer`], which can hold a [`DeltaLayerInner`].
pub struct DeltaLayer {
    path: Utf8PathBuf,
    pub desc: PersistentLayerDesc,
    // Lazily loaded on first access; see `load`.
    inner: OnceCell<Arc<DeltaLayerInner>>,
}

impl std::fmt::Debug for DeltaLayer {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        use super::RangeDisplayDebug;

        f.debug_struct("DeltaLayer")
            .field("key_range", &RangeDisplayDebug(&self.desc.key_range))
            .field("lsn_range", &self.desc.lsn_range)
            .field("file_size", &self.desc.file_size)
            .field("inner", &self.inner)
            .finish()
    }
}

/// `DeltaLayerInner` is the in-memory data structure associated with an on-disk delta
/// file.
pub struct DeltaLayerInner {
    // values copied from summary
    index_start_blk: u32,
    index_root_blk: u32,

    file: Arc<VirtualFile>,
    file_id: FileId,

    layer_key_range: Range<Key>,
    layer_lsn_range: Range<Lsn>,

    max_vectored_read_bytes: Option<MaxVectoredReadBytes>,
}

impl DeltaLayerInner {
    /// One-line human-readable description of this layer's ranges, for debug logs.
    pub(crate) fn layer_dbg_info(&self) -> String {
        format!(
            "delta {}..{} {}..{}",
            self.key_range().start,
            self.key_range().end,
            self.lsn_range().start,
            self.lsn_range().end
        )
    }
}

impl std::fmt::Debug for DeltaLayerInner {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("DeltaLayerInner")
            .field("index_start_blk", &self.index_start_blk)
            .field("index_root_blk", &self.index_root_blk)
            .finish()
    }
}

/// Boilerplate to implement the Layer trait, always use layer_desc for persistent layers.
impl std::fmt::Display for DeltaLayer {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.layer_desc().short_id())
    }
}

impl AsLayerDesc for DeltaLayer {
    fn layer_desc(&self) -> &PersistentLayerDesc {
        &self.desc
    }
}

impl DeltaLayer {
    /// Print the layer descriptor; with `verbose`, also load the file and dump its contents.
    pub async fn dump(&self, verbose: bool, ctx: &RequestContext) -> Result<()> {
        self.desc.dump();

        if !verbose {
            return Ok(());
        }

        let inner = self.load(ctx).await?;

        inner.dump(ctx).await
    }

    /// Build a temporary path for a layer under construction: the end key is
    /// not yet known, so a placeholder plus a unique counter is used instead.
    fn temp_path_for(
        conf: &PageServerConf,
        tenant_shard_id: &TenantShardId,
        timeline_id: &TimelineId,
        key_start: Key,
        lsn_range: &Range<Lsn>,
    ) -> Utf8PathBuf {
        // TempVirtualFile requires us to never reuse a filename while an old
        // instance of TempVirtualFile created with that filename is not done dropping yet.
        // So, we use a monotonic counter to disambiguate the filenames.
        static NEXT_TEMP_DISAMBIGUATOR: AtomicU64 = AtomicU64::new(1);
        let filename_disambiguator =
            NEXT_TEMP_DISAMBIGUATOR.fetch_add(1, std::sync::atomic::Ordering::Relaxed);

        conf.timeline_path(tenant_shard_id, timeline_id)
            .join(format!(
                "{}-XXX__{:016X}-{:016X}.{:x}.{}",
                key_start,
                u64::from(lsn_range.start),
                u64::from(lsn_range.end),
                filename_disambiguator,
                TEMP_FILE_SUFFIX,
            ))
    }

    ///
    /// Open the underlying file and read the metadata into memory, if it's
    /// not loaded already.
    ///
    async fn load(&self, ctx: &RequestContext) -> Result<&Arc<DeltaLayerInner>> {
        // Quick exit if already loaded
        self.inner
            .get_or_try_init(|| self.load_inner(ctx))
            .await
            .with_context(|| format!("Failed to load delta layer {}", self.path()))
    }

    /// Load the on-disk file and sanity-check its name against the in-file summary.
    async fn load_inner(&self, ctx: &RequestContext) -> anyhow::Result<Arc<DeltaLayerInner>> {
        let path = self.path();

        let loaded = DeltaLayerInner::load(&path, None, None, ctx).await?;

        // not production code
        let actual_layer_name = LayerName::from_str(path.file_name().unwrap()).unwrap();
        let expected_layer_name = self.layer_desc().layer_name();

        if actual_layer_name != expected_layer_name {
            println!("warning: filename does not match what is expected from in-file summary");
            println!("actual: {:?}", actual_layer_name.to_string());
            println!("expected: {:?}", expected_layer_name.to_string());
        }

        Ok(Arc::new(loaded))
    }

    /// Create a DeltaLayer struct representing an existing file on disk.
    ///
    /// This variant is only used for debugging purposes, by the 'pagectl' binary.
    pub fn new_for_path(path: &Utf8Path, file: File) -> Result<Self> {
        let mut summary_buf = vec![0; PAGE_SZ];
        file.read_exact_at(&mut summary_buf, 0)?;
        let summary = Summary::des_prefix(&summary_buf)?;

        let metadata = file
            .metadata()
            .context("get file metadata to determine size")?;

        // This function is never used for constructing layers in a running pageserver,
        // so it does not need an accurate TenantShardId.
        let tenant_shard_id = TenantShardId::unsharded(summary.tenant_id);

        Ok(DeltaLayer {
            path: path.to_path_buf(),
            desc: PersistentLayerDesc::new_delta(
                tenant_shard_id,
                summary.timeline_id,
                summary.key_range,
                summary.lsn_range,
                metadata.len(),
            ),
            inner: OnceCell::new(),
        })
    }

    /// Path to the layer file in pageserver workdir.
    fn path(&self) -> Utf8PathBuf {
        self.path.clone()
    }
}

/// A builder object for constructing a new delta layer.
///
/// Usage:
///
/// 1. Create the DeltaLayerWriter by calling DeltaLayerWriter::new(...)
///
/// 2. Write the contents by calling `put_value` for every page
///    version to store in the layer.
///
/// 3. Call `finish`.
///
struct DeltaLayerWriterInner {
    pub path: Utf8PathBuf,
    timeline_id: TimelineId,
    tenant_shard_id: TenantShardId,

    key_start: Key,
    lsn_range: Range<Lsn>,

    tree: DiskBtreeBuilder<BlockBuf, DELTA_KEY_SIZE>,

    blob_writer: BlobWriter<TempVirtualFile>,

    // Number of key-lsns in the layer.
    num_keys: usize,
}

impl DeltaLayerWriterInner {
    ///
    /// Start building a new delta layer.
    ///
    #[allow(clippy::too_many_arguments)]
    async fn new(
        conf: &'static PageServerConf,
        timeline_id: TimelineId,
        tenant_shard_id: TenantShardId,
        key_start: Key,
        lsn_range: Range<Lsn>,
        gate: &utils::sync::gate::Gate,
        cancel: CancellationToken,
        ctx: &RequestContext,
    ) -> anyhow::Result<Self> {
        // Create the file initially with a temporary filename.  We don't know
        // the end key yet, so we cannot form the final filename yet.  We will
        // rename it when we're done.
        let path =
            DeltaLayer::temp_path_for(conf, &tenant_shard_id, &timeline_id, key_start, &lsn_range);
        let file = TempVirtualFile::new(
            VirtualFile::open_with_options_v2(
                &path,
                virtual_file::OpenOptions::new()
                    .create_new(true)
                    .write(true),
                ctx,
            )
            .await?,
            gate.enter()?,
        );

        // Start at PAGE_SZ, make room for the header block
        let blob_writer = BlobWriter::new(
            file,
            PAGE_SZ as u64,
            gate,
            cancel,
            ctx,
            info_span!(parent: None, "delta_layer_writer_flush_task", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), timeline_id=%timeline_id, path = %path),
        )?;

        // Initialize the b-tree index builder
        let block_buf = BlockBuf::new();
        let tree_builder = DiskBtreeBuilder::new(block_buf);

        Ok(Self {
            path,
            timeline_id,
            tenant_shard_id,
            key_start,
            lsn_range,
            tree: tree_builder,
            blob_writer,
            num_keys: 0,
        })
    }

    ///
    /// Append a key-value pair to the file.
    ///
    /// The values must be appended in key, lsn order.
    ///
    async fn put_value(
        &mut self,
        key: Key,
        lsn: Lsn,
        val: Value,
        ctx: &RequestContext,
    ) -> Result<(), PutError> {
        // Serialize the Value and delegate to put_value_bytes.
        let (_, res) = self
            .put_value_bytes(
                key,
                lsn,
                Value::ser(&val)
                    .map_err(anyhow::Error::new)
                    .map_err(PutError::Other)?
                    .slice_len(),
                val.will_init(),
                ctx,
            )
            .await;
        res
    }

    /// Append a pre-serialized value.  Returns the input buffer back to the
    /// caller along with the result, so the caller can reuse its allocation.
    async fn put_value_bytes<Buf>(
        &mut self,
        key: Key,
        lsn: Lsn,
        val: FullSlice<Buf>,
        will_init: bool,
        ctx: &RequestContext,
    ) -> (FullSlice<Buf>, Result<(), PutError>)
    where
        Buf: IoBuf + Send,
    {
        // Values must not precede the layer's LSN range.
        assert!(
            self.lsn_range.start <= lsn,
            "lsn_start={}, lsn={}",
            self.lsn_range.start,
            lsn
        );
        // We don't want to use compression in delta layer creation
        let compression = ImageCompressionAlgorithm::Disabled;
        let (val, res) = self
            .blob_writer
            .write_blob_maybe_compressed(val, ctx, compression)
            .await;
        let res = res.map_err(PutError::WriteBlob);
        let off = match res {
            Ok((off, _)) => off,
            Err(e) => return (val, Err(e)),
        };

        // Record the blob's offset (and will_init flag) in the b-tree index.
        let blob_ref = BlobRef::new(off, will_init);

        let delta_key = DeltaKey::from_key_lsn(&key, lsn);
        let res = self
            .tree
            .append(&delta_key.0, blob_ref.0)
            .map_err(anyhow::Error::new)
            .map_err(PutError::Other);

        self.num_keys += 1;

        (val, res)
    }

    /// Current on-disk size estimate: values written so far plus the index so far.
    fn size(&self) -> u64 {
        self.blob_writer.size() + self.tree.borrow_writer().size()
    }

    ///
    /// Finish writing the delta layer.
    ///
    async fn finish(
        self,
        key_end: Key,
        ctx: &RequestContext,
    ) -> anyhow::Result<(PersistentLayerDesc, Utf8PathBuf)> {
        // The index starts at the first page boundary after the values.
        let index_start_blk = self.blob_writer.size().div_ceil(PAGE_SZ as u64) as u32;

        let file = self
            .blob_writer
            .shutdown(
                BufferedWriterShutdownMode::ZeroPadToNextMultiple(PAGE_SZ),
                ctx,
            )
            .await?;

        // Write out the index
        let (index_root_blk, block_buf) = self.tree.finish()?;
        let mut offset = index_start_blk as u64 * PAGE_SZ as u64;

        // TODO(yuchen): https://github.com/neondatabase/neon/issues/10092
        // Should we just replace BlockBuf::blocks with one big buffer
        for buf in block_buf.blocks {
            let (_buf, res) = file.write_all_at(buf.slice_len(), offset, ctx).await;
            res?;
            offset += PAGE_SZ as u64;
        }
        assert!(self.lsn_range.start < self.lsn_range.end);
        // Fill in the summary on blk 0
        let summary = Summary {
            magic: DELTA_FILE_MAGIC,
            format_version: STORAGE_FORMAT_VERSION,
            tenant_id: self.tenant_shard_id.tenant_id,
            timeline_id: self.timeline_id,
            key_range: self.key_start..key_end,
            lsn_range: self.lsn_range.clone(),
            index_start_blk,
            index_root_blk,
        };

        // Writes summary at the first block (offset 0).
        let buf = summary.ser_into_page()?;
        let (_buf, res) = file.write_all_at(buf.slice_len(), 0, ctx).await;
        res?;

        let metadata = file
            .metadata()
            .await
            .context("get file metadata to determine size")?;

        // 5GB limit for objects without multipart upload (which we don't want to use)
        // Make it a little bit below to account for differing GB units
        // https://docs.aws.amazon.com/AmazonS3/latest/userguide/upload-objects.html
        ensure!(
            metadata.len() <= S3_UPLOAD_LIMIT,
            "Created delta layer file at {} of size {} above limit {S3_UPLOAD_LIMIT}!",
            file.path(),
            metadata.len()
        );

        // Note: Because we opened the file in write-only mode, we cannot
        // reuse the same VirtualFile for reading later.  That's why we don't
        // set inner.file here.  The first read will have to re-open it.
        let desc = PersistentLayerDesc::new_delta(
            self.tenant_shard_id,
            self.timeline_id,
            self.key_start..key_end,
            self.lsn_range.clone(),
            metadata.len(),
        );

        // fsync the file
        file.sync_all()
            .await
            .maybe_fatal_err("delta_layer sync_all")?;

        trace!("created delta layer {}", self.path);

        // The gate guard stored in `destination_file` is dropped.  Callers (e.g.. flush loop or compaction)
        // keep the gate open also, so that it's safe for them to rename the file to its final destination.
        file.disarm_into_inner();

        Ok((desc, self.path))
    }
}

/// A builder object for constructing a new delta layer.
///
/// Usage:
///
/// 1. Create the DeltaLayerWriter by calling DeltaLayerWriter::new(...)
///
/// 2. Write the contents by calling `put_value` for every page
///    version to store in the layer.
///
/// 3. Call `finish`.
///
/// # Note
///
/// As described in <https://github.com/neondatabase/neon/issues/2650>, it's
/// possible for the writer to drop before `finish` is actually called.  So this
/// could lead to odd temporary files in the directory, exhausting file system.
/// This structure wraps `DeltaLayerWriterInner` and also contains `Drop`
/// implementation that cleans up the temporary file in failure.  It's not
/// possible to do this directly in `DeltaLayerWriterInner` since `finish` moves
/// out some fields, making it impossible to implement `Drop`.
///
#[must_use]
pub struct DeltaLayerWriter {
    // Always Some until `finish` takes it; `Option` exists so `finish` can move
    // the inner writer out despite `Drop` (see Note above).
    inner: Option<DeltaLayerWriterInner>,
}

impl DeltaLayerWriter {
    ///
    /// Start building a new delta layer.
    ///
    #[allow(clippy::too_many_arguments)]
    pub async fn new(
        conf: &'static PageServerConf,
        timeline_id: TimelineId,
        tenant_shard_id: TenantShardId,
        key_start: Key,
        lsn_range: Range<Lsn>,
        gate: &utils::sync::gate::Gate,
        cancel: CancellationToken,
        ctx: &RequestContext,
    ) -> anyhow::Result<Self> {
        Ok(Self {
            inner: Some(
                DeltaLayerWriterInner::new(
                    conf,
                    timeline_id,
                    tenant_shard_id,
                    key_start,
                    lsn_range,
                    gate,
                    cancel,
                    ctx,
                )
                .await?,
            ),
        })
    }

    /// True if no key-lsn pairs have been appended yet.
    pub fn is_empty(&self) -> bool {
        self.inner.as_ref().unwrap().num_keys == 0
    }

    ///
    /// Append a key-value pair to the file.
    ///
    /// The values must be appended in key, lsn order.
    ///
    pub async fn put_value(
        &mut self,
        key: Key,
        lsn: Lsn,
        val: Value,
        ctx: &RequestContext,
    ) -> Result<(), PutError> {
        self.inner
            .as_mut()
            .unwrap()
            .put_value(key, lsn, val, ctx)
            .await
    }

    /// Append a pre-serialized value; see [`DeltaLayerWriterInner::put_value_bytes`].
    pub async fn put_value_bytes<Buf>(
        &mut self,
        key: Key,
        lsn: Lsn,
        val: FullSlice<Buf>,
        will_init: bool,
        ctx: &RequestContext,
    ) -> (FullSlice<Buf>, Result<(), PutError>)
    where
        Buf: IoBuf + Send,
    {
        self.inner
            .as_mut()
            .unwrap()
            .put_value_bytes(key, lsn, val, will_init, ctx)
            .await
    }

    /// Current size of the values plus index written so far.
    pub fn size(&self) -> u64 {
        self.inner.as_ref().unwrap().size()
    }

    ///
    /// Finish writing the delta layer.
    ///
    pub(crate) async fn finish(
        mut self,
        key_end: Key,
        ctx: &RequestContext,
    ) -> anyhow::Result<(PersistentLayerDesc, Utf8PathBuf)> {
        self.inner.take().unwrap().finish(key_end, ctx).await
    }

    /// Number of key-lsn pairs appended so far.
    pub(crate) fn num_keys(&self) -> usize {
        self.inner.as_ref().unwrap().num_keys
    }

    /// Estimated final file size: values + index + the summary header page.
    pub(crate) fn estimated_size(&self) -> u64 {
        let inner = self.inner.as_ref().unwrap();
        inner.blob_writer.size() + inner.tree.borrow_writer().size() + PAGE_SZ as u64
    }
}

#[derive(thiserror::Error, Debug)]
pub enum RewriteSummaryError {
    #[error("magic mismatch")]
    MagicMismatch,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

impl From<std::io::Error> for RewriteSummaryError {
    fn from(e: std::io::Error) -> Self {
        Self::Other(anyhow::anyhow!(e))
    }
}

impl DeltaLayer {
    /// Read the summary block of the file at `path`, pass it through `rewrite`,
    /// and write the result back in place.  Fails with `MagicMismatch` if the
    /// file is not a delta layer.
    pub async fn rewrite_summary<F>(
        path: &Utf8Path,
        rewrite: F,
        ctx: &RequestContext,
    ) -> Result<(), RewriteSummaryError>
    where
        F: Fn(Summary) -> Summary,
    {
        let file = VirtualFile::open_with_options_v2(
            path,
            virtual_file::OpenOptions::new().read(true).write(true),
            ctx,
        )
        .await
        .with_context(|| format!("Failed to open file '{path}'"))?;
        let file_id = page_cache::next_file_id();
        let block_reader = FileBlockReader::new(&file, file_id);
        let summary_blk = block_reader.read_blk(0, ctx).await?;
        let actual_summary = Summary::des_prefix(summary_blk.as_ref()).context("deserialize")?;
        if actual_summary.magic != DELTA_FILE_MAGIC {
            return Err(RewriteSummaryError::MagicMismatch);
        }

        let new_summary = rewrite(actual_summary);

        let buf = new_summary.ser_into_page().context("serialize")?;
        let (_buf, res) = file.write_all_at(buf.slice_len(), 0, ctx).await;
        res?;
        Ok(())
    }
}

impl DeltaLayerInner {
    pub(crate) fn key_range(&self) -> &Range<Key> {
        &self.layer_key_range
    }

    pub(crate) fn lsn_range(&self) -> &Range<Lsn> {
        &self.layer_lsn_range
    }

    /// Open the file at `path`, read and validate its summary block, and build
    /// the in-memory handle.  When `summary` is given (production path), the
    /// in-file summary must match it apart from the index block locations and
    /// the timeline id.
    pub(super) async fn load(
        path: &Utf8Path,
        summary: Option<Summary>,
        max_vectored_read_bytes: Option<MaxVectoredReadBytes>,
        ctx: &RequestContext,
    ) -> anyhow::Result<Self> {
        let file = Arc::new(
            VirtualFile::open_v2(path, ctx)
                .await
                .context("open layer file")?,
        );

        let file_id = page_cache::next_file_id();

        let block_reader = FileBlockReader::new(&file, file_id);

        let summary_blk = block_reader
            .read_blk(0, ctx)
            .await
            .context("read first block")?;

        // TODO: this should be an assertion instead; see ImageLayerInner::load
        let actual_summary =
            Summary::des_prefix(summary_blk.as_ref()).context("deserialize first block")?;

        if let Some(mut expected_summary) = summary {
            // production code path
            expected_summary.index_start_blk = actual_summary.index_start_blk;
            expected_summary.index_root_blk = actual_summary.index_root_blk;
            // mask out the timeline_id, but still require the layers to be from the same tenant
            expected_summary.timeline_id = actual_summary.timeline_id;

            if actual_summary != expected_summary {
                bail!(
                    "in-file summary does not match expected summary. actual = {:?} expected = {:?}",
                    actual_summary,
                    expected_summary
                );
            }
        }

        Ok(DeltaLayerInner {
            file,
            file_id,
            index_start_blk: actual_summary.index_start_blk,
            index_root_blk: actual_summary.index_root_blk,
            max_vectored_read_bytes,
            layer_key_range: actual_summary.key_range,
            layer_lsn_range: actual_summary.lsn_range,
        })
    }

    // Look up the keys in the provided keyspace and update
    // the reconstruct state with whatever is found.
    //
    // Currently, the index is visited for each range, but this
    // can be further optimised to visit the index only once.
pub(super) async fn get_values_reconstruct_data( &self, this: ResidentLayer, keyspace: KeySpace, lsn_range: Range<Lsn>, reconstruct_state: &mut ValuesReconstructState, ctx: &RequestContext, ) -> Result<(), GetVectoredError> { let block_reader = FileBlockReader::new(&self.file, self.file_id); let index_reader = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new( self.index_start_blk, self.index_root_blk, block_reader, ); let planner = VectoredReadPlanner::new( self.max_vectored_read_bytes .expect("Layer is loaded with max vectored bytes config") .0 .into(), ); let data_end_offset = self.index_start_offset(); let reads = Self::plan_reads( &keyspace, lsn_range.clone(), data_end_offset, index_reader, planner, ctx, ) .await .map_err(GetVectoredError::Other)?; self.do_reads_and_update_state(this, reads, reconstruct_state, ctx) .await; Ok(()) } async fn plan_reads<Reader>( keyspace: &KeySpace, lsn_range: Range<Lsn>, data_end_offset: u64, index_reader: DiskBtreeReader<Reader, DELTA_KEY_SIZE>, mut planner: VectoredReadPlanner, ctx: &RequestContext, ) -> anyhow::Result<Vec<VectoredRead>> where Reader: BlockReader + Clone, { let ctx = RequestContextBuilder::from(ctx) .page_content_kind(PageContentKind::DeltaLayerBtreeNode) .attached_child(); for range in keyspace.ranges.iter() { let mut range_end_handled = false; let start_key = DeltaKey::from_key_lsn(&range.start, lsn_range.start); let index_stream = index_reader.clone().into_stream(&start_key.0, &ctx); let mut index_stream = std::pin::pin!(index_stream); while let Some(index_entry) = index_stream.next().await { let (raw_key, value) = index_entry?; let key = Key::from_slice(&raw_key[..KEY_SIZE]); let lsn = DeltaKey::extract_lsn_from_buf(&raw_key); let blob_ref = BlobRef(value); // Lsns are not monotonically increasing across keys, so we don't assert on them. 
assert!(key >= range.start); let outside_lsn_range = !lsn_range.contains(&lsn); let flag = { if outside_lsn_range { BlobFlag::Ignore } else if blob_ref.will_init() { BlobFlag::ReplaceAll } else { // Usual path: add blob to the read BlobFlag::None } }; if key >= range.end || (key.next() == range.end && lsn >= lsn_range.end) { planner.handle_range_end(blob_ref.pos()); range_end_handled = true; break; } else { planner.handle(key, lsn, blob_ref.pos(), flag); } } if !range_end_handled { tracing::debug!("Handling range end fallback at {}", data_end_offset); planner.handle_range_end(data_end_offset); } } Ok(planner.finish()) } fn get_min_read_buffer_size( planned_reads: &[VectoredRead], read_size_soft_max: usize, ) -> usize { let Some(largest_read) = planned_reads.iter().max_by_key(|read| read.size()) else { return read_size_soft_max; }; let largest_read_size = largest_read.size(); if largest_read_size > read_size_soft_max { // If the read is oversized, it should only contain one key. let offenders = largest_read .blobs_at .as_slice() .iter() .filter_map(|(_, blob_meta)| { if blob_meta.key.is_rel_dir_key() || blob_meta.key == DBDIR_KEY || blob_meta.key.is_aux_file_key() { // The size of values for these keys is unbounded and can // grow very large in pathological cases. None } else { Some(format!("{}@{}", blob_meta.key, blob_meta.lsn)) } }) .join(", "); if !offenders.is_empty() { tracing::warn!( "Oversized vectored read ({} > {}) for keys {}", largest_read_size, read_size_soft_max, offenders ); } } largest_read_size } async fn do_reads_and_update_state( &self, this: ResidentLayer, reads: Vec<VectoredRead>,
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/storage_layer/merge_iterator.rs
pageserver/src/tenant/storage_layer/merge_iterator.rs
use std::cmp::Ordering; use std::collections::{BinaryHeap, binary_heap}; use std::sync::Arc; use anyhow::bail; use pageserver_api::key::Key; use utils::lsn::Lsn; use wal_decoder::models::value::Value; use super::delta_layer::{DeltaLayerInner, DeltaLayerIterator}; use super::image_layer::{ImageLayerInner, ImageLayerIterator}; use super::{PersistentLayerDesc, PersistentLayerKey}; use crate::context::RequestContext; #[derive(Clone, Copy)] pub(crate) enum LayerRef<'a> { Image(&'a ImageLayerInner), Delta(&'a DeltaLayerInner), } impl<'a> LayerRef<'a> { fn iter_with_options( self, ctx: &'a RequestContext, max_read_size: u64, max_batch_size: usize, ) -> LayerIterRef<'a> { match self { Self::Image(x) => { LayerIterRef::Image(x.iter_with_options(ctx, max_read_size, max_batch_size)) } Self::Delta(x) => { LayerIterRef::Delta(x.iter_with_options(ctx, max_read_size, max_batch_size)) } } } fn layer_dbg_info(&self) -> String { match self { Self::Image(x) => x.layer_dbg_info(), Self::Delta(x) => x.layer_dbg_info(), } } } enum LayerIterRef<'a> { Image(ImageLayerIterator<'a>), Delta(DeltaLayerIterator<'a>), } impl LayerIterRef<'_> { async fn next(&mut self) -> anyhow::Result<Option<(Key, Lsn, Value)>> { match self { Self::Delta(x) => x.next().await, Self::Image(x) => x.next().await, } } fn layer_dbg_info(&self) -> String { match self { Self::Image(x) => x.layer_dbg_info(), Self::Delta(x) => x.layer_dbg_info(), } } } /// This type plays several roles at once /// 1. Unified iterator for image and delta layers. /// 2. `Ord` for use in [`MergeIterator::heap`] (for the k-merge). /// 3. Lazy creation of the real delta/image iterator. 
#[allow(clippy::large_enum_variant, reason = "TODO")] pub(crate) enum IteratorWrapper<'a> { NotLoaded { ctx: &'a RequestContext, first_key_lower_bound: (Key, Lsn), layer: LayerRef<'a>, source_desc: Arc<PersistentLayerKey>, max_read_size: u64, max_batch_size: usize, }, Loaded { iter: PeekableLayerIterRef<'a>, source_desc: Arc<PersistentLayerKey>, }, } pub(crate) struct PeekableLayerIterRef<'a> { iter: LayerIterRef<'a>, peeked: Option<(Key, Lsn, Value)>, // None == end } impl<'a> PeekableLayerIterRef<'a> { async fn create(mut iter: LayerIterRef<'a>) -> anyhow::Result<Self> { let peeked = iter.next().await?; Ok(Self { iter, peeked }) } fn peek(&self) -> &Option<(Key, Lsn, Value)> { &self.peeked } async fn next(&mut self) -> anyhow::Result<Option<(Key, Lsn, Value)>> { let result = self.peeked.take(); self.peeked = self.iter.next().await?; if let (Some((k1, l1, _)), Some((k2, l2, _))) = (&self.peeked, &result) { if (k1, l1) < (k2, l2) { bail!("iterator is not ordered: {}", self.iter.layer_dbg_info()); } } Ok(result) } } impl std::cmp::PartialEq for IteratorWrapper<'_> { fn eq(&self, other: &Self) -> bool { self.cmp(other) == Ordering::Equal } } impl std::cmp::Eq for IteratorWrapper<'_> {} impl std::cmp::PartialOrd for IteratorWrapper<'_> { fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { Some(self.cmp(other)) } } impl std::cmp::Ord for IteratorWrapper<'_> { fn cmp(&self, other: &Self) -> std::cmp::Ordering { use std::cmp::Ordering; let a = self.peek_next_key_lsn_value(); let b = other.peek_next_key_lsn_value(); match (a, b) { (Some((k1, l1, v1)), Some((k2, l2, v2))) => { fn map_value_to_num(val: &Option<&Value>) -> usize { match val { None => 0, Some(Value::Image(_)) => 1, Some(Value::WalRecord(_)) => 2, } } let order_1 = map_value_to_num(&v1); let order_2 = map_value_to_num(&v2); // When key_lsn are the same, the unloaded iter will always appear before the loaded one. 
// And note that we do a reverse at the end of the comparison, so it works with the max heap. (k1, l1, order_1).cmp(&(k2, l2, order_2)) } (Some(_), None) => Ordering::Less, (None, Some(_)) => Ordering::Greater, (None, None) => Ordering::Equal, } .reverse() } } impl<'a> IteratorWrapper<'a> { pub fn create_from_image_layer( image_layer: &'a ImageLayerInner, ctx: &'a RequestContext, max_read_size: u64, max_batch_size: usize, ) -> Self { Self::NotLoaded { layer: LayerRef::Image(image_layer), first_key_lower_bound: (image_layer.key_range().start, image_layer.lsn()), ctx, source_desc: PersistentLayerKey { key_range: image_layer.key_range().clone(), lsn_range: PersistentLayerDesc::image_layer_lsn_range(image_layer.lsn()), is_delta: false, } .into(), max_read_size, max_batch_size, } } pub fn create_from_delta_layer( delta_layer: &'a DeltaLayerInner, ctx: &'a RequestContext, max_read_size: u64, max_batch_size: usize, ) -> Self { Self::NotLoaded { layer: LayerRef::Delta(delta_layer), first_key_lower_bound: (delta_layer.key_range().start, delta_layer.lsn_range().start), ctx, source_desc: PersistentLayerKey { key_range: delta_layer.key_range().clone(), lsn_range: delta_layer.lsn_range().clone(), is_delta: true, } .into(), max_read_size, max_batch_size, } } fn peek_next_key_lsn_value(&self) -> Option<(&Key, Lsn, Option<&Value>)> { match self { Self::Loaded { iter, .. } => iter .peek() .as_ref() .map(|(key, lsn, val)| (key, *lsn, Some(val))), Self::NotLoaded { first_key_lower_bound: (key, lsn), .. } => Some((key, *lsn, None)), } } // CORRECTNESS: this function must always take `&mut self`, never `&self`. // // The reason is that `impl Ord for Self` evaluates differently after this function // returns. We're called through a `PeekMut::deref_mut`, which causes heap repair when // the PeekMut gets returned. 
So, it's critical that we actually run through `PeekMut::deref_mut` // and not just `PeekMut::deref` // If we don't take `&mut self` async fn load(&mut self) -> anyhow::Result<()> { assert!(!self.is_loaded()); let Self::NotLoaded { ctx, first_key_lower_bound, layer, source_desc, max_read_size, max_batch_size, } = self else { unreachable!() }; let iter = layer.iter_with_options(ctx, *max_read_size, *max_batch_size); let iter = PeekableLayerIterRef::create(iter).await?; if let Some((k1, l1, _)) = iter.peek() { let (k2, l2) = first_key_lower_bound; if (k1, l1) < (k2, l2) { bail!( "layer key range did not include the first key in the layer: {}", layer.layer_dbg_info() ); } } *self = Self::Loaded { iter, source_desc: source_desc.clone(), }; Ok(()) } fn is_loaded(&self) -> bool { matches!(self, Self::Loaded { .. }) } /// Correctness: must load the iterator before using. /// /// Given this iterator wrapper is private to the merge iterator, users won't be able to mis-use it. /// The public interfaces to use are [`crate::tenant::storage_layer::delta_layer::DeltaLayerIterator`] and /// [`crate::tenant::storage_layer::image_layer::ImageLayerIterator`]. async fn next(&mut self) -> anyhow::Result<Option<(Key, Lsn, Value)>> { let Self::Loaded { iter, .. } = self else { panic!("must load the iterator before using") }; iter.next().await } /// Get the persistent layer key corresponding to this iterator fn trace_source(&self) -> Arc<PersistentLayerKey> { match self { Self::Loaded { source_desc, .. } => source_desc.clone(), Self::NotLoaded { source_desc, .. } => source_desc.clone(), } } } /// A merge iterator over delta/image layer iterators. /// /// When duplicated records are found, the iterator will not perform any /// deduplication, and the caller should handle these situation. By saying /// duplicated records, there are many possibilities: /// /// * Two same delta at the same LSN. /// * Two same image at the same LSN. 
/// * Delta/image at the same LSN where the image has already applied the delta. /// /// The iterator will always put the image before the delta. pub struct MergeIterator<'a> { heap: BinaryHeap<IteratorWrapper<'a>>, } pub(crate) trait MergeIteratorItem { fn new(item: (Key, Lsn, Value), iterator: &IteratorWrapper<'_>) -> Self; fn key_lsn_value(&self) -> &(Key, Lsn, Value); } impl MergeIteratorItem for (Key, Lsn, Value) { fn new(item: (Key, Lsn, Value), _: &IteratorWrapper<'_>) -> Self { item } fn key_lsn_value(&self) -> &(Key, Lsn, Value) { self } } impl MergeIteratorItem for ((Key, Lsn, Value), Arc<PersistentLayerKey>) { fn new(item: (Key, Lsn, Value), iter: &IteratorWrapper<'_>) -> Self { (item, iter.trace_source().clone()) } fn key_lsn_value(&self) -> &(Key, Lsn, Value) { &self.0 } } impl<'a> MergeIterator<'a> { #[cfg(test)] pub(crate) fn create_for_testing( deltas: &[&'a DeltaLayerInner], images: &[&'a ImageLayerInner], ctx: &'a RequestContext, ) -> Self { Self::create_with_options(deltas, images, ctx, 1024 * 8192, 1024) } /// Create a new merge iterator with custom options. /// /// Adjust `max_read_size` and `max_batch_size` to trade memory usage for performance. The size should scale /// with the number of layers to compact. If there are a lot of layers, consider reducing the values, so that /// the buffer does not take too much memory. 
/// /// The default options for L0 compactions are: /// - max_read_size: 1024 * 8192 (8MB) /// - max_batch_size: 1024 /// /// The default options for gc-compaction are: /// - max_read_size: 128 * 8192 (1MB) /// - max_batch_size: 128 pub fn create_with_options( deltas: &[&'a DeltaLayerInner], images: &[&'a ImageLayerInner], ctx: &'a RequestContext, max_read_size: u64, max_batch_size: usize, ) -> Self { let mut heap = Vec::with_capacity(images.len() + deltas.len()); for image in images { heap.push(IteratorWrapper::create_from_image_layer( image, ctx, max_read_size, max_batch_size, )); } for delta in deltas { heap.push(IteratorWrapper::create_from_delta_layer( delta, ctx, max_read_size, max_batch_size, )); } Self { heap: BinaryHeap::from(heap), } } pub(crate) async fn next_inner<R: MergeIteratorItem>(&mut self) -> anyhow::Result<Option<R>> { while let Some(mut iter) = self.heap.peek_mut() { if !iter.is_loaded() { // Once we load the iterator, we can know the real first key-value pair in the iterator. // We put it back into the heap so that a potentially unloaded layer may have a key between // [potential_first_key, loaded_first_key). iter.load().await?; continue; } let Some(item) = iter.next().await? else { // If the iterator returns None, we pop this iterator. Actually, in the current implementation, // we order None > Some, and all the rest of the iterators should return None. binary_heap::PeekMut::pop(iter); continue; }; return Ok(Some(R::new(item, &iter))); } Ok(None) } /// Get the next key-value pair from the iterator. pub async fn next(&mut self) -> anyhow::Result<Option<(Key, Lsn, Value)>> { self.next_inner().await } /// Get the next key-value pair from the iterator, and trace where the key comes from. 
pub async fn next_with_trace( &mut self, ) -> anyhow::Result<Option<((Key, Lsn, Value), Arc<PersistentLayerKey>)>> { self.next_inner().await } } #[cfg(test)] mod tests { use itertools::Itertools; use pageserver_api::key::Key; use utils::lsn::Lsn; #[cfg(feature = "testing")] use wal_decoder::models::record::NeonWalRecord; use super::*; use crate::DEFAULT_PG_VERSION; use crate::tenant::harness::{TIMELINE_ID, TenantHarness}; #[cfg(feature = "testing")] use crate::tenant::storage_layer::delta_layer::test::sort_delta_value; use crate::tenant::storage_layer::delta_layer::test::{produce_delta_layer, sort_delta}; async fn assert_merge_iter_equal( merge_iter: &mut MergeIterator<'_>, expect: &[(Key, Lsn, Value)], ) { let mut expect_iter = expect.iter(); loop { let o1 = merge_iter.next().await.unwrap(); let o2 = expect_iter.next(); assert_eq!(o1.is_some(), o2.is_some()); if o1.is_none() && o2.is_none() { break; } let (k1, l1, v1) = o1.unwrap(); let (k2, l2, v2) = o2.unwrap(); assert_eq!(&k1, k2); assert_eq!(l1, *l2); assert_eq!(&v1, v2); } } #[tokio::test] async fn merge_in_between() { use bytes::Bytes; let harness = TenantHarness::create("merge_iterator_merge_in_between") .await .unwrap(); let (tenant, ctx) = harness.load().await; let tline = tenant .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) .await .unwrap(); fn get_key(id: u32) -> Key { let mut key = Key::from_hex("000000000033333333444444445500000000").unwrap(); key.field6 = id; key } let test_deltas1 = vec![ ( get_key(0), Lsn(0x10), Value::Image(Bytes::copy_from_slice(b"test")), ), ( get_key(5), Lsn(0x10), Value::Image(Bytes::copy_from_slice(b"test")), ), ]; let resident_layer_1 = produce_delta_layer(&tenant, &tline, test_deltas1.clone(), &ctx) .await .unwrap(); let test_deltas2 = vec![ ( get_key(3), Lsn(0x10), Value::Image(Bytes::copy_from_slice(b"test")), ), ( get_key(4), Lsn(0x10), Value::Image(Bytes::copy_from_slice(b"test")), ), ]; let resident_layer_2 = produce_delta_layer(&tenant, 
&tline, test_deltas2.clone(), &ctx) .await .unwrap(); let mut merge_iter = MergeIterator::create_for_testing( &[ resident_layer_2.get_as_delta(&ctx).await.unwrap(), resident_layer_1.get_as_delta(&ctx).await.unwrap(), ], &[], &ctx, ); let mut expect = Vec::new(); expect.extend(test_deltas1); expect.extend(test_deltas2); expect.sort_by(sort_delta); assert_merge_iter_equal(&mut merge_iter, &expect).await; } #[tokio::test] async fn delta_merge() { use bytes::Bytes; let harness = TenantHarness::create("merge_iterator_delta_merge") .await .unwrap(); let (tenant, ctx) = harness.load().await; let tline = tenant .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) .await .unwrap(); fn get_key(id: u32) -> Key { let mut key = Key::from_hex("000000000033333333444444445500000000").unwrap(); key.field6 = id; key } const N: usize = 1000; let test_deltas1 = (0..N) .map(|idx| { ( get_key(idx as u32 / 10), Lsn(0x20 * ((idx as u64) % 10 + 1)), Value::Image(Bytes::from(format!("img{idx:05}"))), ) }) .collect_vec(); let resident_layer_1 = produce_delta_layer(&tenant, &tline, test_deltas1.clone(), &ctx) .await .unwrap(); let test_deltas2 = (0..N) .map(|idx| { ( get_key(idx as u32 / 10), Lsn(0x20 * ((idx as u64) % 10 + 1) + 0x10), Value::Image(Bytes::from(format!("img{idx:05}"))), ) }) .collect_vec(); let resident_layer_2 = produce_delta_layer(&tenant, &tline, test_deltas2.clone(), &ctx) .await .unwrap(); let test_deltas3 = (0..N) .map(|idx| { ( get_key(idx as u32 / 10 + N as u32), Lsn(0x10 * ((idx as u64) % 10 + 1)), Value::Image(Bytes::from(format!("img{idx:05}"))), ) }) .collect_vec(); let resident_layer_3 = produce_delta_layer(&tenant, &tline, test_deltas3.clone(), &ctx) .await .unwrap(); let mut merge_iter = MergeIterator::create_for_testing( &[ resident_layer_1.get_as_delta(&ctx).await.unwrap(), resident_layer_2.get_as_delta(&ctx).await.unwrap(), resident_layer_3.get_as_delta(&ctx).await.unwrap(), ], &[], &ctx, ); let mut expect = Vec::new(); 
expect.extend(test_deltas1); expect.extend(test_deltas2); expect.extend(test_deltas3); expect.sort_by(sort_delta); assert_merge_iter_equal(&mut merge_iter, &expect).await; // TODO: test layers are loaded only when needed, reducing num of active iterators in k-merge } #[cfg(feature = "testing")] #[tokio::test] async fn delta_image_mixed_merge() { use bytes::Bytes; let harness = TenantHarness::create("merge_iterator_delta_image_mixed_merge") .await .unwrap(); let (tenant, ctx) = harness.load().await; let tline = tenant .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) .await .unwrap(); fn get_key(id: u32) -> Key { let mut key = Key::from_hex("000000000033333333444444445500000000").unwrap(); key.field6 = id; key } // In this test case, we want to test if the iterator still works correctly with multiple copies // of a delta+image at the same LSN, for example, the following sequence a@10=+a, a@10=+a, a@10=ab, a@10=ab. // Duplicated deltas/images are possible for old tenants before the full L0 compaction file name fix. // An incomplete compaction could produce multiple exactly-the-same delta layers. Force image generation // could produce overlapping images. Apart from duplicated deltas/images, in the current storage implementation // one key-lsn could have a delta in the delta layer and one image in the image layer. The iterator should // correctly process these situations and return everything as-is, and the upper layer of the system // will handle duplicated LSNs. 
let test_deltas1 = vec![ ( get_key(0), Lsn(0x10), Value::WalRecord(NeonWalRecord::wal_init("")), ), ( get_key(0), Lsn(0x18), Value::WalRecord(NeonWalRecord::wal_append("a")), ), ( get_key(5), Lsn(0x10), Value::WalRecord(NeonWalRecord::wal_init("")), ), ( get_key(5), Lsn(0x18), Value::WalRecord(NeonWalRecord::wal_append("b")), ), ]; let resident_layer_1 = produce_delta_layer(&tenant, &tline, test_deltas1.clone(), &ctx) .await .unwrap(); let mut test_deltas2 = test_deltas1.clone(); test_deltas2.push(( get_key(10), Lsn(0x20), Value::Image(Bytes::copy_from_slice(b"test")), )); let resident_layer_2 = produce_delta_layer(&tenant, &tline, test_deltas2.clone(), &ctx) .await .unwrap(); let test_deltas3 = vec![ ( get_key(0), Lsn(0x10), Value::Image(Bytes::copy_from_slice(b"")), ), ( get_key(5), Lsn(0x18), Value::Image(Bytes::copy_from_slice(b"b")), ), ( get_key(15), Lsn(0x20), Value::Image(Bytes::copy_from_slice(b"test")), ), ]; let resident_layer_3 = produce_delta_layer(&tenant, &tline, test_deltas3.clone(), &ctx) .await .unwrap(); let mut test_deltas4 = test_deltas3.clone(); test_deltas4.push(( get_key(20), Lsn(0x20), Value::Image(Bytes::copy_from_slice(b"test")), )); let resident_layer_4 = produce_delta_layer(&tenant, &tline, test_deltas4.clone(), &ctx) .await .unwrap(); let mut expect = Vec::new(); expect.extend(test_deltas1); expect.extend(test_deltas2); expect.extend(test_deltas3); expect.extend(test_deltas4); expect.sort_by(sort_delta_value); // Test with different layer order for MergeIterator::create to ensure the order // is stable. 
let mut merge_iter = MergeIterator::create_for_testing( &[ resident_layer_4.get_as_delta(&ctx).await.unwrap(), resident_layer_1.get_as_delta(&ctx).await.unwrap(), resident_layer_3.get_as_delta(&ctx).await.unwrap(), resident_layer_2.get_as_delta(&ctx).await.unwrap(), ], &[], &ctx, ); assert_merge_iter_equal(&mut merge_iter, &expect).await; let mut merge_iter = MergeIterator::create_for_testing( &[ resident_layer_1.get_as_delta(&ctx).await.unwrap(), resident_layer_4.get_as_delta(&ctx).await.unwrap(), resident_layer_3.get_as_delta(&ctx).await.unwrap(), resident_layer_2.get_as_delta(&ctx).await.unwrap(), ], &[], &ctx, ); assert_merge_iter_equal(&mut merge_iter, &expect).await; is_send(merge_iter); } #[cfg(feature = "testing")] fn is_send(_: impl Send) {} }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/storage_layer/batch_split_writer.rs
pageserver/src/tenant/storage_layer/batch_split_writer.rs
use std::future::Future; use std::ops::Range; use std::sync::Arc; use bytes::Bytes; use pageserver_api::key::{KEY_SIZE, Key}; use tokio_util::sync::CancellationToken; use utils::id::TimelineId; use utils::lsn::Lsn; use utils::shard::TenantShardId; use wal_decoder::models::value::Value; use super::errors::PutError; use super::layer::S3_UPLOAD_LIMIT; use super::{ DeltaLayerWriter, ImageLayerWriter, PersistentLayerDesc, PersistentLayerKey, ResidentLayer, }; use crate::config::PageServerConf; use crate::context::RequestContext; use crate::tenant::Timeline; use crate::tenant::storage_layer::Layer; pub(crate) enum BatchWriterResult { Produced(ResidentLayer), Discarded(PersistentLayerKey), } #[cfg(test)] impl BatchWriterResult { fn into_resident_layer(self) -> ResidentLayer { match self { BatchWriterResult::Produced(layer) => layer, BatchWriterResult::Discarded(_) => panic!("unexpected discarded layer"), } } fn into_discarded_layer(self) -> PersistentLayerKey { match self { BatchWriterResult::Produced(_) => panic!("unexpected produced layer"), BatchWriterResult::Discarded(layer) => layer, } } } enum LayerWriterWrapper { Image(ImageLayerWriter), Delta(DeltaLayerWriter), } /// An layer writer that takes unfinished layers and finish them atomically. 
#[must_use] pub struct BatchLayerWriter { generated_layer_writers: Vec<(LayerWriterWrapper, PersistentLayerKey)>, conf: &'static PageServerConf, } impl BatchLayerWriter { pub fn new(conf: &'static PageServerConf) -> Self { Self { generated_layer_writers: Vec::new(), conf, } } pub fn add_unfinished_image_writer( &mut self, writer: ImageLayerWriter, key_range: Range<Key>, lsn: Lsn, ) { self.generated_layer_writers.push(( LayerWriterWrapper::Image(writer), PersistentLayerKey { key_range, lsn_range: PersistentLayerDesc::image_layer_lsn_range(lsn), is_delta: false, }, )); } pub fn add_unfinished_delta_writer( &mut self, writer: DeltaLayerWriter, key_range: Range<Key>, lsn_range: Range<Lsn>, ) { self.generated_layer_writers.push(( LayerWriterWrapper::Delta(writer), PersistentLayerKey { key_range, lsn_range, is_delta: true, }, )); } pub(crate) async fn finish( self, tline: &Arc<Timeline>, ctx: &RequestContext, ) -> anyhow::Result<Vec<ResidentLayer>> { let res = self .finish_with_discard_fn(tline, ctx, |_| async { false }) .await?; let mut output = Vec::new(); for r in res { if let BatchWriterResult::Produced(layer) = r { output.push(layer); } } Ok(output) } pub(crate) async fn finish_with_discard_fn<D, F>( self, tline: &Arc<Timeline>, ctx: &RequestContext, discard_fn: D, ) -> anyhow::Result<Vec<BatchWriterResult>> where D: Fn(&PersistentLayerKey) -> F, F: Future<Output = bool>, { let Self { generated_layer_writers, .. 
} = self; let clean_up_layers = |generated_layers: Vec<BatchWriterResult>| { for produced_layer in generated_layers { if let BatchWriterResult::Produced(resident_layer) = produced_layer { let layer: Layer = resident_layer.into(); layer.delete_on_drop(); } } }; // BEGIN: catch every error and do the recovery in the below section let mut generated_layers: Vec<BatchWriterResult> = Vec::new(); for (inner, layer_key) in generated_layer_writers { if discard_fn(&layer_key).await { generated_layers.push(BatchWriterResult::Discarded(layer_key)); } else { let res = match inner { LayerWriterWrapper::Delta(writer) => { writer.finish(layer_key.key_range.end, ctx).await } LayerWriterWrapper::Image(writer) => { writer .finish_with_end_key(layer_key.key_range.end, ctx) .await } }; let layer = match res { Ok((desc, path)) => { match Layer::finish_creating(self.conf, tline, desc, &path) { Ok(layer) => layer, Err(e) => { tokio::fs::remove_file(&path).await.ok(); clean_up_layers(generated_layers); return Err(e); } } } Err(e) => { // Image/DeltaLayerWriter::finish will clean up the temporary layer if anything goes wrong, // so we don't need to remove the layer we just failed to create by ourselves. clean_up_layers(generated_layers); return Err(e); } }; generated_layers.push(BatchWriterResult::Produced(layer)); } } // END: catch every error and do the recovery in the above section Ok(generated_layers) } pub fn pending_layer_num(&self) -> usize { self.generated_layer_writers.len() } } /// An image writer that takes images and produces multiple image layers. 
#[must_use] pub struct SplitImageLayerWriter<'a> { inner: Option<ImageLayerWriter>, target_layer_size: u64, lsn: Lsn, conf: &'static PageServerConf, timeline_id: TimelineId, tenant_shard_id: TenantShardId, batches: BatchLayerWriter, start_key: Key, gate: &'a utils::sync::gate::Gate, cancel: CancellationToken, } impl<'a> SplitImageLayerWriter<'a> { #[allow(clippy::too_many_arguments)] pub fn new( conf: &'static PageServerConf, timeline_id: TimelineId, tenant_shard_id: TenantShardId, start_key: Key, lsn: Lsn, target_layer_size: u64, gate: &'a utils::sync::gate::Gate, cancel: CancellationToken, ) -> Self { Self { target_layer_size, inner: None, conf, timeline_id, tenant_shard_id, batches: BatchLayerWriter::new(conf), lsn, start_key, gate, cancel, } } pub async fn put_image( &mut self, key: Key, img: Bytes, ctx: &RequestContext, ) -> Result<(), PutError> { if self.inner.is_none() { self.inner = Some( ImageLayerWriter::new( self.conf, self.timeline_id, self.tenant_shard_id, &(self.start_key..Key::MAX), self.lsn, self.gate, self.cancel.clone(), ctx, ) .await .map_err(PutError::Other)?, ); } let inner = self.inner.as_mut().unwrap(); // The current estimation is an upper bound of the space that the key/image could take // because we did not consider compression in this estimation. The resulting image layer // could be smaller than the target size. 
let addition_size_estimation = KEY_SIZE as u64 + img.len() as u64; if inner.num_keys() >= 1 && inner.estimated_size() + addition_size_estimation >= self.target_layer_size { let next_image_writer = ImageLayerWriter::new( self.conf, self.timeline_id, self.tenant_shard_id, &(key..Key::MAX), self.lsn, self.gate, self.cancel.clone(), ctx, ) .await .map_err(PutError::Other)?; let prev_image_writer = std::mem::replace(inner, next_image_writer); self.batches.add_unfinished_image_writer( prev_image_writer, self.start_key..key, self.lsn, ); self.start_key = key; } inner.put_image(key, img, ctx).await } pub(crate) async fn finish_with_discard_fn<D, F>( self, tline: &Arc<Timeline>, ctx: &RequestContext, end_key: Key, discard_fn: D, ) -> anyhow::Result<Vec<BatchWriterResult>> where D: Fn(&PersistentLayerKey) -> F, F: Future<Output = bool>, { let Self { mut batches, inner, .. } = self; if let Some(inner) = inner { if inner.num_keys() != 0 { batches.add_unfinished_image_writer(inner, self.start_key..end_key, self.lsn); } } batches.finish_with_discard_fn(tline, ctx, discard_fn).await } #[cfg(test)] pub(crate) async fn finish( self, tline: &Arc<Timeline>, ctx: &RequestContext, end_key: Key, ) -> anyhow::Result<Vec<BatchWriterResult>> { self.finish_with_discard_fn(tline, ctx, end_key, |_| async { false }) .await } } /// A delta writer that takes key-lsn-values and produces multiple delta layers. /// /// Note that if updates of a single key exceed the target size limit, all of the updates will be batched /// into a single file. This behavior might change in the future. For reference, the legacy compaction algorithm /// will split them into multiple files based on size. 
#[must_use] pub struct SplitDeltaLayerWriter<'a> { inner: Option<(Key, DeltaLayerWriter)>, target_layer_size: u64, conf: &'static PageServerConf, timeline_id: TimelineId, tenant_shard_id: TenantShardId, lsn_range: Range<Lsn>, last_key_written: Key, batches: BatchLayerWriter, gate: &'a utils::sync::gate::Gate, cancel: CancellationToken, } impl<'a> SplitDeltaLayerWriter<'a> { pub fn new( conf: &'static PageServerConf, timeline_id: TimelineId, tenant_shard_id: TenantShardId, lsn_range: Range<Lsn>, target_layer_size: u64, gate: &'a utils::sync::gate::Gate, cancel: CancellationToken, ) -> Self { Self { target_layer_size, inner: None, conf, timeline_id, tenant_shard_id, lsn_range, last_key_written: Key::MIN, batches: BatchLayerWriter::new(conf), gate, cancel, } } pub async fn put_value( &mut self, key: Key, lsn: Lsn, val: Value, ctx: &RequestContext, ) -> Result<(), PutError> { // The current estimation is key size plus LSN size plus value size estimation. This is not an accurate // number, and therefore the final layer size could be a little bit larger or smaller than the target. // // Also, keep all updates of a single key in a single file. TODO: split them using the legacy compaction // strategy. 
https://github.com/neondatabase/neon/issues/8837 if self.inner.is_none() { self.inner = Some(( key, DeltaLayerWriter::new( self.conf, self.timeline_id, self.tenant_shard_id, key, self.lsn_range.clone(), self.gate, self.cancel.clone(), ctx, ) .await .map_err(PutError::Other)?, )); } let (_, inner) = self.inner.as_mut().unwrap(); let addition_size_estimation = KEY_SIZE as u64 + 8 /* LSN u64 size */ + 80 /* value size estimation */; if inner.num_keys() >= 1 && inner.estimated_size() + addition_size_estimation >= self.target_layer_size { if key != self.last_key_written { let next_delta_writer = DeltaLayerWriter::new( self.conf, self.timeline_id, self.tenant_shard_id, key, self.lsn_range.clone(), self.gate, self.cancel.clone(), ctx, ) .await .map_err(PutError::Other)?; let (start_key, prev_delta_writer) = self.inner.replace((key, next_delta_writer)).unwrap(); self.batches.add_unfinished_delta_writer( prev_delta_writer, start_key..key, self.lsn_range.clone(), ); } else if inner.estimated_size() >= S3_UPLOAD_LIMIT { // We have to produce a very large file b/c a key is updated too often. return Err(PutError::Other(anyhow::anyhow!( "a single key is updated too often: key={}, estimated_size={}, and the layer file cannot be produced", key, inner.estimated_size() ))); } } self.last_key_written = key; let (_, inner) = self.inner.as_mut().unwrap(); inner.put_value(key, lsn, val, ctx).await } pub(crate) async fn finish_with_discard_fn<D, F>( self, tline: &Arc<Timeline>, ctx: &RequestContext, discard_fn: D, ) -> anyhow::Result<Vec<BatchWriterResult>> where D: Fn(&PersistentLayerKey) -> F, F: Future<Output = bool>, { let Self { mut batches, inner, .. 
} = self; if let Some((start_key, writer)) = inner { if writer.num_keys() != 0 { let end_key = self.last_key_written.next(); batches.add_unfinished_delta_writer( writer, start_key..end_key, self.lsn_range.clone(), ); } } batches.finish_with_discard_fn(tline, ctx, discard_fn).await } #[cfg(test)] pub(crate) async fn finish( self, tline: &Arc<Timeline>, ctx: &RequestContext, ) -> anyhow::Result<Vec<BatchWriterResult>> { self.finish_with_discard_fn(tline, ctx, |_| async { false }) .await } } #[cfg(test)] mod tests { use itertools::Itertools; use rand::{RngCore, SeedableRng}; use super::*; use crate::DEFAULT_PG_VERSION; use crate::tenant::harness::{TIMELINE_ID, TenantHarness}; use crate::tenant::storage_layer::AsLayerDesc; fn get_key(id: u32) -> Key { let mut key = Key::from_hex("000000000033333333444444445500000000").unwrap(); key.field6 = id; key } fn get_img(id: u32) -> Bytes { format!("{id:064}").into() } fn get_large_img() -> Bytes { let mut rng = rand::rngs::SmallRng::seed_from_u64(42); let mut data = vec![0; 8192]; rng.fill_bytes(&mut data); data.into() } #[tokio::test] async fn write_one_image() { let harness = TenantHarness::create("split_writer_write_one_image") .await .unwrap(); let (tenant, ctx) = harness.load().await; let tline = tenant .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) .await .unwrap(); let mut image_writer = SplitImageLayerWriter::new( tenant.conf, tline.timeline_id, tenant.tenant_shard_id, get_key(0), Lsn(0x18), 4 * 1024 * 1024, &tline.gate, tline.cancel.clone(), ); let mut delta_writer = SplitDeltaLayerWriter::new( tenant.conf, tline.timeline_id, tenant.tenant_shard_id, Lsn(0x18)..Lsn(0x20), 4 * 1024 * 1024, &tline.gate, tline.cancel.clone(), ); image_writer .put_image(get_key(0), get_img(0), &ctx) .await .unwrap(); let layers = image_writer .finish(&tline, &ctx, get_key(10)) .await .unwrap(); assert_eq!(layers.len(), 1); delta_writer .put_value(get_key(0), Lsn(0x18), Value::Image(get_img(0)), &ctx) .await 
.unwrap(); let layers = delta_writer.finish(&tline, &ctx).await.unwrap(); assert_eq!(layers.len(), 1); assert_eq!( layers .into_iter() .next() .unwrap() .into_resident_layer() .layer_desc() .key(), PersistentLayerKey { key_range: get_key(0)..get_key(1), lsn_range: Lsn(0x18)..Lsn(0x20), is_delta: true } ); } #[tokio::test] async fn write_split() { // Test the split writer with retaining all the layers we have produced (discard=false) write_split_helper("split_writer_write_split", false).await; } #[tokio::test] async fn write_split_discard() { // Test the split writer with discarding all the layers we have produced (discard=true) write_split_helper("split_writer_write_split_discard", true).await; } /// Test the image+delta writer by writing a large number of images and deltas. If discard is /// set to true, all layers will be discarded. async fn write_split_helper(harness_name: &'static str, discard: bool) { let harness = TenantHarness::create(harness_name).await.unwrap(); let (tenant, ctx) = harness.load().await; let tline = tenant .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) .await .unwrap(); let mut image_writer = SplitImageLayerWriter::new( tenant.conf, tline.timeline_id, tenant.tenant_shard_id, get_key(0), Lsn(0x18), 4 * 1024 * 1024, &tline.gate, tline.cancel.clone(), ); let mut delta_writer = SplitDeltaLayerWriter::new( tenant.conf, tline.timeline_id, tenant.tenant_shard_id, Lsn(0x18)..Lsn(0x20), 4 * 1024 * 1024, &tline.gate, tline.cancel.clone(), ); const N: usize = 2000; for i in 0..N { let i = i as u32; image_writer .put_image(get_key(i), get_large_img(), &ctx) .await .unwrap(); delta_writer .put_value(get_key(i), Lsn(0x20), Value::Image(get_large_img()), &ctx) .await .unwrap(); } let image_layers = image_writer .finish_with_discard_fn(&tline, &ctx, get_key(N as u32), |_| async { discard }) .await .unwrap(); let delta_layers = delta_writer .finish_with_discard_fn(&tline, &ctx, |_| async { discard }) .await .unwrap(); let 
image_layers = image_layers .into_iter() .map(|x| { if discard { x.into_discarded_layer() } else { x.into_resident_layer().layer_desc().key() } }) .collect_vec(); let delta_layers = delta_layers .into_iter() .map(|x| { if discard { x.into_discarded_layer() } else { x.into_resident_layer().layer_desc().key() } }) .collect_vec(); assert_eq!(image_layers.len(), N / 512 + 1); assert_eq!(delta_layers.len(), N / 512 + 1); assert_eq!(delta_layers.first().unwrap().key_range.start, get_key(0)); assert_eq!( delta_layers.last().unwrap().key_range.end, get_key(N as u32) ); for idx in 0..image_layers.len() { assert_ne!(image_layers[idx].key_range.start, Key::MIN); assert_ne!(image_layers[idx].key_range.end, Key::MAX); assert_ne!(delta_layers[idx].key_range.start, Key::MIN); assert_ne!(delta_layers[idx].key_range.end, Key::MAX); if idx > 0 { assert_eq!( image_layers[idx - 1].key_range.end, image_layers[idx].key_range.start ); assert_eq!( delta_layers[idx - 1].key_range.end, delta_layers[idx].key_range.start ); } } } #[tokio::test] async fn write_large_img() { let harness = TenantHarness::create("split_writer_write_large_img") .await .unwrap(); let (tenant, ctx) = harness.load().await; let tline = tenant .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) .await .unwrap(); let mut image_writer = SplitImageLayerWriter::new( tenant.conf, tline.timeline_id, tenant.tenant_shard_id, get_key(0), Lsn(0x18), 4 * 1024, &tline.gate, tline.cancel.clone(), ); let mut delta_writer = SplitDeltaLayerWriter::new( tenant.conf, tline.timeline_id, tenant.tenant_shard_id, Lsn(0x18)..Lsn(0x20), 4 * 1024, &tline.gate, tline.cancel.clone(), ); image_writer .put_image(get_key(0), get_img(0), &ctx) .await .unwrap(); image_writer .put_image(get_key(1), get_large_img(), &ctx) .await .unwrap(); let layers = image_writer .finish(&tline, &ctx, get_key(10)) .await .unwrap(); assert_eq!(layers.len(), 2); delta_writer .put_value(get_key(0), Lsn(0x18), Value::Image(get_img(0)), &ctx) .await 
.unwrap(); delta_writer .put_value(get_key(1), Lsn(0x1A), Value::Image(get_large_img()), &ctx) .await .unwrap(); let layers = delta_writer.finish(&tline, &ctx).await.unwrap(); assert_eq!(layers.len(), 2); let mut layers_iter = layers.into_iter(); assert_eq!( layers_iter .next() .unwrap() .into_resident_layer() .layer_desc() .key(), PersistentLayerKey { key_range: get_key(0)..get_key(1), lsn_range: Lsn(0x18)..Lsn(0x20), is_delta: true } ); assert_eq!( layers_iter .next() .unwrap() .into_resident_layer() .layer_desc() .key(), PersistentLayerKey { key_range: get_key(1)..get_key(2), lsn_range: Lsn(0x18)..Lsn(0x20), is_delta: true } ); } #[tokio::test] async fn write_split_single_key() { let harness = TenantHarness::create("split_writer_write_split_single_key") .await .unwrap(); let (tenant, ctx) = harness.load().await; let tline = tenant .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) .await .unwrap(); const N: usize = 2000; let mut delta_writer = SplitDeltaLayerWriter::new( tenant.conf, tline.timeline_id, tenant.tenant_shard_id, Lsn(0x10)..Lsn(N as u64 * 16 + 0x10), 4 * 1024 * 1024, &tline.gate, tline.cancel.clone(), ); for i in 0..N { let i = i as u32; delta_writer .put_value( get_key(0), Lsn(i as u64 * 16 + 0x10), Value::Image(get_large_img()), &ctx, ) .await .unwrap(); } let delta_layers = delta_writer.finish(&tline, &ctx).await.unwrap(); assert_eq!(delta_layers.len(), 1); let delta_layer = delta_layers .into_iter() .next() .unwrap() .into_resident_layer(); assert_eq!( delta_layer.layer_desc().key(), PersistentLayerKey { key_range: get_key(0)..get_key(1), lsn_range: Lsn(0x10)..Lsn(N as u64 * 16 + 0x10), is_delta: true } ); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/storage_layer/layer.rs
pageserver/src/tenant/storage_layer/layer.rs
use std::ops::Range; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::sync::{Arc, Weak}; use std::time::{Duration, SystemTime}; use crate::PERF_TRACE_TARGET; use crate::metrics::{ONDEMAND_DOWNLOAD_BYTES, ONDEMAND_DOWNLOAD_COUNT}; use anyhow::Context; use camino::{Utf8Path, Utf8PathBuf}; use pageserver_api::keyspace::KeySpace; use pageserver_api::models::HistoricLayerInfo; use pageserver_api::shard::{ShardIdentity, ShardIndex, TenantShardId}; use tracing::{Instrument, info_span}; use utils::generation::Generation; use utils::id::TimelineId; use utils::lsn::Lsn; use utils::sync::{gate, heavier_once_cell}; use super::delta_layer::{self}; use super::image_layer::{self}; use super::{ AsLayerDesc, ImageLayerWriter, LayerAccessStats, LayerAccessStatsReset, LayerName, LayerVisibilityHint, PerfInstrumentFutureExt, PersistentLayerDesc, ValuesReconstructState, }; use crate::config::PageServerConf; use crate::context::{RequestContext, RequestContextBuilder}; use crate::span::debug_assert_current_span_has_tenant_and_timeline_id; use crate::task_mgr::TaskKind; use crate::tenant::Timeline; use crate::tenant::remote_timeline_client::LayerFileMetadata; use crate::tenant::timeline::{CompactionError, GetVectoredError}; #[cfg(test)] mod tests; #[cfg(test)] mod failpoints; pub const S3_UPLOAD_LIMIT: u64 = 4_500_000_000; /// A Layer contains all data in a "rectangle" consisting of a range of keys and /// range of LSNs. /// /// There are two kinds of layers, in-memory and on-disk layers. In-memory /// layers are used to ingest incoming WAL, and provide fast access to the /// recent page versions. On-disk layers are stored as files on disk, and are /// immutable. This type represents the on-disk kind while in-memory kind are represented by /// [`InMemoryLayer`]. /// /// Furthermore, there are two kinds of on-disk layers: delta and image layers. /// A delta layer contains all modifications within a range of LSNs and keys. 
/// An image layer is a snapshot of all the data in a key-range, at a single /// LSN. /// /// This type models the on-disk layers, which can be evicted and on-demand downloaded. As a /// general goal, read accesses should always win eviction and eviction should not wait for /// download. /// /// ### State transitions /// /// The internal state of `Layer` is composed of most importantly the on-filesystem state and the /// [`ResidentOrWantedEvicted`] enum. On-filesystem state can be either present (fully downloaded, /// right size) or deleted. /// /// Reads will always win requests to evict until `wait_for_turn_and_evict` has acquired the /// `heavier_once_cell::InitPermit` and has started to `evict_blocking`. Before the /// `heavier_once_cell::InitPermit` has been acquired, any read request /// (`get_or_maybe_download`) can "re-initialize" using the existing downloaded file and thus /// cancelling the eviction. /// /// ```text /// +-----------------+ get_or_maybe_download +--------------------------------+ /// | not initialized |--------------------------->| Resident(Arc<DownloadedLayer>) | /// | ENOENT | /->| | /// +-----------------+ | +--------------------------------+ /// ^ | | ^ /// | get_or_maybe_download | | | get_or_maybe_download, either: /// evict_blocking | /-------------------------/ | | - upgrade weak to strong /// | | | | - re-initialize without download /// | | evict_and_wait | | /// +-----------------+ v | /// | not initialized | on_downloaded_layer_drop +--------------------------------------+ /// | file is present |<---------------------------| WantedEvicted(Weak<DownloadedLayer>) | /// +-----------------+ +--------------------------------------+ /// ``` /// /// ### Unsupported /// /// - Evicting by the operator deleting files from the filesystem /// /// [`InMemoryLayer`]: super::inmemory_layer::InMemoryLayer #[derive(Clone)] pub(crate) struct Layer(Arc<LayerInner>); impl std::fmt::Display for Layer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) 
-> std::fmt::Result { write!( f, "{}{}", self.layer_desc().short_id(), self.0.generation.get_suffix() ) } } impl std::fmt::Debug for Layer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{self}") } } impl AsLayerDesc for Layer { fn layer_desc(&self) -> &PersistentLayerDesc { self.0.layer_desc() } } impl PartialEq for Layer { fn eq(&self, other: &Self) -> bool { Arc::as_ptr(&self.0) == Arc::as_ptr(&other.0) } } pub(crate) fn local_layer_path( conf: &PageServerConf, tenant_shard_id: &TenantShardId, timeline_id: &TimelineId, layer_file_name: &LayerName, generation: &Generation, ) -> Utf8PathBuf { let timeline_path = conf.timeline_path(tenant_shard_id, timeline_id); if generation.is_none() { // Without a generation, we may only use legacy path style timeline_path.join(layer_file_name.to_string()) } else { timeline_path.join(format!("{}-v1{}", layer_file_name, generation.get_suffix())) } } pub(crate) enum LastEviction { Never, At(std::time::Instant), Evicting, } impl LastEviction { pub(crate) fn happened_after(&self, timepoint: std::time::Instant) -> bool { match self { LastEviction::Never => false, LastEviction::At(evicted_at) => evicted_at > &timepoint, LastEviction::Evicting => true, } } } impl Layer { /// Creates a layer value for a file we know to not be resident. 
pub(crate) fn for_evicted( conf: &'static PageServerConf, timeline: &Arc<Timeline>, file_name: LayerName, metadata: LayerFileMetadata, ) -> Self { let local_path = local_layer_path( conf, &timeline.tenant_shard_id, &timeline.timeline_id, &file_name, &metadata.generation, ); let desc = PersistentLayerDesc::from_filename( timeline.tenant_shard_id, timeline.timeline_id, file_name, metadata.file_size, ); let owner = Layer(Arc::new(LayerInner::new( conf, timeline, local_path, desc, None, metadata.generation, metadata.shard, ))); debug_assert!(owner.0.needs_download_blocking().unwrap().is_some()); owner } /// Creates a Layer value for a file we know to be resident in timeline directory. pub(crate) fn for_resident( conf: &'static PageServerConf, timeline: &Arc<Timeline>, local_path: Utf8PathBuf, file_name: LayerName, metadata: LayerFileMetadata, ) -> ResidentLayer { let desc = PersistentLayerDesc::from_filename( timeline.tenant_shard_id, timeline.timeline_id, file_name, metadata.file_size, ); let mut resident = None; let owner = Layer(Arc::new_cyclic(|owner| { let inner = Arc::new(DownloadedLayer { owner: owner.clone(), kind: tokio::sync::OnceCell::default(), version: 0, }); resident = Some(inner.clone()); LayerInner::new( conf, timeline, local_path, desc, Some(inner), metadata.generation, metadata.shard, ) })); let downloaded = resident.expect("just initialized"); debug_assert!(owner.0.needs_download_blocking().unwrap().is_none()); timeline .metrics .resident_physical_size_add(metadata.file_size); ResidentLayer { downloaded, owner } } /// Creates a Layer value for freshly written out new layer file by renaming it from a /// temporary path. 
pub(crate) fn finish_creating( conf: &'static PageServerConf, timeline: &Arc<Timeline>, desc: PersistentLayerDesc, temp_path: &Utf8Path, ) -> anyhow::Result<ResidentLayer> { let mut resident = None; let owner = Layer(Arc::new_cyclic(|owner| { let inner = Arc::new(DownloadedLayer { owner: owner.clone(), kind: tokio::sync::OnceCell::default(), version: 0, }); resident = Some(inner.clone()); let local_path = local_layer_path( conf, &timeline.tenant_shard_id, &timeline.timeline_id, &desc.layer_name(), &timeline.generation, ); LayerInner::new( conf, timeline, local_path, desc, Some(inner), timeline.generation, timeline.get_shard_index(), ) })); let downloaded = resident.expect("just initialized"); // We never want to overwrite an existing file, so we use `RENAME_NOREPLACE`. // TODO: this leaves the temp file in place if the rename fails, risking us running // out of space. Should we clean it up here or does the calling context deal with this? utils::fs_ext::rename_noreplace(temp_path.as_std_path(), owner.local_path().as_std_path()) .with_context(|| format!("rename temporary file as correct path for {owner}"))?; Ok(ResidentLayer { downloaded, owner }) } /// Requests the layer to be evicted and waits for this to be done. /// /// If the file is not resident, an [`EvictionError::NotFound`] is returned. /// /// If for a bad luck or blocking of the executor, we miss the actual eviction and the layer is /// re-downloaded, [`EvictionError::Downloaded`] is returned. /// /// Timeout is mandatory, because waiting for eviction is only needed for our tests; eviction /// will happen regardless the future returned by this method completing unless there is a /// read access before eviction gets to complete. /// /// Technically cancellation safe, but cancelling might shift the viewpoint of what generation /// of download-evict cycle on retry. 
pub(crate) async fn evict_and_wait(&self, timeout: Duration) -> Result<(), EvictionError> { self.0.evict_and_wait(timeout).await } /// Delete the layer file when the `self` gets dropped, also try to schedule a remote index upload /// then. /// /// On drop, this will cause a call to [`crate::tenant::remote_timeline_client::RemoteTimelineClient::schedule_deletion_of_unlinked`]. /// This means that the unlinking by [gc] or [compaction] must have happened strictly before /// the value this is called on gets dropped. /// /// This is ensured by both of those methods accepting references to Layer. /// /// [gc]: [`RemoteTimelineClient::schedule_gc_update`] /// [compaction]: [`RemoteTimelineClient::schedule_compaction_update`] pub(crate) fn delete_on_drop(&self) { self.0.delete_on_drop(); } pub(crate) async fn get_values_reconstruct_data( &self, keyspace: KeySpace, lsn_range: Range<Lsn>, reconstruct_data: &mut ValuesReconstructState, ctx: &RequestContext, ) -> Result<(), GetVectoredError> { let downloaded = { let ctx = RequestContextBuilder::from(ctx) .perf_span(|crnt_perf_span| { info_span!( target: PERF_TRACE_TARGET, parent: crnt_perf_span, "GET_LAYER", ) }) .attached_child(); self.0 .get_or_maybe_download(true, &ctx) .maybe_perf_instrument(&ctx, |crnt_perf_context| crnt_perf_context.clone()) .await .map_err(|err| match err { DownloadError::TimelineShutdown | DownloadError::DownloadCancelled => { GetVectoredError::Cancelled } other => GetVectoredError::Other(anyhow::anyhow!(other)), })? 
}; let this = ResidentLayer { downloaded: downloaded.clone(), owner: self.clone(), }; self.record_access(ctx); let ctx = RequestContextBuilder::from(ctx) .perf_span(|crnt_perf_span| { info_span!( target: PERF_TRACE_TARGET, parent: crnt_perf_span, "VISIT_LAYER", ) }) .attached_child(); downloaded .get_values_reconstruct_data(this, keyspace, lsn_range, reconstruct_data, &ctx) .instrument(tracing::debug_span!("get_values_reconstruct_data", layer=%self)) .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone()) .await .map_err(|err| match err { GetVectoredError::Other(err) => GetVectoredError::Other( err.context(format!("get_values_reconstruct_data for layer {self}")), ), err => err, }) } /// Download the layer if evicted. /// /// Will not error when the layer is already downloaded. pub(crate) async fn download(&self, ctx: &RequestContext) -> Result<(), DownloadError> { self.0.get_or_maybe_download(true, ctx).await?; Ok(()) } pub(crate) async fn needs_download(&self) -> Result<Option<NeedsDownload>, std::io::Error> { self.0.needs_download().await } /// Assuming the layer is already downloaded, returns a guard which will prohibit eviction /// while the guard exists. /// /// Returns None if the layer is currently evicted or becoming evicted. pub(crate) async fn keep_resident(&self) -> Option<ResidentLayer> { let downloaded = self.0.inner.get().and_then(|rowe| rowe.get())?; Some(ResidentLayer { downloaded, owner: self.clone(), }) } /// Weak indicator of is the layer resident or not. Good enough for eviction, which can deal /// with `EvictionError::NotFound`. /// /// Returns `true` if this layer might be resident, or `false`, if it most likely evicted or /// will be unless a read happens soon. pub(crate) fn is_likely_resident(&self) -> bool { self.0 .inner .get() .map(|rowe| rowe.is_likely_resident()) .unwrap_or(false) } /// Downloads if necessary and creates a guard, which will keep this layer from being evicted. 
pub(crate) async fn download_and_keep_resident( &self, ctx: &RequestContext, ) -> Result<ResidentLayer, DownloadError> { let downloaded = self.0.get_or_maybe_download(true, ctx).await?; Ok(ResidentLayer { downloaded, owner: self.clone(), }) } pub(crate) fn info(&self, reset: LayerAccessStatsReset) -> HistoricLayerInfo { self.0.info(reset) } pub(crate) fn latest_activity(&self) -> SystemTime { self.0.access_stats.latest_activity() } pub(crate) fn visibility(&self) -> LayerVisibilityHint { self.0.access_stats.visibility() } pub(crate) fn local_path(&self) -> &Utf8Path { &self.0.path } pub(crate) fn metadata(&self) -> LayerFileMetadata { self.0.metadata() } pub(crate) fn last_evicted_at(&self) -> LastEviction { match self.0.last_evicted_at.try_lock() { Ok(lock) => match *lock { None => LastEviction::Never, Some(at) => LastEviction::At(at), }, Err(std::sync::TryLockError::WouldBlock) => LastEviction::Evicting, Err(std::sync::TryLockError::Poisoned(p)) => panic!("Lock poisoned: {p}"), } } pub(crate) fn get_timeline_id(&self) -> Option<TimelineId> { self.0 .timeline .upgrade() .map(|timeline| timeline.timeline_id) } /// Traditional debug dumping facility #[allow(unused)] pub(crate) async fn dump(&self, verbose: bool, ctx: &RequestContext) -> anyhow::Result<()> { self.0.desc.dump(); if verbose { // for now, unconditionally download everything, even if that might not be wanted. let l = self.0.get_or_maybe_download(true, ctx).await?; l.dump(&self.0, ctx).await? } Ok(()) } /// Waits until this layer has been dropped (and if needed, local file deletion and remote /// deletion scheduling has completed). /// /// Does not start local deletion, use [`Self::delete_on_drop`] for that /// separatedly. 
#[cfg(any(feature = "testing", test))] pub(crate) fn wait_drop(&self) -> impl std::future::Future<Output = ()> + 'static { let mut rx = self.0.status.as_ref().unwrap().subscribe(); async move { loop { if rx.changed().await.is_err() { break; } } } } fn record_access(&self, ctx: &RequestContext) { if self.0.access_stats.record_access(ctx) { // Visibility was modified to Visible: maybe log about this match ctx.task_kind() { TaskKind::CalculateSyntheticSize | TaskKind::OndemandLogicalSizeCalculation | TaskKind::GarbageCollector | TaskKind::MgmtRequest => { // This situation is expected in code paths do binary searches of the LSN space to resolve // an LSN to a timestamp, which happens during GC, during GC cutoff calculations in synthetic size, // and on-demand for certain HTTP API requests. On-demand logical size calculation is also included // because it is run as a sub-task of synthetic size. } _ => { // In all other contexts, it is unusual to do I/O involving layers which are not visible at // some branch tip, so we log the fact that we are accessing something that the visibility // calculation thought should not be visible. // // This case is legal in brief time windows: for example an in-flight getpage request can hold on to a layer object // which was covered by a concurrent compaction. 
tracing::info!( layer=%self, "became visible as a result of access", ); } } // Update the timeline's visible bytes count if let Some(tl) = self.0.timeline.upgrade() { tl.metrics .visible_physical_size_gauge .add(self.0.desc.file_size) } } } pub(crate) fn set_visibility(&self, visibility: LayerVisibilityHint) { let old_visibility = self.0.access_stats.set_visibility(visibility.clone()); use LayerVisibilityHint::*; match (old_visibility, visibility) { (Visible, Covered) => { // Subtract this layer's contribution to the visible size metric if let Some(tl) = self.0.timeline.upgrade() { debug_assert!( tl.metrics.visible_physical_size_gauge.get() >= self.0.desc.file_size ); tl.metrics .visible_physical_size_gauge .sub(self.0.desc.file_size) } } (Covered, Visible) => { // Add this layer's contribution to the visible size metric if let Some(tl) = self.0.timeline.upgrade() { tl.metrics .visible_physical_size_gauge .add(self.0.desc.file_size) } } (Covered, Covered) | (Visible, Visible) => { // no change } } } } /// The download-ness ([`DownloadedLayer`]) can be either resident or wanted evicted. /// /// However when we want something evicted, we cannot evict it right away as there might be current /// reads happening on it. For example: it has been searched from [`LayerMap::search`] but not yet /// read with [`Layer::get_values_reconstruct_data`]. /// /// [`LayerMap::search`]: crate::tenant::layer_map::LayerMap::search #[derive(Debug)] enum ResidentOrWantedEvicted { Resident(Arc<DownloadedLayer>), WantedEvicted(Weak<DownloadedLayer>, usize), } impl ResidentOrWantedEvicted { /// Non-mutating access to the a DownloadedLayer, if possible. /// /// This is not used on the read path (anything that calls /// [`LayerInner::get_or_maybe_download`]) because it was decided that reads always win /// evictions, and part of that winning is using [`ResidentOrWantedEvicted::get_and_upgrade`]. 
fn get(&self) -> Option<Arc<DownloadedLayer>> { match self { ResidentOrWantedEvicted::Resident(strong) => Some(strong.clone()), ResidentOrWantedEvicted::WantedEvicted(weak, _) => weak.upgrade(), } } /// Best-effort query for residency right now, not as strong guarantee as receiving a strong /// reference from `ResidentOrWantedEvicted::get`. fn is_likely_resident(&self) -> bool { match self { ResidentOrWantedEvicted::Resident(_) => true, ResidentOrWantedEvicted::WantedEvicted(weak, _) => weak.strong_count() > 0, } } /// Upgrades any weak to strong if possible. /// /// Returns a strong reference if possible, along with a boolean telling if an upgrade /// happened. fn get_and_upgrade(&mut self) -> Option<(Arc<DownloadedLayer>, bool)> { match self { ResidentOrWantedEvicted::Resident(strong) => Some((strong.clone(), false)), ResidentOrWantedEvicted::WantedEvicted(weak, _) => match weak.upgrade() { Some(strong) => { LAYER_IMPL_METRICS.inc_raced_wanted_evicted_accesses(); *self = ResidentOrWantedEvicted::Resident(strong.clone()); Some((strong, true)) } None => None, }, } } /// When eviction is first requested, drop down to holding a [`Weak`]. /// /// Returns `Some` if this was the first time eviction was requested. Care should be taken to /// drop the possibly last strong reference outside of the mutex of /// [`heavier_once_cell::OnceCell`]. fn downgrade(&mut self) -> Option<Arc<DownloadedLayer>> { match self { ResidentOrWantedEvicted::Resident(strong) => { let weak = Arc::downgrade(strong); let mut temp = ResidentOrWantedEvicted::WantedEvicted(weak, strong.version); std::mem::swap(self, &mut temp); match temp { ResidentOrWantedEvicted::Resident(strong) => Some(strong), ResidentOrWantedEvicted::WantedEvicted(..) => unreachable!("just swapped"), } } ResidentOrWantedEvicted::WantedEvicted(..) => None, } } } struct LayerInner { /// Only needed to check ondemand_download_behavior_treat_error_as_warn and creation of /// [`Self::path`]. 
conf: &'static PageServerConf, /// Full path to the file; unclear if this should exist anymore. path: Utf8PathBuf, desc: PersistentLayerDesc, /// Timeline access is needed for remote timeline client and metrics. /// /// There should not be an access to timeline for any reason without entering the /// [`Timeline::gate`] at the same time. timeline: Weak<Timeline>, access_stats: LayerAccessStats, /// This custom OnceCell is backed by std mutex, but only held for short time periods. /// /// Filesystem changes (download, evict) are only done while holding a permit which the /// `heavier_once_cell` provides. /// /// A number of fields in `Layer` are meant to only be updated when holding the InitPermit, but /// possibly read while not holding it. inner: heavier_once_cell::OnceCell<ResidentOrWantedEvicted>, /// Do we want to delete locally and remotely this when `LayerInner` is dropped wanted_deleted: AtomicBool, /// Version is to make sure we will only evict a specific initialization of the downloaded file. /// /// Incremented for each initialization, stored in `DownloadedLayer::version` or /// `ResidentOrWantedEvicted::WantedEvicted`. version: AtomicUsize, /// Allow subscribing to when the layer actually gets evicted, a non-cancellable download /// starts, or completes. /// /// Updates must only be posted while holding the InitPermit or the heavier_once_cell::Guard. /// Holding the InitPermit is the only time we can do state transitions, but we also need to /// cancel a pending eviction on upgrading a [`ResidentOrWantedEvicted::WantedEvicted`] back to /// [`ResidentOrWantedEvicted::Resident`] on access. /// /// The sender is wrapped in an Option to facilitate moving it out on [`LayerInner::drop`]. status: Option<tokio::sync::watch::Sender<Status>>, /// Counter for exponential backoff with the download. /// /// This is atomic only for the purposes of having additional data only accessed while holding /// the InitPermit. 
consecutive_failures: AtomicUsize, /// The generation of this Layer. /// /// For loaded layers (resident or evicted) this comes from [`LayerFileMetadata::generation`], /// for created layers from [`Timeline::generation`]. generation: Generation, /// The shard of this Layer. /// /// For layers created in this process, this will always be the [`ShardIndex`] of the /// current `ShardIdentity`` (TODO: add link once it's introduced). /// /// For loaded layers, this may be some other value if the tenant has undergone /// a shard split since the layer was originally written. shard: ShardIndex, /// When the Layer was last evicted but has not been downloaded since. /// /// This is used for skipping evicted layers from the previous heatmap (see /// `[Timeline::generate_heatmap]`) and for updating metrics /// (see [`LayerImplMetrics::redownload_after`]). last_evicted_at: std::sync::Mutex<Option<std::time::Instant>>, #[cfg(test)] failpoints: std::sync::Mutex<Vec<failpoints::Failpoint>>, } impl std::fmt::Display for LayerInner { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.layer_desc().short_id()) } } impl AsLayerDesc for LayerInner { fn layer_desc(&self) -> &PersistentLayerDesc { &self.desc } } #[derive(Debug, Clone, Copy)] enum Status { Resident, Evicted, Downloading, } impl Drop for LayerInner { fn drop(&mut self) { // if there was a pending eviction, mark it cancelled here to balance metrics if let Some((ResidentOrWantedEvicted::WantedEvicted(..), _)) = self.inner.take_and_deinit() { // eviction has already been started LAYER_IMPL_METRICS.inc_eviction_cancelled(EvictionCancelled::LayerGone); // eviction request is intentionally not honored as no one is present to wait for it // and we could be delaying shutdown for nothing. 
} let timeline = self.timeline.upgrade(); if let Some(timeline) = timeline.as_ref() { // Only need to decrement metrics if the timeline still exists: otherwise // it will have already de-registered these metrics via TimelineMetrics::shutdown timeline.metrics.dec_layer(&self.desc); if matches!(self.access_stats.visibility(), LayerVisibilityHint::Visible) { debug_assert!( timeline.metrics.visible_physical_size_gauge.get() >= self.desc.file_size ); timeline .metrics .visible_physical_size_gauge .sub(self.desc.file_size); } } if !*self.wanted_deleted.get_mut() { return; } let span = tracing::info_span!(parent: None, "layer_delete", tenant_id = %self.layer_desc().tenant_shard_id.tenant_id, shard_id=%self.layer_desc().tenant_shard_id.shard_slug(), timeline_id = %self.layer_desc().timeline_id); let path = std::mem::take(&mut self.path); let file_name = self.layer_desc().layer_name(); let file_size = self.layer_desc().file_size; let meta = self.metadata(); let status = self.status.take(); Self::spawn_blocking(move || { let _g = span.entered(); // carry this until we are finished for [`Layer::wait_drop`] support let _status = status; let Some(timeline) = timeline else { // no need to nag that timeline is gone: under normal situation on // task_mgr::remove_tenant_from_memory the timeline is gone before we get dropped. LAYER_IMPL_METRICS.inc_deletes_failed(DeleteFailed::TimelineGone); return; }; let Ok(_guard) = timeline.gate.enter() else { LAYER_IMPL_METRICS.inc_deletes_failed(DeleteFailed::TimelineGone); return; }; let removed = match std::fs::remove_file(path) { Ok(()) => true, Err(e) if e.kind() == std::io::ErrorKind::NotFound => { // until we no longer do detaches by removing all local files before removing the // tenant from the global map, we will always get these errors even if we knew what // is the latest state. // // we currently do not track the latest state, so we'll also end up here on evicted // layers. 
false } Err(e) => { tracing::error!("failed to remove wanted deleted layer: {e}"); LAYER_IMPL_METRICS.inc_delete_removes_failed(); false } }; if removed { timeline.metrics.resident_physical_size_sub(file_size); } let res = timeline .remote_client .schedule_deletion_of_unlinked(vec![(file_name, meta)]); if let Err(e) = res { // test_timeline_deletion_with_files_stuck_in_upload_queue is good at // demonstrating this deadlock (without spawn_blocking): stop will drop // queued items, which will have ResidentLayer's, and those drops would try // to re-entrantly lock the RemoteTimelineClient inner state. if !timeline.is_active() { tracing::info!("scheduling deletion on drop failed: {e:#}"); } else { tracing::warn!("scheduling deletion on drop failed: {e:#}"); } LAYER_IMPL_METRICS.inc_deletes_failed(DeleteFailed::DeleteSchedulingFailed); } else { LAYER_IMPL_METRICS.inc_completed_deletes(); } }); } } impl LayerInner { #[allow(clippy::too_many_arguments)] fn new( conf: &'static PageServerConf, timeline: &Arc<Timeline>, local_path: Utf8PathBuf, desc: PersistentLayerDesc, downloaded: Option<Arc<DownloadedLayer>>, generation: Generation, shard: ShardIndex, ) -> Self { let (inner, version, init_status) = if let Some(inner) = downloaded { let version = inner.version;
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/storage_layer/image_layer.rs
pageserver/src/tenant/storage_layer/image_layer.rs
//! An ImageLayer represents an image or a snapshot of a key-range at //! one particular LSN. //! //! It contains an image of all key-value pairs in its key-range. Any key //! that falls into the image layer's range but does not exist in the layer, //! does not exist. //! //! An image layer is stored in a file on disk. The file is stored in //! timelines/<timeline_id> directory. Currently, there are no //! subdirectories, and each image layer file is named like this: //! //! ```text //! <key start>-<key end>__<LSN> //! ``` //! //! For example: //! //! ```text //! 000000067F000032BE0000400000000070B6-000000067F000032BE0000400000000080B6__00000000346BC568 //! ``` //! //! Every image layer file consists of three parts: "summary", //! "index", and "values". The summary is a fixed size header at the //! beginning of the file, and it contains basic information about the //! layer, and offsets to the other parts. The "index" is a B-tree, //! mapping from Key to an offset in the "values" part. The //! actual page images are stored in the "values" part. 
use std::collections::{HashMap, VecDeque}; use std::fs::File; use std::ops::Range; use std::os::unix::prelude::FileExt; use std::str::FromStr; use std::sync::Arc; use std::sync::atomic::AtomicU64; use anyhow::{Context, Result, bail, ensure}; use bytes::Bytes; use camino::{Utf8Path, Utf8PathBuf}; use hex; use itertools::Itertools; use pageserver_api::config::MaxVectoredReadBytes; use pageserver_api::key::{DBDIR_KEY, KEY_SIZE, Key}; use pageserver_api::keyspace::KeySpace; use pageserver_api::shard::{ShardIdentity, TenantShardId}; use serde::{Deserialize, Serialize}; use tokio::sync::OnceCell; use tokio_stream::StreamExt; use tokio_util::sync::CancellationToken; use tracing::*; use utils::bin_ser::BeSer; use utils::bin_ser::SerializeError; use utils::id::{TenantId, TimelineId}; use utils::lsn::Lsn; use wal_decoder::models::value::Value; use super::errors::PutError; use super::layer_name::ImageLayerName; use super::{ AsLayerDesc, LayerName, OnDiskValue, OnDiskValueIo, PersistentLayerDesc, ResidentLayer, ValuesReconstructState, }; use crate::config::PageServerConf; use crate::context::{PageContentKind, RequestContext, RequestContextBuilder}; use crate::page_cache::{self, FileId, PAGE_SZ}; use crate::tenant::blob_io::BlobWriter; use crate::tenant::block_io::{BlockBuf, FileBlockReader}; use crate::tenant::disk_btree::{ DiskBtreeBuilder, DiskBtreeIterator, DiskBtreeReader, VisitDirection, }; use crate::tenant::timeline::GetVectoredError; use crate::tenant::vectored_blob_io::{ BlobFlag, BufView, StreamingVectoredReadPlanner, VectoredBlobReader, VectoredRead, VectoredReadPlanner, }; use crate::virtual_file::TempVirtualFile; use crate::virtual_file::owned_buffers_io::io_buf_ext::IoBufExt; use crate::virtual_file::owned_buffers_io::write::{Buffer, BufferedWriterShutdownMode}; use crate::virtual_file::{self, IoBuffer, IoBufferMut, MaybeFatalIo, VirtualFile}; use crate::{IMAGE_FILE_MAGIC, STORAGE_FORMAT_VERSION, TEMP_FILE_SUFFIX}; /// /// Header stored in the beginning of the 
file /// /// After this comes the 'values' part, starting on block 1. After that, /// the 'index' starts at the block indicated by 'index_start_blk' /// #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct Summary { /// Magic value to identify this as a neon image file. Always IMAGE_FILE_MAGIC. pub magic: u16, pub format_version: u16, pub tenant_id: TenantId, pub timeline_id: TimelineId, pub key_range: Range<Key>, pub lsn: Lsn, /// Block number where the 'index' part of the file begins. pub index_start_blk: u32, /// Block within the 'index', where the B-tree root page is stored pub index_root_blk: u32, // the 'values' part starts after the summary header, on block 1. } impl From<&ImageLayer> for Summary { fn from(layer: &ImageLayer) -> Self { Self::expected( layer.desc.tenant_shard_id.tenant_id, layer.desc.timeline_id, layer.desc.key_range.clone(), layer.lsn, ) } } impl Summary { /// Serializes the summary header into an aligned buffer of lenth `PAGE_SZ`. pub fn ser_into_page(&self) -> Result<IoBuffer, SerializeError> { let mut buf = IoBufferMut::with_capacity(PAGE_SZ); Self::ser_into(self, &mut buf)?; // Pad zeroes to the buffer so the length is a multiple of the alignment. buf.extend_with(0, buf.capacity() - buf.len()); Ok(buf.freeze()) } pub(super) fn expected( tenant_id: TenantId, timeline_id: TimelineId, key_range: Range<Key>, lsn: Lsn, ) -> Self { Self { magic: IMAGE_FILE_MAGIC, format_version: STORAGE_FORMAT_VERSION, tenant_id, timeline_id, key_range, lsn, index_start_blk: 0, index_root_blk: 0, } } } /// This is used only from `pagectl`. Within pageserver, all layers are /// [`crate::tenant::storage_layer::Layer`], which can hold an [`ImageLayerInner`]. 
pub struct ImageLayer { path: Utf8PathBuf, pub desc: PersistentLayerDesc, // This entry contains an image of all pages as of this LSN, should be the same as desc.lsn pub lsn: Lsn, inner: OnceCell<ImageLayerInner>, } impl std::fmt::Debug for ImageLayer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { use super::RangeDisplayDebug; f.debug_struct("ImageLayer") .field("key_range", &RangeDisplayDebug(&self.desc.key_range)) .field("file_size", &self.desc.file_size) .field("lsn", &self.lsn) .field("inner", &self.inner) .finish() } } /// ImageLayer is the in-memory data structure associated with an on-disk image /// file. pub struct ImageLayerInner { // values copied from summary index_start_blk: u32, index_root_blk: u32, key_range: Range<Key>, lsn: Lsn, file: Arc<VirtualFile>, file_id: FileId, max_vectored_read_bytes: Option<MaxVectoredReadBytes>, } impl ImageLayerInner { pub(crate) fn layer_dbg_info(&self) -> String { format!( "image {}..{} {}", self.key_range().start, self.key_range().end, self.lsn() ) } } impl std::fmt::Debug for ImageLayerInner { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ImageLayerInner") .field("index_start_blk", &self.index_start_blk) .field("index_root_blk", &self.index_root_blk) .finish() } } impl ImageLayerInner { pub(super) async fn dump(&self, ctx: &RequestContext) -> anyhow::Result<()> { let block_reader = FileBlockReader::new(&self.file, self.file_id); let tree_reader = DiskBtreeReader::<_, KEY_SIZE>::new( self.index_start_blk, self.index_root_blk, block_reader, ); tree_reader.dump(ctx).await?; tree_reader .visit( &[0u8; KEY_SIZE], VisitDirection::Forwards, |key, value| { println!("key: {} offset {}", hex::encode(key), value); true }, ctx, ) .await?; Ok(()) } } /// Boilerplate to implement the Layer trait, always use layer_desc for persistent layers. 
impl std::fmt::Display for ImageLayer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.layer_desc().short_id()) } } impl AsLayerDesc for ImageLayer { fn layer_desc(&self) -> &PersistentLayerDesc { &self.desc } } impl ImageLayer { pub async fn dump(&self, verbose: bool, ctx: &RequestContext) -> Result<()> { self.desc.dump(); if !verbose { return Ok(()); } let inner = self.load(ctx).await?; inner.dump(ctx).await?; Ok(()) } fn temp_path_for( conf: &PageServerConf, timeline_id: TimelineId, tenant_shard_id: TenantShardId, fname: &ImageLayerName, ) -> Utf8PathBuf { // TempVirtualFile requires us to never reuse a filename while an old // instance of TempVirtualFile created with that filename is not done dropping yet. // So, we use a monotonic counter to disambiguate the filenames. static NEXT_TEMP_DISAMBIGUATOR: AtomicU64 = AtomicU64::new(1); let filename_disambiguator = NEXT_TEMP_DISAMBIGUATOR.fetch_add(1, std::sync::atomic::Ordering::Relaxed); conf.timeline_path(&tenant_shard_id, &timeline_id) .join(format!( "{fname}.{filename_disambiguator:x}.{TEMP_FILE_SUFFIX}" )) } /// /// Open the underlying file and read the metadata into memory, if it's /// not loaded already. 
/// async fn load(&self, ctx: &RequestContext) -> Result<&ImageLayerInner> { self.inner .get_or_try_init(|| self.load_inner(ctx)) .await .with_context(|| format!("Failed to load image layer {}", self.path())) } async fn load_inner(&self, ctx: &RequestContext) -> Result<ImageLayerInner> { let path = self.path(); let loaded = ImageLayerInner::load(&path, self.desc.image_layer_lsn(), None, None, ctx).await?; // not production code let actual_layer_name = LayerName::from_str(path.file_name().unwrap()).unwrap(); let expected_layer_name = self.layer_desc().layer_name(); if actual_layer_name != expected_layer_name { println!("warning: filename does not match what is expected from in-file summary"); println!("actual: {:?}", actual_layer_name.to_string()); println!("expected: {:?}", expected_layer_name.to_string()); } Ok(loaded) } /// Create an ImageLayer struct representing an existing file on disk. /// /// This variant is only used for debugging purposes, by the 'pagectl' binary. pub fn new_for_path(path: &Utf8Path, file: File) -> Result<ImageLayer> { let mut summary_buf = vec![0; PAGE_SZ]; file.read_exact_at(&mut summary_buf, 0)?; let summary = Summary::des_prefix(&summary_buf)?; let metadata = file .metadata() .context("get file metadata to determine size")?; // This function is never used for constructing layers in a running pageserver, // so it does not need an accurate TenantShardId. let tenant_shard_id = TenantShardId::unsharded(summary.tenant_id); Ok(ImageLayer { path: path.to_path_buf(), desc: PersistentLayerDesc::new_img( tenant_shard_id, summary.timeline_id, summary.key_range, summary.lsn, metadata.len(), ), // Now we assume image layer ALWAYS covers the full range. This may change in the future. 
lsn: summary.lsn, inner: OnceCell::new(), }) } fn path(&self) -> Utf8PathBuf { self.path.clone() } } #[derive(thiserror::Error, Debug)] pub enum RewriteSummaryError { #[error("magic mismatch")] MagicMismatch, #[error(transparent)] Other(#[from] anyhow::Error), } impl From<std::io::Error> for RewriteSummaryError { fn from(e: std::io::Error) -> Self { Self::Other(anyhow::anyhow!(e)) } } impl ImageLayer { pub async fn rewrite_summary<F>( path: &Utf8Path, rewrite: F, ctx: &RequestContext, ) -> Result<(), RewriteSummaryError> where F: Fn(Summary) -> Summary, { let file = VirtualFile::open_with_options_v2( path, virtual_file::OpenOptions::new().read(true).write(true), ctx, ) .await .with_context(|| format!("Failed to open file '{path}'"))?; let file_id = page_cache::next_file_id(); let block_reader = FileBlockReader::new(&file, file_id); let summary_blk = block_reader.read_blk(0, ctx).await?; let actual_summary = Summary::des_prefix(summary_blk.as_ref()).context("deserialize")?; if actual_summary.magic != IMAGE_FILE_MAGIC { return Err(RewriteSummaryError::MagicMismatch); } let new_summary = rewrite(actual_summary); let buf = new_summary.ser_into_page().context("serialize")?; let (_buf, res) = file.write_all_at(buf.slice_len(), 0, ctx).await; res?; Ok(()) } } impl ImageLayerInner { pub(crate) fn key_range(&self) -> &Range<Key> { &self.key_range } pub(crate) fn lsn(&self) -> Lsn { self.lsn } pub(super) async fn load( path: &Utf8Path, lsn: Lsn, summary: Option<Summary>, max_vectored_read_bytes: Option<MaxVectoredReadBytes>, ctx: &RequestContext, ) -> anyhow::Result<Self> { let file = Arc::new( VirtualFile::open_v2(path, ctx) .await .context("open layer file")?, ); let file_id = page_cache::next_file_id(); let block_reader = FileBlockReader::new(&file, file_id); let summary_blk = block_reader .read_blk(0, ctx) .await .context("read first block")?; // length is the only way how this could fail, so it's not actually likely at all unless // read_blk returns wrong sized block. 
// // TODO: confirm and make this into assertion let actual_summary = Summary::des_prefix(summary_blk.as_ref()).context("deserialize first block")?; if let Some(mut expected_summary) = summary { // production code path expected_summary.index_start_blk = actual_summary.index_start_blk; expected_summary.index_root_blk = actual_summary.index_root_blk; // mask out the timeline_id, but still require the layers to be from the same tenant expected_summary.timeline_id = actual_summary.timeline_id; if actual_summary != expected_summary { bail!( "in-file summary does not match expected summary. actual = {:?} expected = {:?}", actual_summary, expected_summary ); } } Ok(ImageLayerInner { index_start_blk: actual_summary.index_start_blk, index_root_blk: actual_summary.index_root_blk, lsn, file, file_id, max_vectored_read_bytes, key_range: actual_summary.key_range, }) } // Look up the keys in the provided keyspace and update // the reconstruct state with whatever is found. pub(super) async fn get_values_reconstruct_data( &self, this: ResidentLayer, keyspace: KeySpace, reconstruct_state: &mut ValuesReconstructState, ctx: &RequestContext, ) -> Result<(), GetVectoredError> { let reads = self .plan_reads(keyspace, None, ctx) .await .map_err(GetVectoredError::Other)?; self.do_reads_and_update_state(this, reads, reconstruct_state, ctx) .await; reconstruct_state.on_image_layer_visited(&self.key_range); Ok(()) } /// Traverse the layer's index to build read operations on the overlap of the input keyspace /// and the keys in this layer. /// /// If shard_identity is provided, it will be used to filter keys down to those stored on /// this shard. 
async fn plan_reads( &self, keyspace: KeySpace, shard_identity: Option<&ShardIdentity>, ctx: &RequestContext, ) -> anyhow::Result<Vec<VectoredRead>> { let mut planner = VectoredReadPlanner::new( self.max_vectored_read_bytes .expect("Layer is loaded with max vectored bytes config") .0 .into(), ); let block_reader = FileBlockReader::new(&self.file, self.file_id); let tree_reader = DiskBtreeReader::new(self.index_start_blk, self.index_root_blk, block_reader); let ctx = RequestContextBuilder::from(ctx) .page_content_kind(PageContentKind::ImageLayerBtreeNode) .attached_child(); for range in keyspace.ranges.iter() { let mut range_end_handled = false; let mut search_key: [u8; KEY_SIZE] = [0u8; KEY_SIZE]; range.start.write_to_byte_slice(&mut search_key); let index_stream = tree_reader.clone().into_stream(&search_key, &ctx); let mut index_stream = std::pin::pin!(index_stream); while let Some(index_entry) = index_stream.next().await { let (raw_key, offset) = index_entry?; let key = Key::from_slice(&raw_key[..KEY_SIZE]); assert!(key >= range.start); let flag = if let Some(shard_identity) = shard_identity { if shard_identity.is_key_disposable(&key) { BlobFlag::Ignore } else { BlobFlag::None } } else { BlobFlag::None }; if key >= range.end { planner.handle_range_end(offset); range_end_handled = true; break; } else { planner.handle(key, self.lsn, offset, flag); } } if !range_end_handled { let payload_end = self.index_start_blk as u64 * PAGE_SZ as u64; planner.handle_range_end(payload_end); } } Ok(planner.finish()) } /// Given a key range, select the parts of that range that should be retained by the ShardIdentity, /// then execute vectored GET operations, passing the results of all read keys into the writer. 
pub(super) async fn filter( &self, shard_identity: &ShardIdentity, writer: &mut ImageLayerWriter, ctx: &RequestContext, ) -> anyhow::Result<usize> { // Fragment the range into the regions owned by this ShardIdentity let plan = self .plan_reads( KeySpace { // If asked for the total key space, plan_reads will give us all the keys in the layer ranges: vec![Key::MIN..Key::MAX], }, Some(shard_identity), ctx, ) .await?; let vectored_blob_reader = VectoredBlobReader::new(&self.file); let mut key_count = 0; for read in plan.into_iter() { let buf_size = read.size(); let buf = IoBufferMut::with_capacity(buf_size); let blobs_buf = vectored_blob_reader.read_blobs(&read, buf, ctx).await?; let view = BufView::new_slice(&blobs_buf.buf); for meta in blobs_buf.blobs.iter() { // Just read the raw header+data and pass it through to the target layer, without // decoding and recompressing it. let raw = meta.raw_with_header(&view); key_count += 1; writer .put_image_raw(meta.meta.key, raw.into_bytes(), ctx) .await .context(format!("Storing key {}", meta.meta.key))?; } } Ok(key_count) } async fn do_reads_and_update_state( &self, this: ResidentLayer, reads: Vec<VectoredRead>, reconstruct_state: &mut ValuesReconstructState, ctx: &RequestContext, ) { let max_vectored_read_bytes = self .max_vectored_read_bytes .expect("Layer is loaded with max vectored bytes config") .0 .into(); for read in reads.into_iter() { let mut ios: HashMap<(Key, Lsn), OnDiskValueIo> = Default::default(); for (_, blob_meta) in read.blobs_at.as_slice() { let io = reconstruct_state.update_key(&blob_meta.key, blob_meta.lsn, true); ios.insert((blob_meta.key, blob_meta.lsn), io); } let buf_size = read.size(); if buf_size > max_vectored_read_bytes { // If the read is oversized, it should only contain one key. 
let offenders = read .blobs_at .as_slice() .iter() .filter_map(|(_, blob_meta)| { if blob_meta.key.is_rel_dir_key() || blob_meta.key == DBDIR_KEY || blob_meta.key.is_aux_file_key() { // The size of values for these keys is unbounded and can // grow very large in pathological cases. None } else { Some(format!("{}@{}", blob_meta.key, blob_meta.lsn)) } }) .join(", "); if !offenders.is_empty() { tracing::warn!( "Oversized vectored read ({} > {}) for keys {}", buf_size, max_vectored_read_bytes, offenders ); } } let read_extend_residency = this.clone(); let read_from = self.file.clone(); let read_ctx = ctx.attached_child(); reconstruct_state .spawn_io(async move { let buf = IoBufferMut::with_capacity(buf_size); let vectored_blob_reader = VectoredBlobReader::new(&read_from); let res = vectored_blob_reader.read_blobs(&read, buf, &read_ctx).await; match res { Ok(blobs_buf) => { let view = BufView::new_slice(&blobs_buf.buf); for meta in blobs_buf.blobs.iter() { let io: OnDiskValueIo = ios.remove(&(meta.meta.key, meta.meta.lsn)).unwrap(); let img_buf = meta.read(&view).await; let img_buf = match img_buf { Ok(img_buf) => img_buf, Err(e) => { io.complete(Err(e)); continue; } }; io.complete(Ok(OnDiskValue::RawImage(img_buf.into_bytes()))); } assert!(ios.is_empty()); } Err(err) => { for (_, io) in ios { io.complete(Err(std::io::Error::new( err.kind(), "vec read failed", ))); } } } // keep layer resident until this IO is done; this spawned IO future generally outlives the // call to `self` / the `Arc<DownloadedLayer>` / the `ResidentLayer` that guarantees residency drop(read_extend_residency); }) .await; } } pub(crate) fn iter_with_options<'a>( &'a self, ctx: &'a RequestContext, max_read_size: u64, max_batch_size: usize, ) -> ImageLayerIterator<'a> { let block_reader = FileBlockReader::new(&self.file, self.file_id); let tree_reader = DiskBtreeReader::new(self.index_start_blk, self.index_root_blk, block_reader); ImageLayerIterator { image_layer: self, ctx, index_iter: 
tree_reader.iter(&[0; KEY_SIZE], ctx), key_values_batch: VecDeque::new(), is_end: false, planner: StreamingVectoredReadPlanner::new(max_read_size, max_batch_size), } } /// NB: not super efficient, but not terrible either. Should prob be an iterator. // // We're reusing the index traversal logical in plan_reads; would be nice to // factor that out. pub(crate) async fn load_keys(&self, ctx: &RequestContext) -> anyhow::Result<Vec<Key>> { let plan = self .plan_reads(KeySpace::single(self.key_range.clone()), None, ctx) .await?; Ok(plan .into_iter() .flat_map(|read| read.blobs_at) .map(|(_, blob_meta)| blob_meta.key) .collect()) } } /// A builder object for constructing a new image layer. /// /// Usage: /// /// 1. Create the ImageLayerWriter by calling ImageLayerWriter::new(...) /// /// 2. Write the contents by calling `put_page_image` for every key-value /// pair in the key range. /// /// 3. Call `finish`. /// struct ImageLayerWriterInner { conf: &'static PageServerConf, path: Utf8PathBuf, timeline_id: TimelineId, tenant_shard_id: TenantShardId, key_range: Range<Key>, lsn: Lsn, // Total uncompressed bytes passed into put_image uncompressed_bytes: u64, // Like `uncompressed_bytes`, // but only of images we might consider for compression uncompressed_bytes_eligible: u64, // Like `uncompressed_bytes`, but only of images // where we have chosen their compressed form uncompressed_bytes_chosen: u64, // Number of keys in the layer. num_keys: usize, blob_writer: BlobWriter<TempVirtualFile>, tree: DiskBtreeBuilder<BlockBuf, KEY_SIZE>, #[cfg(feature = "testing")] last_written_key: Key, } impl ImageLayerWriterInner { /// /// Start building a new image layer. 
/// #[allow(clippy::too_many_arguments)] async fn new( conf: &'static PageServerConf, timeline_id: TimelineId, tenant_shard_id: TenantShardId, key_range: &Range<Key>, lsn: Lsn, gate: &utils::sync::gate::Gate, cancel: CancellationToken, ctx: &RequestContext, ) -> anyhow::Result<Self> { // Create the file initially with a temporary filename. // We'll atomically rename it to the final name when we're done. let path = ImageLayer::temp_path_for( conf, timeline_id, tenant_shard_id, &ImageLayerName { key_range: key_range.clone(), lsn, }, ); trace!("creating image layer {}", path); let file = TempVirtualFile::new( VirtualFile::open_with_options_v2( &path, virtual_file::OpenOptions::new() .create_new(true) .write(true), ctx, ) .await?, gate.enter()?, ); // Start at `PAGE_SZ` to make room for the header block. let blob_writer = BlobWriter::new( file, PAGE_SZ as u64, gate, cancel, ctx, info_span!(parent: None, "image_layer_writer_flush_task", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), timeline_id=%timeline_id, path = %path), )?; // Initialize the b-tree index builder let block_buf = BlockBuf::new(); let tree_builder = DiskBtreeBuilder::new(block_buf); let writer = Self { conf, path, timeline_id, tenant_shard_id, key_range: key_range.clone(), lsn, tree: tree_builder, blob_writer, uncompressed_bytes: 0, uncompressed_bytes_eligible: 0, uncompressed_bytes_chosen: 0, num_keys: 0, #[cfg(feature = "testing")] last_written_key: Key::MIN, }; Ok(writer) } /// /// Write next value to the file. /// /// The page versions must be appended in blknum order. 
/// async fn put_image( &mut self, key: Key, img: Bytes, ctx: &RequestContext, ) -> Result<(), PutError> { if !self.key_range.contains(&key) { return Err(PutError::Other(anyhow::anyhow!( "key {:?} not in range {:?}", key, self.key_range ))); } let compression = self.conf.image_compression; let uncompressed_len = img.len() as u64; self.uncompressed_bytes += uncompressed_len; self.num_keys += 1; let (_img, res) = self .blob_writer .write_blob_maybe_compressed(img.slice_len(), ctx, compression) .await; // TODO: re-use the buffer for `img` further upstack let (off, compression_info) = res.map_err(PutError::WriteBlob)?; if compression_info.compressed_size.is_some() { // The image has been considered for compression at least self.uncompressed_bytes_eligible += uncompressed_len; } if compression_info.written_compressed { // The image has been compressed self.uncompressed_bytes_chosen += uncompressed_len; } let mut keybuf: [u8; KEY_SIZE] = [0u8; KEY_SIZE]; key.write_to_byte_slice(&mut keybuf); self.tree .append(&keybuf, off) .map_err(anyhow::Error::new) .map_err(PutError::Other)?; #[cfg(feature = "testing")] { self.last_written_key = key; } Ok(()) } /// /// Write the next image to the file, as a raw blob header and data. /// /// The page versions must be appended in blknum order. /// async fn put_image_raw( &mut self, key: Key, raw_with_header: Bytes, ctx: &RequestContext, ) -> anyhow::Result<()> { ensure!(self.key_range.contains(&key)); // NB: we don't update the (un)compressed metrics, since we can't determine them without // decompressing the image. This seems okay. self.num_keys += 1; let (_, res) = self .blob_writer .write_blob_raw(raw_with_header.slice_len(), ctx) .await; let offset = res?; let mut keybuf: [u8; KEY_SIZE] = [0u8; KEY_SIZE]; key.write_to_byte_slice(&mut keybuf); self.tree.append(&keybuf, offset)?; #[cfg(feature = "testing")] { self.last_written_key = key; } Ok(()) } /// /// Finish writing the image layer. 
/// async fn finish( self, ctx: &RequestContext, end_key: Option<Key>, ) -> anyhow::Result<(PersistentLayerDesc, Utf8PathBuf)> { let index_start_blk = self.blob_writer.size().div_ceil(PAGE_SZ as u64) as u32; // Calculate compression ratio let compressed_size = self.blob_writer.size() - PAGE_SZ as u64; // Subtract PAGE_SZ for header crate::metrics::COMPRESSION_IMAGE_INPUT_BYTES.inc_by(self.uncompressed_bytes); crate::metrics::COMPRESSION_IMAGE_INPUT_BYTES_CONSIDERED .inc_by(self.uncompressed_bytes_eligible); crate::metrics::COMPRESSION_IMAGE_INPUT_BYTES_CHOSEN.inc_by(self.uncompressed_bytes_chosen); // NB: filter() may pass through raw pages from a different layer, without looking at // whether these are compressed or not. We don't track metrics for these, so avoid // increasing `COMPRESSION_IMAGE_OUTPUT_BYTES` in this case too. if self.uncompressed_bytes > 0 { crate::metrics::COMPRESSION_IMAGE_OUTPUT_BYTES.inc_by(compressed_size); }; let file = self .blob_writer .shutdown( BufferedWriterShutdownMode::ZeroPadToNextMultiple(PAGE_SZ), ctx, ) .await?; // Write out the index let mut offset = index_start_blk as u64 * PAGE_SZ as u64; let (index_root_blk, block_buf) = self.tree.finish()?;
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/storage_layer/inmemory_layer.rs
pageserver/src/tenant/storage_layer/inmemory_layer.rs
//! An in-memory layer stores recently received key-value pairs.
//!
//! The "in-memory" part of the name is a bit misleading: the actual page versions are
//! held in an ephemeral file, not in memory. The metadata for each page version, i.e.
//! its position in the file, is kept in memory, though.
//!
use std::cmp::Ordering;
use std::collections::{BTreeMap, HashMap};
use std::fmt::Write;
use std::ops::Range;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering as AtomicOrdering};
use std::sync::{Arc, OnceLock};
use std::time::Instant;

use anyhow::Result;
use camino::Utf8PathBuf;
use pageserver_api::key::{CompactKey, Key};
use pageserver_api::keyspace::KeySpace;
use pageserver_api::models::InMemoryLayerInfo;
use pageserver_api::shard::TenantShardId;
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
use tracing::*;
use utils::id::TimelineId;
use utils::lsn::Lsn;
use utils::vec_map::VecMap;
use wal_decoder::serialized_batch::{SerializedValueBatch, SerializedValueMeta, ValueMeta};

use super::{DeltaLayerWriter, PersistentLayerDesc, ValuesReconstructState};
use crate::assert_u64_eq_usize::{U64IsUsize, UsizeIsU64, u64_to_usize};
use crate::config::PageServerConf;
use crate::context::{PageContentKind, RequestContext, RequestContextBuilder};
// avoid binding to Write (conflicts with std::io::Write)
// while being able to use std::fmt::Write's methods
use crate::metrics::TIMELINE_EPHEMERAL_BYTES;
use crate::tenant::ephemeral_file::EphemeralFile;
use crate::tenant::storage_layer::{OnDiskValue, OnDiskValueIo};
use crate::tenant::timeline::GetVectoredError;
use crate::virtual_file::owned_buffers_io::io_buf_ext::IoBufExt;
use crate::{l0_flush, page_cache};

pub(crate) mod vectored_dio_read;

/// Page-cache file id of the ephemeral file backing an [`InMemoryLayer`].
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub(crate) struct InMemoryLayerFileId(page_cache::FileId);

pub struct InMemoryLayer {
    conf: &'static PageServerConf,
    tenant_shard_id: TenantShardId,
    timeline_id: TimelineId,
    file_id: InMemoryLayerFileId,

    /// This layer contains all the changes from 'start_lsn'. The
    /// start is inclusive.
    start_lsn: Lsn,

    /// Frozen layers have an exclusive end LSN.
    /// Writes are only allowed when this is `None`.
    pub(crate) end_lsn: OnceLock<Lsn>,

    /// Used for traversal path. Cached representation of the in-memory layer after frozen.
    frozen_local_path_str: OnceLock<Arc<str>>,

    // Instant at which this layer was created; exposed via `get_opened_at`.
    opened_at: Instant,

    /// All versions of all pages in the layer are kept here. Indexed
    /// by block number and LSN. The [`IndexEntry`] is an offset into the
    /// ephemeral file where the page version is stored.
    ///
    /// We use a separate lock for the index to reduce the critical section
    /// during which reads cannot be planned.
    ///
    /// Note that the file backing [`InMemoryLayer::file`] is append-only,
    /// so it is not necessary to hold a lock on the index while reading or writing from the file.
    /// In particular:
    /// 1. It is safe to read and release [`InMemoryLayer::index`] before reading from [`InMemoryLayer::file`].
    /// 2. It is safe to write to [`InMemoryLayer::file`] before locking and updating [`InMemoryLayer::index`].
    index: RwLock<BTreeMap<CompactKey, VecMap<Lsn, IndexEntry>>>,

    /// Wrapper for the actual on-disk file. Uses interior mutability for concurrent reads/writes.
    file: EphemeralFile,

    // Rough in-memory footprint of the index; grown as entries are appended
    // (see `put_batch`), exposed via `estimated_in_mem_size`.
    estimated_in_mem_size: AtomicU64,
}

impl std::fmt::Debug for InMemoryLayer {
    // Debug output shows only the LSN bounds, not the (potentially huge) index.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("InMemoryLayer")
            .field("start_lsn", &self.start_lsn)
            .field("end_lsn", &self.end_lsn)
            .finish()
    }
}

/// Support the same max blob length as blob_io, because ultimately
/// all the InMemoryLayer contents end up being written into a delta layer,
/// using the [`crate::tenant::blob_io`].
const MAX_SUPPORTED_BLOB_LEN: usize = crate::tenant::blob_io::MAX_SUPPORTED_BLOB_LEN; const MAX_SUPPORTED_BLOB_LEN_BITS: usize = { let trailing_ones = MAX_SUPPORTED_BLOB_LEN.trailing_ones() as usize; let leading_zeroes = MAX_SUPPORTED_BLOB_LEN.leading_zeros() as usize; assert!(trailing_ones + leading_zeroes == std::mem::size_of::<usize>() * 8); trailing_ones }; /// See [`InMemoryLayer::index`]. /// /// For memory efficiency, the data is packed into a u64. /// /// Layout: /// - 1 bit: `will_init` /// - [`MAX_SUPPORTED_BLOB_LEN_BITS`][]: `len` /// - [`MAX_SUPPORTED_POS_BITS`](IndexEntry::MAX_SUPPORTED_POS_BITS): `pos` #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct IndexEntry(u64); impl IndexEntry { /// See [`Self::MAX_SUPPORTED_POS`]. const MAX_SUPPORTED_POS_BITS: usize = { let remainder = 64 - 1 - MAX_SUPPORTED_BLOB_LEN_BITS; if remainder < 32 { panic!("pos can be u32 as per type system, support that"); } remainder }; /// The maximum supported blob offset that can be represented by [`Self`]. /// See also [`Self::validate_checkpoint_distance`]. const MAX_SUPPORTED_POS: usize = (1 << Self::MAX_SUPPORTED_POS_BITS) - 1; // Layout const WILL_INIT_RANGE: Range<usize> = 0..1; const LEN_RANGE: Range<usize> = Self::WILL_INIT_RANGE.end..Self::WILL_INIT_RANGE.end + MAX_SUPPORTED_BLOB_LEN_BITS; const POS_RANGE: Range<usize> = Self::LEN_RANGE.end..Self::LEN_RANGE.end + Self::MAX_SUPPORTED_POS_BITS; const _ASSERT: () = { if Self::POS_RANGE.end != 64 { panic!("we don't want undefined bits for our own sanity") } }; /// Fails if and only if the offset or length encoded in `arg` is too large to be represented by [`Self`]. /// /// The only reason why that can happen in the system is if the [`InMemoryLayer`] grows too long. /// The [`InMemoryLayer`] size is determined by the checkpoint distance, enforced by [`crate::tenant::Timeline::should_roll`]. 
/// /// Thus, to avoid failure of this function, whenever we start up and/or change checkpoint distance, /// call [`Self::validate_checkpoint_distance`] with the new checkpoint distance value. /// /// TODO: this check should happen ideally at config parsing time (and in the request handler when a change to checkpoint distance is requested) /// When cleaning this up, also look into the s3 max file size check that is performed in delta layer writer. #[inline(always)] fn new(arg: IndexEntryNewArgs) -> anyhow::Result<Self> { let IndexEntryNewArgs { base_offset, batch_offset, len, will_init, } = arg; let pos = base_offset .checked_add(batch_offset) .ok_or_else(|| anyhow::anyhow!("base_offset + batch_offset overflows u64: base_offset={base_offset} batch_offset={batch_offset}"))?; if pos.into_usize() > Self::MAX_SUPPORTED_POS { anyhow::bail!( "base_offset+batch_offset exceeds the maximum supported value: base_offset={base_offset} batch_offset={batch_offset} (+)={pos} max={max}", max = Self::MAX_SUPPORTED_POS ); } if len > MAX_SUPPORTED_BLOB_LEN { anyhow::bail!( "len exceeds the maximum supported length: len={len} max={MAX_SUPPORTED_BLOB_LEN}", ); } let mut data: u64 = 0; use bit_field::BitField; data.set_bits(Self::WILL_INIT_RANGE, if will_init { 1 } else { 0 }); data.set_bits(Self::LEN_RANGE, len.into_u64()); data.set_bits(Self::POS_RANGE, pos); Ok(Self(data)) } #[inline(always)] fn unpack(&self) -> IndexEntryUnpacked { use bit_field::BitField; IndexEntryUnpacked { will_init: self.0.get_bits(Self::WILL_INIT_RANGE) != 0, len: self.0.get_bits(Self::LEN_RANGE), pos: self.0.get_bits(Self::POS_RANGE), } } /// See [`Self::new`]. 
pub(crate) const fn validate_checkpoint_distance( checkpoint_distance: u64, ) -> Result<(), &'static str> { if checkpoint_distance > Self::MAX_SUPPORTED_POS as u64 { return Err("exceeds the maximum supported value"); } let res = u64_to_usize(checkpoint_distance).checked_add(MAX_SUPPORTED_BLOB_LEN); if res.is_none() { return Err( "checkpoint distance + max supported blob len overflows in-memory addition", ); } // NB: it is ok for the result of the addition to be larger than MAX_SUPPORTED_POS Ok(()) } const _ASSERT_DEFAULT_CHECKPOINT_DISTANCE_IS_VALID: () = { let res = Self::validate_checkpoint_distance( pageserver_api::config::tenant_conf_defaults::DEFAULT_CHECKPOINT_DISTANCE, ); if res.is_err() { panic!("default checkpoint distance is valid") } }; } /// Args to [`IndexEntry::new`]. #[derive(Clone, Copy)] struct IndexEntryNewArgs { base_offset: u64, batch_offset: u64, len: usize, will_init: bool, } /// Unpacked representation of the bitfielded [`IndexEntry`]. #[derive(Clone, Copy, PartialEq, Eq, Debug)] struct IndexEntryUnpacked { will_init: bool, len: u64, pos: u64, } /// State shared by all in-memory (ephemeral) layers. Updated infrequently during background ticks in Timeline, /// to minimize contention. /// /// This global state is used to implement behaviors that require a global view of the system, e.g. /// rolling layers proactively to limit the total amount of dirty data. pub(crate) struct GlobalResources { // Limit on how high dirty_bytes may grow before we start freezing layers to reduce it. // Zero means unlimited. 
pub(crate) max_dirty_bytes: AtomicU64, // How many bytes are in all EphemeralFile objects dirty_bytes: AtomicU64, // How many layers are contributing to dirty_bytes dirty_layers: AtomicUsize, } // Per-timeline RAII struct for its contribution to [`GlobalResources`] pub(crate) struct GlobalResourceUnits { // How many dirty bytes have I added to the global dirty_bytes: this guard object is responsible // for decrementing the global counter by this many bytes when dropped. dirty_bytes: u64, } impl GlobalResourceUnits { // Hint for the layer append path to update us when the layer size differs from the last // call to update_size by this much. If we don't reach this threshold, we'll still get // updated when the Timeline "ticks" in the background. const MAX_SIZE_DRIFT: u64 = 10 * 1024 * 1024; pub(crate) fn new() -> Self { GLOBAL_RESOURCES .dirty_layers .fetch_add(1, AtomicOrdering::Relaxed); Self { dirty_bytes: 0 } } /// Do not call this frequently: all timelines will write to these same global atomics, /// so this is a relatively expensive operation. Wait at least a few seconds between calls. /// /// Returns the effective layer size limit that should be applied, if any, to keep /// the total number of dirty bytes below the configured maximum. pub(crate) fn publish_size(&mut self, size: u64) -> Option<u64> { let new_global_dirty_bytes = match size.cmp(&self.dirty_bytes) { Ordering::Equal => GLOBAL_RESOURCES.dirty_bytes.load(AtomicOrdering::Relaxed), Ordering::Greater => { let delta = size - self.dirty_bytes; let old = GLOBAL_RESOURCES .dirty_bytes .fetch_add(delta, AtomicOrdering::Relaxed); old + delta } Ordering::Less => { let delta = self.dirty_bytes - size; let old = GLOBAL_RESOURCES .dirty_bytes .fetch_sub(delta, AtomicOrdering::Relaxed); old - delta } }; // This is a sloppy update: concurrent updates to the counter will race, and the exact // value of the metric might not be the exact latest value of GLOBAL_RESOURCES::dirty_bytes. 
// That's okay: as long as the metric contains some recent value, it doesn't have to always // be literally the last update. TIMELINE_EPHEMERAL_BYTES.set(new_global_dirty_bytes); self.dirty_bytes = size; let max_dirty_bytes = GLOBAL_RESOURCES .max_dirty_bytes .load(AtomicOrdering::Relaxed); if max_dirty_bytes > 0 && new_global_dirty_bytes > max_dirty_bytes { // Set the layer file limit to the average layer size: this implies that all above-average // sized layers will be elegible for freezing. They will be frozen in the order they // next enter publish_size. Some( new_global_dirty_bytes / GLOBAL_RESOURCES.dirty_layers.load(AtomicOrdering::Relaxed) as u64, ) } else { None } } // Call publish_size if the input size differs from last published size by more than // the drift limit pub(crate) fn maybe_publish_size(&mut self, size: u64) { let publish = match size.cmp(&self.dirty_bytes) { Ordering::Equal => false, Ordering::Greater => size - self.dirty_bytes > Self::MAX_SIZE_DRIFT, Ordering::Less => self.dirty_bytes - size > Self::MAX_SIZE_DRIFT, }; if publish { self.publish_size(size); } } } impl Drop for GlobalResourceUnits { fn drop(&mut self) { GLOBAL_RESOURCES .dirty_layers .fetch_sub(1, AtomicOrdering::Relaxed); // Subtract our contribution to the global total dirty bytes self.publish_size(0); } } pub(crate) static GLOBAL_RESOURCES: GlobalResources = GlobalResources { max_dirty_bytes: AtomicU64::new(0), dirty_bytes: AtomicU64::new(0), dirty_layers: AtomicUsize::new(0), }; impl InMemoryLayer { pub(crate) fn file_id(&self) -> InMemoryLayerFileId { self.file_id } pub(crate) fn get_timeline_id(&self) -> TimelineId { self.timeline_id } pub(crate) fn info(&self) -> InMemoryLayerInfo { let lsn_start = self.start_lsn; if let Some(&lsn_end) = self.end_lsn.get() { InMemoryLayerInfo::Frozen { lsn_start, lsn_end } } else { InMemoryLayerInfo::Open { lsn_start } } } pub(crate) fn len(&self) -> u64 { self.file.len() } pub(crate) fn assert_writable(&self) { 
assert!(self.end_lsn.get().is_none()); } pub(crate) fn end_lsn_or_max(&self) -> Lsn { self.end_lsn.get().copied().unwrap_or(Lsn::MAX) } pub(crate) fn get_lsn_range(&self) -> Range<Lsn> { self.start_lsn..self.end_lsn_or_max() } /// debugging function to print out the contents of the layer /// /// this is likely completly unused pub async fn dump(&self, _verbose: bool, _ctx: &RequestContext) -> Result<()> { let end_str = self.end_lsn_or_max(); println!( "----- in-memory layer for tli {} LSNs {}-{} ----", self.timeline_id, self.start_lsn, end_str, ); Ok(()) } // Look up the keys in the provided keyspace and update // the reconstruct state with whatever is found. pub async fn get_values_reconstruct_data( self: &Arc<InMemoryLayer>, keyspace: KeySpace, lsn_range: Range<Lsn>, reconstruct_state: &mut ValuesReconstructState, ctx: &RequestContext, ) -> Result<(), GetVectoredError> { let ctx = RequestContextBuilder::from(ctx) .page_content_kind(PageContentKind::InMemoryLayer) .attached_child(); let index = self.index.read().await; struct ValueRead { entry_lsn: Lsn, read: vectored_dio_read::LogicalRead<Vec<u8>>, } let mut reads: HashMap<Key, Vec<ValueRead>> = HashMap::new(); let mut ios: HashMap<(Key, Lsn), OnDiskValueIo> = Default::default(); for range in keyspace.ranges.iter() { for (key, vec_map) in index.range(range.start.to_compact()..range.end.to_compact()) { let key = Key::from_compact(*key); let slice = vec_map.slice_range(lsn_range.clone()); for (entry_lsn, index_entry) in slice.iter().rev() { let IndexEntryUnpacked { pos, len, will_init, } = index_entry.unpack(); reads.entry(key).or_default().push(ValueRead { entry_lsn: *entry_lsn, read: vectored_dio_read::LogicalRead::new( pos, Vec::with_capacity(len as usize), ), }); let io = reconstruct_state.update_key(&key, *entry_lsn, will_init); ios.insert((key, *entry_lsn), io); if will_init { break; } } } } drop(index); // release the lock before we spawn the IO let read_from = Arc::clone(self); let read_ctx = 
ctx.attached_child(); reconstruct_state .spawn_io(async move { let f = vectored_dio_read::execute( &read_from.file, reads .iter() .flat_map(|(_, value_reads)| value_reads.iter().map(|v| &v.read)), &read_ctx, ); send_future::SendFuture::send(f) // https://github.com/rust-lang/rust/issues/96865 .await; for (key, value_reads) in reads { for ValueRead { entry_lsn, read } in value_reads { let io = ios.remove(&(key, entry_lsn)).expect("sender must exist"); match read.into_result().expect("we run execute() above") { Err(e) => { io.complete(Err(std::io::Error::new( e.kind(), "dio vec read failed", ))); } Ok(value_buf) => { io.complete(Ok(OnDiskValue::WalRecordOrImage(value_buf.into()))); } } } } assert!(ios.is_empty()); // Keep layer existent until this IO is done; // This is kinda forced for InMemoryLayer because we need to inner.read() anyway, // but it's less obvious for DeltaLayer and ImageLayer. So, keep this explicit // drop for consistency among all three layer types. drop(read_from); }) .await; Ok(()) } } fn inmem_layer_display(mut f: impl Write, start_lsn: Lsn, end_lsn: Lsn) -> std::fmt::Result { write!(f, "inmem-{:016X}-{:016X}", start_lsn.0, end_lsn.0) } fn inmem_layer_log_display( mut f: impl Write, timeline: TimelineId, start_lsn: Lsn, end_lsn: Lsn, ) -> std::fmt::Result { write!(f, "timeline {timeline} in-memory ")?; inmem_layer_display(f, start_lsn, end_lsn) } impl std::fmt::Display for InMemoryLayer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let end_lsn = self.end_lsn_or_max(); inmem_layer_display(f, self.start_lsn, end_lsn) } } impl InMemoryLayer { pub fn estimated_in_mem_size(&self) -> u64 { self.estimated_in_mem_size.load(AtomicOrdering::Relaxed) } /// Create a new, empty, in-memory layer pub async fn create( conf: &'static PageServerConf, timeline_id: TimelineId, tenant_shard_id: TenantShardId, start_lsn: Lsn, gate: &utils::sync::gate::Gate, cancel: &CancellationToken, ctx: &RequestContext, ) -> Result<InMemoryLayer> { 
trace!( "initializing new empty InMemoryLayer for writing on timeline {timeline_id} at {start_lsn}" ); let file = EphemeralFile::create(conf, tenant_shard_id, timeline_id, gate, cancel, ctx).await?; let key = InMemoryLayerFileId(file.page_cache_file_id()); Ok(InMemoryLayer { file_id: key, frozen_local_path_str: OnceLock::new(), conf, timeline_id, tenant_shard_id, start_lsn, end_lsn: OnceLock::new(), opened_at: Instant::now(), index: RwLock::new(BTreeMap::new()), file, estimated_in_mem_size: AtomicU64::new(0), }) } /// Write path. /// /// Errors are not retryable, the [`InMemoryLayer`] must be discarded, and not be read from. /// The reason why it's not retryable is that the [`EphemeralFile`] writes are not retryable. /// /// This method shall not be called concurrently. We enforce this property via [`crate::tenant::Timeline::write_lock`]. /// /// TODO: it can be made retryable if we aborted the process on EphemeralFile write errors. pub async fn put_batch( &self, serialized_batch: SerializedValueBatch, ctx: &RequestContext, ) -> anyhow::Result<()> { self.assert_writable(); let base_offset = self.file.len(); let SerializedValueBatch { raw, metadata, max_lsn: _, len: _, } = serialized_batch; // Write the batch to the file self.file.write_raw(&raw, ctx).await?; let new_size = self.file.len(); let expected_new_len = base_offset .checked_add(raw.len().into_u64()) // write_raw would error if we were to overflow u64. // also IndexEntry and higher levels in //the code don't allow the file to grow that large .unwrap(); assert_eq!(new_size, expected_new_len); // Update the index with the new entries let mut index = self.index.write().await; for meta in metadata { let SerializedValueMeta { key, lsn, batch_offset, len, will_init, } = match meta { ValueMeta::Serialized(ser) => ser, ValueMeta::Observed(_) => { continue; } }; // Add the base_offset to the batch's index entries which are relative to the batch start. 
let index_entry = IndexEntry::new(IndexEntryNewArgs { base_offset, batch_offset, len, will_init, })?; let vec_map = index.entry(key).or_default(); let old = vec_map.append_or_update_last(lsn, index_entry).unwrap().0; if old.is_some() { // This should not break anything, but is unexpected: ingestion code aims to filter out // multiple writes to the same key at the same LSN. This happens in cases where our // ingenstion code generates some write like an empty page, and we see a write from postgres // to the same key in the same wal record. If one such write makes it through, we // index the most recent write, implicitly ignoring the earlier write. We log a warning // because this case is unexpected, and we would like tests to fail if this happens. warn!("Key {} at {} written twice at same LSN", key, lsn); } self.estimated_in_mem_size.fetch_add( (std::mem::size_of::<CompactKey>() + std::mem::size_of::<Lsn>() + std::mem::size_of::<IndexEntry>()) as u64, AtomicOrdering::Relaxed, ); } Ok(()) } pub(crate) fn get_opened_at(&self) -> Instant { self.opened_at } pub(crate) fn tick(&self) -> Option<u64> { self.file.tick() } pub(crate) async fn put_tombstones(&self, _key_ranges: &[(Range<Key>, Lsn)]) -> Result<()> { // TODO: Currently, we just leak the storage for any deleted keys Ok(()) } /// Records the end_lsn for non-dropped layers. /// `end_lsn` is exclusive /// /// A note on locking: /// The current API of [`InMemoryLayer`] does not ensure that there's no ongoing /// writes while freezing the layer. This is enforced at a higher level via /// [`crate::tenant::Timeline::write_lock`]. Freeze might be called via two code paths: /// 1. Via the active [`crate::tenant::timeline::TimelineWriter`]. This holds the /// Timeline::write_lock for its lifetime. The rolling is handled in /// [`crate::tenant::timeline::TimelineWriter::put_batch`]. It's a &mut self function /// so can't be called from different threads. /// 2. 
In the background via [`crate::tenant::Timeline::maybe_freeze_ephemeral_layer`]. /// This only proceeds if try_lock on Timeline::write_lock succeeds (i.e. there's no active writer), /// hence there can be no concurrent writes pub async fn freeze(&self, end_lsn: Lsn) { assert!( self.start_lsn < end_lsn, "{} >= {}", self.start_lsn, end_lsn ); self.end_lsn.set(end_lsn).expect("end_lsn set only once"); self.frozen_local_path_str .set({ let mut buf = String::new(); inmem_layer_log_display(&mut buf, self.get_timeline_id(), self.start_lsn, end_lsn) .unwrap(); buf.into() }) .expect("frozen_local_path_str set only once"); #[cfg(debug_assertions)] { let index = self.index.read().await; for vec_map in index.values() { for (lsn, _) in vec_map.as_slice() { assert!(*lsn < end_lsn); } } } } /// Write this frozen in-memory layer to disk. If `key_range` is set, the delta /// layer will only contain the key range the user specifies, and may return `None` /// if there are no matching keys. /// /// Returns a new delta layer with all the same data as this in-memory layer pub async fn write_to_disk( &self, ctx: &RequestContext, key_range: Option<Range<Key>>, l0_flush_global_state: &l0_flush::Inner, gate: &utils::sync::gate::Gate, cancel: CancellationToken, ) -> Result<Option<(PersistentLayerDesc, Utf8PathBuf)>> { let index = self.index.read().await; use l0_flush::Inner; let _concurrency_permit = match l0_flush_global_state { Inner::Direct { semaphore, .. 
} => Some(semaphore.acquire().await), }; let end_lsn = *self.end_lsn.get().unwrap(); let key_count = if let Some(key_range) = key_range { let key_range = key_range.start.to_compact()..key_range.end.to_compact(); index.iter().filter(|(k, _)| key_range.contains(k)).count() } else { index.len() }; if key_count == 0 { return Ok(None); } let mut delta_layer_writer = DeltaLayerWriter::new( self.conf, self.timeline_id, self.tenant_shard_id, Key::MIN, self.start_lsn..end_lsn, gate, cancel, ctx, ) .await?; match l0_flush_global_state { l0_flush::Inner::Direct { .. } => { let file_contents = self.file.load_to_io_buf(ctx).await?; let file_contents = file_contents.freeze(); for (key, vec_map) in index.iter() { // Write all page versions for (lsn, entry) in vec_map .as_slice() .iter() .map(|(lsn, entry)| (lsn, entry.unpack())) { let IndexEntryUnpacked { pos, len, will_init, } = entry; let buf = file_contents.slice(pos as usize..(pos + len) as usize); let (_buf, res) = delta_layer_writer .put_value_bytes( Key::from_compact(*key), *lsn, buf.slice_len(), will_init, ctx, ) .await; res?; } } } } // MAX is used here because we identify L0 layers by full key range let (desc, path) = delta_layer_writer.finish(Key::MAX, ctx).await?; // Hold the permit until all the IO is done, including the fsync in `delta_layer_writer.finish()``. // // If we didn't and our caller drops this future, tokio-epoll-uring would extend the lifetime of // the `file_contents: Vec<u8>` until the IO is done, but not the permit's lifetime. // Thus, we'd have more concurrenct `Vec<u8>` in existence than the semaphore allows. // // We hold across the fsync so that on ext4 mounted with data=ordered, all the kernel page cache pages // we dirtied when writing to the filesystem have been flushed and marked !dirty. 
drop(_concurrency_permit); Ok(Some((desc, path))) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_index_entry() { const MAX_SUPPORTED_POS: usize = IndexEntry::MAX_SUPPORTED_POS; use {IndexEntryNewArgs as Args, IndexEntryUnpacked as Unpacked}; let roundtrip = |args, expect: Unpacked| { let res = IndexEntry::new(args).expect("this tests expects no errors"); let IndexEntryUnpacked { will_init, len, pos, } = res.unpack(); assert_eq!(will_init, expect.will_init); assert_eq!(len, expect.len); assert_eq!(pos, expect.pos); }; // basic roundtrip for pos in [0, MAX_SUPPORTED_POS] { for len in [0, MAX_SUPPORTED_BLOB_LEN] { for will_init in [true, false] { let expect = Unpacked { will_init, len: len.into_u64(), pos: pos.into_u64(), }; roundtrip( Args { will_init, base_offset: pos.into_u64(), batch_offset: 0, len, }, expect, ); roundtrip( Args { will_init, base_offset: 0, batch_offset: pos.into_u64(), len, }, expect, ); } } } // too-large len let too_large = Args { will_init: false, len: MAX_SUPPORTED_BLOB_LEN + 1, base_offset: 0, batch_offset: 0, }; assert!(IndexEntry::new(too_large).is_err()); // too-large pos { let too_large = Args { will_init: false, len: 0, base_offset: MAX_SUPPORTED_POS.into_u64() + 1, batch_offset: 0, }; assert!(IndexEntry::new(too_large).is_err()); let too_large = Args { will_init: false, len: 0, base_offset: 0, batch_offset: MAX_SUPPORTED_POS.into_u64() + 1, }; assert!(IndexEntry::new(too_large).is_err()); } // too large (base_offset + batch_offset) { let too_large = Args { will_init: false, len: 0,
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/storage_layer/layer_name.rs
pageserver/src/tenant/storage_layer/layer_name.rs
//! //! Helper functions for dealing with filenames of the image and delta layer files. //! use std::cmp::Ordering; use std::fmt; use std::ops::Range; use std::str::FromStr; use pageserver_api::key::Key; use utils::lsn::Lsn; use super::PersistentLayerDesc; // Note: Timeline::load_layer_map() relies on this sort order #[derive(PartialEq, Eq, Clone, Hash)] pub struct DeltaLayerName { pub key_range: Range<Key>, pub lsn_range: Range<Lsn>, } impl std::fmt::Debug for DeltaLayerName { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use super::RangeDisplayDebug; f.debug_struct("DeltaLayerName") .field("key_range", &RangeDisplayDebug(&self.key_range)) .field("lsn_range", &self.lsn_range) .finish() } } impl PartialOrd for DeltaLayerName { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for DeltaLayerName { fn cmp(&self, other: &Self) -> Ordering { let mut cmp = self.key_range.start.cmp(&other.key_range.start); if cmp != Ordering::Equal { return cmp; } cmp = self.key_range.end.cmp(&other.key_range.end); if cmp != Ordering::Equal { return cmp; } cmp = self.lsn_range.start.cmp(&other.lsn_range.start); if cmp != Ordering::Equal { return cmp; } cmp = self.lsn_range.end.cmp(&other.lsn_range.end); cmp } } /// Represents the region of the LSN-Key space covered by a DeltaLayer /// /// ```text /// <key start>-<key end>__<LSN start>-<LSN end>-<generation> /// ``` impl DeltaLayerName { /// Parse the part of a delta layer's file name that represents the LayerName. Returns None /// if the filename does not match the expected pattern. 
pub fn parse_str(fname: &str) -> Option<Self> { let (key_parts, lsn_generation_parts) = fname.split_once("__")?; let (key_start_str, key_end_str) = key_parts.split_once('-')?; let (lsn_start_str, lsn_end_generation_parts) = lsn_generation_parts.split_once('-')?; let lsn_end_str = if let Some((lsn_end_str, maybe_generation)) = lsn_end_generation_parts.split_once('-') { if maybe_generation.starts_with("v") { // vY-XXXXXXXX lsn_end_str } else if maybe_generation.len() == 8 { // XXXXXXXX lsn_end_str } else { // no idea what this is return None; } } else { lsn_end_generation_parts }; let key_start = Key::from_hex(key_start_str).ok()?; let key_end = Key::from_hex(key_end_str).ok()?; let start_lsn = Lsn::from_hex(lsn_start_str).ok()?; let end_lsn = Lsn::from_hex(lsn_end_str).ok()?; if start_lsn >= end_lsn { return None; // or panic? } if key_start >= key_end { return None; // or panic? } Some(DeltaLayerName { key_range: key_start..key_end, lsn_range: start_lsn..end_lsn, }) } } impl fmt::Display for DeltaLayerName { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "{}-{}__{:016X}-{:016X}", self.key_range.start, self.key_range.end, u64::from(self.lsn_range.start), u64::from(self.lsn_range.end), ) } } #[derive(PartialEq, Eq, Clone, Hash)] pub struct ImageLayerName { pub key_range: Range<Key>, pub lsn: Lsn, } impl std::fmt::Debug for ImageLayerName { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use super::RangeDisplayDebug; f.debug_struct("ImageLayerName") .field("key_range", &RangeDisplayDebug(&self.key_range)) .field("lsn", &self.lsn) .finish() } } impl PartialOrd for ImageLayerName { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for ImageLayerName { fn cmp(&self, other: &Self) -> Ordering { let mut cmp = self.key_range.start.cmp(&other.key_range.start); if cmp != Ordering::Equal { return cmp; } cmp = self.key_range.end.cmp(&other.key_range.end); if cmp != Ordering::Equal { return cmp; } cmp = 
self.lsn.cmp(&other.lsn); cmp } } impl ImageLayerName { pub fn lsn_as_range(&self) -> Range<Lsn> { // Saves from having to copypaste this all over PersistentLayerDesc::image_layer_lsn_range(self.lsn) } } /// /// Represents the part of the Key-LSN space covered by an ImageLayer /// /// ```text /// <key start>-<key end>__<LSN>-<generation> /// ``` impl ImageLayerName { /// Parse a string as then LayerName part of an image layer file name. Returns None if the /// filename does not match the expected pattern. pub fn parse_str(fname: &str) -> Option<Self> { let (key_parts, lsn_generation_parts) = fname.split_once("__")?; let (key_start_str, key_end_str) = key_parts.split_once('-')?; let lsn_str = if let Some((lsn_str, maybe_generation)) = lsn_generation_parts.split_once('-') { if maybe_generation.starts_with("v") { // vY-XXXXXXXX lsn_str } else if maybe_generation.len() == 8 { // XXXXXXXX lsn_str } else { // likely a delta layer return None; } } else { lsn_generation_parts }; let key_start = Key::from_hex(key_start_str).ok()?; let key_end = Key::from_hex(key_end_str).ok()?; let lsn = Lsn::from_hex(lsn_str).ok()?; Some(ImageLayerName { key_range: key_start..key_end, lsn, }) } } impl fmt::Display for ImageLayerName { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "{}-{}__{:016X}", self.key_range.start, self.key_range.end, u64::from(self.lsn), ) } } /// LayerName is the logical identity of a layer within a LayerMap at a moment in time. /// /// The LayerName is not a unique filename, as the same LayerName may have multiple physical incarnations /// over time (e.g. across shard splits or compression). 
The physical filenames of layers in local /// storage and object names in remote storage consist of the LayerName plus some extra qualifiers /// that uniquely identify the physical incarnation of a layer (see [crate::tenant::remote_timeline_client::remote_layer_path]) /// and [`crate::tenant::storage_layer::layer::local_layer_path`]) #[derive(Debug, PartialEq, Eq, Hash, Clone, Ord, PartialOrd)] pub enum LayerName { Image(ImageLayerName), Delta(DeltaLayerName), } impl LayerName { /// Determines if this layer file is considered to be in future meaning we will discard these /// layers during timeline initialization from the given disk_consistent_lsn. pub(crate) fn is_in_future(&self, disk_consistent_lsn: Lsn) -> bool { use LayerName::*; match self { Image(file_name) if file_name.lsn > disk_consistent_lsn => true, Delta(file_name) if file_name.lsn_range.end > disk_consistent_lsn + 1 => true, _ => false, } } pub(crate) fn kind(&self) -> &'static str { use LayerName::*; match self { Delta(_) => "delta", Image(_) => "image", } } /// Gets the key range encoded in the layer name. pub fn key_range(&self) -> &Range<Key> { match &self { LayerName::Image(layer) => &layer.key_range, LayerName::Delta(layer) => &layer.key_range, } } /// Gets the LSN range encoded in the layer name. 
pub fn lsn_as_range(&self) -> Range<Lsn> { match &self { LayerName::Image(layer) => layer.lsn_as_range(), LayerName::Delta(layer) => layer.lsn_range.clone(), } } pub fn is_delta(&self) -> bool { matches!(self, LayerName::Delta(_)) } } impl fmt::Display for LayerName { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Image(fname) => write!(f, "{fname}"), Self::Delta(fname) => write!(f, "{fname}"), } } } impl From<ImageLayerName> for LayerName { fn from(fname: ImageLayerName) -> Self { Self::Image(fname) } } impl From<DeltaLayerName> for LayerName { fn from(fname: DeltaLayerName) -> Self { Self::Delta(fname) } } impl FromStr for LayerName { type Err = String; /// Conversion from either a physical layer filename, or the string-ization of /// Self. When loading a physical layer filename, we drop any extra information /// not needed to build Self. fn from_str(value: &str) -> Result<Self, Self::Err> { let delta = DeltaLayerName::parse_str(value); let image = ImageLayerName::parse_str(value); let ok = match (delta, image) { (None, None) => { return Err(format!( "neither delta nor image layer file name: {value:?}" )); } (Some(delta), None) => Self::Delta(delta), (None, Some(image)) => Self::Image(image), (Some(_), Some(_)) => unreachable!(), }; Ok(ok) } } impl serde::Serialize for LayerName { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { match self { Self::Image(fname) => serializer.collect_str(fname), Self::Delta(fname) => serializer.collect_str(fname), } } } impl<'de> serde::Deserialize<'de> for LayerName { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { deserializer.deserialize_string(LayerNameVisitor) } } struct LayerNameVisitor; impl serde::de::Visitor<'_> for LayerNameVisitor { type Value = LayerName; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { write!( formatter, "a string that is a valid image or delta layer file 
name" ) } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error, { v.parse().map_err(|e| E::custom(e)) } } #[cfg(test)] mod test { use super::*; #[test] fn image_layer_parse() { let expected = LayerName::Image(ImageLayerName { key_range: Key::from_i128(0) ..Key::from_hex("000000067F00000001000004DF0000000006").unwrap(), lsn: Lsn::from_hex("00000000014FED58").unwrap(), }); let parsed = LayerName::from_str("000000000000000000000000000000000000-000000067F00000001000004DF0000000006__00000000014FED58-v1-00000001").unwrap(); assert_eq!(parsed, expected); let parsed = LayerName::from_str("000000000000000000000000000000000000-000000067F00000001000004DF0000000006__00000000014FED58-00000001").unwrap(); assert_eq!(parsed, expected); // Omitting generation suffix is valid let parsed = LayerName::from_str("000000000000000000000000000000000000-000000067F00000001000004DF0000000006__00000000014FED58").unwrap(); assert_eq!(parsed, expected); } #[test] fn delta_layer_parse() { let expected = LayerName::Delta(DeltaLayerName { key_range: Key::from_i128(0) ..Key::from_hex("000000067F00000001000004DF0000000006").unwrap(), lsn_range: Lsn::from_hex("00000000014FED58").unwrap() ..Lsn::from_hex("000000000154C481").unwrap(), }); let parsed = LayerName::from_str("000000000000000000000000000000000000-000000067F00000001000004DF0000000006__00000000014FED58-000000000154C481-v1-00000001").unwrap(); assert_eq!(parsed, expected); let parsed = LayerName::from_str("000000000000000000000000000000000000-000000067F00000001000004DF0000000006__00000000014FED58-000000000154C481-00000001").unwrap(); assert_eq!(parsed, expected); // Omitting generation suffix is valid let parsed = LayerName::from_str("000000000000000000000000000000000000-000000067F00000001000004DF0000000006__00000000014FED58-000000000154C481").unwrap(); assert_eq!(parsed, expected); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/storage_layer/layer_desc.rs
pageserver/src/tenant/storage_layer/layer_desc.rs
use core::fmt::Display; use std::ops::Range; use pageserver_api::key::Key; use pageserver_api::shard::TenantShardId; use serde::{Deserialize, Serialize}; #[cfg(test)] use utils::id::TenantId; use utils::id::TimelineId; use utils::lsn::Lsn; use super::{DeltaLayerName, ImageLayerName, LayerName}; /// A unique identifier of a persistent layer. /// /// This is different from `LayerDescriptor`, which is only used in the benchmarks. /// This struct contains all necessary information to find the image / delta layer. It also provides /// a unified way to generate layer information like file name. #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Hash)] pub struct PersistentLayerDesc { pub tenant_shard_id: TenantShardId, pub timeline_id: TimelineId, /// Range of keys that this layer covers pub key_range: Range<Key>, /// Inclusive start, exclusive end of the LSN range that this layer holds. /// /// - For an open in-memory layer, the end bound is MAX_LSN /// - For a frozen in-memory layer or a delta layer, the end bound is a valid lsn after the /// range start /// - An image layer represents snapshot at one LSN, so end_lsn is always the snapshot LSN + 1 pub lsn_range: Range<Lsn>, /// Whether this is a delta layer, and also, is this incremental. pub is_delta: bool, pub file_size: u64, } /// A unique identifier of a persistent layer within the context of one timeline. 
#[derive(Debug, PartialEq, Eq, Clone, Hash)] pub struct PersistentLayerKey { pub key_range: Range<Key>, pub lsn_range: Range<Lsn>, pub is_delta: bool, } impl std::fmt::Display for PersistentLayerKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "{}..{} {}..{} is_delta={}", self.key_range.start, self.key_range.end, self.lsn_range.start, self.lsn_range.end, self.is_delta ) } } impl From<ImageLayerName> for PersistentLayerKey { fn from(image_layer_name: ImageLayerName) -> Self { Self { key_range: image_layer_name.key_range, lsn_range: PersistentLayerDesc::image_layer_lsn_range(image_layer_name.lsn), is_delta: false, } } } impl From<DeltaLayerName> for PersistentLayerKey { fn from(delta_layer_name: DeltaLayerName) -> Self { Self { key_range: delta_layer_name.key_range, lsn_range: delta_layer_name.lsn_range, is_delta: true, } } } impl From<LayerName> for PersistentLayerKey { fn from(layer_name: LayerName) -> Self { match layer_name { LayerName::Image(i) => i.into(), LayerName::Delta(d) => d.into(), } } } impl PersistentLayerDesc { pub fn key(&self) -> PersistentLayerKey { PersistentLayerKey { key_range: self.key_range.clone(), lsn_range: self.lsn_range.clone(), is_delta: self.is_delta, } } pub fn short_id(&self) -> impl Display { self.layer_name() } #[cfg(test)] pub fn new_test(key_range: Range<Key>, lsn_range: Range<Lsn>, is_delta: bool) -> Self { Self { tenant_shard_id: TenantShardId::unsharded(TenantId::generate()), timeline_id: TimelineId::generate(), key_range, lsn_range, is_delta, file_size: 0, } } pub fn new_img( tenant_shard_id: TenantShardId, timeline_id: TimelineId, key_range: Range<Key>, lsn: Lsn, file_size: u64, ) -> Self { Self { tenant_shard_id, timeline_id, key_range, lsn_range: Self::image_layer_lsn_range(lsn), is_delta: false, file_size, } } pub fn new_delta( tenant_shard_id: TenantShardId, timeline_id: TimelineId, key_range: Range<Key>, lsn_range: Range<Lsn>, file_size: u64, ) -> Self { Self { tenant_shard_id, 
timeline_id, key_range, lsn_range, is_delta: true, file_size, } } pub fn from_filename( tenant_shard_id: TenantShardId, timeline_id: TimelineId, filename: LayerName, file_size: u64, ) -> Self { match filename { LayerName::Image(i) => { Self::new_img(tenant_shard_id, timeline_id, i.key_range, i.lsn, file_size) } LayerName::Delta(d) => Self::new_delta( tenant_shard_id, timeline_id, d.key_range, d.lsn_range, file_size, ), } } /// Get the LSN that the image layer covers. pub fn image_layer_lsn(&self) -> Lsn { assert!(!self.is_delta); assert!(self.lsn_range.start + 1 == self.lsn_range.end); self.lsn_range.start } /// Get the LSN range corresponding to a single image layer LSN. pub fn image_layer_lsn_range(lsn: Lsn) -> Range<Lsn> { lsn..(lsn + 1) } /// Get a delta layer name for this layer. /// /// Panic: if this is not a delta layer. pub fn delta_layer_name(&self) -> DeltaLayerName { assert!(self.is_delta); DeltaLayerName { key_range: self.key_range.clone(), lsn_range: self.lsn_range.clone(), } } /// Get a image layer name for this layer. /// /// Panic: if this is not an image layer, or the lsn range is invalid pub fn image_layer_name(&self) -> ImageLayerName { assert!(!self.is_delta); assert!(self.lsn_range.start + 1 == self.lsn_range.end); ImageLayerName { key_range: self.key_range.clone(), lsn: self.lsn_range.start, } } pub fn layer_name(&self) -> LayerName { if self.is_delta { self.delta_layer_name().into() } else { self.image_layer_name().into() } } // TODO: remove this in the future once we refactor timeline APIs. pub fn get_lsn_range(&self) -> Range<Lsn> { self.lsn_range.clone() } pub fn get_key_range(&self) -> Range<Key> { self.key_range.clone() } pub fn get_timeline_id(&self) -> TimelineId { self.timeline_id } /// Does this layer only contain some data for the key-range (incremental), /// or does it contain a version of every page? 
This is important to know /// for garbage collecting old layers: an incremental layer depends on /// the previous non-incremental layer. pub fn is_incremental(&self) -> bool { self.is_delta } pub fn is_delta(&self) -> bool { self.is_delta } pub fn dump(&self) { if self.is_delta { println!( "----- delta layer for ten {} tli {} keys {}-{} lsn {}-{} is_incremental {} size {} ----", self.tenant_shard_id, self.timeline_id, self.key_range.start, self.key_range.end, self.lsn_range.start, self.lsn_range.end, self.is_incremental(), self.file_size, ); } else { println!( "----- image layer for ten {} tli {} key {}-{} at {} is_incremental {} size {} ----", self.tenant_shard_id, self.timeline_id, self.key_range.start, self.key_range.end, self.image_layer_lsn(), self.is_incremental(), self.file_size ); } } pub fn file_size(&self) -> u64 { self.file_size } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/storage_layer/filter_iterator.rs
pageserver/src/tenant/storage_layer/filter_iterator.rs
use std::ops::Range; use std::sync::Arc; use anyhow::bail; use pageserver_api::key::Key; use pageserver_api::keyspace::{KeySpace, SparseKeySpace}; use utils::lsn::Lsn; use wal_decoder::models::value::Value; use super::PersistentLayerKey; use super::merge_iterator::{MergeIterator, MergeIteratorItem}; /// A filter iterator over merge iterators (and can be easily extended to other types of iterators). /// /// The iterator will skip any keys not included in the keyspace filter. In other words, the keyspace filter contains the keys /// to be retained. pub struct FilterIterator<'a> { inner: MergeIterator<'a>, retain_key_filters: Vec<Range<Key>>, current_filter_idx: usize, } impl<'a> FilterIterator<'a> { pub fn create( inner: MergeIterator<'a>, dense_keyspace: KeySpace, sparse_keyspace: SparseKeySpace, ) -> anyhow::Result<Self> { let mut retain_key_filters = Vec::new(); retain_key_filters.extend(dense_keyspace.ranges); retain_key_filters.extend(sparse_keyspace.0.ranges); retain_key_filters.sort_by(|a, b| a.start.cmp(&b.start)); // Verify key filters are non-overlapping and sorted for window in retain_key_filters.windows(2) { if window[0].end > window[1].start { bail!( "Key filters are overlapping: {:?} and {:?}", window[0], window[1] ); } } Ok(Self { inner, retain_key_filters, current_filter_idx: 0, }) } async fn next_inner<R: MergeIteratorItem>(&mut self) -> anyhow::Result<Option<R>> { while let Some(item) = self.inner.next_inner::<R>().await? 
{ while self.current_filter_idx < self.retain_key_filters.len() && item.key_lsn_value().0 >= self.retain_key_filters[self.current_filter_idx].end { // [filter region] [filter region] [filter region] // ^ item // ^ current filter self.current_filter_idx += 1; // [filter region] [filter region] [filter region] // ^ item // ^ current filter } if self.current_filter_idx >= self.retain_key_filters.len() { // We already exhausted all filters, so we should return now // [filter region] [filter region] [filter region] // ^ item // ^ current filter (nothing) return Ok(None); } if self.retain_key_filters[self.current_filter_idx].contains(&item.key_lsn_value().0) { // [filter region] [filter region] [filter region] // ^ item // ^ current filter return Ok(Some(item)); } // If the key is not contained in the key retaining filters, continue to the next item. // [filter region] [filter region] [filter region] // ^ item // ^ current filter } Ok(None) } pub async fn next(&mut self) -> anyhow::Result<Option<(Key, Lsn, Value)>> { self.next_inner().await } pub async fn next_with_trace( &mut self, ) -> anyhow::Result<Option<((Key, Lsn, Value), Arc<PersistentLayerKey>)>> { self.next_inner().await } } #[cfg(test)] mod tests { use itertools::Itertools; use pageserver_api::key::Key; use utils::lsn::Lsn; use super::*; use crate::DEFAULT_PG_VERSION; use crate::tenant::harness::{TIMELINE_ID, TenantHarness}; use crate::tenant::storage_layer::delta_layer::test::produce_delta_layer; async fn assert_filter_iter_equal( filter_iter: &mut FilterIterator<'_>, expect: &[(Key, Lsn, Value)], ) { let mut expect_iter = expect.iter(); loop { let o1 = filter_iter.next().await.unwrap(); let o2 = expect_iter.next(); assert_eq!(o1.is_some(), o2.is_some()); if o1.is_none() && o2.is_none() { break; } let (k1, l1, v1) = o1.unwrap(); let (k2, l2, v2) = o2.unwrap(); assert_eq!(&k1, k2); assert_eq!(l1, *l2); assert_eq!(&v1, v2); } } #[tokio::test] async fn filter_keyspace_iterator() { use bytes::Bytes; let harness = 
TenantHarness::create("filter_iterator_filter_keyspace_iterator") .await .unwrap(); let (tenant, ctx) = harness.load().await; let tline = tenant .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) .await .unwrap(); fn get_key(id: u32) -> Key { let mut key = Key::from_hex("000000000033333333444444445500000000").unwrap(); key.field6 = id; key } const N: usize = 100; let test_deltas1 = (0..N) .map(|idx| { ( get_key(idx as u32), Lsn(0x20 * ((idx as u64) % 10 + 1)), Value::Image(Bytes::from(format!("img{idx:05}"))), ) }) .collect_vec(); let resident_layer_1 = produce_delta_layer(&tenant, &tline, test_deltas1.clone(), &ctx) .await .unwrap(); let merge_iter = MergeIterator::create_for_testing( &[resident_layer_1.get_as_delta(&ctx).await.unwrap()], &[], &ctx, ); let mut filter_iter = FilterIterator::create( merge_iter, KeySpace { ranges: vec![ get_key(5)..get_key(10), get_key(20)..get_key(30), get_key(90)..get_key(110), get_key(1000)..get_key(2000), ], }, SparseKeySpace(KeySpace::default()), ) .unwrap(); let mut result = Vec::new(); result.extend(test_deltas1[5..10].iter().cloned()); result.extend(test_deltas1[20..30].iter().cloned()); result.extend(test_deltas1[90..100].iter().cloned()); assert_filter_iter_equal(&mut filter_iter, &result).await; let merge_iter = MergeIterator::create_for_testing( &[resident_layer_1.get_as_delta(&ctx).await.unwrap()], &[], &ctx, ); let mut filter_iter = FilterIterator::create( merge_iter, KeySpace { ranges: vec![ get_key(0)..get_key(10), get_key(20)..get_key(30), get_key(90)..get_key(95), ], }, SparseKeySpace(KeySpace::default()), ) .unwrap(); let mut result = Vec::new(); result.extend(test_deltas1[0..10].iter().cloned()); result.extend(test_deltas1[20..30].iter().cloned()); result.extend(test_deltas1[90..95].iter().cloned()); assert_filter_iter_equal(&mut filter_iter, &result).await; } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs
pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs
use std::collections::BTreeMap; use std::sync::{Arc, RwLock}; use itertools::Itertools; use tokio_epoll_uring::{BoundedBuf, IoBufMut, Slice}; use crate::assert_u64_eq_usize::{U64IsUsize, UsizeIsU64}; use crate::context::RequestContext; use crate::virtual_file::IoBufferMut; use crate::virtual_file::owned_buffers_io::io_buf_aligned::IoBufAlignedMut; /// The file interface we require. At runtime, this is a [`crate::tenant::ephemeral_file::EphemeralFile`]. pub trait File: Send { /// Attempt to read the bytes in `self` in range `[start,start+dst.bytes_total())` /// and return the number of bytes read (let's call it `nread`). /// The bytes read are placed in `dst`, i.e., `&dst[..nread]` will contain the read bytes. /// /// The only reason why the read may be short (i.e., `nread != dst.bytes_total()`) /// is if the file is shorter than `start+dst.len()`. /// /// This is unlike [`std::os::unix::fs::FileExt::read_exact_at`] which returns an /// [`std::io::ErrorKind::UnexpectedEof`] error if the file is shorter than `start+dst.len()`. /// /// No guarantees are made about the remaining bytes in `dst` in case of a short read. async fn read_exact_at_eof_ok<B: IoBufAlignedMut + Send>( &self, start: u64, dst: Slice<B>, ctx: &RequestContext, ) -> std::io::Result<(Slice<B>, usize)>; } /// A logical read from [`File`]. See [`Self::new`]. pub struct LogicalRead<B: Buffer> { pos: u64, state: RwLockRefCell<LogicalReadState<B>>, } enum LogicalReadState<B: Buffer> { NotStarted(B), Ongoing(B), Ok(B), Error(Arc<std::io::Error>), Undefined, } impl<B: Buffer> LogicalRead<B> { /// Create a new [`LogicalRead`] from [`File`] of the data in the file in range `[ pos, pos + buf.cap() )`. 
pub fn new(pos: u64, buf: B) -> Self { Self { pos, state: RwLockRefCell::new(LogicalReadState::NotStarted(buf)), } } pub fn into_result(self) -> Option<Result<B, Arc<std::io::Error>>> { match self.state.into_inner() { LogicalReadState::Ok(buf) => Some(Ok(buf)), LogicalReadState::Error(e) => Some(Err(e)), LogicalReadState::NotStarted(_) | LogicalReadState::Ongoing(_) => None, LogicalReadState::Undefined => unreachable!(), } } } /// The buffer into which a [`LogicalRead`] result is placed. pub trait Buffer: std::ops::Deref<Target = [u8]> { /// Immutable. fn cap(&self) -> usize; /// Changes only through [`Self::extend_from_slice`]. fn len(&self) -> usize; /// Panics if the total length would exceed the initialized capacity. fn extend_from_slice(&mut self, src: &[u8]); } /// The minimum alignment and size requirement for disk offsets and memory buffer size for direct IO. const DIO_CHUNK_SIZE: usize = crate::virtual_file::get_io_buffer_alignment(); /// If multiple chunks need to be read, merge adjacent chunk reads into batches of max size `MAX_CHUNK_BATCH_SIZE`. /// (The unit is the number of chunks.) const MAX_CHUNK_BATCH_SIZE: usize = { let desired = 128 * 1024; // 128k if desired % DIO_CHUNK_SIZE != 0 { panic!("MAX_CHUNK_BATCH_SIZE must be a multiple of DIO_CHUNK_SIZE") // compile-time error } desired / DIO_CHUNK_SIZE }; /// Execute the given logical `reads` against `file`. /// The results are placed in the buffers of the [`LogicalRead`]s. /// Retrieve the results by calling [`LogicalRead::into_result`] on each [`LogicalRead`]. /// /// The [`LogicalRead`]s must be freshly created using [`LogicalRead::new`] when calling this function. /// Otherwise, this function panics. 
pub async fn execute<'a, I, F, B>(file: &F, reads: I, ctx: &RequestContext) where I: IntoIterator<Item = &'a LogicalRead<B>>, F: File, B: Buffer + IoBufMut + Send, { // Terminology: // logical read = a request to read an arbitrary range of bytes from `file`; byte-level granularity // chunk = we conceptually divide up the byte range of `file` into DIO_CHUNK_SIZEs ranges // interest = a range within a chunk that a logical read is interested in; one logical read gets turned into many interests // physical read = the read request we're going to issue to the OS; covers a range of chunks; chunk-level granularity // Preserve a copy of the logical reads for debug assertions at the end #[cfg(debug_assertions)] let (reads, assert_logical_reads) = { let (reads, assert) = reads.into_iter().tee(); (reads, Some(Vec::from_iter(assert))) }; #[cfg(not(debug_assertions))] let (reads, assert_logical_reads): (_, Option<Vec<&'a LogicalRead<B>>>) = (reads, None); // Plan which parts of which chunks need to be appended to which buffer let mut by_chunk: BTreeMap<u64, Vec<Interest<B>>> = BTreeMap::new(); struct Interest<'a, B: Buffer> { logical_read: &'a LogicalRead<B>, offset_in_chunk: u64, len: u64, } for logical_read in reads { let LogicalRead { pos, state } = logical_read; let mut state = state.borrow_mut(); // transition from NotStarted to Ongoing let cur = std::mem::replace(&mut *state, LogicalReadState::Undefined); let req_len = match cur { LogicalReadState::NotStarted(buf) => { if buf.len() != 0 { panic!( "The `LogicalRead`s that are passed in must be freshly created using `LogicalRead::new`" ); } // buf.cap() == 0 is ok // transition into Ongoing state let req_len = buf.cap(); *state = LogicalReadState::Ongoing(buf); req_len } x => panic!( "must only call with fresh LogicalReads, got another state, leaving Undefined state behind state={x:?}" ), }; // plan which chunks we need to read from let mut remaining = req_len; let mut chunk_no = *pos / (DIO_CHUNK_SIZE.into_u64()); let mut 
offset_in_chunk = pos.into_usize() % DIO_CHUNK_SIZE; while remaining > 0 { let remaining_in_chunk = std::cmp::min(remaining, DIO_CHUNK_SIZE - offset_in_chunk); by_chunk.entry(chunk_no).or_default().push(Interest { logical_read, offset_in_chunk: offset_in_chunk.into_u64(), len: remaining_in_chunk.into_u64(), }); offset_in_chunk = 0; chunk_no += 1; remaining -= remaining_in_chunk; } } // At this point, we could iterate over by_chunk, in chunk order, // read each chunk from disk, and fill the buffers. // However, we can merge adjacent chunks into batches of MAX_CHUNK_BATCH_SIZE // so we issue fewer IOs = fewer roundtrips = lower overall latency. struct PhysicalRead<'a, B: Buffer> { start_chunk_no: u64, nchunks: usize, dsts: Vec<PhysicalInterest<'a, B>>, } struct PhysicalInterest<'a, B: Buffer> { logical_read: &'a LogicalRead<B>, offset_in_physical_read: u64, len: u64, } let mut physical_reads: Vec<PhysicalRead<B>> = Vec::new(); let mut by_chunk = by_chunk.into_iter().peekable(); loop { let mut last_chunk_no = None; let to_merge: Vec<(u64, Vec<Interest<B>>)> = by_chunk .peeking_take_while(|(chunk_no, _)| { if let Some(last_chunk_no) = last_chunk_no { if *chunk_no != last_chunk_no + 1 { return false; } } last_chunk_no = Some(*chunk_no); true }) .take(MAX_CHUNK_BATCH_SIZE) .collect(); // TODO: avoid this .collect() let Some(start_chunk_no) = to_merge.first().map(|(chunk_no, _)| *chunk_no) else { break; }; let nchunks = to_merge.len(); let dsts = to_merge .into_iter() .enumerate() .flat_map(|(i, (_, dsts))| { dsts.into_iter().map( move |Interest { logical_read, offset_in_chunk, len, }| { PhysicalInterest { logical_read, offset_in_physical_read: i .checked_mul(DIO_CHUNK_SIZE) .unwrap() .into_u64() + offset_in_chunk, len, } }, ) }) .collect(); physical_reads.push(PhysicalRead { start_chunk_no, nchunks, dsts, }); } drop(by_chunk); // Execute physical reads and fill the logical read buffers // TODO: pipelined reads; prefetch; let get_io_buffer = |nchunks| 
IoBufferMut::with_capacity(nchunks * DIO_CHUNK_SIZE); for PhysicalRead { start_chunk_no, nchunks, dsts, } in physical_reads { let all_done = dsts .iter() .all(|PhysicalInterest { logical_read, .. }| logical_read.state.borrow().is_terminal()); if all_done { continue; } let read_offset = start_chunk_no .checked_mul(DIO_CHUNK_SIZE.into_u64()) .expect("we produce chunk_nos by dividing by DIO_CHUNK_SIZE earlier"); let io_buf = get_io_buffer(nchunks).slice_full(); let req_len = io_buf.len(); let (io_buf_slice, nread) = match file.read_exact_at_eof_ok(read_offset, io_buf, ctx).await { Ok(t) => t, Err(e) => { let e = Arc::new(e); for PhysicalInterest { logical_read, .. } in dsts { *logical_read.state.borrow_mut() = LogicalReadState::Error(Arc::clone(&e)); // this will make later reads for the given LogicalRead short-circuit, see top of loop body } continue; } }; let io_buf = io_buf_slice.into_inner(); assert!( nread <= io_buf.len(), "the last chunk in the file can be a short read, so, no ==" ); let io_buf = &io_buf[..nread]; for PhysicalInterest { logical_read, offset_in_physical_read, len, } in dsts { let mut logical_read_state_borrow = logical_read.state.borrow_mut(); let logical_read_buf = match &mut *logical_read_state_borrow { LogicalReadState::NotStarted(_) => { unreachable!("we transition it into Ongoing at function entry") } LogicalReadState::Ongoing(buf) => buf, LogicalReadState::Ok(_) | LogicalReadState::Error(_) => { continue; } LogicalReadState::Undefined => unreachable!(), }; let range_in_io_buf = std::ops::Range { start: offset_in_physical_read as usize, end: offset_in_physical_read as usize + len as usize, }; assert!(range_in_io_buf.end >= range_in_io_buf.start); if range_in_io_buf.end > nread { let msg = format!( "physical read returned EOF where this logical read expected more data in the file: offset=0x{read_offset:x} req_len=0x{req_len:x} nread=0x{nread:x} {:?}", &*logical_read_state_borrow ); 
logical_read_state_borrow.transition_to_terminal(Err(std::io::Error::new( std::io::ErrorKind::UnexpectedEof, msg, ))); continue; } let data = &io_buf[range_in_io_buf]; // Copy data from io buffer into the logical read buffer. // (And in debug mode, validate that the buffer impl adheres to the Buffer trait spec.) let pre = if cfg!(debug_assertions) { Some((logical_read_buf.len(), logical_read_buf.cap())) } else { None }; logical_read_buf.extend_from_slice(data); let post = if cfg!(debug_assertions) { Some((logical_read_buf.len(), logical_read_buf.cap())) } else { None }; match (pre, post) { (None, None) => {} (Some(_), None) | (None, Some(_)) => unreachable!(), (Some((pre_len, pre_cap)), Some((post_len, post_cap))) => { assert_eq!(pre_len + len as usize, post_len); assert_eq!(pre_cap, post_cap); } } if logical_read_buf.len() == logical_read_buf.cap() { logical_read_state_borrow.transition_to_terminal(Ok(())); } } } if let Some(assert_logical_reads) = assert_logical_reads { for logical_read in assert_logical_reads { assert!(logical_read.state.borrow().is_terminal()); } } } impl<B: Buffer> LogicalReadState<B> { fn is_terminal(&self) -> bool { match self { LogicalReadState::NotStarted(_) | LogicalReadState::Ongoing(_) => false, LogicalReadState::Ok(_) | LogicalReadState::Error(_) => true, LogicalReadState::Undefined => unreachable!(), } } fn transition_to_terminal(&mut self, err: std::io::Result<()>) { let cur = std::mem::replace(self, LogicalReadState::Undefined); let buf = match cur { LogicalReadState::Ongoing(buf) => buf, x => panic!("must only call in state Ongoing, got {x:?}"), }; *self = match err { Ok(()) => LogicalReadState::Ok(buf), Err(e) => LogicalReadState::Error(Arc::new(e)), }; } } impl<B: Buffer> std::fmt::Debug for LogicalReadState<B> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { #[derive(Debug)] #[allow(unused)] struct BufferDebug { len: usize, cap: usize, } impl<'a> From<&'a dyn Buffer> for BufferDebug { fn from(buf: &'a dyn 
Buffer) -> Self { Self { len: buf.len(), cap: buf.cap(), } } } match self { LogicalReadState::NotStarted(b) => { write!(f, "NotStarted({:?})", BufferDebug::from(b as &dyn Buffer)) } LogicalReadState::Ongoing(b) => { write!(f, "Ongoing({:?})", BufferDebug::from(b as &dyn Buffer)) } LogicalReadState::Ok(b) => write!(f, "Ok({:?})", BufferDebug::from(b as &dyn Buffer)), LogicalReadState::Error(e) => write!(f, "Error({e:?})"), LogicalReadState::Undefined => write!(f, "Undefined"), } } } #[derive(Debug)] struct RwLockRefCell<T>(RwLock<T>); impl<T> RwLockRefCell<T> { fn new(value: T) -> Self { Self(RwLock::new(value)) } fn borrow(&self) -> impl std::ops::Deref<Target = T> + '_ { self.0.try_read().unwrap() } fn borrow_mut(&self) -> impl std::ops::DerefMut<Target = T> + '_ { self.0.try_write().unwrap() } fn into_inner(self) -> T { self.0.into_inner().unwrap() } } impl Buffer for Vec<u8> { fn cap(&self) -> usize { self.capacity() } fn len(&self) -> usize { self.len() } fn extend_from_slice(&mut self, src: &[u8]) { if self.len() + src.len() > self.cap() { panic!("Buffer capacity exceeded"); } Vec::extend_from_slice(self, src); } } #[cfg(test)] #[allow(clippy::assertions_on_constants)] mod tests { use std::cell::RefCell; use std::collections::VecDeque; use rand::Rng; use super::*; use crate::context::DownloadBehavior; use crate::task_mgr::TaskKind; use crate::virtual_file::owned_buffers_io::slice::SliceMutExt; struct InMemoryFile { content: Vec<u8>, } impl InMemoryFile { fn new_random(len: usize) -> Self { Self { content: rand::rng() .sample_iter(rand::distr::StandardUniform) .take(len) .collect(), } } fn test_logical_read(&self, pos: u64, len: usize) -> TestLogicalRead { let expected_result = if pos as usize + len > self.content.len() { Err("InMemoryFile short read".to_string()) } else { Ok(self.content[pos as usize..pos as usize + len].to_vec()) }; TestLogicalRead::new(pos, len, expected_result) } } #[test] fn test_in_memory_file() { let ctx = 
RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error); let file = InMemoryFile::new_random(10); let test_read = |pos, len| { let buf = IoBufferMut::with_capacity_zeroed(len); let fut = file.read_exact_at_eof_ok(pos, buf.slice_full(), &ctx); use futures::FutureExt; let (slice, nread) = fut .now_or_never() .expect("impl never awaits") .expect("impl never errors"); let mut buf = slice.into_inner(); buf.truncate(nread); buf }; assert_eq!(&test_read(0, 1), &file.content[0..1]); assert_eq!(&test_read(1, 2), &file.content[1..3]); assert_eq!(&test_read(9, 2), &file.content[9..]); assert!(test_read(10, 2).is_empty()); assert!(test_read(11, 2).is_empty()); } impl File for InMemoryFile { async fn read_exact_at_eof_ok<B: IoBufMut + Send>( &self, start: u64, mut dst: Slice<B>, _ctx: &RequestContext, ) -> std::io::Result<(Slice<B>, usize)> { let dst_slice: &mut [u8] = dst.as_mut_rust_slice_full_zeroed(); let nread = { let req_len = dst_slice.len(); let len = std::cmp::min(req_len, self.content.len().saturating_sub(start as usize)); if start as usize >= self.content.len() { 0 } else { dst_slice[..len] .copy_from_slice(&self.content[start as usize..start as usize + len]); len } }; rand::Rng::fill(&mut rand::rng(), &mut dst_slice[nread..]); // to discover bugs Ok((dst, nread)) } } #[derive(Clone)] struct TestLogicalRead { pos: u64, len: usize, expected_result: Result<Vec<u8>, String>, } impl TestLogicalRead { fn new(pos: u64, len: usize, expected_result: Result<Vec<u8>, String>) -> Self { Self { pos, len, expected_result, } } fn make_logical_read(&self) -> LogicalRead<Vec<u8>> { LogicalRead::new(self.pos, Vec::with_capacity(self.len)) } } async fn execute_and_validate_test_logical_reads<I, F>( file: &F, test_logical_reads: I, ctx: &RequestContext, ) where I: IntoIterator<Item = TestLogicalRead>, F: File, { let (tmp, test_logical_reads) = test_logical_reads.into_iter().tee(); let logical_reads = tmp.map(|tr| tr.make_logical_read()).collect::<Vec<_>>(); execute(file, 
logical_reads.iter(), ctx).await; for (logical_read, test_logical_read) in logical_reads.into_iter().zip(test_logical_reads) { let actual = logical_read.into_result().expect("we call execute()"); match (actual, test_logical_read.expected_result) { (Ok(actual), Ok(expected)) if actual == expected => {} (Err(actual), Err(expected)) => { assert_eq!(actual.to_string(), expected); } (actual, expected) => panic!("expected {expected:?}\nactual {actual:?}"), } } } #[tokio::test] async fn test_blackbox() { let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error); let cs = DIO_CHUNK_SIZE; let cs_u64 = cs.into_u64(); let file = InMemoryFile::new_random(10 * cs); let test_logical_reads = vec![ file.test_logical_read(0, 1), // adjacent to logical_read0 file.test_logical_read(1, 2), // gap // spans adjacent chunks file.test_logical_read(cs_u64 - 1, 2), // gap // tail of chunk 3, all of chunk 4, and 2 bytes of chunk 5 file.test_logical_read(3 * cs_u64 - 1, cs + 2), // gap file.test_logical_read(5 * cs_u64, 1), ]; let num_test_logical_reads = test_logical_reads.len(); let test_logical_reads_perms = test_logical_reads .into_iter() .permutations(num_test_logical_reads); // test all orderings of LogicalReads, the order shouldn't matter for the results for test_logical_reads in test_logical_reads_perms { execute_and_validate_test_logical_reads(&file, test_logical_reads, &ctx).await; } } #[tokio::test] #[should_panic] async fn test_reusing_logical_reads_panics() { let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error); let file = InMemoryFile::new_random(DIO_CHUNK_SIZE); let a = file.test_logical_read(23, 10); let logical_reads = vec![a.make_logical_read()]; execute(&file, &logical_reads, &ctx).await; // reuse pancis execute(&file, &logical_reads, &ctx).await; } struct RecorderFile<'a> { recorded: RefCell<Vec<RecordedRead>>, file: &'a InMemoryFile, } struct RecordedRead { pos: u64, req_len: usize, res: Vec<u8>, } impl<'a> RecorderFile<'a> { fn 
new(file: &'a InMemoryFile) -> RecorderFile<'a> { Self { recorded: Default::default(), file, } } } impl File for RecorderFile<'_> { async fn read_exact_at_eof_ok<B: IoBufAlignedMut + Send>( &self, start: u64, dst: Slice<B>, ctx: &RequestContext, ) -> std::io::Result<(Slice<B>, usize)> { let (dst, nread) = self.file.read_exact_at_eof_ok(start, dst, ctx).await?; self.recorded.borrow_mut().push(RecordedRead { pos: start, req_len: dst.bytes_total(), res: Vec::from(&dst[..nread]), }); Ok((dst, nread)) } } #[tokio::test] async fn test_logical_reads_to_same_chunk_are_merged_into_one_chunk_read() { let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error); let file = InMemoryFile::new_random(2 * DIO_CHUNK_SIZE); let a = file.test_logical_read(DIO_CHUNK_SIZE.into_u64(), 10); let b = file.test_logical_read(DIO_CHUNK_SIZE.into_u64() + 30, 20); let recorder = RecorderFile::new(&file); execute_and_validate_test_logical_reads(&recorder, vec![a, b], &ctx).await; let recorded = recorder.recorded.borrow(); assert_eq!(recorded.len(), 1); let RecordedRead { pos, req_len, .. } = &recorded[0]; assert_eq!(*pos, DIO_CHUNK_SIZE.into_u64()); assert_eq!(*req_len, DIO_CHUNK_SIZE); } #[tokio::test] async fn test_max_chunk_batch_size_is_respected() { let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error); let file = InMemoryFile::new_random(4 * MAX_CHUNK_BATCH_SIZE * DIO_CHUNK_SIZE); // read the 10th byte of each chunk 3 .. 
3+2*MAX_CHUNK_BATCH_SIZE assert!(3 < MAX_CHUNK_BATCH_SIZE, "test assumption"); assert!(10 < DIO_CHUNK_SIZE, "test assumption"); let mut test_logical_reads = Vec::new(); for i in 3..3 + MAX_CHUNK_BATCH_SIZE + MAX_CHUNK_BATCH_SIZE / 2 { test_logical_reads .push(file.test_logical_read(i.into_u64() * DIO_CHUNK_SIZE.into_u64() + 10, 1)); } let recorder = RecorderFile::new(&file); execute_and_validate_test_logical_reads(&recorder, test_logical_reads, &ctx).await; let recorded = recorder.recorded.borrow(); assert_eq!(recorded.len(), 2); { let RecordedRead { pos, req_len, .. } = &recorded[0]; assert_eq!(*pos as usize, 3 * DIO_CHUNK_SIZE); assert_eq!(*req_len, MAX_CHUNK_BATCH_SIZE * DIO_CHUNK_SIZE); } { let RecordedRead { pos, req_len, .. } = &recorded[1]; assert_eq!(*pos as usize, (3 + MAX_CHUNK_BATCH_SIZE) * DIO_CHUNK_SIZE); assert_eq!(*req_len, MAX_CHUNK_BATCH_SIZE / 2 * DIO_CHUNK_SIZE); } } #[tokio::test] async fn test_batch_breaks_if_chunk_is_not_interesting() { let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error); assert!(MAX_CHUNK_BATCH_SIZE > 10, "test assumption"); let file = InMemoryFile::new_random(3 * DIO_CHUNK_SIZE); let a = file.test_logical_read(0, 1); // chunk 0 let b = file.test_logical_read(2 * DIO_CHUNK_SIZE.into_u64(), 1); // chunk 2 let recorder = RecorderFile::new(&file); execute_and_validate_test_logical_reads(&recorder, vec![a, b], &ctx).await; let recorded = recorder.recorded.borrow(); assert_eq!(recorded.len(), 2); { let RecordedRead { pos, req_len, .. } = &recorded[0]; assert_eq!(*pos, 0); assert_eq!(*req_len, DIO_CHUNK_SIZE); } { let RecordedRead { pos, req_len, .. 
} = &recorded[1]; assert_eq!(*pos, 2 * DIO_CHUNK_SIZE.into_u64()); assert_eq!(*req_len, DIO_CHUNK_SIZE); } } struct ExpectedRead { expect_pos: u64, expect_len: usize, respond: Result<Vec<u8>, String>, } struct MockFile { expected: RefCell<VecDeque<ExpectedRead>>, } impl Drop for MockFile { fn drop(&mut self) { assert!( self.expected.borrow().is_empty(), "expected reads not satisfied" ); } } macro_rules! mock_file { ($($pos:expr , $len:expr => $respond:expr),* $(,)?) => {{ MockFile { expected: RefCell::new(VecDeque::from(vec![$(ExpectedRead { expect_pos: $pos, expect_len: $len, respond: $respond, }),*])), } }}; } impl File for MockFile { async fn read_exact_at_eof_ok<B: IoBufMut + Send>( &self, start: u64, mut dst: Slice<B>, _ctx: &RequestContext, ) -> std::io::Result<(Slice<B>, usize)> { let ExpectedRead { expect_pos, expect_len, respond, } = self .expected .borrow_mut() .pop_front() .expect("unexpected read"); assert_eq!(start, expect_pos); assert_eq!(dst.bytes_total(), expect_len); match respond { Ok(mocked_bytes) => { let len = std::cmp::min(dst.bytes_total(), mocked_bytes.len()); let dst_slice: &mut [u8] = dst.as_mut_rust_slice_full_zeroed(); dst_slice[..len].copy_from_slice(&mocked_bytes[..len]); rand::Rng::fill(&mut rand::rng(), &mut dst_slice[len..]); // to discover bugs Ok((dst, len)) } Err(e) => Err(std::io::Error::other(e)), } } } #[tokio::test] async fn test_mock_file() { // Self-test to ensure the relevant features of mock file work as expected. let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error); let mock_file = mock_file! 
{ 0 , 512 => Ok(vec![0; 512]), 512 , 512 => Ok(vec![1; 512]), 1024 , 512 => Ok(vec![2; 10]), 2048, 1024 => Err("foo".to_owned()), }; let buf = IoBufferMut::with_capacity(512); let (buf, nread) = mock_file .read_exact_at_eof_ok(0, buf.slice_full(), &ctx) .await .unwrap(); assert_eq!(nread, 512); assert_eq!(&buf.into_inner()[..nread], &[0; 512]); let buf = IoBufferMut::with_capacity(512); let (buf, nread) = mock_file .read_exact_at_eof_ok(512, buf.slice_full(), &ctx) .await .unwrap(); assert_eq!(nread, 512); assert_eq!(&buf.into_inner()[..nread], &[1; 512]); let buf = IoBufferMut::with_capacity(512); let (buf, nread) = mock_file .read_exact_at_eof_ok(1024, buf.slice_full(), &ctx) .await .unwrap(); assert_eq!(nread, 10); assert_eq!(&buf.into_inner()[..nread], &[2; 10]); let buf = IoBufferMut::with_capacity(1024); let err = mock_file .read_exact_at_eof_ok(2048, buf.slice_full(), &ctx) .await .err() .unwrap(); assert_eq!(err.to_string(), "foo"); } #[tokio::test] async fn test_error_on_one_chunk_read_fails_only_dependent_logical_reads() { let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error); let test_logical_reads = vec![ // read spanning two batches TestLogicalRead::new( DIO_CHUNK_SIZE.into_u64() / 2, MAX_CHUNK_BATCH_SIZE * DIO_CHUNK_SIZE, Err("foo".to_owned()), ), // second read in failing chunk TestLogicalRead::new( (MAX_CHUNK_BATCH_SIZE * DIO_CHUNK_SIZE).into_u64() + DIO_CHUNK_SIZE.into_u64() - 10, 5, Err("foo".to_owned()), ), // read unaffected TestLogicalRead::new( (MAX_CHUNK_BATCH_SIZE * DIO_CHUNK_SIZE).into_u64() + 2 * DIO_CHUNK_SIZE.into_u64() + 10, 5, Ok(vec![1; 5]), ), ]; let (tmp, test_logical_reads) = test_logical_reads.into_iter().tee(); let test_logical_read_perms = tmp.permutations(test_logical_reads.len()); for test_logical_reads in test_logical_read_perms { let file = mock_file!( 0, MAX_CHUNK_BATCH_SIZE*DIO_CHUNK_SIZE => Ok(vec![0; MAX_CHUNK_BATCH_SIZE*DIO_CHUNK_SIZE]), (MAX_CHUNK_BATCH_SIZE*DIO_CHUNK_SIZE).into_u64(), 
DIO_CHUNK_SIZE => Err("foo".to_owned()), (MAX_CHUNK_BATCH_SIZE*DIO_CHUNK_SIZE + 2*DIO_CHUNK_SIZE).into_u64(), DIO_CHUNK_SIZE => Ok(vec![1; DIO_CHUNK_SIZE]), ); execute_and_validate_test_logical_reads(&file, test_logical_reads, &ctx).await; } } struct TestShortReadsSetup { ctx: RequestContext, file: InMemoryFile, written: u64, } fn setup_short_chunk_read_tests() -> TestShortReadsSetup { let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error); assert!(DIO_CHUNK_SIZE > 20, "test assumption"); let written = (2 * DIO_CHUNK_SIZE - 10).into_u64(); let file = InMemoryFile::new_random(written as usize); TestShortReadsSetup { ctx, file, written } } #[tokio::test] async fn test_short_chunk_read_from_written_range() { // Test what happens if there are logical reads // that start within the last chunk, and // the last chunk is not the full chunk length. // // The read should succeed despite the short chunk length. let TestShortReadsSetup { ctx, file, written } = setup_short_chunk_read_tests(); let a = file.test_logical_read(written - 10, 5); let recorder = RecorderFile::new(&file);
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/storage_layer/layer/failpoints.rs
pageserver/src/tenant/storage_layer/layer/failpoints.rs
//! failpoints for unit tests, implying `#[cfg(test)]`. //! //! These are not accessible over http. use super::*; impl Layer { /// Enable a failpoint from a unit test. pub(super) fn enable_failpoint(&self, failpoint: Failpoint) { self.0.failpoints.lock().unwrap().push(failpoint); } } impl LayerInner { /// Query if this failpoint is enabled, as in, arrive at a failpoint. /// /// Calls to this method need to be `#[cfg(test)]` guarded. pub(super) async fn failpoint(&self, kind: FailpointKind) -> Result<(), FailpointHit> { let fut = { let mut fps = self.failpoints.lock().unwrap(); // find the *last* failpoint for cases in which we need to use multiple for the same // thing (two blocked evictions) let fp = fps.iter_mut().rfind(|x| x.kind() == kind); let Some(fp) = fp else { return Ok(()); }; fp.hit() }; fut.await } } #[derive(Debug, PartialEq, Eq)] pub(crate) enum FailpointKind { /// Failpoint acts as an accurate cancelled by drop here; see the only site of use. AfterDeterminingLayerNeedsNoDownload, /// Failpoint for stalling eviction starting WaitBeforeStartingEvicting, /// Failpoint hit in the spawned task WaitBeforeDownloading, } pub(crate) enum Failpoint { AfterDeterminingLayerNeedsNoDownload, WaitBeforeStartingEvicting( Option<utils::completion::Completion>, utils::completion::Barrier, ), WaitBeforeDownloading( Option<utils::completion::Completion>, utils::completion::Barrier, ), } impl Failpoint { fn kind(&self) -> FailpointKind { match self { Failpoint::AfterDeterminingLayerNeedsNoDownload => { FailpointKind::AfterDeterminingLayerNeedsNoDownload } Failpoint::WaitBeforeStartingEvicting(..) => FailpointKind::WaitBeforeStartingEvicting, Failpoint::WaitBeforeDownloading(..) 
=> FailpointKind::WaitBeforeDownloading, } } fn hit(&mut self) -> impl std::future::Future<Output = Result<(), FailpointHit>> + 'static { use futures::future::FutureExt; // use boxed futures to avoid Either hurdles match self { Failpoint::AfterDeterminingLayerNeedsNoDownload => { let kind = self.kind(); async move { Err(FailpointHit(kind)) }.boxed() } Failpoint::WaitBeforeStartingEvicting(arrival, b) | Failpoint::WaitBeforeDownloading(arrival, b) => { // first one signals arrival drop(arrival.take()); let b = b.clone(); async move { tracing::trace!("waiting on a failpoint barrier"); b.wait().await; tracing::trace!("done waiting on a failpoint barrier"); Ok(()) } .boxed() } } } } impl std::fmt::Display for FailpointKind { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Debug::fmt(self, f) } } #[derive(Debug)] pub(crate) struct FailpointHit(FailpointKind); impl std::fmt::Display for FailpointHit { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Debug::fmt(self, f) } } impl std::error::Error for FailpointHit {} impl From<FailpointHit> for DownloadError { fn from(value: FailpointHit) -> Self { DownloadError::Failpoint(value.0) } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/storage_layer/layer/tests.rs
pageserver/src/tenant/storage_layer/layer/tests.rs
use std::time::UNIX_EPOCH; use pageserver_api::key::{CONTROLFILE_KEY, Key}; use postgres_ffi::PgMajorVersion; use tokio::task::JoinSet; use utils::completion::{self, Completion}; use utils::id::TimelineId; use super::failpoints::{Failpoint, FailpointKind}; use super::*; use crate::context::DownloadBehavior; use crate::tenant::harness::{TenantHarness, test_img}; use crate::tenant::storage_layer::{IoConcurrency, LayerVisibilityHint}; use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; /// Used in tests to advance a future to wanted await point, and not futher. const ADVANCE: std::time::Duration = std::time::Duration::from_secs(3600); /// Used in tests to indicate forever long timeout; has to be longer than the amount of ADVANCE /// timeout uses to advance futures. const FOREVER: std::time::Duration = std::time::Duration::from_secs(ADVANCE.as_secs() * 24 * 7); /// Demonstrate the API and resident -> evicted -> resident -> deleted transitions. #[tokio::test] async fn smoke_test() { let handle = tokio::runtime::Handle::current(); let h = TenantHarness::create("smoke_test").await.unwrap(); let span = h.span(); let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1)); let (tenant, ctx) = h.load().await; let io_concurrency = IoConcurrency::spawn_for_test(); let image_layers = vec![( Lsn(0x40), vec![( Key::from_hex("620000000033333333444444445500000000").unwrap(), test_img("foo"), )], )]; // Create a test timeline with one real layer, and one synthetic test layer. The synthetic // one is only there so that we can GC the real one without leaving the timeline's metadata // empty, which is an illegal state (see [`IndexPart::validate`]). 
let timeline = tenant .create_test_timeline_with_layers( TimelineId::generate(), Lsn(0x10), PgMajorVersion::PG14, &ctx, Default::default(), // in-memory layers Default::default(), image_layers, Lsn(0x100), ) .await .unwrap(); let ctx = &ctx.with_scope_timeline(&timeline); // Grab one of the timeline's layers to exercise in the test, and the other layer that is just // there to avoid the timeline being illegally empty let (layer, dummy_layer) = { let mut layers = { let layers = timeline.layers.read(LayerManagerLockHolder::Testing).await; layers.likely_resident_layers().cloned().collect::<Vec<_>>() }; assert_eq!(layers.len(), 2); layers.sort_by_key(|l| l.layer_desc().get_key_range().start); let synthetic_layer = layers.pop().unwrap(); let real_layer = layers.pop().unwrap(); tracing::info!( "real_layer={:?} ({}), synthetic_layer={:?} ({})", real_layer, real_layer.layer_desc().file_size, synthetic_layer, synthetic_layer.layer_desc().file_size ); (real_layer, synthetic_layer) }; // all layers created at pageserver are like `layer`, initialized with strong // Arc<DownloadedLayer>. let controlfile_keyspace = KeySpace { ranges: vec![CONTROLFILE_KEY..CONTROLFILE_KEY.next()], }; let img_before = { let mut data = ValuesReconstructState::new(io_concurrency.clone()); layer .get_values_reconstruct_data( controlfile_keyspace.clone(), Lsn(0x10)..Lsn(0x11), &mut data, ctx, ) .await .unwrap(); data.keys .remove(&CONTROLFILE_KEY) .expect("must be present") .collect_pending_ios() .await .expect("must not error") .img .take() .expect("tenant harness writes the control file") }; // important part is evicting the layer, which can be done when there are no more ResidentLayer // instances -- there currently are none, only two `Layer` values, one in the layermap and on // in scope. layer.evict_and_wait(FOREVER).await.unwrap(); // double-evict returns an error, which is valid if both eviction_task and disk usage based // eviction would both evict the same layer at the same time. 
let e = layer.evict_and_wait(FOREVER).await.unwrap_err(); assert!(matches!(e, EvictionError::NotFound)); let dl_ctx = RequestContextBuilder::from(ctx) .download_behavior(DownloadBehavior::Download) .attached_child(); // on accesses when the layer is evicted, it will automatically be downloaded. let img_after = { let mut data = ValuesReconstructState::new(io_concurrency.clone()); layer .get_values_reconstruct_data( controlfile_keyspace.clone(), Lsn(0x10)..Lsn(0x11), &mut data, &dl_ctx, ) .instrument(download_span.clone()) .await .unwrap(); data.keys .remove(&CONTROLFILE_KEY) .expect("must be present") .collect_pending_ios() .await .expect("must not error") .img .take() .expect("tenant harness writes the control file") }; assert_eq!(img_before, img_after); // evict_and_wait can timeout, but it doesn't cancel the evicting itself // // ZERO for timeout does not work reliably, so first take up all spawn_blocking slots to // artificially slow it down. let helper = SpawnBlockingPoolHelper::consume_all_spawn_blocking_threads(&handle).await; match layer .evict_and_wait(std::time::Duration::ZERO) .await .unwrap_err() { EvictionError::Timeout => { // expected, but note that the eviction is "still ongoing" helper.release().await; // exhaust spawn_blocking pool to ensure it is now complete SpawnBlockingPoolHelper::consume_and_release_all_of_spawn_blocking_threads(&handle) .await; } other => unreachable!("{other:?}"), } // only way to query if a layer is resident is to acquire a ResidentLayer instance. // Layer::keep_resident never downloads, but it might initialize if the layer file is found // downloaded locally. 
let none = layer.keep_resident().await; assert!( none.is_none(), "Expected none, because eviction removed the local file, found: {none:?}" ); // plain downloading is rarely needed layer .download_and_keep_resident(&dl_ctx) .instrument(download_span) .await .unwrap(); // last important part is deletion on drop: gc and compaction use it for compacted L0 layers // or fully garbage collected layers. deletion means deleting the local file, and scheduling a // deletion of the already unlinked from index_part.json remote file. // // marking a layer to be deleted on drop is irreversible; there is no technical reason against // reversiblity, but currently it is not needed so it is not provided. layer.delete_on_drop(); let path = layer.local_path().to_owned(); // wait_drop produces an unconnected to Layer future which will resolve when the // LayerInner::drop has completed. let mut wait_drop = std::pin::pin!(layer.wait_drop()); // paused time doesn't really work well with timeouts and evict_and_wait, so delay pausing // until here tokio::time::pause(); tokio::time::timeout(ADVANCE, &mut wait_drop) .await .expect_err("should had timed out because two strong references exist"); tokio::fs::metadata(&path) .await .expect("the local layer file still exists"); let rtc = &timeline.remote_client; // Simulate GC removing our test layer. 
{ let mut g = timeline.layers.write(LayerManagerLockHolder::Testing).await; let layers = &[layer]; g.open_mut().unwrap().finish_gc_timeline(layers); // this just updates the remote_physical_size for demonstration purposes rtc.schedule_gc_update(layers).unwrap(); } // when strong references are dropped, the file is deleted and remote deletion is scheduled wait_drop.await; let e = tokio::fs::metadata(&path) .await .expect_err("the local file is deleted"); assert_eq!(e.kind(), std::io::ErrorKind::NotFound); rtc.wait_completion().await.unwrap(); assert_eq!( rtc.get_remote_physical_size(), dummy_layer.metadata().file_size ); assert_eq!(0, LAYER_IMPL_METRICS.inits_cancelled.get()) } /// This test demonstrates a previous hang when a eviction and deletion were requested at the same /// time. Now both of them complete per Arc drop semantics. #[tokio::test(start_paused = true)] async fn evict_and_wait_on_wanted_deleted() { // this is the runtime on which Layer spawns the blocking tasks on let handle = tokio::runtime::Handle::current(); let h = TenantHarness::create("evict_and_wait_on_wanted_deleted") .await .unwrap(); utils::logging::replace_panic_hook_with_tracing_panic_hook().forget(); let (tenant, ctx) = h.load().await; let timeline = tenant .create_test_timeline( TimelineId::generate(), Lsn(0x10), PgMajorVersion::PG14, &ctx, ) .await .unwrap(); let layer = { let mut layers = { let layers = timeline.layers.read(LayerManagerLockHolder::Testing).await; layers.likely_resident_layers().cloned().collect::<Vec<_>>() }; assert_eq!(layers.len(), 1); layers.swap_remove(0) }; // setup done let resident = layer.keep_resident().await.unwrap(); { let mut evict_and_wait = std::pin::pin!(layer.evict_and_wait(FOREVER)); // drive the future to await on the status channel tokio::time::timeout(ADVANCE, &mut evict_and_wait) .await .expect_err("should had been a timeout since we are holding the layer resident"); layer.delete_on_drop(); drop(resident); // make sure the eviction task gets to 
run SpawnBlockingPoolHelper::consume_and_release_all_of_spawn_blocking_threads(&handle).await; let resident = layer.keep_resident().await; assert!( resident.is_none(), "keep_resident should not have re-initialized: {resident:?}" ); evict_and_wait .await .expect("evict_and_wait should had succeeded"); // works as intended } // assert that once we remove the `layer` from the layer map and drop our reference, // the deletion of the layer in remote_storage happens. { let mut layers = timeline.layers.write(LayerManagerLockHolder::Testing).await; layers.open_mut().unwrap().finish_gc_timeline(&[layer]); } SpawnBlockingPoolHelper::consume_and_release_all_of_spawn_blocking_threads(&handle).await; assert_eq!(1, LAYER_IMPL_METRICS.started_deletes.get()); assert_eq!(1, LAYER_IMPL_METRICS.completed_deletes.get()); assert_eq!(1, LAYER_IMPL_METRICS.started_evictions.get()); assert_eq!(1, LAYER_IMPL_METRICS.completed_evictions.get()); assert_eq!(0, LAYER_IMPL_METRICS.inits_cancelled.get()) } /// This test ensures we are able to read the layer while the layer eviction has been /// started but not completed. 
#[test] fn read_wins_pending_eviction() { let rt = tokio::runtime::Builder::new_current_thread() .max_blocking_threads(1) .enable_all() .start_paused(true) .build() .unwrap(); rt.block_on(async move { // this is the runtime on which Layer spawns the blocking tasks on let handle = tokio::runtime::Handle::current(); let h = TenantHarness::create("read_wins_pending_eviction") .await .unwrap(); let (tenant, ctx) = h.load().await; let span = h.span(); let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1)); let timeline = tenant .create_test_timeline( TimelineId::generate(), Lsn(0x10), PgMajorVersion::PG14, &ctx, ) .await .unwrap(); let ctx = ctx.with_scope_timeline(&timeline); let layer = { let mut layers = { let layers = timeline.layers.read(LayerManagerLockHolder::Testing).await; layers.likely_resident_layers().cloned().collect::<Vec<_>>() }; assert_eq!(layers.len(), 1); layers.swap_remove(0) }; // setup done let resident = layer.keep_resident().await.unwrap(); let mut evict_and_wait = std::pin::pin!(layer.evict_and_wait(FOREVER)); // drive the future to await on the status channel tokio::time::timeout(ADVANCE, &mut evict_and_wait) .await .expect_err("should had been a timeout since we are holding the layer resident"); assert_eq!(1, LAYER_IMPL_METRICS.started_evictions.get()); let (completion, barrier) = utils::completion::channel(); let (arrival, arrived_at_barrier) = utils::completion::channel(); layer.enable_failpoint(Failpoint::WaitBeforeStartingEvicting( Some(arrival), barrier, )); // now the eviction cannot proceed because the threads are consumed while completion exists drop(resident); arrived_at_barrier.wait().await; assert!(!layer.is_likely_resident()); // because no actual eviction happened, we get to just reinitialize the DownloadedLayer layer .0 .get_or_maybe_download(false, &ctx) .instrument(download_span) .await .expect("should had reinitialized without downloading"); assert!(layer.is_likely_resident()); // 
reinitialization notifies of new resident status, which should error out all evict_and_wait let e = tokio::time::timeout(ADVANCE, &mut evict_and_wait) .await .expect("no timeout, because get_or_maybe_download re-initialized") .expect_err("eviction should not have succeeded because re-initialized"); // works as intended: evictions lose to "downloads" assert!(matches!(e, EvictionError::Downloaded), "{e:?}"); assert_eq!(0, LAYER_IMPL_METRICS.completed_evictions.get()); // this is not wrong: the eviction is technically still "on the way" as it's still queued // because of a failpoint assert_eq!( 0, LAYER_IMPL_METRICS .cancelled_evictions .values() .map(|ctr| ctr.get()) .sum::<u64>() ); drop(completion); tokio::time::sleep(ADVANCE).await; SpawnBlockingPoolHelper::consume_and_release_all_of_spawn_blocking_threads0(&handle, 1) .await; assert_eq!(0, LAYER_IMPL_METRICS.completed_evictions.get()); // now we finally can observe the original eviction failing // it would had been possible to observe it earlier, but here it is guaranteed to have // happened. assert_eq!( 1, LAYER_IMPL_METRICS .cancelled_evictions .values() .map(|ctr| ctr.get()) .sum::<u64>() ); assert_eq!( 1, LAYER_IMPL_METRICS.cancelled_evictions[EvictionCancelled::AlreadyReinitialized].get() ); assert_eq!(0, LAYER_IMPL_METRICS.inits_cancelled.get()) }); } /// Use failpoint to delay an eviction starting to get a VersionCheckFailed. #[test] fn multiple_pending_evictions_in_order() { let name = "multiple_pending_evictions_in_order"; let in_order = true; multiple_pending_evictions_scenario(name, in_order); } /// Use failpoint to reorder later eviction before first to get a UnexpectedEvictedState. 
#[test] fn multiple_pending_evictions_out_of_order() { let name = "multiple_pending_evictions_out_of_order"; let in_order = false; multiple_pending_evictions_scenario(name, in_order); } fn multiple_pending_evictions_scenario(name: &'static str, in_order: bool) { let rt = tokio::runtime::Builder::new_current_thread() .max_blocking_threads(1) .enable_all() .start_paused(true) .build() .unwrap(); rt.block_on(async move { // this is the runtime on which Layer spawns the blocking tasks on let handle = tokio::runtime::Handle::current(); let h = TenantHarness::create(name).await.unwrap(); let (tenant, ctx) = h.load().await; let span = h.span(); let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1)); let timeline = tenant .create_test_timeline( TimelineId::generate(), Lsn(0x10), PgMajorVersion::PG14, &ctx, ) .await .unwrap(); let ctx = ctx.with_scope_timeline(&timeline); let layer = { let mut layers = { let layers = timeline.layers.read(LayerManagerLockHolder::Testing).await; layers.likely_resident_layers().cloned().collect::<Vec<_>>() }; assert_eq!(layers.len(), 1); layers.swap_remove(0) }; // setup done let resident = layer.keep_resident().await.unwrap(); let mut evict_and_wait = std::pin::pin!(layer.evict_and_wait(FOREVER)); // drive the future to await on the status channel tokio::time::timeout(ADVANCE, &mut evict_and_wait) .await .expect_err("should had been a timeout since we are holding the layer resident"); assert_eq!(1, LAYER_IMPL_METRICS.started_evictions.get()); let (completion1, barrier) = utils::completion::channel(); let mut completion1 = Some(completion1); let (arrival, arrived_at_barrier) = utils::completion::channel(); layer.enable_failpoint(Failpoint::WaitBeforeStartingEvicting( Some(arrival), barrier, )); // now the eviction cannot proceed because we are simulating arbitrary long delay for the // eviction task start. 
drop(resident); assert!(!layer.is_likely_resident()); arrived_at_barrier.wait().await; // because no actual eviction happened, we get to just reinitialize the DownloadedLayer layer .0 .get_or_maybe_download(false, &ctx) .instrument(download_span) .await .expect("should had reinitialized without downloading"); assert!(layer.is_likely_resident()); // reinitialization notifies of new resident status, which should error out all evict_and_wait let e = tokio::time::timeout(ADVANCE, &mut evict_and_wait) .await .expect("no timeout, because get_or_maybe_download re-initialized") .expect_err("eviction should not have succeeded because re-initialized"); // works as intended: evictions lose to "downloads" assert!(matches!(e, EvictionError::Downloaded), "{e:?}"); assert_eq!(0, LAYER_IMPL_METRICS.completed_evictions.get()); // this is not wrong: the eviction is technically still "on the way" as it's still queued // because of a failpoint assert_eq!( 0, LAYER_IMPL_METRICS .cancelled_evictions .values() .map(|ctr| ctr.get()) .sum::<u64>() ); assert_eq!(0, LAYER_IMPL_METRICS.completed_evictions.get()); // configure another failpoint for the second eviction -- evictions are per initialization, // so now that we've reinitialized the inner, we get to run two of them at the same time. 
let (completion2, barrier) = utils::completion::channel(); let (arrival, arrived_at_barrier) = utils::completion::channel(); layer.enable_failpoint(Failpoint::WaitBeforeStartingEvicting( Some(arrival), barrier, )); let mut second_eviction = std::pin::pin!(layer.evict_and_wait(FOREVER)); // advance to the wait on the queue tokio::time::timeout(ADVANCE, &mut second_eviction) .await .expect_err("timeout because failpoint is blocking"); arrived_at_barrier.wait().await; assert_eq!(2, LAYER_IMPL_METRICS.started_evictions.get()); let mut release_earlier_eviction = |expected_reason| { assert_eq!( 0, LAYER_IMPL_METRICS.cancelled_evictions[expected_reason].get(), ); drop(completion1.take().unwrap()); let handle = &handle; async move { tokio::time::sleep(ADVANCE).await; SpawnBlockingPoolHelper::consume_and_release_all_of_spawn_blocking_threads0( handle, 1, ) .await; assert_eq!( 1, LAYER_IMPL_METRICS.cancelled_evictions[expected_reason].get(), ); } }; if in_order { release_earlier_eviction(EvictionCancelled::VersionCheckFailed).await; } // release the later eviction which is for the current version drop(completion2); tokio::time::sleep(ADVANCE).await; SpawnBlockingPoolHelper::consume_and_release_all_of_spawn_blocking_threads0(&handle, 1) .await; if !in_order { release_earlier_eviction(EvictionCancelled::UnexpectedEvictedState).await; } tokio::time::timeout(ADVANCE, &mut second_eviction) .await .expect("eviction goes through now that spawn_blocking is unclogged") .expect("eviction should succeed, because version matches"); assert_eq!(1, LAYER_IMPL_METRICS.completed_evictions.get()); // ensure the cancelled are unchanged assert_eq!( 1, LAYER_IMPL_METRICS .cancelled_evictions .values() .map(|ctr| ctr.get()) .sum::<u64>() ); assert_eq!(0, LAYER_IMPL_METRICS.inits_cancelled.get()) }); } /// The test ensures with a failpoint that a pending eviction is not cancelled by what is currently /// a `Layer::keep_resident` call. 
/// /// This matters because cancelling the eviction would leave us in a state where the file is on /// disk but the layer internal state says it has not been initialized. Futhermore, it allows us to /// have non-repairing `Layer::is_likely_resident`. #[tokio::test(start_paused = true)] async fn cancelled_get_or_maybe_download_does_not_cancel_eviction() { let handle = tokio::runtime::Handle::current(); let h = TenantHarness::create("cancelled_get_or_maybe_download_does_not_cancel_eviction") .await .unwrap(); let (tenant, ctx) = h.load().await; let timeline = tenant .create_test_timeline( TimelineId::generate(), Lsn(0x10), PgMajorVersion::PG14, &ctx, ) .await .unwrap(); let ctx = ctx.with_scope_timeline(&timeline); // This test does downloads let ctx = RequestContextBuilder::from(&ctx) .download_behavior(DownloadBehavior::Download) .attached_child(); let layer = { let mut layers = { let layers = timeline.layers.read(LayerManagerLockHolder::Testing).await; layers.likely_resident_layers().cloned().collect::<Vec<_>>() }; assert_eq!(layers.len(), 1); layers.swap_remove(0) }; // this failpoint will simulate the `get_or_maybe_download` becoming cancelled (by returning an // Err) at the right time as in "during" the `LayerInner::needs_download`. 
layer.enable_failpoint(Failpoint::AfterDeterminingLayerNeedsNoDownload); let (completion, barrier) = utils::completion::channel(); let (arrival, arrived_at_barrier) = utils::completion::channel(); layer.enable_failpoint(Failpoint::WaitBeforeStartingEvicting( Some(arrival), barrier, )); tokio::time::timeout(ADVANCE, layer.evict_and_wait(FOREVER)) .await .expect_err("should had advanced to waiting on channel"); arrived_at_barrier.wait().await; // simulate a cancelled read which is cancelled before it gets to re-initialize let e = layer .0 .get_or_maybe_download(false, &ctx) .await .unwrap_err(); assert!( matches!( e, DownloadError::Failpoint(FailpointKind::AfterDeterminingLayerNeedsNoDownload) ), "{e:?}" ); assert!( layer.0.needs_download().await.unwrap().is_none(), "file is still on disk" ); // release the eviction task drop(completion); tokio::time::sleep(ADVANCE).await; SpawnBlockingPoolHelper::consume_and_release_all_of_spawn_blocking_threads(&handle).await; // failpoint is still enabled, but it is not hit let e = layer .0 .get_or_maybe_download(false, &ctx) .await .unwrap_err(); assert!(matches!(e, DownloadError::DownloadRequired), "{e:?}"); // failpoint is not counted as cancellation either assert_eq!(0, LAYER_IMPL_METRICS.inits_cancelled.get()) } #[tokio::test(start_paused = true)] async fn evict_and_wait_does_not_wait_for_download() { // let handle = tokio::runtime::Handle::current(); let h = TenantHarness::create("evict_and_wait_does_not_wait_for_download") .await .unwrap(); let (tenant, ctx) = h.load().await; let span = h.span(); let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1)); let timeline = tenant .create_test_timeline( TimelineId::generate(), Lsn(0x10), PgMajorVersion::PG14, &ctx, ) .await .unwrap(); let ctx = ctx.with_scope_timeline(&timeline); // This test does downloads let ctx = RequestContextBuilder::from(&ctx) .download_behavior(DownloadBehavior::Download) .attached_child(); let layer = { let mut layers = { 
let layers = timeline.layers.read(LayerManagerLockHolder::Testing).await; layers.likely_resident_layers().cloned().collect::<Vec<_>>() }; assert_eq!(layers.len(), 1); layers.swap_remove(0) }; // kind of forced setup: start an eviction but do not allow it progress until we are // downloading let (eviction_can_continue, barrier) = utils::completion::channel(); let (arrival, eviction_arrived) = utils::completion::channel(); layer.enable_failpoint(Failpoint::WaitBeforeStartingEvicting( Some(arrival), barrier, )); let mut evict_and_wait = std::pin::pin!(layer.evict_and_wait(FOREVER)); // use this once-awaited other_evict to synchronize with the eviction let other_evict = layer.evict_and_wait(FOREVER); tokio::time::timeout(ADVANCE, &mut evict_and_wait) .await .expect_err("should had advanced"); eviction_arrived.wait().await; drop(eviction_can_continue); other_evict.await.unwrap(); // now the layer is evicted, and the "evict_and_wait" is waiting on the receiver assert!(!layer.is_likely_resident()); // following new evict_and_wait will fail until we've completed the download let e = layer.evict_and_wait(FOREVER).await.unwrap_err(); assert!(matches!(e, EvictionError::NotFound), "{e:?}"); let (download_can_continue, barrier) = utils::completion::channel(); let (arrival, _download_arrived) = utils::completion::channel(); layer.enable_failpoint(Failpoint::WaitBeforeDownloading(Some(arrival), barrier)); let mut download = std::pin::pin!( layer .0 .get_or_maybe_download(true, &ctx) .instrument(download_span) ); assert!( !layer.is_likely_resident(), "during download layer is evicted" ); tokio::time::timeout(ADVANCE, &mut download) .await .expect_err("should had timed out because of failpoint"); // now we finally get to continue, and because the latest state is downloading, we deduce that // original eviction succeeded evict_and_wait.await.unwrap(); // however a new evict_and_wait will fail let e = layer.evict_and_wait(FOREVER).await.unwrap_err(); assert!(matches!(e, 
EvictionError::NotFound), "{e:?}"); assert!(!layer.is_likely_resident()); drop(download_can_continue); download.await.expect("download should had succeeded"); assert!(layer.is_likely_resident()); // only now can we evict layer.evict_and_wait(FOREVER).await.unwrap(); } /// Asserts that there is no miscalculation when Layer is dropped while it is being kept resident, /// which is the last value. /// /// Also checks that the same does not happen on a non-evicted layer (regression test). #[tokio::test(start_paused = true)] async fn eviction_cancellation_on_drop() { use bytes::Bytes; use wal_decoder::models::value::Value; // this is the runtime on which Layer spawns the blocking tasks on let handle = tokio::runtime::Handle::current(); let h = TenantHarness::create("eviction_cancellation_on_drop") .await .unwrap(); utils::logging::replace_panic_hook_with_tracing_panic_hook().forget(); let (tenant, ctx) = h.load().await; let timeline = tenant .create_test_timeline( TimelineId::generate(), Lsn(0x10), PgMajorVersion::PG14, &ctx, ) .await .unwrap(); { // create_test_timeline wrote us one layer, write another let mut writer = timeline.writer().await; writer .put( pageserver_api::key::Key::from_i128(5), Lsn(0x20), &Value::Image(Bytes::from_static(b"this does not matter either")), &ctx, ) .await .unwrap(); writer.finish_write(Lsn(0x20)); } timeline.freeze_and_flush().await.unwrap(); // wait for the upload to complete so our Arc::strong_count assertion holds timeline.remote_client.wait_completion().await.unwrap(); let (evicted_layer, not_evicted) = { let mut layers = { let mut guard = timeline.layers.write(LayerManagerLockHolder::Testing).await; let layers = guard.likely_resident_layers().cloned().collect::<Vec<_>>(); // remove the layers from layermap guard.open_mut().unwrap().finish_gc_timeline(&layers); layers }; assert_eq!(layers.len(), 2); (layers.pop().unwrap(), layers.pop().unwrap()) }; let victims = [(evicted_layer, true), (not_evicted, false)]; for (victim, evict) in 
victims { let resident = victim.keep_resident().await.unwrap(); drop(victim); assert_eq!(Arc::strong_count(&resident.owner.0), 1); if evict { let evict_and_wait = resident.owner.evict_and_wait(FOREVER); // drive the future to await on the status channel, and then drop it tokio::time::timeout(ADVANCE, evict_and_wait) .await .expect_err("should had been a timeout since we are holding the layer resident"); } // 1 == we only evict one of the layers assert_eq!(1, LAYER_IMPL_METRICS.started_evictions.get()); drop(resident); // run any spawned tokio::time::sleep(ADVANCE).await; SpawnBlockingPoolHelper::consume_and_release_all_of_spawn_blocking_threads(&handle).await; assert_eq!( 1, LAYER_IMPL_METRICS.cancelled_evictions[EvictionCancelled::LayerGone].get() ); } } /// A test case to remind you the cost of these structures. You can bump the size limit /// below if it is really necessary to add more fields to the structures.
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/layer_map/historic_layer_coverage.rs
pageserver/src/tenant/layer_map/historic_layer_coverage.rs
use std::collections::BTreeMap; use std::ops::Range; use tracing::info; use super::layer_coverage::LayerCoverageTuple; use crate::tenant::storage_layer::PersistentLayerDesc; /// Layers in this module are identified and indexed by this data. /// /// This is a helper struct to enable sorting layers by lsn.start. /// /// These three values are enough to uniquely identify a layer, since /// a layer is obligated to contain all contents within range, so two /// deltas (or images) with the same range have identical content. #[derive(Debug, PartialEq, Eq, Clone)] pub struct LayerKey { // TODO I use i128 and u64 because it was easy for prototyping, // testing, and benchmarking. If we can use the Lsn and Key // types without overhead that would be preferable. pub key: Range<i128>, pub lsn: Range<u64>, pub is_image: bool, } impl PartialOrd for LayerKey { fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { Some(self.cmp(other)) } } impl Ord for LayerKey { fn cmp(&self, other: &Self) -> std::cmp::Ordering { // NOTE we really care about comparing by lsn.start first self.lsn .start .cmp(&other.lsn.start) .then(self.lsn.end.cmp(&other.lsn.end)) .then(self.key.start.cmp(&other.key.start)) .then(self.key.end.cmp(&other.key.end)) .then(self.is_image.cmp(&other.is_image)) } } impl From<&PersistentLayerDesc> for LayerKey { fn from(layer: &PersistentLayerDesc) -> Self { let kr = layer.get_key_range(); let lr = layer.get_lsn_range(); LayerKey { key: kr.start.to_i128()..kr.end.to_i128(), lsn: lr.start.0..lr.end.0, is_image: !layer.is_incremental(), } } } /// Efficiently queryable layer coverage for each LSN. /// /// Allows answering layer map queries very efficiently, /// but doesn't allow retroactive insertion, which is /// sometimes necessary. See BufferedHistoricLayerCoverage. pub struct HistoricLayerCoverage<Value> { /// The latest state head: LayerCoverageTuple<Value>, /// TODO: this could be an ordered vec using binary search. 
/// We push into this map everytime we add a layer, so might see some benefit /// All previous states historic: BTreeMap<u64, LayerCoverageTuple<Value>>, } impl<T: Clone> Default for HistoricLayerCoverage<T> { fn default() -> Self { Self::new() } } impl<Value: Clone> HistoricLayerCoverage<Value> { pub fn new() -> Self { Self { head: LayerCoverageTuple::default(), historic: BTreeMap::default(), } } /// Add a layer /// /// Panics if new layer has older lsn.start than an existing layer. /// See BufferedHistoricLayerCoverage for a more general insertion method. pub fn insert(&mut self, layer_key: LayerKey, value: Value) { // It's only a persistent map, not a retroactive one if let Some(last_entry) = self.historic.iter().next_back() { let last_lsn = last_entry.0; if layer_key.lsn.start < *last_lsn { panic!("unexpected retroactive insert"); } } // Insert into data structure let target = if layer_key.is_image { &mut self.head.image_coverage } else { &mut self.head.delta_coverage }; target.insert(layer_key.key, layer_key.lsn.clone(), value); // Remember history. Clone is O(1) self.historic.insert(layer_key.lsn.start, self.head.clone()); } /// Query at a particular LSN, inclusive pub fn get_version(&self, lsn: u64) -> Option<&LayerCoverageTuple<Value>> { match self.historic.range(..=lsn).next_back() { Some((_, v)) => Some(v), None => None, } } /// Remove all entries after a certain LSN (inclusive) pub fn trim(&mut self, begin: &u64) { self.historic.split_off(begin); self.head = self .historic .iter() .next_back() .map(|(_, v)| v.clone()) .unwrap_or_default(); } } /// This is the most basic test that demonstrates intended usage. /// All layers in this test have height 1. 
#[test] fn test_persistent_simple() { let mut map = HistoricLayerCoverage::<String>::new(); map.insert( LayerKey { key: 0..5, lsn: 100..101, is_image: true, }, "Layer 1".to_string(), ); map.insert( LayerKey { key: 3..9, lsn: 110..111, is_image: true, }, "Layer 2".to_string(), ); map.insert( LayerKey { key: 5..6, lsn: 120..121, is_image: true, }, "Layer 3".to_string(), ); // After Layer 1 insertion let version = map.get_version(105).unwrap(); assert_eq!(version.image_coverage.query(1), Some("Layer 1".to_string())); assert_eq!(version.image_coverage.query(4), Some("Layer 1".to_string())); // After Layer 2 insertion let version = map.get_version(115).unwrap(); assert_eq!(version.image_coverage.query(4), Some("Layer 2".to_string())); assert_eq!(version.image_coverage.query(8), Some("Layer 2".to_string())); assert_eq!(version.image_coverage.query(11), None); // After Layer 3 insertion let version = map.get_version(125).unwrap(); assert_eq!(version.image_coverage.query(4), Some("Layer 2".to_string())); assert_eq!(version.image_coverage.query(5), Some("Layer 3".to_string())); assert_eq!(version.image_coverage.query(7), Some("Layer 2".to_string())); } /// Cover simple off-by-one edge cases #[test] fn test_off_by_one() { let mut map = HistoricLayerCoverage::<String>::new(); map.insert( LayerKey { key: 3..5, lsn: 100..110, is_image: true, }, "Layer 1".to_string(), ); // Check different LSNs let version = map.get_version(99); assert!(version.is_none()); let version = map.get_version(100).unwrap(); assert_eq!(version.image_coverage.query(4), Some("Layer 1".to_string())); let version = map.get_version(110).unwrap(); assert_eq!(version.image_coverage.query(4), Some("Layer 1".to_string())); // Check different keys let version = map.get_version(105).unwrap(); assert_eq!(version.image_coverage.query(2), None); assert_eq!(version.image_coverage.query(3), Some("Layer 1".to_string())); assert_eq!(version.image_coverage.query(4), Some("Layer 1".to_string())); 
assert_eq!(version.image_coverage.query(5), None); } /// White-box regression test, checking for incorrect removal of node at key.end #[test] fn test_regression() { let mut map = HistoricLayerCoverage::<String>::new(); map.insert( LayerKey { key: 0..5, lsn: 0..5, is_image: false, }, "Layer 1".to_string(), ); map.insert( LayerKey { key: 0..5, lsn: 1..2, is_image: false, }, "Layer 2".to_string(), ); // If an insertion operation improperly deletes the endpoint of a previous layer // (which is more likely to happen with layers that collide on key.end), we will // end up with an infinite layer, covering the entire keyspace. Here we assert // that there's no layer at key 100 because we didn't insert any layer there. let version = map.get_version(100).unwrap(); assert_eq!(version.delta_coverage.query(100), None); } /// Cover edge cases where layers begin or end on the same key #[test] fn test_key_collision() { let mut map = HistoricLayerCoverage::<String>::new(); map.insert( LayerKey { key: 3..5, lsn: 100..110, is_image: true, }, "Layer 10".to_string(), ); map.insert( LayerKey { key: 5..8, lsn: 100..110, is_image: true, }, "Layer 11".to_string(), ); map.insert( LayerKey { key: 3..4, lsn: 200..210, is_image: true, }, "Layer 20".to_string(), ); // Check after layer 11 let version = map.get_version(105).unwrap(); assert_eq!(version.image_coverage.query(2), None); assert_eq!( version.image_coverage.query(3), Some("Layer 10".to_string()) ); assert_eq!( version.image_coverage.query(5), Some("Layer 11".to_string()) ); assert_eq!( version.image_coverage.query(7), Some("Layer 11".to_string()) ); assert_eq!(version.image_coverage.query(8), None); // Check after layer 20 let version = map.get_version(205).unwrap(); assert_eq!(version.image_coverage.query(2), None); assert_eq!( version.image_coverage.query(3), Some("Layer 20".to_string()) ); assert_eq!( version.image_coverage.query(5), Some("Layer 11".to_string()) ); assert_eq!( version.image_coverage.query(7), Some("Layer 
11".to_string()) ); assert_eq!(version.image_coverage.query(8), None); } /// Test when rectangles have nontrivial height and possibly overlap #[test] fn test_persistent_overlapping() { let mut map = HistoricLayerCoverage::<String>::new(); // Add 3 key-disjoint layers with varying LSN ranges map.insert( LayerKey { key: 1..2, lsn: 100..200, is_image: true, }, "Layer 1".to_string(), ); map.insert( LayerKey { key: 4..5, lsn: 110..200, is_image: true, }, "Layer 2".to_string(), ); map.insert( LayerKey { key: 7..8, lsn: 120..300, is_image: true, }, "Layer 3".to_string(), ); // Add wide and short layer map.insert( LayerKey { key: 0..9, lsn: 130..199, is_image: true, }, "Layer 4".to_string(), ); // Add wide layer taller than some map.insert( LayerKey { key: 0..9, lsn: 140..201, is_image: true, }, "Layer 5".to_string(), ); // Add wide layer taller than all map.insert( LayerKey { key: 0..9, lsn: 150..301, is_image: true, }, "Layer 6".to_string(), ); // After layer 4 insertion let version = map.get_version(135).unwrap(); assert_eq!(version.image_coverage.query(0), Some("Layer 4".to_string())); assert_eq!(version.image_coverage.query(1), Some("Layer 1".to_string())); assert_eq!(version.image_coverage.query(2), Some("Layer 4".to_string())); assert_eq!(version.image_coverage.query(4), Some("Layer 2".to_string())); assert_eq!(version.image_coverage.query(5), Some("Layer 4".to_string())); assert_eq!(version.image_coverage.query(7), Some("Layer 3".to_string())); assert_eq!(version.image_coverage.query(8), Some("Layer 4".to_string())); // After layer 5 insertion let version = map.get_version(145).unwrap(); assert_eq!(version.image_coverage.query(0), Some("Layer 5".to_string())); assert_eq!(version.image_coverage.query(1), Some("Layer 5".to_string())); assert_eq!(version.image_coverage.query(2), Some("Layer 5".to_string())); assert_eq!(version.image_coverage.query(4), Some("Layer 5".to_string())); assert_eq!(version.image_coverage.query(5), Some("Layer 5".to_string())); 
assert_eq!(version.image_coverage.query(7), Some("Layer 3".to_string())); assert_eq!(version.image_coverage.query(8), Some("Layer 5".to_string())); // After layer 6 insertion let version = map.get_version(155).unwrap(); assert_eq!(version.image_coverage.query(0), Some("Layer 6".to_string())); assert_eq!(version.image_coverage.query(1), Some("Layer 6".to_string())); assert_eq!(version.image_coverage.query(2), Some("Layer 6".to_string())); assert_eq!(version.image_coverage.query(4), Some("Layer 6".to_string())); assert_eq!(version.image_coverage.query(5), Some("Layer 6".to_string())); assert_eq!(version.image_coverage.query(7), Some("Layer 6".to_string())); assert_eq!(version.image_coverage.query(8), Some("Layer 6".to_string())); } /// Wrapper for HistoricLayerCoverage that allows us to hack around the lack /// of support for retroactive insertion by rebuilding the map since the /// change. /// /// Why is this needed? We most often insert new layers with newer LSNs, /// but during compaction we create layers with non-latest LSN, and during /// GC we delete historic layers. /// /// Even though rebuilding is an expensive (N log N) solution to the problem, /// it's not critical since we do something equally expensive just to decide /// whether or not to create new image layers. /// TODO It's not expensive but it's not great to hold a layer map write lock /// for that long. /// /// If this becomes an actual bottleneck, one solution would be to build a /// segment tree that holds PersistentLayerMaps. Though this would mean that /// we take an additional log(N) performance hit for queries, which will probably /// still be more critical. 
/// /// See this for more on persistent and retroactive techniques: /// <https://www.youtube.com/watch?v=WqCWghETNDc&t=581s> pub struct BufferedHistoricLayerCoverage<Value> { /// A persistent layer map that we rebuild when we need to retroactively update historic_coverage: HistoricLayerCoverage<Value>, /// We buffer insertion into the PersistentLayerMap to decrease the number of rebuilds. buffer: BTreeMap<LayerKey, Option<Value>>, /// All current layers. This is not used for search. Only to make rebuilds easier. // TODO: This map is never cleared. Rebuilds could use the post-trim last entry of // [`Self::historic_coverage`] instead of doubling memory usage. // [`Self::len`]: can require rebuild and serve from latest historic // [`Self::iter`]: already requires rebuild => can serve from latest historic layers: BTreeMap<LayerKey, Value>, } impl<T: std::fmt::Debug> std::fmt::Debug for BufferedHistoricLayerCoverage<T> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("RetroactiveLayerMap") .field("buffer", &self.buffer) .field("layers", &self.layers) .finish() } } impl<T: Clone> Default for BufferedHistoricLayerCoverage<T> { fn default() -> Self { Self::new() } } impl<Value: Clone> BufferedHistoricLayerCoverage<Value> { pub fn new() -> Self { Self { historic_coverage: HistoricLayerCoverage::<Value>::new(), buffer: BTreeMap::new(), layers: BTreeMap::new(), } } pub fn insert(&mut self, layer_key: LayerKey, value: Value) { self.buffer.insert(layer_key, Some(value)); } pub fn remove(&mut self, layer_key: LayerKey) { self.buffer.insert(layer_key, None); } pub fn rebuild(&mut self) { // Find the first LSN that needs to be rebuilt let rebuild_since: u64 = match self.buffer.iter().next() { Some((LayerKey { lsn, .. 
}, _)) => lsn.start, None => return, // No need to rebuild if buffer is empty }; // Apply buffered updates to self.layers let num_updates = self.buffer.len(); self.buffer.retain(|layer_key, layer| { match layer { Some(l) => { self.layers.insert(layer_key.clone(), l.clone()); } None => { self.layers.remove(layer_key); } }; false }); // Rebuild let mut num_inserted = 0; self.historic_coverage.trim(&rebuild_since); for (layer_key, layer) in self.layers.range( LayerKey { lsn: rebuild_since..0, key: 0..0, is_image: false, }.., ) { self.historic_coverage .insert(layer_key.clone(), layer.clone()); num_inserted += 1; } // TODO maybe only warn if ratio is at least 10 info!( "Rebuilt layer map. Did {} insertions to process a batch of {} updates.", num_inserted, num_updates, ) } /// Iterate all the layers pub fn iter(&self) -> impl ExactSizeIterator<Item = Value> { // NOTE we can actually perform this without rebuilding, // but it's not necessary for now. if !self.buffer.is_empty() { panic!("rebuild pls") } self.layers.values().cloned() } /// Return a reference to a queryable map, assuming all updates /// have already been processed using self.rebuild() pub fn get(&self) -> anyhow::Result<&HistoricLayerCoverage<Value>> { // NOTE we error here instead of implicitly rebuilding because // rebuilding is somewhat expensive. // TODO maybe implicitly rebuild and log/sentry an error? 
if !self.buffer.is_empty() { anyhow::bail!("rebuild required") } Ok(&self.historic_coverage) } pub(crate) fn len(&self) -> usize { self.layers.len() } } #[test] fn test_retroactive_regression_1() { let mut map = BufferedHistoricLayerCoverage::new(); map.insert( LayerKey { key: 0..21267647932558653966460912964485513215, lsn: 23761336..23761457, is_image: false, }, "sdfsdfs".to_string(), ); map.rebuild(); let version = map.get().unwrap().get_version(23761457).unwrap(); assert_eq!( version.delta_coverage.query(100), Some("sdfsdfs".to_string()) ); } #[test] fn test_retroactive_simple() { let mut map = BufferedHistoricLayerCoverage::new(); // Append some images in increasing LSN order map.insert( LayerKey { key: 0..5, lsn: 100..101, is_image: true, }, "Image 1".to_string(), ); map.insert( LayerKey { key: 3..9, lsn: 110..111, is_image: true, }, "Image 2".to_string(), ); map.insert( LayerKey { key: 4..6, lsn: 120..121, is_image: true, }, "Image 3".to_string(), ); map.insert( LayerKey { key: 8..9, lsn: 120..121, is_image: true, }, "Image 4".to_string(), ); // Add a delta layer out of order map.insert( LayerKey { key: 2..5, lsn: 105..106, is_image: false, }, "Delta 1".to_string(), ); // Rebuild so we can start querying map.rebuild(); { let map = map.get().expect("rebuilt"); let version = map.get_version(90); assert!(version.is_none()); let version = map.get_version(102).unwrap(); assert_eq!(version.image_coverage.query(4), Some("Image 1".to_string())); let version = map.get_version(107).unwrap(); assert_eq!(version.image_coverage.query(4), Some("Image 1".to_string())); assert_eq!(version.delta_coverage.query(4), Some("Delta 1".to_string())); let version = map.get_version(115).unwrap(); assert_eq!(version.image_coverage.query(4), Some("Image 2".to_string())); let version = map.get_version(125).unwrap(); assert_eq!(version.image_coverage.query(4), Some("Image 3".to_string())); } // Remove Image 3 map.remove(LayerKey { key: 4..6, lsn: 120..121, is_image: true, }); 
map.rebuild(); { // Check deletion worked let map = map.get().expect("rebuilt"); let version = map.get_version(125).unwrap(); assert_eq!(version.image_coverage.query(4), Some("Image 2".to_string())); assert_eq!(version.image_coverage.query(8), Some("Image 4".to_string())); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/layer_map/layer_coverage.rs
pageserver/src/tenant/layer_map/layer_coverage.rs
use std::ops::Range; // NOTE the `im` crate has 20x more downloads and also has // persistent/immutable BTree. But it's bugged so rpds is a // better choice <https://github.com/neondatabase/neon/issues/3395> use rpds::RedBlackTreeMapSync; /// Data structure that can efficiently: /// - find the latest layer by lsn.end at a given key /// - iterate the latest layers in a key range /// - insert layers in non-decreasing lsn.start order /// /// For a detailed explanation and justification of this approach, see: /// <https://neon.tech/blog/persistent-structures-in-neons-wal-indexing> /// /// NOTE The struct is parameterized over Value for easier /// testing, but in practice it's some sort of layer. pub struct LayerCoverage<Value> { /// For every change in coverage (as we sweep the key space) /// we store (lsn.end, value). /// /// NOTE We use an immutable/persistent tree so that we can keep historic /// versions of this coverage without cloning the whole thing and /// incurring quadratic memory cost. See HistoricLayerCoverage. /// /// NOTE We use the Sync version of the map because we want Self to /// be Sync. Using nonsync might be faster, if we can work with /// that. nodes: RedBlackTreeMapSync<i128, Option<(u64, Value)>>, } impl<T: Clone> Default for LayerCoverage<T> { fn default() -> Self { Self::new() } } impl<Value: Clone> LayerCoverage<Value> { pub fn new() -> Self { Self { nodes: RedBlackTreeMapSync::default(), } } /// Helper function to subdivide the key range without changing any values /// /// This operation has no semantic effect by itself. It only helps us pin in /// place the part of the coverage we don't want to change when inserting. /// /// As an analogy, think of a polygon. If you add a vertex along one of the /// segments, the polygon is still the same, but it behaves differently when /// we move or delete one of the other points. 
/// /// Complexity: O(log N) fn add_node(&mut self, key: i128) { let value = match self.nodes.range(..=key).next_back() { Some((_, Some(v))) => Some(v.clone()), Some((_, None)) => None, None => None, }; self.nodes.insert_mut(key, value); } /// Insert a layer. /// /// Complexity: worst case O(N), in practice O(log N). See NOTE in implementation. pub fn insert(&mut self, key: Range<i128>, lsn: Range<u64>, value: Value) { // Add nodes at endpoints // // NOTE The order of lines is important. We add nodes at the start // and end of the key range **before updating any nodes** in order // to pin down the current coverage outside of the relevant key range. // Only the coverage inside the layer's key range should change. self.add_node(key.start); self.add_node(key.end); // Raise the height where necessary // // NOTE This loop is worst case O(N), but amortized O(log N) in the special // case when rectangles have no height. In practice I don't think we'll see // the kind of layer intersections needed to trigger O(N) behavior. The worst // case is N/2 horizontal layers overlapped with N/2 vertical layers in a // grid pattern. let mut to_update = Vec::new(); let mut to_remove = Vec::new(); let mut prev_covered = false; for (k, node) in self.nodes.range(key) { let needs_cover = match node { None => true, Some((h, _)) => h < &lsn.end, }; if needs_cover { match prev_covered { true => to_remove.push(*k), false => to_update.push(*k), } } prev_covered = needs_cover; } // TODO check if the nodes inserted at key.start and key.end are safe // to remove. It's fine to keep them but they could be redundant. for k in to_update { self.nodes.insert_mut(k, Some((lsn.end, value.clone()))); } for k in to_remove { self.nodes.remove_mut(&k); } } /// Get the latest (by lsn.end) layer at a given key /// /// Complexity: O(log N) pub fn query(&self, key: i128) -> Option<Value> { self.nodes .range(..=key) .next_back()? 
.1 .as_ref() .map(|(_, v)| v.clone()) } /// Iterate the changes in layer coverage in a given range. You will likely /// want to start with self.query(key.start), and then follow up with self.range /// /// Complexity: O(log N + result_size) pub fn range(&self, key: Range<i128>) -> impl '_ + Iterator<Item = (i128, Option<Value>)> { self.nodes .range(key) .map(|(k, v)| (*k, v.as_ref().map(|x| x.1.clone()))) } /// Returns an iterator which includes all coverage changes for layers that intersect /// with the provided range. pub fn range_overlaps( &self, key_range: &Range<i128>, ) -> impl Iterator<Item = (i128, Option<Value>)> + '_ where Value: Eq, { let first_change = self.query(key_range.start); match first_change { Some(change) => { // If the start of the range is covered, we have to deal with two cases: // 1. Start of the range is aligned with the start of a layer. // In this case the return of `self.range` will contain the layer which aligns with the start of the key range. // We advance said iterator to avoid duplicating the first change. // 2. Start of the range is not aligned with the start of a layer. let range = key_range.start..key_range.end; let mut range_coverage = self.range(range).peekable(); if range_coverage .peek() .is_some_and(|c| c.1.as_ref() == Some(&change)) { range_coverage.next(); } itertools::Either::Left( std::iter::once((key_range.start, Some(change))).chain(range_coverage), ) } None => { let range = key_range.start..key_range.end; let coverage = self.range(range); itertools::Either::Right(coverage) } } } /// O(1) clone pub fn clone(&self) -> Self { Self { nodes: self.nodes.clone(), } } } /// Image and delta coverage at a specific LSN. 
pub struct LayerCoverageTuple<Value> { pub image_coverage: LayerCoverage<Value>, pub delta_coverage: LayerCoverage<Value>, } impl<T: Clone> Default for LayerCoverageTuple<T> { fn default() -> Self { Self { image_coverage: LayerCoverage::default(), delta_coverage: LayerCoverage::default(), } } } impl<Value: Clone> LayerCoverageTuple<Value> { pub fn clone(&self) -> Self { Self { image_coverage: self.image_coverage.clone(), delta_coverage: self.delta_coverage.clone(), } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/remote_timeline_client/download.rs
pageserver/src/tenant/remote_timeline_client/download.rs
//! Helper functions to download files from remote storage with a RemoteStorage //! //! The functions in this module retry failed operations automatically, according //! to the FAILED_DOWNLOAD_RETRIES constant. use std::collections::HashSet; use std::future::Future; use std::str::FromStr; use std::sync::atomic::AtomicU64; use std::time::SystemTime; use anyhow::{Context, anyhow}; use camino::{Utf8Path, Utf8PathBuf}; use pageserver_api::shard::TenantShardId; use remote_storage::{ DownloadError, DownloadKind, DownloadOpts, GenericRemoteStorage, ListingMode, RemotePath, }; use tokio::fs::{self, File, OpenOptions}; use tokio::io::AsyncSeekExt; use tokio_util::io::StreamReader; use tokio_util::sync::CancellationToken; use tracing::warn; use utils::crashsafe::path_with_suffix_extension; use utils::id::{TenantId, TimelineId}; use utils::{backoff, pausable_failpoint}; use super::index::{IndexPart, LayerFileMetadata}; use super::manifest::TenantManifest; use super::{ FAILED_DOWNLOAD_WARN_THRESHOLD, FAILED_REMOTE_OP_RETRIES, INITDB_PATH, parse_remote_index_path, parse_remote_tenant_manifest_path, remote_index_path, remote_initdb_archive_path, remote_initdb_preserved_archive_path, remote_tenant_manifest_path, remote_tenant_manifest_prefix, remote_tenant_path, }; use crate::TEMP_FILE_SUFFIX; use crate::config::PageServerConf; use crate::context::RequestContext; use crate::span::{ debug_assert_current_span_has_tenant_and_timeline_id, debug_assert_current_span_has_tenant_id, }; use crate::tenant::Generation; use crate::tenant::remote_timeline_client::{remote_layer_path, remote_timelines_path}; use crate::tenant::storage_layer::LayerName; use crate::virtual_file; use crate::virtual_file::owned_buffers_io::write::FlushTaskError; use crate::virtual_file::{IoBufferMut, MaybeFatalIo, VirtualFile}; use crate::virtual_file::{TempVirtualFile, owned_buffers_io}; /// /// If 'metadata' is given, we will validate that the downloaded file's size matches that /// in the metadata. 
(In the future, we might do more cross-checks, like CRC validation) /// /// Returns the size of the downloaded file. #[allow(clippy::too_many_arguments)] pub async fn download_layer_file<'a>( conf: &'static PageServerConf, storage: &'a GenericRemoteStorage, tenant_shard_id: TenantShardId, timeline_id: TimelineId, layer_file_name: &'a LayerName, layer_metadata: &'a LayerFileMetadata, local_path: &Utf8Path, gate: &utils::sync::gate::Gate, cancel: &CancellationToken, ctx: &RequestContext, ) -> Result<u64, DownloadError> { debug_assert_current_span_has_tenant_and_timeline_id(); let timeline_path = conf.timeline_path(&tenant_shard_id, &timeline_id); let remote_path = remote_layer_path( &tenant_shard_id.tenant_id, &timeline_id, layer_metadata.shard, layer_file_name, layer_metadata.generation, ); let (bytes_amount, temp_file) = download_retry( || async { // TempVirtualFile requires us to never reuse a filename while an old // instance of TempVirtualFile created with that filename is not done dropping yet. // So, we use a monotonic counter to disambiguate the filenames. 
static NEXT_TEMP_DISAMBIGUATOR: AtomicU64 = AtomicU64::new(1); let filename_disambiguator = NEXT_TEMP_DISAMBIGUATOR.fetch_add(1, std::sync::atomic::Ordering::Relaxed); let temp_file_path = path_with_suffix_extension( local_path, &format!("{filename_disambiguator:x}.{TEMP_DOWNLOAD_EXTENSION}"), ); let temp_file = TempVirtualFile::new( VirtualFile::open_with_options_v2( &temp_file_path, virtual_file::OpenOptions::new() .create_new(true) .write(true), ctx, ) .await .with_context(|| format!("create a temp file for layer download: {temp_file_path}")) .map_err(DownloadError::Other)?, gate.enter().map_err(|_| DownloadError::Cancelled)?, ); download_object(storage, &remote_path, temp_file, gate, cancel, ctx).await }, &format!("download {remote_path:?}"), cancel, ) .await?; let expected = layer_metadata.file_size; if expected != bytes_amount { return Err(DownloadError::Other(anyhow!( "According to layer file metadata should have downloaded {expected} bytes but downloaded {bytes_amount} bytes into file {:?}", temp_file.path() ))); } fail::fail_point!("remote-storage-download-pre-rename", |_| { Err(DownloadError::Other(anyhow!( "remote-storage-download-pre-rename failpoint triggered" ))) }); // Try rename before disarming the temp file. // That way, if rename fails for whatever reason, we clean up the temp file on the return path. fs::rename(temp_file.path(), &local_path) .await .with_context(|| format!("rename download layer file to {local_path}")) .map_err(DownloadError::Other)?; // The temp file's VirtualFile points to the temp_file_path which we moved above. // Drop it immediately, it's invalid. // This will get better in https://github.com/neondatabase/neon/issues/11692 let _: VirtualFile = temp_file.disarm_into_inner(); // NB: The gate guard that was stored in `temp_file` is dropped but we continue // to operate on it and on the parent timeline directory. 
// Those operations are safe to do because higher-level code is holding another gate guard: // - attached mode: the download task spawned by struct Layer is holding the gate guard // - secondary mode: The TenantDownloader::download holds the gate open // The rename above is not durable yet. // It doesn't matter for crash consistency because pageserver startup deletes temp // files and we'll re-download on demand if necessary. // We use fatal_err() below because the after the rename above, // the in-memory state of the filesystem already has the layer file in its final place, // and subsequent pageserver code could think it's durable while it really isn't. let work = { let ctx = ctx.detached_child(ctx.task_kind(), ctx.download_behavior()); async move { let timeline_dir = VirtualFile::open(&timeline_path, &ctx) .await .fatal_err("VirtualFile::open for timeline dir fsync"); timeline_dir .sync_all() .await .fatal_err("VirtualFile::sync_all timeline dir"); } }; crate::virtual_file::io_engine::get() .spawn_blocking_and_block_on_if_std(work) .await; tracing::debug!("download complete: {local_path}"); Ok(bytes_amount) } /// Download the object `src_path` in the remote `storage` to local path `dst_path`. /// /// If Ok() is returned, the download succeeded and the inode & data have been made durable. /// (Note that the directory entry for the inode is not made durable.) /// The file size in bytes is returned. /// /// If Err() is returned, there was some error. The file at `dst_path` has been unlinked. /// The unlinking has _not_ been made durable. 
async fn download_object( storage: &GenericRemoteStorage, src_path: &RemotePath, destination_file: TempVirtualFile, gate: &utils::sync::gate::Gate, cancel: &CancellationToken, ctx: &RequestContext, ) -> Result<(u64, TempVirtualFile), DownloadError> { let mut download = storage .download(src_path, &DownloadOpts::default(), cancel) .await?; pausable_failpoint!("before-downloading-layer-stream-pausable"); let dst_path = destination_file.path().to_owned(); let mut buffered = owned_buffers_io::write::BufferedWriter::<IoBufferMut, _>::new( destination_file, 0, || IoBufferMut::with_capacity(super::BUFFER_SIZE), gate.enter().map_err(|_| DownloadError::Cancelled)?, cancel.child_token(), ctx, tracing::info_span!(parent: None, "download_object_buffered_writer", %dst_path), ); // TODO: use vectored write (writev) once supported by tokio-epoll-uring. // There's chunks_vectored() on the stream. let (bytes_amount, destination_file) = async { while let Some(res) = futures::StreamExt::next(&mut download.download_stream).await { let chunk = match res { Ok(chunk) => chunk, Err(e) => return Err(DownloadError::from(e)), }; buffered .write_buffered_borrowed(&chunk, ctx) .await .map_err(|e| match e { FlushTaskError::Cancelled => DownloadError::Cancelled, })?; } buffered .shutdown( owned_buffers_io::write::BufferedWriterShutdownMode::PadThenTruncate, ctx, ) .await .map_err(|e| match e { FlushTaskError::Cancelled => DownloadError::Cancelled, }) } .await?; // not using sync_data because it can lose file size update destination_file .sync_all() .await .maybe_fatal_err("download_object sync_all") .with_context(|| format!("failed to fsync source file at {dst_path}")) .map_err(DownloadError::Other)?; Ok((bytes_amount, destination_file)) } const TEMP_DOWNLOAD_EXTENSION: &str = "temp_download"; pub(crate) fn is_temp_download_file(path: &Utf8Path) -> bool { let extension = path.extension(); match extension { Some(TEMP_DOWNLOAD_EXTENSION) => true, Some(_) => false, None => false, } } async fn 
list_identifiers<T>( storage: &GenericRemoteStorage, prefix: RemotePath, cancel: CancellationToken, ) -> anyhow::Result<(HashSet<T>, HashSet<String>)> where T: FromStr + Eq + std::hash::Hash, { let listing = download_retry_forever( || storage.list(Some(&prefix), ListingMode::WithDelimiter, None, &cancel), &format!("list identifiers in prefix {prefix}"), &cancel, ) .await?; let mut parsed_ids = HashSet::new(); let mut other_prefixes = HashSet::new(); for id_remote_storage_key in listing.prefixes { let object_name = id_remote_storage_key.object_name().ok_or_else(|| { anyhow::anyhow!("failed to get object name for key {id_remote_storage_key}") })?; match object_name.parse::<T>() { Ok(t) => parsed_ids.insert(t), Err(_) => other_prefixes.insert(object_name.to_string()), }; } for object in listing.keys { let object_name = object .key .object_name() .ok_or_else(|| anyhow::anyhow!("object name for key {}", object.key))?; other_prefixes.insert(object_name.to_string()); } Ok((parsed_ids, other_prefixes)) } /// List shards of given tenant in remote storage pub(crate) async fn list_remote_tenant_shards( storage: &GenericRemoteStorage, tenant_id: TenantId, cancel: CancellationToken, ) -> anyhow::Result<(HashSet<TenantShardId>, HashSet<String>)> { let remote_path = remote_tenant_path(&TenantShardId::unsharded(tenant_id)); list_identifiers::<TenantShardId>(storage, remote_path, cancel).await } /// List timelines of given tenant shard in remote storage pub async fn list_remote_timelines( storage: &GenericRemoteStorage, tenant_shard_id: TenantShardId, cancel: CancellationToken, ) -> anyhow::Result<(HashSet<TimelineId>, HashSet<String>)> { fail::fail_point!("storage-sync-list-remote-timelines", |_| { anyhow::bail!("storage-sync-list-remote-timelines"); }); let remote_path = remote_timelines_path(&tenant_shard_id).add_trailing_slash(); list_identifiers::<TimelineId>(storage, remote_path, cancel).await } async fn do_download_remote_path_retry_forever( storage: &GenericRemoteStorage, 
remote_path: &RemotePath, download_opts: DownloadOpts, cancel: &CancellationToken, ) -> Result<(Vec<u8>, SystemTime), DownloadError> { download_retry_forever( || async { let download = storage .download(remote_path, &download_opts, cancel) .await?; let mut bytes = Vec::new(); let stream = download.download_stream; let mut stream = StreamReader::new(stream); tokio::io::copy_buf(&mut stream, &mut bytes).await?; Ok((bytes, download.last_modified)) }, &format!("download {remote_path:?}"), cancel, ) .await } async fn do_download_tenant_manifest( storage: &GenericRemoteStorage, tenant_shard_id: &TenantShardId, _timeline_id: Option<&TimelineId>, generation: Generation, cancel: &CancellationToken, ) -> Result<(TenantManifest, Generation, SystemTime), DownloadError> { let remote_path = remote_tenant_manifest_path(tenant_shard_id, generation); let download_opts = DownloadOpts { kind: DownloadKind::Small, ..Default::default() }; let (manifest_bytes, manifest_bytes_mtime) = do_download_remote_path_retry_forever(storage, &remote_path, download_opts, cancel).await?; let tenant_manifest = TenantManifest::from_json_bytes(&manifest_bytes) .with_context(|| format!("deserialize tenant manifest file at {remote_path:?}")) .map_err(DownloadError::Other)?; Ok((tenant_manifest, generation, manifest_bytes_mtime)) } async fn do_download_index_part( storage: &GenericRemoteStorage, tenant_shard_id: &TenantShardId, timeline_id: Option<&TimelineId>, index_generation: Generation, cancel: &CancellationToken, ) -> Result<(IndexPart, Generation, SystemTime), DownloadError> { let timeline_id = timeline_id.expect("A timeline ID is always provided when downloading an index"); let remote_path = remote_index_path(tenant_shard_id, timeline_id, index_generation); let download_opts = DownloadOpts { kind: DownloadKind::Small, ..Default::default() }; let (index_part_bytes, index_part_mtime) = do_download_remote_path_retry_forever(storage, &remote_path, download_opts, cancel).await?; let index_part: IndexPart 
= serde_json::from_slice(&index_part_bytes) .with_context(|| format!("deserialize index part file at {remote_path:?}")) .map_err(DownloadError::Other)?; Ok((index_part, index_generation, index_part_mtime)) } /// Metadata objects are "generationed", meaning that they include a generation suffix. This /// function downloads the object with the highest generation <= `my_generation`. /// /// Data objects (layer files) also include a generation in their path, but there is no equivalent /// search process, because their reference from an index includes the generation. /// /// An expensive object listing operation is only done if necessary: the typical fast path is to issue two /// GET operations, one to our own generation (stale attachment case), and one to the immediately preceding /// generation (normal case when migrating/restarting). Only if both of these return 404 do we fall back /// to listing objects. /// /// * `my_generation`: the value of `[crate::tenant::TenantShard::generation]` /// * `what`: for logging, what object are we downloading /// * `prefix`: when listing objects, use this prefix (i.e. the part of the object path before the generation) /// * `do_download`: a GET of the object in a particular generation, which should **retry indefinitely** unless /// `cancel`` has fired. This function does not do its own retries of GET operations, and relies /// on the function passed in to do so. /// * `parse_path`: parse a fully qualified remote storage path to get the generation of the object. 
#[allow(clippy::too_many_arguments)] #[tracing::instrument(skip_all, fields(generation=?my_generation))] pub(crate) async fn download_generation_object<'a, T, DF, DFF, PF>( storage: &'a GenericRemoteStorage, tenant_shard_id: &'a TenantShardId, timeline_id: Option<&'a TimelineId>, my_generation: Generation, what: &str, prefix: RemotePath, do_download: DF, parse_path: PF, cancel: &'a CancellationToken, ) -> Result<(T, Generation, SystemTime), DownloadError> where DF: Fn( &'a GenericRemoteStorage, &'a TenantShardId, Option<&'a TimelineId>, Generation, &'a CancellationToken, ) -> DFF, DFF: Future<Output = Result<(T, Generation, SystemTime), DownloadError>>, PF: Fn(RemotePath) -> Option<Generation>, T: 'static, { debug_assert_current_span_has_tenant_id(); if my_generation.is_none() { // Operating without generations: just fetch the generation-less path return do_download(storage, tenant_shard_id, timeline_id, my_generation, cancel).await; } // Stale case: If we were intentionally attached in a stale generation, the remote object may already // exist in our generation. // // This is an optimization to avoid doing the listing for the general case below. let res = do_download(storage, tenant_shard_id, timeline_id, my_generation, cancel).await; match res { Ok(decoded) => { tracing::debug!("Found {what} from current generation (this is a stale attachment)"); return Ok(decoded); } Err(DownloadError::NotFound) => {} Err(e) => return Err(e), }; // Typical case: the previous generation of this tenant was running healthily, and had uploaded the object // we are seeking in that generation. We may safely start from this index without doing a listing, because: // - We checked for current generation case above // - generations > my_generation are to be ignored // - any other objects that exist would have an older generation than `previous_gen`, and // we want to find the most recent object from a previous generation. 
// // This is an optimization to avoid doing the listing for the general case below. let res = do_download( storage, tenant_shard_id, timeline_id, my_generation.previous(), cancel, ) .await; match res { Ok(decoded) => { tracing::debug!("Found {what} from previous generation"); return Ok(decoded); } Err(DownloadError::NotFound) => { tracing::debug!("No {what} found from previous generation, falling back to listing"); } Err(e) => { return Err(e); } } // General case/fallback: if there is no index at my_generation or prev_generation, then list all index_part.json // objects, and select the highest one with a generation <= my_generation. Constructing the prefix is equivalent // to constructing a full index path with no generation, because the generation is a suffix. let paths = download_retry( || async { storage .list(Some(&prefix), ListingMode::NoDelimiter, None, cancel) .await }, "list index_part files", cancel, ) .await? .keys; // General case logic for which index to use: the latest index whose generation // is <= our own. See "Finding the remote indices for timelines" in docs/rfcs/025-generation-numbers.md let max_previous_generation = paths .into_iter() .filter_map(|o| parse_path(o.key)) .filter(|g| g <= &my_generation) .max(); match max_previous_generation { Some(g) => { tracing::debug!("Found {what} in generation {g:?}"); do_download(storage, tenant_shard_id, timeline_id, g, cancel).await } None => { // Migration from legacy pre-generation state: we have a generation but no prior // attached pageservers did. Try to load from a no-generation path. tracing::debug!("No {what}* found"); do_download( storage, tenant_shard_id, timeline_id, Generation::none(), cancel, ) .await } } } /// index_part.json objects are suffixed with a generation number, so we cannot /// directly GET the latest index part without doing some probing. /// /// In this function we probe for the most recent index in a generation <= our current generation. 
/// See "Finding the remote indices for timelines" in docs/rfcs/025-generation-numbers.md pub(crate) async fn download_index_part( storage: &GenericRemoteStorage, tenant_shard_id: &TenantShardId, timeline_id: &TimelineId, my_generation: Generation, cancel: &CancellationToken, ) -> Result<(IndexPart, Generation, SystemTime), DownloadError> { debug_assert_current_span_has_tenant_and_timeline_id(); let index_prefix = remote_index_path(tenant_shard_id, timeline_id, Generation::none()); download_generation_object( storage, tenant_shard_id, Some(timeline_id), my_generation, "index_part", index_prefix, do_download_index_part, parse_remote_index_path, cancel, ) .await } pub(crate) async fn download_tenant_manifest( storage: &GenericRemoteStorage, tenant_shard_id: &TenantShardId, my_generation: Generation, cancel: &CancellationToken, ) -> Result<(TenantManifest, Generation, SystemTime), DownloadError> { let manifest_prefix = remote_tenant_manifest_prefix(tenant_shard_id); download_generation_object( storage, tenant_shard_id, None, my_generation, "tenant-manifest", manifest_prefix, do_download_tenant_manifest, parse_remote_tenant_manifest_path, cancel, ) .await } pub(crate) async fn download_initdb_tar_zst( conf: &'static PageServerConf, storage: &GenericRemoteStorage, tenant_shard_id: &TenantShardId, timeline_id: &TimelineId, cancel: &CancellationToken, ) -> Result<(Utf8PathBuf, File), DownloadError> { debug_assert_current_span_has_tenant_and_timeline_id(); let remote_path = remote_initdb_archive_path(&tenant_shard_id.tenant_id, timeline_id); let remote_preserved_path = remote_initdb_preserved_archive_path(&tenant_shard_id.tenant_id, timeline_id); let timeline_path = conf.timelines_path(tenant_shard_id); if !timeline_path.exists() { tokio::fs::create_dir_all(&timeline_path) .await .with_context(|| format!("timeline dir creation {timeline_path}")) .map_err(DownloadError::Other)?; } let temp_path = timeline_path.join(format!( 
"{INITDB_PATH}.download-{timeline_id}.{TEMP_FILE_SUFFIX}" )); let file = download_retry( || async { let file = OpenOptions::new() .create(true) .truncate(true) .read(true) .write(true) .open(&temp_path) .await .with_context(|| format!("tempfile creation {temp_path}")) .map_err(DownloadError::Other)?; let download = match storage .download(&remote_path, &DownloadOpts::default(), cancel) .await { Ok(dl) => dl, Err(DownloadError::NotFound) => { storage .download(&remote_preserved_path, &DownloadOpts::default(), cancel) .await? } Err(other) => Err(other)?, }; let mut download = tokio_util::io::StreamReader::new(download.download_stream); let mut writer = tokio::io::BufWriter::with_capacity(super::BUFFER_SIZE, file); tokio::io::copy_buf(&mut download, &mut writer).await?; let mut file = writer.into_inner(); file.seek(std::io::SeekFrom::Start(0)) .await .with_context(|| format!("rewinding initdb.tar.zst at: {remote_path:?}")) .map_err(DownloadError::Other)?; Ok(file) }, &format!("download {remote_path}"), cancel, ) .await .inspect_err(|_e| { // Do a best-effort attempt at deleting the temporary file upon encountering an error. // We don't have async here nor do we want to pile on any extra errors. if let Err(e) = std::fs::remove_file(&temp_path) { if e.kind() != std::io::ErrorKind::NotFound { warn!("error deleting temporary file {temp_path}: {e}"); } } })?; Ok((temp_path, file)) } /// Helper function to handle retries for a download operation. /// /// Remote operations can fail due to rate limits (S3), spurious network /// problems, or other external reasons. Retry FAILED_DOWNLOAD_RETRIES times, /// with backoff. 
/// /// (See similar logic for uploads in `perform_upload_task`) pub(super) async fn download_retry<T, O, F>( op: O, description: &str, cancel: &CancellationToken, ) -> Result<T, DownloadError> where O: FnMut() -> F, F: Future<Output = Result<T, DownloadError>>, { backoff::retry( op, DownloadError::is_permanent, FAILED_DOWNLOAD_WARN_THRESHOLD, FAILED_REMOTE_OP_RETRIES, description, cancel, ) .await .ok_or_else(|| DownloadError::Cancelled) .and_then(|x| x) } pub(crate) async fn download_retry_forever<T, O, F>( op: O, description: &str, cancel: &CancellationToken, ) -> Result<T, DownloadError> where O: FnMut() -> F, F: Future<Output = Result<T, DownloadError>>, { backoff::retry( op, DownloadError::is_permanent, FAILED_DOWNLOAD_WARN_THRESHOLD, u32::MAX, description, cancel, ) .await .ok_or_else(|| DownloadError::Cancelled) .and_then(|x| x) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/remote_timeline_client/index.rs
pageserver/src/tenant/remote_timeline_client/index.rs
//! In-memory index to track the tenant files on the remote storage.
//!
//! Able to restore itself from the storage index parts, that are located in every timeline's remote directory and contain all data about
//! remote timeline layers and its metadata.

use std::collections::HashMap;

use chrono::NaiveDateTime;
use pageserver_api::models::AuxFilePolicy;
use pageserver_api::models::RelSizeMigration;
use pageserver_api::shard::ShardIndex;
use serde::{Deserialize, Serialize};
use utils::id::TimelineId;
use utils::lsn::Lsn;

use super::is_same_remote_layer_path;
use crate::tenant::Generation;
use crate::tenant::metadata::TimelineMetadata;
use crate::tenant::storage_layer::LayerName;
use crate::tenant::timeline::import_pgdata;

/// In-memory representation of an `index_part.json` file
///
/// Contains the data about all files in the timeline, present remotely and its metadata.
///
/// This type needs to be backwards and forwards compatible. When changing the fields,
/// remember to add a test case for the changed version.
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct IndexPart {
    /// Debugging aid describing the version of this type.
    #[serde(default)]
    version: usize,

    // When set, the timeline has been (remotely) marked deleted.
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub deleted_at: Option<NaiveDateTime>,

    // When set, the timeline has been archived.
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub archived_at: Option<NaiveDateTime>,

    /// This field supports import-from-pgdata ("fast imports" platform feature).
    /// We don't currently use fast imports, so, this field is None for all production timelines.
    /// See <https://github.com/neondatabase/neon/pull/9218> for more information.
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub import_pgdata: Option<import_pgdata::index_part_format::Root>,

    /// Layer filenames and metadata. For an index persisted in remote storage, all layers must
    /// exist in remote storage.
    pub layer_metadata: HashMap<LayerName, LayerFileMetadata>,

    /// Because of the trouble of eyeballing the legacy "metadata" field, we copied the
    /// "disk_consistent_lsn" out. After version 7 this is no longer needed, but the name cannot be
    /// reused.
    pub(super) disk_consistent_lsn: Lsn,

    // TODO: rename as "metadata" next week, keep the alias = "metadata_bytes", bump version.
    // Adding the "alias = metadata" was forgotten in #7693, so we have to use
    // "rewrite = metadata_bytes" for backwards compatibility.
    #[serde(
        rename = "metadata_bytes",
        alias = "metadata",
        with = "crate::tenant::metadata::modern_serde"
    )]
    pub metadata: TimelineMetadata,

    // History of earlier ancestors, see `Lineage`.
    #[serde(default)]
    pub(crate) lineage: Lineage,

    // When set, garbage collection is blocked for the listed reasons.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub(crate) gc_blocking: Option<GcBlocking>,

    /// Describes the kind of aux files stored in the timeline.
    ///
    /// The value is modified during file ingestion when the latest wanted value communicated via tenant config is applied if it is acceptable.
    /// A V1 setting after V2 files have been committed is not accepted.
    ///
    /// None means no aux files have been written to the storage before the point
    /// when this flag is introduced.
    ///
    /// This flag is not used any more as all tenants have been transitioned to the new aux file policy.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub(crate) last_aux_file_policy: Option<AuxFilePolicy>,

    // Progress marker for the relation-size format migration, if any.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub(crate) rel_size_migration: Option<RelSizeMigration>,

    /// Not used anymore -- kept here for backwards compatibility. Merged into the `gc_compaction` field.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    l2_lsn: Option<Lsn>,

    /// State for the garbage-collecting compaction pass.
    ///
    /// Garbage-collecting compaction (gc-compaction) prunes `Value`s that are outside
    /// the PITR window and not needed by child timelines.
    ///
    /// A commonly used synonym for this compaction pass is
    /// "bottommost-compaction" because the affected LSN range
    /// is the "bottom" of the (key,lsn) map.
    ///
    /// Gc-compaction is a quite expensive operation; that's why we use a
    /// trigger condition.
    /// This field here holds the state pertaining to that trigger condition
    /// and (in future) to the progress of the gc-compaction, so that it's
    /// resumable across restarts & migrations.
    ///
    /// Note that the underlying algorithm is _also_ called `gc-compaction`
    /// in most places & design docs; but in fact it is more flexible than
    /// just the specific use case here; it needs a new name.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub(crate) gc_compaction: Option<GcCompactionState>,

    /// The timestamp when the timeline was marked invisible in synthetic size calculations.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub(crate) marked_invisible_at: Option<NaiveDateTime>,

    /// The LSN at which we started the rel size migration. Accesses below this LSN should be
    /// processed with the v1 read path. Usually this LSN should be set together with `rel_size_migration`.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub(crate) rel_size_migrated_at: Option<Lsn>,
}

/// Persistent state of the garbage-collecting compaction pass (see
/// [`IndexPart::gc_compaction`]).
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct GcCompactionState {
    /// The upper bound of the last completed garbage-collecting compaction, aka. L2 LSN.
    pub(crate) last_completed_lsn: Lsn,
}

impl IndexPart {
    /// When adding or modifying any parts of `IndexPart`, increment the version so that it can be
    /// used to understand later versions.
    ///
    /// Version is currently informative only.
    /// Version history
    /// - 2: added `deleted_at`
    /// - 3: no longer deserialize `timeline_layers` (serialized format is the same, but timeline_layers
    ///   is always generated from the keys of `layer_metadata`)
    /// - 4: timeline_layers is fully removed.
    /// - 5: lineage was added
    /// - 6: last_aux_file_policy is added.
/// - 7: metadata_bytes is no longer written, but still read
    /// - 8: added `archived_at`
    /// - 9: +gc_blocking
    /// - 10: +import_pgdata
    /// - 11: +rel_size_migration
    /// - 12: +l2_lsn
    /// - 13: +gc_compaction
    /// - 14: +marked_invisible_at
    /// - 15: +rel_size_migrated_at
    const LATEST_VERSION: usize = 15;

    // Versions we may see when reading from a bucket.
    pub const KNOWN_VERSIONS: &'static [usize] =
        &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];

    // Object name of the index within a timeline's remote directory.
    pub const FILE_NAME: &'static str = "index_part.json";

    /// Creates an index at the latest version with no layers, deriving
    /// `disk_consistent_lsn` from the given metadata.
    pub fn empty(metadata: TimelineMetadata) -> Self {
        IndexPart {
            version: Self::LATEST_VERSION,
            layer_metadata: Default::default(),
            disk_consistent_lsn: metadata.disk_consistent_lsn(),
            metadata,
            deleted_at: None,
            archived_at: None,
            lineage: Default::default(),
            gc_blocking: None,
            last_aux_file_policy: None,
            import_pgdata: None,
            rel_size_migration: None,
            l2_lsn: None,
            gc_compaction: None,
            marked_invisible_at: None,
            rel_size_migrated_at: None,
        }
    }

    /// The format version this index was written with (informative only).
    pub fn version(&self) -> usize {
        self.version
    }

    /// If you want this under normal operations, read it from self.metadata:
    /// this method is just for the scrubber to use when validating an index.
    pub fn duplicated_disk_consistent_lsn(&self) -> Lsn {
        self.disk_consistent_lsn
    }

    /// Deserializes an index from its JSON representation.
    pub fn from_json_bytes(bytes: &[u8]) -> Result<Self, serde_json::Error> {
        serde_json::from_slice::<IndexPart>(bytes)
    }

    /// Serializes the index to JSON for upload.
    pub fn to_json_bytes(&self) -> serde_json::Result<Vec<u8>> {
        serde_json::to_vec(self)
    }

    #[cfg(test)]
    pub(crate) fn example() -> Self {
        Self::empty(TimelineMetadata::example())
    }

    /// Returns true if the index contains a reference to the given layer (i.e. file path).
    ///
    /// TODO: there should be a variant of LayerName for the physical remote path that contains
    /// information about the shard and generation, to avoid passing in metadata.
    pub fn references(&self, name: &LayerName, metadata: &LayerFileMetadata) -> bool {
        let Some(index_metadata) = self.layer_metadata.get(name) else {
            return false;
        };
        is_same_remote_layer_path(name, metadata, name, index_metadata)
    }

    /// Check for invariants in the index: this is useful when uploading an index to ensure that if
    /// we encounter a bug, we do not persist buggy metadata.
    pub(crate) fn validate(&self) -> Result<(), String> {
        if self.import_pgdata.is_none()
            && self.metadata.ancestor_timeline().is_none()
            && self.layer_metadata.is_empty()
        {
            // Unless we're in the middle of a raw pgdata import, or this is a child timeline, the
            // index must always have at least one layer.
            return Err("Index has no ancestor and no layers".to_string());
        }

        Ok(())
    }
}

/// Metadata gathered for each of the layer files.
///
/// Fields have to be `Option`s because remote [`IndexPart`]'s can be from different version, which
/// might have less or more metadata depending if upgrading or rolling back an upgrade.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct LayerFileMetadata {
    pub file_size: u64,

    #[serde(default = "Generation::none")]
    #[serde(skip_serializing_if = "Generation::is_none")]
    pub generation: Generation,

    #[serde(default = "ShardIndex::unsharded")]
    #[serde(skip_serializing_if = "ShardIndex::is_unsharded")]
    pub shard: ShardIndex,
}

impl LayerFileMetadata {
    pub fn new(file_size: u64, generation: Generation, shard: ShardIndex) -> Self {
        LayerFileMetadata {
            file_size,
            generation,
            shard,
        }
    }

    /// Helper to get both generation and file size in a tuple
    pub fn generation_file_size(&self) -> (Generation, u64) {
        (self.generation, self.file_size)
    }
}

/// Limited history of earlier ancestors.
///
/// A timeline can have more than 1 earlier ancestor, in the rare case that it was repeatedly
/// reparented by having a later timeline be detached from its ancestor.
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Default)]
pub(crate) struct Lineage {
    /// Has the `reparenting_history` been truncated to [`Lineage::REMEMBER_AT_MOST`].
    #[serde(skip_serializing_if = "is_false", default)]
    reparenting_history_truncated: bool,

    /// Earlier ancestors, truncated when [`Self::reparenting_history_truncated`]
    ///
    /// These are stored in case we want to support WAL based DR on the timeline. There can be many
    /// of these and at most one [`Self::original_ancestor`]. There cannot be more reparentings
    /// after [`Self::original_ancestor`] has been set.
    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    reparenting_history: Vec<TimelineId>,

    /// The ancestor from which this timeline has been detached from and when.
    ///
    /// If you are adding support for detaching from a hierarchy, consider changing the ancestry
    /// into a `Vec<(TimelineId, Lsn)>` to be a path instead.
    // FIXME: this is insufficient even for path of two timelines for future wal recovery
    // purposes:
    //
    // assuming a "old main" which has received most of the WAL, and has a branch "new main",
    // starting a bit before "old main" last_record_lsn. the current version works fine,
    // because we will know to replay wal and branch at the recorded Lsn to do wal recovery.
    //
    // then assuming "new main" would similarly receive a branch right before its last_record_lsn,
    // "new new main". the current implementation would just store ("new main", ancestor_lsn, _)
    // here. however, we cannot recover from WAL using only that information, we would need the
    // whole ancestry here:
    //
    // ```json
    // [
    //     ["old main", ancestor_lsn("new main"), _],
    //     ["new main", ancestor_lsn("new new main"), _]
    // ]
    // ```
    #[serde(skip_serializing_if = "Option::is_none", default)]
    original_ancestor: Option<(TimelineId, Lsn, NaiveDateTime)>,
}

// Serde helper: lets `skip_serializing_if` omit `false` booleans.
fn is_false(b: &bool) -> bool {
    !b
}

impl Lineage {
    // Cap on `reparenting_history` length; oldest entries are dropped beyond this.
    const REMEMBER_AT_MOST: usize = 100;

    /// Records `old_ancestor` in the reparenting history, deduplicating consecutive
    /// repeats and truncating to [`Self::REMEMBER_AT_MOST`]. Returns true if anything changed.
    pub(crate) fn record_previous_ancestor(&mut self, old_ancestor: &TimelineId) -> bool {
        if self.reparenting_history.last() == Some(old_ancestor) {
            // do not re-record it
            false
        } else {
            #[cfg(feature = "testing")]
            {
                let existing = self
                    .reparenting_history
                    .iter()
                    .position(|x| x == old_ancestor);
                assert_eq!(
                    existing, None,
                    "we cannot reparent onto and off and onto the same timeline twice"
                );
            }
            let drop_oldest = self.reparenting_history.len() + 1 >= Self::REMEMBER_AT_MOST;

            self.reparenting_history_truncated |= drop_oldest;
            if drop_oldest {
                self.reparenting_history.remove(0);
            }
            self.reparenting_history.push(*old_ancestor);
            true
        }
    }

    /// Returns true if anything changed.
    ///
    /// Panics if a different original ancestor has already been recorded.
    pub(crate) fn record_detaching(&mut self, branchpoint: &(TimelineId, Lsn)) -> bool {
        if let Some((id, lsn, _)) = self.original_ancestor {
            assert_eq!(
                &(id, lsn),
                branchpoint,
                "detaching attempt has to be for the same ancestor we are already detached from"
            );
            false
        } else {
            self.original_ancestor =
                Some((branchpoint.0, branchpoint.1, chrono::Utc::now().naive_utc()));
            true
        }
    }

    /// The queried lsn is most likely the basebackup lsn, and this answers question "is it allowed
    /// to start a read/write primary at this lsn".
    ///
    /// Returns true if the Lsn was previously our branch point.
    pub(crate) fn is_previous_ancestor_lsn(&self, lsn: Lsn) -> bool {
        self.original_ancestor
            .is_some_and(|(_, ancestor_lsn, _)| ancestor_lsn == lsn)
    }

    /// Returns true if the timeline originally had an ancestor, and no longer has one.
    pub(crate) fn is_detached_from_ancestor(&self) -> bool {
        self.original_ancestor.is_some()
    }

    /// Returns original ancestor timeline id and lsn that this timeline has been detached from.
    pub(crate) fn detached_previous_ancestor(&self) -> Option<(TimelineId, Lsn)> {
        self.original_ancestor.map(|(id, lsn, _)| (id, lsn))
    }

    /// Returns true if this timeline has ever been reparented.
    pub(crate) fn is_reparented(&self) -> bool {
        !self.reparenting_history.is_empty()
    }
}

/// Reasons garbage collection is blocked, plus when blocking started.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub(crate) struct GcBlocking {
    pub(crate) started_at: NaiveDateTime,
    pub(crate) reasons: enumset::EnumSet<GcBlockingReason>,
}

#[derive(Debug, enumset::EnumSetType, serde::Serialize, serde::Deserialize)]
#[enumset(serialize_repr = "list")]
pub(crate) enum GcBlockingReason {
    Manual,
    DetachAncestor,
}

impl GcBlocking {
    /// Creates a blocking record with the current UTC time and a single reason.
    pub(super) fn started_now_for(reason: GcBlockingReason) -> Self {
        GcBlocking {
            started_at: chrono::Utc::now().naive_utc(),
            reasons: enumset::EnumSet::only(reason),
        }
    }

    /// Returns true if the given reason is one of the reasons why the gc is blocked.
    pub(crate) fn blocked_by(&self, reason: GcBlockingReason) -> bool {
        self.reasons.contains(reason)
    }

    /// Returns a version of self with the given reason.
    ///
    /// Panics if the reason is already present.
    pub(super) fn with_reason(&self, reason: GcBlockingReason) -> Self {
        assert!(!self.blocked_by(reason));
        let mut reasons = self.reasons;
        reasons.insert(reason);

        Self {
            started_at: self.started_at,
            reasons,
        }
    }

    /// Returns a version of self without the given reason. Assumption is that if
    /// there are no more reasons, we can unblock the gc by returning `None`.
pub(super) fn without_reason(&self, reason: GcBlockingReason) -> Option<Self> { assert!(self.blocked_by(reason)); if self.reasons.len() == 1 { None } else { let mut reasons = self.reasons; assert!(reasons.remove(reason)); assert!(!reasons.is_empty()); Some(Self { started_at: self.started_at, reasons, }) } } } #[cfg(test)] mod tests { use postgres_ffi::PgMajorVersion; use std::str::FromStr; use utils::id::TimelineId; use super::*; #[test] fn v1_indexpart_is_parsed() { let example = r#"{ "version":1, "timeline_layers":["000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9"], "layer_metadata":{ "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 }, "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51": { "file_size": 9007199254741001 } }, "disk_consistent_lsn":"0/16960E8", "metadata_bytes":[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] }"#; let expected = IndexPart { // note this is not verified, could be anything, but exists for humans debugging.. could be the git version instead? version: 1, layer_metadata: HashMap::from([ ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), LayerFileMetadata { file_size: 25600000, generation: Generation::none(), shard: ShardIndex::unsharded() }), ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), LayerFileMetadata { // serde_json should always parse this but this might be a double with jq for // example. file_size: 9007199254741001, generation: Generation::none(), shard: ShardIndex::unsharded() }) ]), disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(), metadata: TimelineMetadata::from_bytes(&[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]).unwrap(), deleted_at: 
None, archived_at: None, lineage: Lineage::default(), gc_blocking: None, last_aux_file_policy: None, import_pgdata: None, rel_size_migration: None, l2_lsn: None, gc_compaction: None, marked_invisible_at: None, rel_size_migrated_at: None, }; let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap(); assert_eq!(part, expected); } #[test] fn v1_indexpart_is_parsed_with_optional_missing_layers() { let example = r#"{ "version":1, "timeline_layers":["000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9"], "missing_layers":["This shouldn't fail deserialization"], "layer_metadata":{ "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 }, "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51": { "file_size": 9007199254741001 } }, "disk_consistent_lsn":"0/16960E8", "metadata_bytes":[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] }"#; let expected = IndexPart { // note this is not verified, could be anything, but exists for humans debugging.. could be the git version instead? version: 1, layer_metadata: HashMap::from([ ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), LayerFileMetadata { file_size: 25600000, generation: Generation::none(), shard: ShardIndex::unsharded() }), ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), LayerFileMetadata { // serde_json should always parse this but this might be a double with jq for // example. file_size: 9007199254741001, generation: Generation::none(), shard: ShardIndex::unsharded() }) ]), disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(), metadata: TimelineMetadata::from_bytes(&[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]).unwrap(), deleted_at: None, 
archived_at: None, lineage: Lineage::default(), gc_blocking: None, last_aux_file_policy: None, import_pgdata: None, rel_size_migration: None, l2_lsn: None, gc_compaction: None, marked_invisible_at: None, rel_size_migrated_at: None, }; let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap(); assert_eq!(part, expected); } #[test] fn v2_indexpart_is_parsed_with_deleted_at() { let example = r#"{ "version":2, "timeline_layers":["000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9"], "missing_layers":["This shouldn't fail deserialization"], "layer_metadata":{ "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 }, "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51": { "file_size": 9007199254741001 } }, "disk_consistent_lsn":"0/16960E8", "metadata_bytes":[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0], "deleted_at": "2023-07-31T09:00:00.123" }"#; let expected = IndexPart { // note this is not verified, could be anything, but exists for humans debugging.. could be the git version instead? version: 2, layer_metadata: HashMap::from([ ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), LayerFileMetadata { file_size: 25600000, generation: Generation::none(), shard: ShardIndex::unsharded() }), ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), LayerFileMetadata { // serde_json should always parse this but this might be a double with jq for // example. file_size: 9007199254741001, generation: Generation::none(), shard: ShardIndex::unsharded() }) ]), disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(), metadata: TimelineMetadata::from_bytes(&[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]).unwrap(), 
deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")), archived_at: None, lineage: Lineage::default(), gc_blocking: None, last_aux_file_policy: None, import_pgdata: None, rel_size_migration: None, l2_lsn: None, gc_compaction: None, marked_invisible_at: None, rel_size_migrated_at: None, }; let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap(); assert_eq!(part, expected); } #[test] fn empty_layers_are_parsed() { let empty_layers_json = r#"{ "version":1, "timeline_layers":[], "layer_metadata":{}, "disk_consistent_lsn":"0/2532648", "metadata_bytes":[136,151,49,208,0,70,0,4,0,0,0,0,2,83,38,72,1,0,0,0,0,2,83,38,32,1,87,198,240,135,97,119,45,125,38,29,155,161,140,141,255,210,0,0,0,0,2,83,38,72,0,0,0,0,1,73,240,192,0,0,0,0,1,73,240,192,0,0,0,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] }"#; let expected = IndexPart { version: 1, layer_metadata: HashMap::new(), disk_consistent_lsn: "0/2532648".parse::<Lsn>().unwrap(), metadata: TimelineMetadata::from_bytes(&[ 136, 151, 49, 208, 0, 70, 0, 4, 0, 0, 0, 0, 2, 83, 38, 72, 1, 0, 0, 0, 0, 2, 83, 38, 32, 1, 87, 198, 240, 135, 97, 119, 45, 125, 38, 29, 155, 161, 140, 141, 
255, 210, 0, 0, 0, 0, 2, 83, 38, 72, 0, 0, 0, 0, 1, 73, 240, 192, 0, 0, 0, 0, 1, 73,
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/remote_timeline_client/manifest.rs
pageserver/src/tenant/remote_timeline_client/manifest.rs
use chrono::NaiveDateTime; use serde::{Deserialize, Serialize}; use utils::id::TimelineId; use utils::lsn::Lsn; use utils::shard::ShardStripeSize; /// Tenant shard manifest, stored in remote storage. Contains offloaded timelines and other tenant /// shard-wide information that must be persisted in remote storage. /// /// The manifest is always updated on tenant attach, and as needed. #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct TenantManifest { /// The manifest version. Incremented on manifest format changes, even non-breaking ones. /// Manifests must generally always be backwards and forwards compatible for one release, to /// allow release rollbacks. pub version: usize, /// This tenant's stripe size. This is only advisory, and used to recover tenant data from /// remote storage. The autoritative source is the storage controller. If None, assume the /// original default value of 32768 blocks (256 MB). #[serde(skip_serializing_if = "Option::is_none")] pub stripe_size: Option<ShardStripeSize>, /// The list of offloaded timelines together with enough information /// to not have to actually load them. /// /// Note: the timelines mentioned in this list might be deleted, i.e. /// we don't hold an invariant that the references aren't dangling. /// Existence of index-part.json is the actual indicator of timeline existence. #[serde(default)] pub offloaded_timelines: Vec<OffloadedTimelineManifest>, } /// The remote level representation of an offloaded timeline. /// /// Very similar to [`pageserver_api::models::OffloadedTimelineInfo`], /// but the two datastructures serve different needs, this is for a persistent disk format /// that must be backwards compatible, while the other is only for informative purposes. 
#[derive(Clone, Debug, Serialize, Deserialize, Copy, PartialEq, Eq)] pub struct OffloadedTimelineManifest { pub timeline_id: TimelineId, /// Whether the timeline has a parent it has been branched off from or not pub ancestor_timeline_id: Option<TimelineId>, /// Whether to retain the branch lsn at the ancestor or not pub ancestor_retain_lsn: Option<Lsn>, /// The time point when the timeline was archived pub archived_at: NaiveDateTime, } /// The newest manifest version. This should be incremented on changes, even non-breaking ones. We /// do not use deny_unknown_fields, so new fields are not breaking. /// /// 1: initial version /// 2: +stripe_size /// /// When adding new versions, also add a parse_vX test case below. pub const LATEST_TENANT_MANIFEST_VERSION: usize = 2; impl TenantManifest { /// Returns true if the manifests are equal, ignoring the version number. This avoids /// re-uploading all manifests just because the version number is bumped. pub fn eq_ignoring_version(&self, other: &Self) -> bool { // Fast path: if the version is equal, just compare directly. if self.version == other.version { return self == other; } // We could alternatively just clone and modify the version here. let Self { version: _, // ignore version stripe_size, offloaded_timelines, } = self; stripe_size == &other.stripe_size && offloaded_timelines == &other.offloaded_timelines } /// Decodes a manifest from JSON. pub fn from_json_bytes(bytes: &[u8]) -> Result<Self, serde_json::Error> { serde_json::from_slice(bytes) } /// Encodes a manifest as JSON. pub fn to_json_bytes(&self) -> serde_json::Result<Vec<u8>> { serde_json::to_vec(self) } } #[cfg(test)] mod tests { use std::str::FromStr; use utils::id::TimelineId; use super::*; /// Empty manifests should be parsed. Version is required. 
#[test] fn parse_empty() -> anyhow::Result<()> { let json = r#"{ "version": 0 }"#; let expected = TenantManifest { version: 0, stripe_size: None, offloaded_timelines: Vec::new(), }; assert_eq!(expected, TenantManifest::from_json_bytes(json.as_bytes())?); Ok(()) } /// Unknown fields should be ignored, for forwards compatibility. #[test] fn parse_unknown_fields() -> anyhow::Result<()> { let json = r#"{ "version": 1, "foo": "bar" }"#; let expected = TenantManifest { version: 1, stripe_size: None, offloaded_timelines: Vec::new(), }; assert_eq!(expected, TenantManifest::from_json_bytes(json.as_bytes())?); Ok(()) } /// v1 manifests should be parsed, for backwards compatibility. #[test] fn parse_v1() -> anyhow::Result<()> { let json = r#"{ "version": 1, "offloaded_timelines": [ { "timeline_id": "5c4df612fd159e63c1b7853fe94d97da", "archived_at": "2025-03-07T11:07:11.373105434" }, { "timeline_id": "f3def5823ad7080d2ea538d8e12163fa", "ancestor_timeline_id": "5c4df612fd159e63c1b7853fe94d97da", "ancestor_retain_lsn": "0/1F79038", "archived_at": "2025-03-05T11:10:22.257901390" } ] }"#; let expected = TenantManifest { version: 1, stripe_size: None, offloaded_timelines: vec![ OffloadedTimelineManifest { timeline_id: TimelineId::from_str("5c4df612fd159e63c1b7853fe94d97da")?, ancestor_timeline_id: None, ancestor_retain_lsn: None, archived_at: NaiveDateTime::from_str("2025-03-07T11:07:11.373105434")?, }, OffloadedTimelineManifest { timeline_id: TimelineId::from_str("f3def5823ad7080d2ea538d8e12163fa")?, ancestor_timeline_id: Some(TimelineId::from_str( "5c4df612fd159e63c1b7853fe94d97da", )?), ancestor_retain_lsn: Some(Lsn::from_str("0/1F79038")?), archived_at: NaiveDateTime::from_str("2025-03-05T11:10:22.257901390")?, }, ], }; assert_eq!(expected, TenantManifest::from_json_bytes(json.as_bytes())?); Ok(()) } /// v2 manifests should be parsed, for backwards compatibility. 
#[test] fn parse_v2() -> anyhow::Result<()> { let json = r#"{ "version": 2, "stripe_size": 32768, "offloaded_timelines": [ { "timeline_id": "5c4df612fd159e63c1b7853fe94d97da", "archived_at": "2025-03-07T11:07:11.373105434" }, { "timeline_id": "f3def5823ad7080d2ea538d8e12163fa", "ancestor_timeline_id": "5c4df612fd159e63c1b7853fe94d97da", "ancestor_retain_lsn": "0/1F79038", "archived_at": "2025-03-05T11:10:22.257901390" } ] }"#; let expected = TenantManifest { version: 2, stripe_size: Some(ShardStripeSize(32768)), offloaded_timelines: vec![ OffloadedTimelineManifest { timeline_id: TimelineId::from_str("5c4df612fd159e63c1b7853fe94d97da")?, ancestor_timeline_id: None, ancestor_retain_lsn: None, archived_at: NaiveDateTime::from_str("2025-03-07T11:07:11.373105434")?, }, OffloadedTimelineManifest { timeline_id: TimelineId::from_str("f3def5823ad7080d2ea538d8e12163fa")?, ancestor_timeline_id: Some(TimelineId::from_str( "5c4df612fd159e63c1b7853fe94d97da", )?), ancestor_retain_lsn: Some(Lsn::from_str("0/1F79038")?), archived_at: NaiveDateTime::from_str("2025-03-05T11:10:22.257901390")?, }, ], }; assert_eq!(expected, TenantManifest::from_json_bytes(json.as_bytes())?); Ok(()) } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/remote_timeline_client/upload.rs
pageserver/src/tenant/remote_timeline_client/upload.rs
//! Helper functions to upload files to remote storage with a RemoteStorage use std::io::{ErrorKind, SeekFrom}; use std::num::NonZeroU32; use std::time::SystemTime; use anyhow::{Context, bail}; use bytes::Bytes; use camino::Utf8Path; use fail::fail_point; use pageserver_api::shard::TenantShardId; use remote_storage::{GenericRemoteStorage, RemotePath, TimeTravelError}; use tokio::fs::{self, File}; use tokio::io::AsyncSeekExt; use tokio_util::sync::CancellationToken; use tracing::info; use utils::id::{TenantId, TimelineId}; use utils::{backoff, pausable_failpoint}; use super::Generation; use super::index::IndexPart; use super::manifest::TenantManifest; use crate::tenant::remote_timeline_client::{ remote_index_path, remote_initdb_archive_path, remote_initdb_preserved_archive_path, remote_tenant_manifest_path, }; /// Serializes and uploads the given index part data to the remote storage. pub(crate) async fn upload_index_part( storage: &GenericRemoteStorage, tenant_shard_id: &TenantShardId, timeline_id: &TimelineId, generation: Generation, index_part: &IndexPart, cancel: &CancellationToken, ) -> anyhow::Result<()> { tracing::trace!("uploading new index part"); fail_point!("before-upload-index", |_| { bail!("failpoint before-upload-index") }); pausable_failpoint!("before-upload-index-pausable"); // Safety: refuse to persist invalid index metadata, to mitigate the impact of any bug that produces this // (this should never happen) index_part.validate().map_err(|e| anyhow::anyhow!(e))?; // FIXME: this error comes too late let serialized = index_part.to_json_bytes()?; let serialized = Bytes::from(serialized); let index_part_size = serialized.len(); let remote_path = remote_index_path(tenant_shard_id, timeline_id, generation); storage .upload_storage_object( futures::stream::once(futures::future::ready(Ok(serialized))), index_part_size, &remote_path, cancel, ) .await .with_context(|| format!("upload index part for '{tenant_shard_id} / {timeline_id}'")) } /// Serializes and 
uploads the given tenant manifest data to the remote storage. pub(crate) async fn upload_tenant_manifest( storage: &GenericRemoteStorage, tenant_shard_id: &TenantShardId, generation: Generation, tenant_manifest: &TenantManifest, cancel: &CancellationToken, ) -> anyhow::Result<()> { tracing::trace!("uploading new tenant manifest"); fail_point!("before-upload-manifest", |_| { bail!("failpoint before-upload-manifest") }); pausable_failpoint!("before-upload-manifest-pausable"); let serialized = Bytes::from(tenant_manifest.to_json_bytes()?); let tenant_manifest_size = serialized.len(); let remote_path = remote_tenant_manifest_path(tenant_shard_id, generation); storage .upload_storage_object( futures::stream::once(futures::future::ready(Ok(serialized))), tenant_manifest_size, &remote_path, cancel, ) .await .with_context(|| format!("upload tenant manifest for '{tenant_shard_id}'")) } /// Attempts to upload given layer files. /// No extra checks for overlapping files is made and any files that are already present remotely will be overwritten, if submitted during the upload. /// /// On an error, bumps the retries count and reschedules the entire task. pub(super) async fn upload_timeline_layer<'a>( storage: &'a GenericRemoteStorage, local_path: &'a Utf8Path, remote_path: &'a RemotePath, metadata_size: u64, cancel: &CancellationToken, ) -> anyhow::Result<()> { fail_point!("before-upload-layer", |_| { bail!("failpoint before-upload-layer") }); pausable_failpoint!("before-upload-layer-pausable"); let source_file_res = fs::File::open(&local_path).await; let source_file = match source_file_res { Ok(source_file) => source_file, Err(e) if e.kind() == ErrorKind::NotFound => { // If we encounter this arm, it wasn't intended, but it's also not // a big problem, if it's because the file was deleted before an // upload. However, a nonexistent file can also be indicative of // something worse, like when a file is scheduled for upload before // it has been written to disk yet. 
// // This is tested against `test_compaction_delete_before_upload` info!(path = %local_path, "File to upload doesn't exist. Likely the file has been deleted and an upload is not required any more."); return Ok(()); } Err(e) => Err(e).with_context(|| format!("open a source file for layer {local_path:?}"))?, }; let fs_size = source_file .metadata() .await .with_context(|| format!("get the source file metadata for layer {local_path:?}"))? .len(); if metadata_size != fs_size { bail!( "File {local_path:?} has its current FS size {fs_size} diferent from initially determined {metadata_size}" ); } let fs_size = usize::try_from(fs_size) .with_context(|| format!("convert {local_path:?} size {fs_size} usize"))?; /* BEGIN_HADRON */ let mut metadata = None; match storage { // Pass the file path as a storage metadata to minimize changes to neon. // Otherwise, we need to change the upload interface. GenericRemoteStorage::AzureBlob(s) => { let block_size_mb = s.put_block_size_mb.unwrap_or(0); if block_size_mb > 0 && fs_size > block_size_mb * 1024 * 1024 { metadata = Some(remote_storage::StorageMetadata::from([( "databricks_azure_put_block", local_path.as_str(), )])); } } GenericRemoteStorage::LocalFs(_) => {} GenericRemoteStorage::AwsS3(_) => {} GenericRemoteStorage::Unreliable(_) => {} GenericRemoteStorage::GCS(_) => {} }; /* END_HADRON */ let reader = tokio_util::io::ReaderStream::with_capacity(source_file, super::BUFFER_SIZE); storage .upload(reader, fs_size, remote_path, metadata, cancel) .await .with_context(|| format!("upload layer from local path '{local_path}'")) } pub(super) async fn copy_timeline_layer( storage: &GenericRemoteStorage, source_path: &RemotePath, target_path: &RemotePath, cancel: &CancellationToken, ) -> anyhow::Result<()> { fail_point!("before-copy-layer", |_| { bail!("failpoint before-copy-layer") }); pausable_failpoint!("before-copy-layer-pausable"); storage .copy_object(source_path, target_path, cancel) .await .with_context(|| format!("copy layer 
{source_path} to {target_path}")) } /// Uploads the given `initdb` data to the remote storage. pub(crate) async fn upload_initdb_dir( storage: &GenericRemoteStorage, tenant_id: &TenantId, timeline_id: &TimelineId, mut initdb_tar_zst: File, size: u64, cancel: &CancellationToken, ) -> anyhow::Result<()> { tracing::trace!("uploading initdb dir"); // We might have read somewhat into the file already in the prior retry attempt initdb_tar_zst.seek(SeekFrom::Start(0)).await?; let file = tokio_util::io::ReaderStream::with_capacity(initdb_tar_zst, super::BUFFER_SIZE); let remote_path = remote_initdb_archive_path(tenant_id, timeline_id); storage .upload_storage_object(file, size as usize, &remote_path, cancel) .await .with_context(|| format!("upload initdb dir for '{tenant_id} / {timeline_id}'")) } pub(crate) async fn preserve_initdb_archive( storage: &GenericRemoteStorage, tenant_id: &TenantId, timeline_id: &TimelineId, cancel: &CancellationToken, ) -> anyhow::Result<()> { let source_path = remote_initdb_archive_path(tenant_id, timeline_id); let dest_path = remote_initdb_preserved_archive_path(tenant_id, timeline_id); storage .copy_object(&source_path, &dest_path, cancel) .await .with_context(|| format!("backing up initdb archive for '{tenant_id} / {timeline_id}'")) } pub(crate) async fn time_travel_recover_tenant( storage: &GenericRemoteStorage, tenant_shard_id: &TenantShardId, timestamp: SystemTime, done_if_after: SystemTime, cancel: &CancellationToken, ) -> Result<(), TimeTravelError> { let warn_after = 3; let max_attempts = 10; let mut prefixes = Vec::with_capacity(2); if tenant_shard_id.is_shard_zero() { // Also recover the unsharded prefix for a shard of zero: // - if the tenant is totally unsharded, the unsharded prefix contains all the data // - if the tenant is sharded, we still want to recover the initdb data, but we only // want to do it once, so let's do it on the 0 shard let timelines_path_unsharded = 
super::remote_timelines_path_unsharded(&tenant_shard_id.tenant_id); prefixes.push(timelines_path_unsharded); } if !tenant_shard_id.is_unsharded() { // If the tenant is sharded, we need to recover the sharded prefix let timelines_path = super::remote_timelines_path(tenant_shard_id); prefixes.push(timelines_path); } // Limit the number of versions deletions, mostly so that we don't // keep requesting forever if the list is too long, as we'd put the // list in RAM. // Building a list of 100k entries that reaches the limit roughly takes // 40 seconds, and roughly corresponds to tenants of 2 TiB physical size. const COMPLEXITY_LIMIT: Option<NonZeroU32> = NonZeroU32::new(100_000); for prefix in &prefixes { backoff::retry( || async { storage .time_travel_recover( Some(prefix), timestamp, done_if_after, cancel, COMPLEXITY_LIMIT, ) .await }, |e| !matches!(e, TimeTravelError::Other(_)), warn_after, max_attempts, "time travel recovery of tenant prefix", cancel, ) .await .ok_or_else(|| TimeTravelError::Cancelled) .and_then(|x| x)?; } Ok(()) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/heatmap_layers_downloader.rs
pageserver/src/tenant/timeline/heatmap_layers_downloader.rs
//! Timeline utility module to hydrate everything from the current heatmap. //! //! Provides utilities to spawn and abort a background task where the downloads happen. //! See /v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_heatmap_layers. use std::sync::{Arc, Mutex}; use futures::StreamExt; use http_utils::error::ApiError; use tokio_util::sync::CancellationToken; use utils::sync::gate::Gate; use crate::context::RequestContext; use super::Timeline; // This status is not strictly necessary now, but gives us a nice place // to store progress information if we ever wish to expose it. pub(super) enum HeatmapLayersDownloadStatus { InProgress, Complete, } pub(super) struct HeatmapLayersDownloader { handle: tokio::task::JoinHandle<()>, status: Arc<Mutex<HeatmapLayersDownloadStatus>>, cancel: CancellationToken, downloads_guard: Arc<Gate>, } impl HeatmapLayersDownloader { fn new( timeline: Arc<Timeline>, concurrency: usize, recurse: bool, ctx: RequestContext, ) -> Result<HeatmapLayersDownloader, ApiError> { let tl_guard = timeline.gate.enter().map_err(|_| ApiError::Cancelled)?; let cancel = timeline.cancel.child_token(); let downloads_guard = Arc::new(Gate::default()); let status = Arc::new(Mutex::new(HeatmapLayersDownloadStatus::InProgress)); let handle = tokio::task::spawn({ let status = status.clone(); let downloads_guard = downloads_guard.clone(); let cancel = cancel.clone(); async move { let _guard = tl_guard; scopeguard::defer! 
{ *status.lock().unwrap() = HeatmapLayersDownloadStatus::Complete; } let Some(heatmap) = timeline.generate_heatmap().await else { tracing::info!("Heatmap layers download failed to generate heatmap"); return; }; tracing::info!( resident_size=%timeline.resident_physical_size(), heatmap_layers=%heatmap.all_layers().count(), "Starting heatmap layers download" ); let stream = futures::stream::iter(heatmap.all_layers().cloned().filter_map( |layer| { let ctx = ctx.attached_child(); let tl = timeline.clone(); let dl_guard = match downloads_guard.enter() { Ok(g) => g, Err(_) => { // [`Self::shutdown`] was called. Don't spawn any more downloads. return None; } }; Some(async move { let _dl_guard = dl_guard; let res = tl.download_layer(&layer.name, &ctx).await; if let Err(err) = res { if !err.is_cancelled() { tracing::warn!(layer=%layer.name,"Failed to download heatmap layer: {err}") } } }) } )).buffered(concurrency); tokio::select! { _ = stream.collect::<()>() => { tracing::info!( resident_size=%timeline.resident_physical_size(), "Heatmap layers download completed" ); }, _ = cancel.cancelled() => { tracing::info!("Heatmap layers download cancelled"); return; } } if recurse { if let Some(ancestor) = timeline.ancestor_timeline() { let ctx = ctx.attached_child(); let res = ancestor.start_heatmap_layers_download(concurrency, recurse, &ctx); if let Err(err) = res { tracing::info!( "Failed to start heatmap layers download for ancestor: {err}" ); } } } } }); Ok(Self { status, handle, cancel, downloads_guard, }) } fn is_complete(&self) -> bool { matches!( *self.status.lock().unwrap(), HeatmapLayersDownloadStatus::Complete ) } /// Drive any in-progress downloads to completion and stop spawning any new ones. /// /// This has two callers and they behave differently /// 1. [`Timeline::shutdown`]: the drain will be immediate since downloads themselves /// are sensitive to timeline cancellation. /// /// 2. 
Endpoint handler in [`crate::http::routes`]: the drain will wait for any in-progress /// downloads to complete. async fn stop_and_drain(self) { // Counterintuitive: close the guard before cancelling. // Something needs to poll the already created download futures to completion. // If we cancel first, then the underlying task exits and we lost // the poller. self.downloads_guard.close().await; self.cancel.cancel(); if let Err(err) = self.handle.await { tracing::warn!("Failed to join heatmap layer downloader task: {err}"); } } } impl Timeline { pub(crate) fn start_heatmap_layers_download( self: &Arc<Self>, concurrency: usize, recurse: bool, ctx: &RequestContext, ) -> Result<(), ApiError> { let mut locked = self.heatmap_layers_downloader.lock().unwrap(); if locked.as_ref().map(|dl| dl.is_complete()).unwrap_or(true) { let dl = HeatmapLayersDownloader::new( self.clone(), concurrency, recurse, ctx.attached_child(), )?; *locked = Some(dl); Ok(()) } else { Err(ApiError::Conflict("Already running".to_string())) } } pub(crate) async fn stop_and_drain_heatmap_layers_download(&self) { // This can race with the start of a new downloader and lead to a situation // where one donloader is shutting down and another one is in-flight. // The only impact is that we'd end up using more remote storage semaphore // units than expected. let downloader = self.heatmap_layers_downloader.lock().unwrap().take(); if let Some(dl) = downloader { dl.stop_and_drain().await; } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/compaction.rs
pageserver/src/tenant/timeline/compaction.rs
//! New compaction implementation. The algorithm itself is implemented in the //! compaction crate. This file implements the callbacks and structs that allow //! the algorithm to drive the process. //! //! The old legacy algorithm is implemented directly in `timeline.rs`. use std::cmp::min; use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque}; use std::ops::{Deref, Range}; use std::sync::Arc; use std::time::{Duration, Instant}; use super::layer_manager::LayerManagerLockHolder; use super::{ CompactFlags, CompactOptions, CompactionError, CreateImageLayersError, DurationRecorder, GetVectoredError, ImageLayerCreationMode, LastImageLayerCreationStatus, RecordedDuration, Timeline, }; use crate::pgdatadir_mapping::CollectKeySpaceError; use crate::tenant::timeline::{DeltaEntry, RepartitionError}; use crate::walredo::RedoAttemptType; use anyhow::{Context, anyhow}; use bytes::Bytes; use enumset::EnumSet; use fail::fail_point; use futures::FutureExt; use itertools::Itertools; use once_cell::sync::Lazy; use pageserver_api::config::tenant_conf_defaults::DEFAULT_CHECKPOINT_DISTANCE; use pageserver_api::key::{KEY_SIZE, Key}; use pageserver_api::keyspace::{KeySpace, ShardedRange}; use pageserver_api::models::{CompactInfoResponse, CompactKeyRange}; use pageserver_api::shard::{ShardCount, ShardIdentity, TenantShardId}; use pageserver_compaction::helpers::{fully_contains, overlaps_with}; use pageserver_compaction::interface::*; use serde::Serialize; use tokio::sync::{OwnedSemaphorePermit, Semaphore}; use tokio_util::sync::CancellationToken; use tracing::{Instrument, debug, error, info, info_span, trace, warn}; use utils::critical_timeline; use utils::id::TimelineId; use utils::lsn::Lsn; use wal_decoder::models::record::NeonWalRecord; use wal_decoder::models::value::Value; use crate::context::{AccessStatsBehavior, RequestContext, RequestContextBuilder}; use crate::page_cache; use crate::statvfs::Statvfs; use crate::tenant::checks::check_valid_layermap; use 
crate::tenant::gc_block::GcBlock; use crate::tenant::layer_map::LayerMap; use crate::tenant::remote_timeline_client::WaitCompletionError; use crate::tenant::remote_timeline_client::index::GcCompactionState; use crate::tenant::storage_layer::batch_split_writer::{ BatchWriterResult, SplitDeltaLayerWriter, SplitImageLayerWriter, }; use crate::tenant::storage_layer::filter_iterator::FilterIterator; use crate::tenant::storage_layer::merge_iterator::MergeIterator; use crate::tenant::storage_layer::{ AsLayerDesc, LayerVisibilityHint, PersistentLayerDesc, PersistentLayerKey, ValueReconstructState, }; use crate::tenant::tasks::log_compaction_error; use crate::tenant::timeline::{ DeltaLayerWriter, ImageLayerCreationOutcome, ImageLayerWriter, IoConcurrency, Layer, ResidentLayer, drop_layer_manager_rlock, }; use crate::tenant::{DeltaLayer, MaybeOffloaded, PageReconstructError}; use crate::virtual_file::{MaybeFatalIo, VirtualFile}; /// Maximum number of deltas before generating an image layer in bottom-most compaction. const COMPACTION_DELTA_THRESHOLD: usize = 5; /// Ratio of shard-local pages below which we trigger shard ancestor layer rewrites. 0.3 means that /// <= 30% of layer pages must belong to the descendant shard to rewrite the layer. /// /// We choose a value < 0.5 to avoid rewriting all visible layers every time we do a power-of-two /// shard split, which gets expensive for large tenants. 
const ANCESTOR_COMPACTION_REWRITE_THRESHOLD: f64 = 0.3; #[derive(Default, Debug, Clone, Copy, Hash, PartialEq, Eq, Serialize)] pub struct GcCompactionJobId(pub usize); impl std::fmt::Display for GcCompactionJobId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.0) } } pub struct GcCompactionCombinedSettings { pub gc_compaction_enabled: bool, pub gc_compaction_verification: bool, pub gc_compaction_initial_threshold_kb: u64, pub gc_compaction_ratio_percent: u64, } #[derive(Debug, Clone)] pub enum GcCompactionQueueItem { MetaJob { /// Compaction options options: CompactOptions, /// Whether the compaction is triggered automatically (determines whether we need to update L2 LSN) auto: bool, }, SubCompactionJob { i: usize, total: usize, options: CompactOptions, }, Notify(GcCompactionJobId, Option<Lsn>), } /// Statistics for gc-compaction meta jobs, which contains several sub compaction jobs. #[derive(Debug, Clone, Serialize, Default)] pub struct GcCompactionMetaStatistics { /// The total number of sub compaction jobs. pub total_sub_compaction_jobs: usize, /// The total number of sub compaction jobs that failed. pub failed_sub_compaction_jobs: usize, /// The total number of sub compaction jobs that succeeded. pub succeeded_sub_compaction_jobs: usize, /// The layer size before compaction. pub before_compaction_layer_size: u64, /// The layer size after compaction. pub after_compaction_layer_size: u64, /// The start time of the meta job. pub start_time: Option<chrono::DateTime<chrono::Utc>>, /// The end time of the meta job. pub end_time: Option<chrono::DateTime<chrono::Utc>>, /// The duration of the meta job. pub duration_secs: f64, /// The id of the meta job. pub meta_job_id: GcCompactionJobId, /// The LSN below which the layers are compacted, used to compute the statistics. 
pub below_lsn: Lsn, /// The retention ratio of the meta job (after_compaction_layer_size / before_compaction_layer_size) pub retention_ratio: f64, } impl GcCompactionMetaStatistics { fn finalize(&mut self) { let end_time = chrono::Utc::now(); if let Some(start_time) = self.start_time { if end_time > start_time { let delta = end_time - start_time; if let Ok(std_dur) = delta.to_std() { self.duration_secs = std_dur.as_secs_f64(); } } } self.retention_ratio = self.after_compaction_layer_size as f64 / (self.before_compaction_layer_size as f64 + 1.0); self.end_time = Some(end_time); } } impl GcCompactionQueueItem { pub fn into_compact_info_resp( self, id: GcCompactionJobId, running: bool, ) -> Option<CompactInfoResponse> { match self { GcCompactionQueueItem::MetaJob { options, .. } => Some(CompactInfoResponse { compact_key_range: options.compact_key_range, compact_lsn_range: options.compact_lsn_range, sub_compaction: options.sub_compaction, running, job_id: id.0, }), GcCompactionQueueItem::SubCompactionJob { options, .. } => Some(CompactInfoResponse { compact_key_range: options.compact_key_range, compact_lsn_range: options.compact_lsn_range, sub_compaction: options.sub_compaction, running, job_id: id.0, }), GcCompactionQueueItem::Notify(_, _) => None, } } } #[derive(Default)] struct GcCompactionGuardItems { notify: Option<tokio::sync::oneshot::Sender<()>>, permit: Option<OwnedSemaphorePermit>, } struct GcCompactionQueueInner { running: Option<(GcCompactionJobId, GcCompactionQueueItem)>, queued: VecDeque<(GcCompactionJobId, GcCompactionQueueItem)>, guards: HashMap<GcCompactionJobId, GcCompactionGuardItems>, last_id: GcCompactionJobId, meta_statistics: Option<GcCompactionMetaStatistics>, } impl GcCompactionQueueInner { fn next_id(&mut self) -> GcCompactionJobId { let id = self.last_id; self.last_id = GcCompactionJobId(id.0 + 1); id } } /// A structure to store gc_compaction jobs. pub struct GcCompactionQueue { /// All items in the queue, and the currently-running job. 
inner: std::sync::Mutex<GcCompactionQueueInner>, /// Ensure only one thread is consuming the queue. consumer_lock: tokio::sync::Mutex<()>, } static CONCURRENT_GC_COMPACTION_TASKS: Lazy<Arc<Semaphore>> = Lazy::new(|| { // Only allow one timeline on one pageserver to run gc compaction at a time. Arc::new(Semaphore::new(1)) }); impl GcCompactionQueue { pub fn new() -> Self { GcCompactionQueue { inner: std::sync::Mutex::new(GcCompactionQueueInner { running: None, queued: VecDeque::new(), guards: HashMap::new(), last_id: GcCompactionJobId(0), meta_statistics: None, }), consumer_lock: tokio::sync::Mutex::new(()), } } pub fn cancel_scheduled(&self) { let mut guard = self.inner.lock().unwrap(); guard.queued.clear(); // TODO: if there is a running job, we should keep the gc guard. However, currently, the cancel // API is only used for testing purposes, so we can drop everything here. guard.guards.clear(); } /// Schedule a manual compaction job. pub fn schedule_manual_compaction( &self, options: CompactOptions, notify: Option<tokio::sync::oneshot::Sender<()>>, ) -> GcCompactionJobId { let mut guard = self.inner.lock().unwrap(); let id = guard.next_id(); guard.queued.push_back(( id, GcCompactionQueueItem::MetaJob { options, auto: false, }, )); guard.guards.entry(id).or_default().notify = notify; info!("scheduled compaction job id={}", id); id } /// Schedule an auto compaction job. fn schedule_auto_compaction( &self, options: CompactOptions, permit: OwnedSemaphorePermit, ) -> GcCompactionJobId { let mut guard = self.inner.lock().unwrap(); let id = guard.next_id(); guard.queued.push_back(( id, GcCompactionQueueItem::MetaJob { options, auto: true, }, )); guard.guards.entry(id).or_default().permit = Some(permit); id } /// Trigger an auto compaction. pub async fn trigger_auto_compaction( &self, timeline: &Arc<Timeline>, ) -> Result<(), CompactionError> { let GcCompactionCombinedSettings { gc_compaction_enabled, gc_compaction_initial_threshold_kb, gc_compaction_ratio_percent, .. 
} = timeline.get_gc_compaction_settings(); if !gc_compaction_enabled { return Ok(()); } if self.remaining_jobs_num() > 0 { // Only schedule auto compaction when the queue is empty return Ok(()); } if timeline.ancestor_timeline().is_some() { // Do not trigger auto compaction for child timelines. We haven't tested // it enough in staging yet. return Ok(()); } if timeline.get_gc_compaction_watermark() == Lsn::INVALID { // If the gc watermark is not set, we don't need to trigger auto compaction. // This check is the same as in `gc_compaction_split_jobs` but we don't log // here and we can also skip the computation of the trigger condition earlier. return Ok(()); } let Ok(permit) = CONCURRENT_GC_COMPACTION_TASKS.clone().try_acquire_owned() else { // Only allow one compaction run at a time. TODO: As we do `try_acquire_owned`, we cannot ensure // the fairness of the lock across timelines. We should listen for both `acquire` and `l0_compaction_trigger` // to ensure the fairness while avoid starving other tasks. 
return Ok(()); }; let gc_compaction_state = timeline.get_gc_compaction_state(); let l2_lsn = gc_compaction_state .map(|x| x.last_completed_lsn) .unwrap_or(Lsn::INVALID); let layers = { let guard = timeline .layers .read(LayerManagerLockHolder::GetLayerMapInfo) .await; let layer_map = guard.layer_map()?; layer_map.iter_historic_layers().collect_vec() }; let mut l2_size: u64 = 0; let mut l1_size = 0; let gc_cutoff = *timeline.get_applied_gc_cutoff_lsn(); for layer in layers { if layer.lsn_range.start <= l2_lsn { l2_size += layer.file_size(); } else if layer.lsn_range.start <= gc_cutoff { l1_size += layer.file_size(); } } fn trigger_compaction( l1_size: u64, l2_size: u64, gc_compaction_initial_threshold_kb: u64, gc_compaction_ratio_percent: u64, ) -> bool { const AUTO_TRIGGER_LIMIT: u64 = 150 * 1024 * 1024 * 1024; // 150GB if l1_size + l2_size >= AUTO_TRIGGER_LIMIT { // Do not auto-trigger when physical size >= 150GB return false; } // initial trigger if l2_size == 0 && l1_size >= gc_compaction_initial_threshold_kb * 1024 { info!( "trigger auto-compaction because l1_size={} >= gc_compaction_initial_threshold_kb={}", l1_size, gc_compaction_initial_threshold_kb ); return true; } // size ratio trigger if l2_size == 0 { return false; } if l1_size as f64 / l2_size as f64 >= (gc_compaction_ratio_percent as f64 / 100.0) { info!( "trigger auto-compaction because l1_size={} / l2_size={} > gc_compaction_ratio_percent={}", l1_size, l2_size, gc_compaction_ratio_percent ); return true; } false } if trigger_compaction( l1_size, l2_size, gc_compaction_initial_threshold_kb, gc_compaction_ratio_percent, ) { self.schedule_auto_compaction( CompactOptions { flags: { let mut flags = EnumSet::new(); flags |= CompactFlags::EnhancedGcBottomMostCompaction; if timeline.get_compaction_l0_first() { flags |= CompactFlags::YieldForL0; } flags }, sub_compaction: true, // Only auto-trigger gc-compaction over the data keyspace due to concerns in // https://github.com/neondatabase/neon/issues/11318. 
compact_key_range: Some(CompactKeyRange { start: Key::MIN, end: Key::metadata_key_range().start, }), compact_lsn_range: None, sub_compaction_max_job_size_mb: None, gc_compaction_do_metadata_compaction: false, }, permit, ); info!( "scheduled auto gc-compaction: l1_size={}, l2_size={}, l2_lsn={}, gc_cutoff={}", l1_size, l2_size, l2_lsn, gc_cutoff ); } else { debug!( "did not trigger auto gc-compaction: l1_size={}, l2_size={}, l2_lsn={}, gc_cutoff={}", l1_size, l2_size, l2_lsn, gc_cutoff ); } Ok(()) } async fn collect_layer_below_lsn( &self, timeline: &Arc<Timeline>, lsn: Lsn, ) -> Result<u64, CompactionError> { let guard = timeline .layers .read(LayerManagerLockHolder::GetLayerMapInfo) .await; let layer_map = guard.layer_map()?; let layers = layer_map.iter_historic_layers().collect_vec(); let mut size = 0; for layer in layers { if layer.lsn_range.start <= lsn { size += layer.file_size(); } } Ok(size) } /// Notify the caller the job has finished and unblock GC. fn notify_and_unblock(&self, id: GcCompactionJobId) { info!("compaction job id={} finished", id); let mut guard = self.inner.lock().unwrap(); if let Some(items) = guard.guards.remove(&id) { if let Some(tx) = items.notify { let _ = tx.send(()); } } if let Some(ref meta_statistics) = guard.meta_statistics { if meta_statistics.meta_job_id == id { if let Ok(stats) = serde_json::to_string(&meta_statistics) { info!( "gc-compaction meta statistics for job id = {}: {}", id, stats ); } } } } fn clear_running_job(&self) { let mut guard = self.inner.lock().unwrap(); guard.running = None; } async fn handle_sub_compaction( &self, id: GcCompactionJobId, options: CompactOptions, timeline: &Arc<Timeline>, auto: bool, ) -> Result<(), CompactionError> { info!( "running scheduled enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs" ); let res = timeline .gc_compaction_split_jobs( GcCompactJob::from_compact_options(options.clone()), options.sub_compaction_max_job_size_mb, ) .await; let jobs = match 
res { Ok(jobs) => jobs, Err(err) => { warn!("cannot split gc-compaction jobs: {}, unblocked gc", err); self.notify_and_unblock(id); return Err(err); } }; if jobs.is_empty() { info!("no jobs to run, skipping scheduled compaction task"); self.notify_and_unblock(id); } else { let jobs_len = jobs.len(); let mut pending_tasks = Vec::new(); // gc-compaction might pick more layers or fewer layers to compact. The L2 LSN does not need to be accurate. // And therefore, we simply assume the maximum LSN of all jobs is the expected L2 LSN. let expected_l2_lsn = jobs .iter() .map(|job| job.compact_lsn_range.end) .max() .unwrap(); for (i, job) in jobs.into_iter().enumerate() { // Unfortunately we need to convert the `GcCompactJob` back to `CompactionOptions` // until we do further refactors to allow directly call `compact_with_gc`. let mut flags: EnumSet<CompactFlags> = EnumSet::default(); flags |= CompactFlags::EnhancedGcBottomMostCompaction; if job.dry_run { flags |= CompactFlags::DryRun; } if options.flags.contains(CompactFlags::YieldForL0) { flags |= CompactFlags::YieldForL0; } let options = CompactOptions { flags, sub_compaction: false, compact_key_range: Some(job.compact_key_range.into()), compact_lsn_range: Some(job.compact_lsn_range.into()), sub_compaction_max_job_size_mb: None, gc_compaction_do_metadata_compaction: false, }; pending_tasks.push(GcCompactionQueueItem::SubCompactionJob { options, i, total: jobs_len, }); } if !auto { pending_tasks.push(GcCompactionQueueItem::Notify(id, None)); } else { pending_tasks.push(GcCompactionQueueItem::Notify(id, Some(expected_l2_lsn))); } let layer_size = self .collect_layer_below_lsn(timeline, expected_l2_lsn) .await?; { let mut guard = self.inner.lock().unwrap(); let mut tasks = Vec::new(); for task in pending_tasks { let id = guard.next_id(); tasks.push((id, task)); } tasks.reverse(); for item in tasks { guard.queued.push_front(item); } guard.meta_statistics = Some(GcCompactionMetaStatistics { meta_job_id: id, start_time: 
Some(chrono::Utc::now()), before_compaction_layer_size: layer_size, below_lsn: expected_l2_lsn, total_sub_compaction_jobs: jobs_len, ..Default::default() }); } info!( "scheduled enhanced gc bottom-most compaction with sub-compaction, split into {} jobs", jobs_len ); } Ok(()) } /// Take a job from the queue and process it. Returns if there are still pending tasks. pub async fn iteration( &self, cancel: &CancellationToken, ctx: &RequestContext, gc_block: &GcBlock, timeline: &Arc<Timeline>, ) -> Result<CompactionOutcome, CompactionError> { let res = self.iteration_inner(cancel, ctx, gc_block, timeline).await; if let Err(err) = &res { log_compaction_error(err, None, cancel.is_cancelled(), true); } match res { Ok(res) => Ok(res), Err(e) if e.is_cancel() => Err(e), Err(_) => { // There are some cases where traditional gc might collect some layer // files causing gc-compaction cannot read the full history of the key. // This needs to be resolved in the long-term by improving the compaction // process. For now, let's simply avoid such errors triggering the // circuit breaker. Ok(CompactionOutcome::Skipped) } } } async fn iteration_inner( &self, cancel: &CancellationToken, ctx: &RequestContext, gc_block: &GcBlock, timeline: &Arc<Timeline>, ) -> Result<CompactionOutcome, CompactionError> { let Ok(_one_op_at_a_time_guard) = self.consumer_lock.try_lock() else { return Err(CompactionError::Other(anyhow::anyhow!( "cannot run gc-compaction because another gc-compaction is running. This should not happen because we only call this function from the gc-compaction queue." 
))); }; let has_pending_tasks; let mut yield_for_l0 = false; let Some((id, item)) = ({ let mut guard = self.inner.lock().unwrap(); if let Some((id, item)) = guard.queued.pop_front() { guard.running = Some((id, item.clone())); has_pending_tasks = !guard.queued.is_empty(); Some((id, item)) } else { has_pending_tasks = false; None } }) else { self.trigger_auto_compaction(timeline).await?; // Always yield after triggering auto-compaction. Gc-compaction is a low-priority task and we // have not implemented preemption mechanism yet. We always want to yield it to more important // tasks if there is one. return Ok(CompactionOutcome::Done); }; match item { GcCompactionQueueItem::MetaJob { options, auto } => { if !options .flags .contains(CompactFlags::EnhancedGcBottomMostCompaction) { warn!( "ignoring scheduled compaction task: scheduled task must be gc compaction: {:?}", options ); } else if options.sub_compaction { info!( "running scheduled enhanced gc bottom-most compaction with sub-compaction, splitting compaction jobs" ); self.handle_sub_compaction(id, options, timeline, auto) .await?; } else { // Auto compaction always enables sub-compaction so we don't need to handle update_l2_lsn // in this branch. let _gc_guard = match gc_block.start().await { Ok(guard) => guard, Err(e) => { self.notify_and_unblock(id); self.clear_running_job(); return Err(CompactionError::Other(anyhow!( "cannot run gc-compaction because gc is blocked: {}", e ))); } }; let res = timeline.compact_with_options(cancel, options, ctx).await; let compaction_result = match res { Ok(res) => res, Err(err) => { warn!(%err, "failed to run gc-compaction"); self.notify_and_unblock(id); self.clear_running_job(); return Err(err); } }; if compaction_result == CompactionOutcome::YieldForL0 { yield_for_l0 = true; } } } GcCompactionQueueItem::SubCompactionJob { options, i, total } => { // TODO: error handling, clear the queue if any task fails? 
let _gc_guard = match gc_block.start().await { Ok(guard) => guard, Err(e) => { self.clear_running_job(); return Err(CompactionError::Other(anyhow!( "cannot run gc-compaction because gc is blocked: {}", e ))); } }; info!("running gc-compaction subcompaction job {}/{}", i, total); let res = timeline.compact_with_options(cancel, options, ctx).await; let compaction_result = match res { Ok(res) => res, Err(err) => { warn!(%err, "failed to run gc-compaction subcompaction job"); self.clear_running_job(); let mut guard = self.inner.lock().unwrap(); if let Some(ref mut meta_statistics) = guard.meta_statistics { meta_statistics.failed_sub_compaction_jobs += 1; } return Err(err); } }; if compaction_result == CompactionOutcome::YieldForL0 { // We will permenantly give up a task if we yield for L0 compaction: the preempted subcompaction job won't be running // again. This ensures that we don't keep doing duplicated work within gc-compaction. Not directly returning here because // we need to clean things up before returning from the function. yield_for_l0 = true; } { let mut guard = self.inner.lock().unwrap(); if let Some(ref mut meta_statistics) = guard.meta_statistics { meta_statistics.succeeded_sub_compaction_jobs += 1; } } } GcCompactionQueueItem::Notify(id, l2_lsn) => { let below_lsn = { let mut guard = self.inner.lock().unwrap(); if let Some(ref mut meta_statistics) = guard.meta_statistics { meta_statistics.below_lsn } else { Lsn::INVALID } }; let layer_size = if below_lsn != Lsn::INVALID { self.collect_layer_below_lsn(timeline, below_lsn).await? 
} else { 0 }; { let mut guard = self.inner.lock().unwrap(); if let Some(ref mut meta_statistics) = guard.meta_statistics { meta_statistics.after_compaction_layer_size = layer_size; meta_statistics.finalize(); } } self.notify_and_unblock(id); if let Some(l2_lsn) = l2_lsn { let current_l2_lsn = timeline .get_gc_compaction_state() .map(|x| x.last_completed_lsn) .unwrap_or(Lsn::INVALID); if l2_lsn >= current_l2_lsn { info!("l2_lsn updated to {}", l2_lsn); timeline .update_gc_compaction_state(GcCompactionState { last_completed_lsn: l2_lsn, }) .map_err(CompactionError::Other)?; } else { warn!( "l2_lsn updated to {} but it is less than the current l2_lsn {}", l2_lsn, current_l2_lsn ); } } } } self.clear_running_job(); Ok(if yield_for_l0 { tracing::info!("give up gc-compaction: yield for L0 compaction"); CompactionOutcome::YieldForL0 } else if has_pending_tasks { CompactionOutcome::Pending } else { CompactionOutcome::Done }) } #[allow(clippy::type_complexity)] pub fn remaining_jobs( &self, ) -> ( Option<(GcCompactionJobId, GcCompactionQueueItem)>, VecDeque<(GcCompactionJobId, GcCompactionQueueItem)>, ) { let guard = self.inner.lock().unwrap(); (guard.running.clone(), guard.queued.clone()) } pub fn remaining_jobs_num(&self) -> usize { let guard = self.inner.lock().unwrap(); guard.queued.len() + if guard.running.is_some() { 1 } else { 0 } } } /// A job description for the gc-compaction job. This structure describes the rectangle range that the job will /// process. The exact layers that need to be compacted/rewritten will be generated when `compact_with_gc` gets /// called. #[derive(Debug, Clone)] pub(crate) struct GcCompactJob { pub dry_run: bool, /// The key range to be compacted. The compaction algorithm will only regenerate key-value pairs within this range /// [left inclusive, right exclusive), and other pairs will be rewritten into new files if necessary. pub compact_key_range: Range<Key>, /// The LSN range to be compacted. 
The compaction algorithm will use this range to determine the layers to be /// selected for the compaction, and it does not guarantee the generated layers will have exactly the same LSN range /// as specified here. The true range being compacted is `min_lsn/max_lsn` in [`GcCompactionJobDescription`]. /// min_lsn will always <= the lower bound specified here, and max_lsn will always >= the upper bound specified here. pub compact_lsn_range: Range<Lsn>, /// See [`CompactOptions::gc_compaction_do_metadata_compaction`]. pub do_metadata_compaction: bool, } impl GcCompactJob { pub fn from_compact_options(options: CompactOptions) -> Self { GcCompactJob { dry_run: options.flags.contains(CompactFlags::DryRun), compact_key_range: options .compact_key_range .map(|x| x.into()) .unwrap_or(Key::MIN..Key::MAX), compact_lsn_range: options .compact_lsn_range .map(|x| x.into()) .unwrap_or(Lsn::INVALID..Lsn::MAX), do_metadata_compaction: options.gc_compaction_do_metadata_compaction, } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/walreceiver.rs
pageserver/src/tenant/timeline/walreceiver.rs
//! WAL receiver manages an open connection to safekeeper, to get the WAL it streams into. //! To do so, a current implementation needs to do the following: //! //! * acknowledge the timelines that it needs to stream WAL into. //! Pageserver is able to dynamically (un)load tenants on attach and detach, //! hence WAL receiver needs to react on such events. //! //! * get a broker subscription, stream data from it to determine that a timeline needs WAL streaming. //! For that, it watches specific keys in storage_broker and pulls the relevant data periodically. //! The data is produced by safekeepers, that push it periodically and pull it to synchronize between each other. //! Without this data, no WAL streaming is possible currently. //! //! Only one active WAL streaming connection is allowed at a time. //! The connection is supposed to be updated periodically, based on safekeeper timeline data. //! //! * handle the actual connection and WAL streaming //! //! Handling happens dynamically, by portions of WAL being processed and registered in the server. //! Along with the registration, certain metadata is written to show WAL streaming progress and rely on that when considering safekeepers for connection. //! //! The current module contains high-level primitives used in the submodules; general synchronization, timeline acknowledgement and shutdown logic. 
mod connection_manager; mod walreceiver_connection; use std::future::Future; use std::num::NonZeroU64; use std::sync::Arc; use std::time::Duration; use storage_broker::BrokerClientChannel; use tokio::sync::watch; use tokio_util::sync::CancellationToken; use tracing::*; use utils::postgres_client::PostgresClientProtocol; use self::connection_manager::ConnectionManagerStatus; use super::Timeline; use crate::context::{DownloadBehavior, RequestContext}; use crate::task_mgr::{TaskKind, WALRECEIVER_RUNTIME}; use crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id; use crate::tenant::timeline::walreceiver::connection_manager::{ ConnectionManagerState, connection_manager_loop_step, }; #[derive(Clone)] pub struct WalReceiverConf { pub protocol: PostgresClientProtocol, /// The timeout on the connection to safekeeper for WAL streaming. pub wal_connect_timeout: Duration, /// The timeout to use to determine when the current connection is "stale" and reconnect to the other one. pub lagging_wal_timeout: Duration, /// The Lsn lag to use to determine when the current connection is lagging to much behind and reconnect to the other one. pub max_lsn_wal_lag: NonZeroU64, pub auth_token: Option<Arc<String>>, pub availability_zone: Option<String>, pub ingest_batch_size: u64, pub validate_wal_contiguity: bool, } pub struct WalReceiver { manager_status: Arc<std::sync::RwLock<Option<ConnectionManagerStatus>>>, /// All task spawned by [`WalReceiver::start`] and its children are sensitive to this token. /// It's a child token of [`Timeline`] so that timeline shutdown can cancel WalReceiver tasks early for `freeze_and_flush=true`. 
cancel: CancellationToken, } impl WalReceiver { pub fn start( timeline: Arc<Timeline>, conf: WalReceiverConf, mut broker_client: BrokerClientChannel, ctx: &RequestContext, ) -> Self { let tenant_shard_id = timeline.tenant_shard_id; let timeline_id = timeline.timeline_id; let walreceiver_ctx = ctx.detached_child(TaskKind::WalReceiverManager, DownloadBehavior::Error); let loop_status = Arc::new(std::sync::RwLock::new(None)); let manager_status = Arc::clone(&loop_status); let cancel = timeline.cancel.child_token(); let _task = WALRECEIVER_RUNTIME.spawn({ let cancel = cancel.clone(); async move { debug_assert_current_span_has_tenant_and_timeline_id(); // acquire timeline gate so we know the task doesn't outlive the Timeline let Ok(_guard) = timeline.gate.enter() else { debug!("WAL receiver manager could not enter the gate timeline gate, it's closed already"); return; }; debug!("WAL receiver manager started, connecting to broker"); let mut connection_manager_state = ConnectionManagerState::new( timeline, conf, cancel.clone(), ); while !cancel.is_cancelled() { let loop_step_result = connection_manager_loop_step( &mut broker_client, &mut connection_manager_state, &walreceiver_ctx, &cancel, &loop_status, ).await; match loop_step_result { Ok(()) => continue, Err(_cancelled) => { trace!("Connection manager loop ended, shutting down"); break; } } } connection_manager_state.shutdown().await; *loop_status.write().unwrap() = None; info!("task exits"); } .instrument(info_span!(parent: None, "wal_connection_manager", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), timeline_id = %timeline_id)) }); Self { manager_status, cancel, } } #[instrument(skip_all, level = tracing::Level::DEBUG)] pub async fn cancel(self) { debug_assert_current_span_has_tenant_and_timeline_id(); debug!("cancelling walreceiver tasks"); self.cancel.cancel(); } pub(crate) fn status(&self) -> Option<ConnectionManagerStatus> { self.manager_status.read().unwrap().clone() } } /// A 
handle of an asynchronous task. /// The task has a channel that it can use to communicate its lifecycle events in a certain form, see [`TaskEvent`] /// and a cancellation token that it can listen to for earlier interrupts. /// /// Note that the communication happens via the `watch` channel, that does not accumulate the events, replacing the old one with the never one on submission. /// That may lead to certain events not being observed by the listener. #[derive(Debug)] struct TaskHandle<E> { join_handle: Option<tokio::task::JoinHandle<anyhow::Result<()>>>, events_receiver: watch::Receiver<TaskStateUpdate<E>>, cancellation: CancellationToken, } enum TaskEvent<E> { Update(TaskStateUpdate<E>), End(anyhow::Result<()>), } #[derive(Debug, Clone)] enum TaskStateUpdate<E> { Started, Progress(E), } impl<E: Clone> TaskHandle<E> { /// Initializes the task, starting it immediately after the creation. /// /// The second argument to `task` is a child token of `cancel_parent` ([`CancellationToken::child_token`]). /// It being a child token enables us to provide a [`Self::shutdown`] method. fn spawn<Fut>( cancel_parent: &CancellationToken, task: impl FnOnce(watch::Sender<TaskStateUpdate<E>>, CancellationToken) -> Fut + Send + 'static, ) -> Self where Fut: Future<Output = anyhow::Result<()>> + Send, E: Send + Sync + 'static, { let cancellation = cancel_parent.child_token(); let (events_sender, events_receiver) = watch::channel(TaskStateUpdate::Started); let cancellation_clone = cancellation.clone(); let join_handle = WALRECEIVER_RUNTIME.spawn(async move { events_sender.send(TaskStateUpdate::Started).ok(); task(events_sender, cancellation_clone).await // events_sender is dropped at some point during the .await above. // But the task is still running on WALRECEIVER_RUNTIME. // That is the window when `!jh.is_finished()` // is true inside `fn next_task_event()` below. 
}); TaskHandle { join_handle: Some(join_handle), events_receiver, cancellation, } } /// # Cancel-Safety /// /// Cancellation-safe. async fn next_task_event(&mut self) -> TaskEvent<E> { match self.events_receiver.changed().await { Ok(()) => TaskEvent::Update((self.events_receiver.borrow()).clone()), Err(_task_channel_part_dropped) => { TaskEvent::End(match self.join_handle.as_mut() { Some(jh) => { if !jh.is_finished() { // See: https://github.com/neondatabase/neon/issues/2885 trace!("sender is dropped while join handle is still alive"); } let res = match jh.await { Ok(res) => res, Err(je) if je.is_cancelled() => unreachable!("not used"), Err(je) if je.is_panic() => { // already logged Ok(()) } Err(je) => Err(anyhow::Error::new(je).context("join walreceiver task")), }; // For cancellation-safety, drop join_handle only after successful .await. self.join_handle = None; res } None => { // Another option is to have an enum, join handle or result and give away the reference to it Err(anyhow::anyhow!("Task was joined more than once")) } }) } } } /// Aborts current task, waiting for it to finish. async fn shutdown(self) { if let Some(jh) = self.join_handle { self.cancellation.cancel(); match jh.await { Ok(Ok(())) => debug!("Shutdown success"), Ok(Err(e)) => error!("Shutdown task error: {e:?}"), Err(je) if je.is_cancelled() => unreachable!("not used"), Err(je) if je.is_panic() => { // already logged } Err(je) => { error!("Shutdown task join error: {je}") } } } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/import_pgdata.rs
pageserver/src/tenant/timeline/import_pgdata.rs
use std::sync::Arc; use anyhow::{Context, bail}; use importbucket_client::{ControlFile, RemoteStorageWrapper}; use pageserver_api::models::ShardImportStatus; use remote_storage::RemotePath; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use tracing::info; use utils::lsn::Lsn; use utils::pausable_failpoint; use utils::sync::gate::Gate; use super::{Timeline, TimelineDeleteProgress}; use crate::context::RequestContext; use crate::controller_upcall_client::{StorageControllerUpcallApi, StorageControllerUpcallClient}; use crate::tenant::metadata::TimelineMetadata; use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; mod flow; mod importbucket_client; mod importbucket_format; pub(crate) mod index_part_format; pub struct ImportingTimeline { pub import_task_handle: JoinHandle<()>, pub import_task_gate: Gate, pub timeline: Arc<Timeline>, pub delete_progress: TimelineDeleteProgress, } impl std::fmt::Debug for ImportingTimeline { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "ImportingTimeline<{}>", self.timeline.timeline_id) } } impl ImportingTimeline { pub async fn shutdown(&self) { self.import_task_handle.abort(); self.import_task_gate.close().await; self.timeline.remote_client.shutdown().await; } } pub async fn doit( timeline: &Arc<Timeline>, index_part: index_part_format::Root, ctx: &RequestContext, cancel: CancellationToken, ) -> anyhow::Result<()> { let index_part_format::Root::V1(v1) = index_part; let index_part_format::InProgress { location, idempotency_key: _, started_at: _, } = match v1 { index_part_format::V1::Done(_) => return Ok(()), index_part_format::V1::InProgress(in_progress) => in_progress, }; let storcon_client = StorageControllerUpcallClient::new(timeline.conf, &cancel); let shard_status = storcon_client .get_timeline_import_status( timeline.tenant_shard_id, timeline.timeline_id, timeline.generation, ) .await .map_err(|_err| anyhow::anyhow!("Shut down while getting timeline import 
status"))?; info!(?shard_status, "peeking shard status"); match shard_status { ShardImportStatus::InProgress(maybe_progress) => { let storage = importbucket_client::new(timeline.conf, &location, cancel.clone()).await?; let control_file_res = if maybe_progress.is_none() { // Only prepare the import once when there's no progress. prepare_import(timeline, storage.clone(), &cancel).await } else { storage.get_control_file().await }; let control_file = match control_file_res { Ok(cf) => cf, Err(err) => { return Err( terminate_flow_with_error(timeline, err, &storcon_client, &cancel).await, ); } }; let res = flow::run( timeline.clone(), control_file, storage.clone(), maybe_progress, ctx, ) .await; if let Err(err) = res { return Err( terminate_flow_with_error(timeline, err, &storcon_client, &cancel).await, ); } tracing::info!("Import plan executed. Flushing remote changes and notifying storcon"); timeline .remote_client .schedule_index_upload_for_file_changes()?; timeline.remote_client.wait_completion().await?; pausable_failpoint!("import-timeline-pre-success-notify-pausable"); // Communicate that shard is done. // Ensure at-least-once delivery of the upcall to storage controller // before we mark the task as done and never come here again. // // Note that we do not mark the import complete in the index part now. // This happens in [`Tenant::finalize_importing_timeline`] in response // to the storage controller calling // `/v1/tenant/:tenant_id/timeline/:timeline_id/activate_post_import`. 
storcon_client .put_timeline_import_status( timeline.tenant_shard_id, timeline.timeline_id, timeline.generation, ShardImportStatus::Done, ) .await .map_err(|_err| { anyhow::anyhow!("Shut down while putting timeline import status") })?; } ShardImportStatus::Error(err) => { info!( "shard status indicates that the shard is done (error), skipping import {}", err ); } ShardImportStatus::Done => { info!("shard status indicates that the shard is done (success), skipping import"); } } Ok(()) } async fn prepare_import( timeline: &Arc<Timeline>, storage: RemoteStorageWrapper, cancel: &CancellationToken, ) -> anyhow::Result<ControlFile> { // Wipe the slate clean before starting the import as a precaution. // This method is only called when there's no recorded checkpoint for the import // in the storage controller. // // Note that this is split-brain safe (two imports for same timeline shards running in // different generations) because we go through the usual deletion path, including deletion queue. info!("wipe the slate clean"); { // TODO: do we need to hold GC lock for this? 
let mut guard = timeline .layers .write(LayerManagerLockHolder::ImportPgData) .await; assert!( guard.layer_map()?.open_layer.is_none(), "while importing, there should be no in-memory layer" // this just seems like a good place to assert it ); let all_layers_keys = guard.all_persistent_layers(); let all_layers: Vec<_> = all_layers_keys .iter() .map(|key| guard.get_from_key(key)) .collect(); let open = guard.open_mut().context("open_mut")?; timeline.remote_client.schedule_gc_update(&all_layers)?; open.finish_gc_timeline(&all_layers); } // // Wait for pgdata to finish uploading // info!("wait for pgdata to reach status 'done'"); let status_prefix = RemotePath::from_string("status").unwrap(); let pgdata_status_key = status_prefix.join("pgdata"); loop { let res = async { let pgdata_status: Option<importbucket_format::PgdataStatus> = storage .get_json(&pgdata_status_key) .await .context("get pgdata status")?; info!(?pgdata_status, "peeking pgdata status"); if pgdata_status.map(|st| st.done).unwrap_or(false) { Ok(()) } else { Err(anyhow::anyhow!("pgdata not done yet")) } } .await; match res { Ok(_) => break, Err(_err) => { info!("indefinitely waiting for pgdata to finish"); if tokio::time::timeout(std::time::Duration::from_secs(10), cancel.cancelled()) .await .is_ok() { bail!("cancelled while waiting for pgdata"); } } } } let control_file = storage.get_control_file().await?; let base_lsn = control_file.base_lsn(); info!("update TimelineMetadata based on LSNs from control file"); { let pg_version = control_file.pg_version(); async move { // FIXME: The 'disk_consistent_lsn' should be the LSN at the *end* of the // checkpoint record, and prev_record_lsn should point to its beginning. // We should read the real end of the record from the WAL, but here we // just fake it. 
let disk_consistent_lsn = Lsn(base_lsn.0 + 8); let prev_record_lsn = base_lsn; let metadata = TimelineMetadata::new( disk_consistent_lsn, Some(prev_record_lsn), None, // no ancestor Lsn(0), // no ancestor lsn base_lsn, // latest_gc_cutoff_lsn base_lsn, // initdb_lsn pg_version, ); let _start_lsn = disk_consistent_lsn + 1; timeline .remote_client .schedule_index_upload_for_full_metadata_update(&metadata)?; timeline.remote_client.wait_completion().await?; anyhow::Ok(()) } } .await?; Ok(control_file) } async fn terminate_flow_with_error( timeline: &Arc<Timeline>, error: anyhow::Error, storcon_client: &StorageControllerUpcallClient, cancel: &CancellationToken, ) -> anyhow::Error { // The import task is a aborted on tenant shutdown, so in principle, it should // never be cancelled. To be on the safe side, check the cancellation tokens // before marking the import as failed. if !(cancel.is_cancelled() || timeline.cancel.is_cancelled()) { let notify_res = storcon_client .put_timeline_import_status( timeline.tenant_shard_id, timeline.timeline_id, timeline.generation, ShardImportStatus::Error(format!("{error:#}")), ) .await; if let Err(_notify_error) = notify_res { // The [`StorageControllerUpcallClient::put_timeline_import_status`] retries // forever internally, so errors returned by it can only be due to cancellation. info!("failed to notify storcon about permanent import error"); } // Will be logged by [`Tenant::create_timeline_import_pgdata_task`] error } else { anyhow::anyhow!("Import task cancelled") } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/analysis.rs
pageserver/src/tenant/timeline/analysis.rs
use std::collections::BTreeSet; use std::ops::Range; use utils::lsn::Lsn; use super::Timeline; use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; #[derive(serde::Serialize)] pub(crate) struct RangeAnalysis { start: String, end: String, has_image: bool, num_of_deltas_above_image: usize, total_num_of_deltas: usize, num_of_l0: usize, } impl Timeline { pub(crate) async fn perf_info(&self) -> Vec<RangeAnalysis> { // First, collect all split points of the layers. let mut split_points = BTreeSet::new(); let mut delta_ranges = Vec::new(); let mut image_ranges = Vec::new(); let num_of_l0; let all_layer_files = { let guard = self .layers .read(LayerManagerLockHolder::GetLayerMapInfo) .await; num_of_l0 = guard.layer_map().unwrap().level0_deltas().len(); guard.all_persistent_layers() }; let lsn = self.get_last_record_lsn(); for key in all_layer_files { split_points.insert(key.key_range.start); split_points.insert(key.key_range.end); if key.is_delta { delta_ranges.push((key.key_range.clone(), key.lsn_range.clone())); } else { image_ranges.push((key.key_range.clone(), key.lsn_range.start)); } } // For each split range, compute the estimated read amplification. let split_points = split_points.into_iter().collect::<Vec<_>>(); let mut result = Vec::new(); for i in 0..(split_points.len() - 1) { let start = split_points[i]; let end = split_points[i + 1]; // Find the latest image layer that contains the information. let mut maybe_image_layers = image_ranges .iter() // We insert split points for all image layers, and therefore a `contains` check for the start point should be enough. 
.filter(|(key_range, img_lsn)| key_range.contains(&start) && img_lsn <= &lsn) .cloned() .collect::<Vec<_>>(); maybe_image_layers.sort_by(|a, b| a.1.cmp(&b.1)); let image_layer = maybe_image_layers.last().cloned(); let lsn_filter_start = image_layer .as_ref() .map(|(_, lsn)| *lsn) .unwrap_or(Lsn::INVALID); fn overlaps_with(lsn_range_a: &Range<Lsn>, lsn_range_b: &Range<Lsn>) -> bool { !(lsn_range_a.end <= lsn_range_b.start || lsn_range_a.start >= lsn_range_b.end) } let maybe_delta_layers = delta_ranges .iter() .filter(|(key_range, lsn_range)| { key_range.contains(&start) && overlaps_with(&(lsn_filter_start..lsn), lsn_range) }) .cloned() .collect::<Vec<_>>(); let pitr_delta_layers = delta_ranges .iter() .filter(|(key_range, _)| key_range.contains(&start)) .cloned() .collect::<Vec<_>>(); result.push(RangeAnalysis { start: start.to_string(), end: end.to_string(), has_image: image_layer.is_some(), num_of_deltas_above_image: maybe_delta_layers.len(), total_num_of_deltas: pitr_delta_layers.len(), num_of_l0, }); } result } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/logical_size.rs
pageserver/src/tenant/timeline/logical_size.rs
use std::sync::atomic::{AtomicBool, AtomicI64, Ordering as AtomicOrdering}; use anyhow::Context; use once_cell::sync::OnceCell; use tokio_util::sync::CancellationToken; use utils::lsn::Lsn; /// Internal structure to hold all data needed for logical size calculation. /// /// Calculation consists of two stages: /// /// 1. Initial size calculation. That might take a long time, because it requires /// reading all layers containing relation sizes at `initial_part_end`. /// /// 2. Collecting an incremental part and adding that to the initial size. /// Increments are appended on walreceiver writing new timeline data, /// which result in increase or decrease of the logical size. pub(super) struct LogicalSize { /// Size, potentially slow to compute. Calculating this might require reading multiple /// layers, and even ancestor's layers. /// /// NOTE: size at a given LSN is constant, but after a restart we will calculate /// the initial size at a different LSN. pub initial_logical_size: OnceCell<( u64, crate::metrics::initial_logical_size::FinishedCalculationGuard, )>, /// Cancellation for the best-effort logical size calculation. /// /// The token is kept in a once-cell so that we can error out if a higher priority /// request comes in *before* we have started the normal logical size calculation. pub(crate) cancel_wait_for_background_loop_concurrency_limit_semaphore: OnceCell<CancellationToken>, /// Once the initial logical size is initialized, this is notified. pub(crate) initialized: tokio::sync::Semaphore, /// Latest Lsn that has its size uncalculated, could be absent for freshly created timelines. pub initial_part_end: Option<Lsn>, /// All other size changes after startup, combined together. /// /// Size shouldn't ever be negative, but this is signed for two reasons: /// /// 1. If we initialized the "baseline" size lazily, while we already /// process incoming WAL, the incoming WAL records could decrement the /// variable and temporarily make it negative. 
(This is just future-proofing; /// the initialization is currently not done lazily.) /// /// 2. If there is a bug and we e.g. forget to increment it in some cases /// when size grows, but remember to decrement it when it shrinks again, the /// variable could go negative. In that case, it seems better to at least /// try to keep tracking it, rather than clamp or overflow it. Note that /// get_current_logical_size() will clamp the returned value to zero if it's /// negative, and log an error. Could set it permanently to zero or some /// special value to indicate "broken" instead, but this will do for now. /// /// Note that we also expose a copy of this value as a prometheus metric, /// see `current_logical_size_gauge`. Use the `update_current_logical_size` /// to modify this, it will also keep the prometheus metric in sync. pub size_added_after_initial: AtomicI64, /// For [`crate::metrics::initial_logical_size::TIMELINES_WHERE_WALRECEIVER_GOT_APPROXIMATE_SIZE`]. pub(super) did_return_approximate_to_walreceiver: AtomicBool, } /// Normalized current size, that the data in pageserver occupies. #[derive(Debug, Clone, Copy)] pub(crate) enum CurrentLogicalSize { /// The size is not yet calculated to the end, this is an intermediate result, /// constructed from walreceiver increments and normalized: logical data could delete some objects, hence be negative, /// yet total logical size cannot be below 0. Approximate(Approximate), // Fully calculated logical size, only other future walreceiver increments are changing it, and those changes are // available for observation without any calculations. 
Exact(Exact), } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub(crate) enum Accuracy { Approximate, Exact, } #[derive(Debug, Clone, Copy)] pub(crate) struct Approximate(u64); #[derive(Debug, Clone, Copy)] pub(crate) struct Exact(u64); impl From<&Approximate> for u64 { fn from(value: &Approximate) -> Self { value.0 } } impl From<&Exact> for u64 { fn from(val: &Exact) -> Self { val.0 } } impl Approximate { /// For use in situations where we don't have a sane logical size value but need /// to return something, e.g. in HTTP API on shard >0 of a sharded tenant. pub(crate) fn zero() -> Self { Self(0) } } impl CurrentLogicalSize { pub(crate) fn size_dont_care_about_accuracy(&self) -> u64 { match self { Self::Approximate(size) => size.into(), Self::Exact(size) => size.into(), } } pub(crate) fn accuracy(&self) -> Accuracy { match self { Self::Approximate(_) => Accuracy::Approximate, Self::Exact(_) => Accuracy::Exact, } } pub(crate) fn is_exact(&self) -> bool { matches!(self, Self::Exact(_)) } } impl LogicalSize { pub(super) fn empty_initial() -> Self { Self { initial_logical_size: OnceCell::with_value((0, { crate::metrics::initial_logical_size::START_CALCULATION .first(crate::metrics::initial_logical_size::StartCircumstances::EmptyInitial) .calculation_result_saved() })), cancel_wait_for_background_loop_concurrency_limit_semaphore: OnceCell::new(), initial_part_end: None, size_added_after_initial: AtomicI64::new(0), did_return_approximate_to_walreceiver: AtomicBool::new(false), initialized: tokio::sync::Semaphore::new(0), } } pub(super) fn deferred_initial(compute_to: Lsn) -> Self { Self { initial_logical_size: OnceCell::new(), cancel_wait_for_background_loop_concurrency_limit_semaphore: OnceCell::new(), initial_part_end: Some(compute_to), size_added_after_initial: AtomicI64::new(0), did_return_approximate_to_walreceiver: AtomicBool::new(false), initialized: tokio::sync::Semaphore::new(0), } } pub(super) fn current_size(&self) -> CurrentLogicalSize { let size_increment: 
i64 = self.size_added_after_initial.load(AtomicOrdering::Acquire); // ^^^ keep this type explicit so that the casts in this function break if // we change the type. match self.initial_logical_size.get() { Some((initial_size, _)) => { CurrentLogicalSize::Exact(Exact(initial_size.checked_add_signed(size_increment) .with_context(|| format!("Overflow during logical size calculation, initial_size: {initial_size}, size_increment: {size_increment}")) .unwrap())) } None => { let non_negative_size_increment = u64::try_from(size_increment).unwrap_or(0); CurrentLogicalSize::Approximate(Approximate(non_negative_size_increment)) } } } pub(super) fn increment_size(&self, delta: i64) { self.size_added_after_initial .fetch_add(delta, AtomicOrdering::SeqCst); } /// Make the value computed by initial logical size computation /// available for re-use. This doesn't contain the incremental part. pub(super) fn initialized_size(&self, lsn: Lsn) -> Option<u64> { match self.initial_part_end { Some(v) if v == lsn => self.initial_logical_size.get().map(|(s, _)| *s), _ => None, } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/detach_ancestor.rs
pageserver/src/tenant/timeline/detach_ancestor.rs
use std::collections::HashSet; use std::sync::Arc; use anyhow::Context; use bytes::Bytes; use http_utils::error::ApiError; use pageserver_api::key::Key; use pageserver_api::keyspace::KeySpace; use pageserver_api::models::DetachBehavior; use pageserver_api::models::detach_ancestor::AncestorDetached; use pageserver_api::shard::ShardIdentity; use pageserver_compaction::helpers::overlaps_with; use tokio::sync::Semaphore; use tokio_util::sync::CancellationToken; use tracing::Instrument; use utils::completion; use utils::generation::Generation; use utils::id::TimelineId; use utils::lsn::Lsn; use utils::sync::gate::GateError; use super::layer_manager::{LayerManager, LayerManagerLockHolder}; use super::{FlushLayerError, Timeline}; use crate::context::{DownloadBehavior, RequestContext}; use crate::task_mgr::TaskKind; use crate::tenant::TenantShard; use crate::tenant::remote_timeline_client::index::GcBlockingReason::DetachAncestor; use crate::tenant::storage_layer::layer::local_layer_path; use crate::tenant::storage_layer::{ AsLayerDesc as _, DeltaLayerWriter, ImageLayerWriter, IoConcurrency, Layer, ResidentLayer, ValuesReconstructState, }; use crate::tenant::timeline::VersionedKeySpaceQuery; use crate::virtual_file::{MaybeFatalIo, VirtualFile}; #[derive(Debug, thiserror::Error)] pub(crate) enum Error { #[error("no ancestors")] NoAncestor, #[error("too many ancestors")] TooManyAncestors, #[error("ancestor is not empty")] AncestorNotEmpty, #[error("shutting down, please retry later")] ShuttingDown, #[error("archived: {}", .0)] Archived(TimelineId), #[error(transparent)] NotFound(crate::tenant::GetTimelineError), #[error("failed to reparent all candidate timelines, please retry")] FailedToReparentAll, #[error("ancestor is already being detached by: {}", .0)] OtherTimelineDetachOngoing(TimelineId), #[error("preparing to timeline ancestor detach failed")] Prepare(#[source] anyhow::Error), #[error("detaching and reparenting failed")] DetachReparent(#[source] anyhow::Error), 
#[error("completing ancestor detach failed")] Complete(#[source] anyhow::Error), #[error("failpoint: {}", .0)] Failpoint(&'static str), } impl Error { /// Try to catch cancellation from within the `anyhow::Error`, or wrap the anyhow as the given /// variant or fancier `or_else`. fn launder<F>(e: anyhow::Error, or_else: F) -> Error where F: Fn(anyhow::Error) -> Error, { use remote_storage::TimeoutOrCancel; use crate::tenant::remote_timeline_client::WaitCompletionError; use crate::tenant::upload_queue::NotInitialized; if e.is::<NotInitialized>() || TimeoutOrCancel::caused_by_cancel(&e) || e.downcast_ref::<remote_storage::DownloadError>() .is_some_and(|e| e.is_cancelled()) || e.is::<WaitCompletionError>() { Error::ShuttingDown } else { or_else(e) } } } impl From<Error> for ApiError { fn from(value: Error) -> Self { match value { Error::NoAncestor => ApiError::Conflict(value.to_string()), Error::TooManyAncestors | Error::AncestorNotEmpty => { ApiError::BadRequest(anyhow::anyhow!("{value}")) } Error::ShuttingDown => ApiError::ShuttingDown, Error::Archived(_) => ApiError::BadRequest(anyhow::anyhow!("{value}")), Error::OtherTimelineDetachOngoing(_) | Error::FailedToReparentAll => { ApiError::ResourceUnavailable(value.to_string().into()) } Error::NotFound(e) => ApiError::from(e), // these variants should have no cancellation errors because of Error::launder Error::Prepare(_) | Error::DetachReparent(_) | Error::Complete(_) | Error::Failpoint(_) => ApiError::InternalServerError(value.into()), } } } impl From<crate::tenant::upload_queue::NotInitialized> for Error { fn from(_: crate::tenant::upload_queue::NotInitialized) -> Self { // treat all as shutting down signals, even though that is not entirely correct // (uninitialized state) Error::ShuttingDown } } impl From<super::layer_manager::Shutdown> for Error { fn from(_: super::layer_manager::Shutdown) -> Self { Error::ShuttingDown } } pub(crate) enum Progress { Prepared(Attempt, PreparedTimelineDetach), 
Done(AncestorDetached), } pub(crate) struct PreparedTimelineDetach { layers: Vec<Layer>, } // TODO: this should be part of PageserverConf because we cannot easily modify cplane arguments. #[derive(Debug)] pub(crate) struct Options { pub(crate) rewrite_concurrency: std::num::NonZeroUsize, pub(crate) copy_concurrency: std::num::NonZeroUsize, } impl Default for Options { fn default() -> Self { Self { rewrite_concurrency: std::num::NonZeroUsize::new(2).unwrap(), copy_concurrency: std::num::NonZeroUsize::new(100).unwrap(), } } } /// Represents an across tenant reset exclusive single attempt to detach ancestor. #[derive(Debug)] pub(crate) struct Attempt { pub(crate) timeline_id: TimelineId, pub(crate) ancestor_timeline_id: TimelineId, pub(crate) ancestor_lsn: Lsn, _guard: completion::Completion, gate_entered: Option<utils::sync::gate::GateGuard>, } impl Attempt { pub(crate) fn before_reset_tenant(&mut self) { let taken = self.gate_entered.take(); assert!(taken.is_some()); } pub(crate) fn new_barrier(&self) -> completion::Barrier { self._guard.barrier() } } pub(crate) async fn generate_tombstone_image_layer( detached: &Arc<Timeline>, ancestor: &Arc<Timeline>, ancestor_lsn: Lsn, historic_layers_to_copy: &Vec<Layer>, ctx: &RequestContext, ) -> Result<Option<ResidentLayer>, Error> { tracing::info!( "removing non-inherited keys by writing an image layer with tombstones at the detach LSN" ); let io_concurrency = IoConcurrency::spawn_from_conf( detached.conf.get_vectored_concurrent_io, detached.gate.enter().map_err(|_| Error::ShuttingDown)?, ); let mut reconstruct_state = ValuesReconstructState::new(io_concurrency); // Directly use `get_vectored_impl` to skip the max_vectored_read_key limit check. Note that the keyspace should // not contain too many keys, otherwise this takes a lot of memory. Currently we limit it to 10k keys in the compute. 
let key_range = Key::sparse_non_inherited_keyspace(); // avoid generating a "future layer" which will then be removed let image_lsn = ancestor_lsn; { for layer in historic_layers_to_copy { let desc = layer.layer_desc(); if !desc.is_delta && desc.lsn_range.start == image_lsn && overlaps_with(&key_range, &desc.key_range) { tracing::info!( layer=%layer, "will copy tombstone from ancestor instead of creating a new one" ); return Ok(None); } } let layers = detached .layers .read(LayerManagerLockHolder::DetachAncestor) .await; for layer in layers.all_persistent_layers() { if !layer.is_delta && layer.lsn_range.start == image_lsn && overlaps_with(&key_range, &layer.key_range) { tracing::warn!( layer=%layer, "image layer at the detach LSN already exists, skipping removing aux files" ); return Ok(None); } } } let query = VersionedKeySpaceQuery::uniform(KeySpace::single(key_range.clone()), image_lsn); let data = ancestor .get_vectored_impl(query, &mut reconstruct_state, ctx) .await .context("failed to retrieve aux keys") .map_err(|e| Error::launder(e, Error::Prepare))?; if !data.is_empty() { // TODO: is it possible that we can have an image at `image_lsn`? Unlikely because image layers are only generated // upon compaction but theoretically possible. 
let mut image_layer_writer = ImageLayerWriter::new( detached.conf, detached.timeline_id, detached.tenant_shard_id, &key_range, image_lsn, &detached.gate, detached.cancel.clone(), ctx, ) .await .context("failed to create image layer writer") .map_err(Error::Prepare)?; for key in data.keys() { image_layer_writer .put_image(*key, Bytes::new(), ctx) .await .context("failed to write key") .map_err(|e| Error::launder(e, Error::Prepare))?; } let (desc, path) = image_layer_writer .finish(ctx) .await .context("failed to finish image layer writer for removing the metadata keys") .map_err(|e| Error::launder(e, Error::Prepare))?; let generated = Layer::finish_creating(detached.conf, detached, desc, &path) .map_err(|e| Error::launder(e, Error::Prepare))?; detached .remote_client .upload_layer_file(&generated, &detached.cancel) .await .map_err(|e| Error::launder(e, Error::Prepare))?; tracing::info!(layer=%generated, "wrote image layer"); Ok(Some(generated)) } else { tracing::info!("no aux keys found in ancestor"); Ok(None) } } /// See [`Timeline::prepare_to_detach_from_ancestor`] pub(super) async fn prepare( detached: &Arc<Timeline>, tenant: &TenantShard, behavior: DetachBehavior, options: Options, ctx: &RequestContext, ) -> Result<Progress, Error> { use Error::*; let Some((mut ancestor, mut ancestor_lsn)) = detached .ancestor_timeline .as_ref() .map(|tl| (tl.clone(), detached.ancestor_lsn)) else { let ancestor_id; let ancestor_lsn; let still_in_progress = { let accessor = detached.remote_client.initialized_upload_queue()?; // we are safe to inspect the latest uploaded, because we can only witness this after // restart is complete and ancestor is no more. 
let latest = accessor.latest_uploaded_index_part(); let Some((id, lsn)) = latest.lineage.detached_previous_ancestor() else { return Err(NoAncestor); }; ancestor_id = id; ancestor_lsn = lsn; latest .gc_blocking .as_ref() .is_some_and(|b| b.blocked_by(DetachAncestor)) }; if still_in_progress { // gc is still blocked, we can still reparent and complete. // we are safe to reparent remaining, because they were locked in in the beginning. let attempt = continue_with_blocked_gc(detached, tenant, ancestor_id, ancestor_lsn).await?; // because the ancestor of detached is already set to none, we have published all // of the layers, so we are still "prepared." return Ok(Progress::Prepared( attempt, PreparedTimelineDetach { layers: Vec::new() }, )); } let reparented_timelines = reparented_direct_children(detached, tenant)?; return Ok(Progress::Done(AncestorDetached { reparented_timelines, })); }; if detached.is_archived() != Some(false) { return Err(Archived(detached.timeline_id)); } if !ancestor_lsn.is_valid() { // rare case, probably wouldn't even load tracing::error!("ancestor is set, but ancestor_lsn is invalid, this timeline needs fixing"); return Err(NoAncestor); } check_no_archived_children_of_ancestor(tenant, detached, &ancestor, ancestor_lsn, behavior)?; if let DetachBehavior::MultiLevelAndNoReparent = behavior { // If the ancestor has an ancestor, we might be able to fast-path detach it if the current ancestor does not have any data written/used by the detaching timeline. while let Some(ancestor_of_ancestor) = ancestor.ancestor_timeline.clone() { if ancestor_lsn != ancestor.ancestor_lsn { // non-technical requirement; we could flatten still if ancestor LSN does not match but that needs // us to copy and cut more layers. 
return Err(AncestorNotEmpty); } // Use the ancestor of the ancestor as the new ancestor (only when the ancestor LSNs are the same) ancestor_lsn = ancestor.ancestor_lsn; // Get the LSN first before resetting the `ancestor` variable ancestor = ancestor_of_ancestor; // TODO: do we still need to check if we don't want to reparent? check_no_archived_children_of_ancestor( tenant, detached, &ancestor, ancestor_lsn, behavior, )?; } } else if ancestor.ancestor_timeline.is_some() { // non-technical requirement; we could flatten N ancestors just as easily but we chose // not to, at least initially return Err(TooManyAncestors); } tracing::info!( "attempt to detach the timeline from the ancestor: {}@{}, behavior={:?}", ancestor.timeline_id, ancestor_lsn, behavior ); let attempt = start_new_attempt(detached, tenant, ancestor.timeline_id, ancestor_lsn).await?; utils::pausable_failpoint!("timeline-detach-ancestor::before_starting_after_locking-pausable"); fail::fail_point!( "timeline-detach-ancestor::before_starting_after_locking", |_| Err(Error::Failpoint( "timeline-detach-ancestor::before_starting_after_locking" )) ); if ancestor_lsn >= ancestor.get_disk_consistent_lsn() { let span = tracing::info_span!("freeze_and_flush", ancestor_timeline_id=%ancestor.timeline_id); async { let started_at = std::time::Instant::now(); let freeze_and_flush = ancestor.freeze_and_flush0(); let mut freeze_and_flush = std::pin::pin!(freeze_and_flush); let res = tokio::time::timeout(std::time::Duration::from_secs(1), &mut freeze_and_flush) .await; let res = match res { Ok(res) => res, Err(_elapsed) => { tracing::info!("freezing and flushing ancestor is still ongoing"); freeze_and_flush.await } }; res.map_err(|e| { use FlushLayerError::*; match e { Cancelled | NotRunning(_) => { // FIXME(#6424): technically statically unreachable right now, given how we never // drop the sender Error::ShuttingDown } CreateImageLayersError(_) | Other(_) => Error::Prepare(e.into()), } })?; // we do not need to wait for 
uploads to complete but we do need `struct Layer`, // copying delta prefix is unsupported currently for `InMemoryLayer`. tracing::info!( elapsed_ms = started_at.elapsed().as_millis(), "froze and flushed the ancestor" ); Ok::<_, Error>(()) } .instrument(span) .await?; } let end_lsn = ancestor_lsn + 1; let (filtered_layers, straddling_branchpoint, rest_of_historic) = { // we do not need to start from our layers, because they can only be layers that come // *after* ancestor_lsn let layers = tokio::select! { guard = ancestor.layers.read(LayerManagerLockHolder::DetachAncestor) => guard, _ = detached.cancel.cancelled() => { return Err(ShuttingDown); } _ = ancestor.cancel.cancelled() => { return Err(ShuttingDown); } }; // between retries, these can change if compaction or gc ran in between. this will mean // we have to redo work. partition_work(ancestor_lsn, &layers)? }; // TODO: layers are already sorted by something: use that to determine how much of remote // copies are already done -- gc is blocked, but a compaction could had happened on ancestor, // which is something to keep in mind if copy skipping is implemented. tracing::info!(filtered=%filtered_layers, to_rewrite = straddling_branchpoint.len(), historic=%rest_of_historic.len(), "collected layers"); // TODO: copying and lsn prefix copying could be done at the same time with a single fsync after let mut new_layers: Vec<Layer> = Vec::with_capacity(straddling_branchpoint.len() + rest_of_historic.len() + 1); if let Some(tombstone_layer) = generate_tombstone_image_layer(detached, &ancestor, ancestor_lsn, &rest_of_historic, ctx) .await? 
{ new_layers.push(tombstone_layer.into()); } { tracing::info!(to_rewrite = %straddling_branchpoint.len(), "copying prefix of delta layers"); let mut tasks = tokio::task::JoinSet::new(); let mut wrote_any = false; let limiter = Arc::new(Semaphore::new(options.rewrite_concurrency.get())); for layer in straddling_branchpoint { let limiter = limiter.clone(); let timeline = detached.clone(); let ctx = ctx.detached_child(TaskKind::DetachAncestor, DownloadBehavior::Download); let span = tracing::info_span!("upload_rewritten_layer", %layer); tasks.spawn( async move { let _permit = limiter.acquire().await; let copied = upload_rewritten_layer(end_lsn, &layer, &timeline, &timeline.cancel, &ctx) .await?; if let Some(copied) = copied.as_ref() { tracing::info!(%copied, "rewrote and uploaded"); } Ok(copied) } .instrument(span), ); } while let Some(res) = tasks.join_next().await { match res { Ok(Ok(Some(copied))) => { wrote_any = true; new_layers.push(copied); } Ok(Ok(None)) => {} Ok(Err(e)) => return Err(e), Err(je) => return Err(Error::Prepare(je.into())), } } // FIXME: the fsync should be mandatory, after both rewrites and copies if wrote_any { fsync_timeline_dir(detached, ctx).await; } } let mut tasks = tokio::task::JoinSet::new(); let limiter = Arc::new(Semaphore::new(options.copy_concurrency.get())); let cancel_eval = CancellationToken::new(); for adopted in rest_of_historic { let limiter = limiter.clone(); let timeline = detached.clone(); let cancel_eval = cancel_eval.clone(); tasks.spawn( async move { let _permit = tokio::select! { permit = limiter.acquire() => { permit } // Wait for the cancellation here instead of letting the entire task be cancelled. // Cancellations are racy in that they might leave layers on disk. _ = cancel_eval.cancelled() => { Err(Error::ShuttingDown)? 
} }; let (owned, did_hardlink) = remote_copy( &adopted, &timeline, timeline.generation, timeline.shard_identity, &timeline.cancel, ) .await?; tracing::info!(layer=%owned, did_hard_link=%did_hardlink, "remote copied"); Ok((owned, did_hardlink)) } .in_current_span(), ); } fn delete_layers(timeline: &Timeline, layers: Vec<Layer>) -> Result<(), Error> { // We are deleting layers, so we must hold the gate let _gate = timeline.gate.enter().map_err(|e| match e { GateError::GateClosed => Error::ShuttingDown, })?; { layers.into_iter().for_each(|l: Layer| { l.delete_on_drop(); std::mem::drop(l); }); } Ok(()) } let mut should_fsync = false; let mut first_err = None; while let Some(res) = tasks.join_next().await { match res { Ok(Ok((owned, did_hardlink))) => { if did_hardlink { should_fsync = true; } new_layers.push(owned); } // Don't stop the evaluation on errors, so that we get the full set of hardlinked layers to delete. Ok(Err(failed)) => { cancel_eval.cancel(); first_err.get_or_insert(failed); } Err(je) => { cancel_eval.cancel(); first_err.get_or_insert(Error::Prepare(je.into())); } } } if let Some(failed) = first_err { delete_layers(detached, new_layers)?; return Err(failed); } // fsync directory again if we hardlinked something if should_fsync { fsync_timeline_dir(detached, ctx).await; } let prepared = PreparedTimelineDetach { layers: new_layers }; Ok(Progress::Prepared(attempt, prepared)) } async fn start_new_attempt( detached: &Timeline, tenant: &TenantShard, ancestor_timeline_id: TimelineId, ancestor_lsn: Lsn, ) -> Result<Attempt, Error> { let attempt = obtain_exclusive_attempt(detached, tenant, ancestor_timeline_id, ancestor_lsn)?; // insert the block in the index_part.json, if not already there. 
let _dont_care = tenant .gc_block .insert( detached, crate::tenant::remote_timeline_client::index::GcBlockingReason::DetachAncestor, ) .await .map_err(|e| Error::launder(e, Error::Prepare))?; Ok(attempt) } async fn continue_with_blocked_gc( detached: &Timeline, tenant: &TenantShard, ancestor_timeline_id: TimelineId, ancestor_lsn: Lsn, ) -> Result<Attempt, Error> { // FIXME: it would be nice to confirm that there is an in-memory version, since we've just // verified there is a persistent one? obtain_exclusive_attempt(detached, tenant, ancestor_timeline_id, ancestor_lsn) } fn obtain_exclusive_attempt( detached: &Timeline, tenant: &TenantShard, ancestor_timeline_id: TimelineId, ancestor_lsn: Lsn, ) -> Result<Attempt, Error> { use Error::{OtherTimelineDetachOngoing, ShuttingDown}; // ensure we are the only active attempt for this tenant let (guard, barrier) = completion::channel(); { let mut guard = tenant.ongoing_timeline_detach.lock().unwrap(); if let Some((tl, other)) = guard.as_ref() { if !other.is_ready() { return Err(OtherTimelineDetachOngoing(*tl)); } // FIXME: no test enters here } *guard = Some((detached.timeline_id, barrier)); } // ensure the gate is still open let _gate_entered = detached.gate.enter().map_err(|_| ShuttingDown)?; Ok(Attempt { timeline_id: detached.timeline_id, ancestor_timeline_id, ancestor_lsn, _guard: guard, gate_entered: Some(_gate_entered), }) } fn reparented_direct_children( detached: &Arc<Timeline>, tenant: &TenantShard, ) -> Result<HashSet<TimelineId>, Error> { let mut all_direct_children = tenant .timelines .lock() .unwrap() .values() .filter_map(|tl| { let is_direct_child = matches!(tl.ancestor_timeline.as_ref(), Some(ancestor) if Arc::ptr_eq(ancestor, detached)); if is_direct_child { Some(tl.clone()) } else { if let Some(timeline) = tl.ancestor_timeline.as_ref() { assert_ne!(timeline.timeline_id, detached.timeline_id, "we cannot have two timelines with the same timeline_id live"); } None } }) // Collect to avoid lock taking order 
problem with Tenant::timelines and // Timeline::remote_client .collect::<Vec<_>>(); let mut any_shutdown = false; all_direct_children.retain(|tl| match tl.remote_client.initialized_upload_queue() { Ok(accessor) => accessor .latest_uploaded_index_part() .lineage .is_reparented(), Err(_shutdownalike) => { // not 100% a shutdown, but let's bail early not to give inconsistent results in // sharded enviroment. any_shutdown = true; true } }); if any_shutdown { // it could be one or many being deleted; have client retry return Err(Error::ShuttingDown); } Ok(all_direct_children .into_iter() .map(|tl| tl.timeline_id) .collect()) } fn partition_work( ancestor_lsn: Lsn, source: &LayerManager, ) -> Result<(usize, Vec<Layer>, Vec<Layer>), Error> { let mut straddling_branchpoint = vec![]; let mut rest_of_historic = vec![]; let mut later_by_lsn = 0; for desc in source.layer_map()?.iter_historic_layers() { // off by one chances here: // - start is inclusive // - end is exclusive if desc.lsn_range.start > ancestor_lsn { later_by_lsn += 1; continue; } let target = if desc.lsn_range.start <= ancestor_lsn && desc.lsn_range.end > ancestor_lsn && desc.is_delta { // TODO: image layer at Lsn optimization &mut straddling_branchpoint } else { &mut rest_of_historic }; target.push(source.get_from_desc(&desc)); } Ok((later_by_lsn, straddling_branchpoint, rest_of_historic)) } async fn upload_rewritten_layer( end_lsn: Lsn, layer: &Layer, target: &Arc<Timeline>, cancel: &CancellationToken, ctx: &RequestContext, ) -> Result<Option<Layer>, Error> { let copied = copy_lsn_prefix(end_lsn, layer, target, ctx).await?; let Some(copied) = copied else { return Ok(None); }; target .remote_client .upload_layer_file(&copied, cancel) .await .map_err(|e| Error::launder(e, Error::Prepare))?; Ok(Some(copied.into())) } async fn copy_lsn_prefix( end_lsn: Lsn, layer: &Layer, target_timeline: &Arc<Timeline>, ctx: &RequestContext, ) -> Result<Option<ResidentLayer>, Error> { if target_timeline.cancel.is_cancelled() { 
return Err(Error::ShuttingDown); } tracing::debug!(%layer, %end_lsn, "copying lsn prefix"); let mut writer = DeltaLayerWriter::new( target_timeline.conf, target_timeline.timeline_id, target_timeline.tenant_shard_id, layer.layer_desc().key_range.start, layer.layer_desc().lsn_range.start..end_lsn, &target_timeline.gate, target_timeline.cancel.clone(), ctx, ) .await .with_context(|| format!("prepare to copy lsn prefix of ancestors {layer}")) .map_err(Error::Prepare)?; let resident = layer.download_and_keep_resident(ctx).await.map_err(|e| { if e.is_cancelled() { Error::ShuttingDown } else { Error::Prepare(e.into()) } })?; let records = resident .copy_delta_prefix(&mut writer, end_lsn, ctx) .await .with_context(|| format!("copy lsn prefix of ancestors {layer}")) .map_err(Error::Prepare)?; drop(resident); tracing::debug!(%layer, records, "copied records"); if records == 0 { drop(writer); // TODO: we might want to store an empty marker in remote storage for this // layer so that we will not needlessly walk `layer` on repeated attempts. Ok(None) } else { // reuse the key instead of adding more holes between layers by using the real // highest key in the layer. let reused_highest_key = layer.layer_desc().key_range.end; let (desc, path) = writer .finish(reused_highest_key, ctx) .await .map_err(Error::Prepare)?; let copied = Layer::finish_creating(target_timeline.conf, target_timeline, desc, &path) .map_err(Error::Prepare)?; tracing::debug!(%layer, %copied, "new layer produced"); Ok(Some(copied)) } } /// Creates a new Layer instance for the adopted layer, and ensures it is found in the remote /// storage on successful return. without the adopted layer being added to `index_part.json`. 
/// Returns (Layer, did hardlink) async fn remote_copy( adopted: &Layer, adoptee: &Arc<Timeline>, generation: Generation, shard_identity: ShardIdentity, cancel: &CancellationToken, ) -> Result<(Layer, bool), Error> { let mut metadata = adopted.metadata(); debug_assert!(metadata.generation <= generation); metadata.generation = generation; metadata.shard = shard_identity.shard_index(); let conf = adoptee.conf; let file_name = adopted.layer_desc().layer_name(); // We don't want to shut the timeline down during this operation because we do `delete_on_drop` below let _gate = adoptee.gate.enter().map_err(|e| match e { GateError::GateClosed => Error::ShuttingDown, })?; // depending if Layer::keep_resident, do a hardlink let did_hardlink; let owned = if let Some(adopted_resident) = adopted.keep_resident().await { let adopted_path = adopted_resident.local_path(); let adoptee_path = local_layer_path( conf, &adoptee.tenant_shard_id, &adoptee.timeline_id, &file_name, &metadata.generation, ); match std::fs::hard_link(adopted_path, &adoptee_path) { Ok(()) => {} Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => { // In theory we should not get into this situation as we are doing cleanups of the layer file after errors. // However, we don't do cleanups for errors past `prepare`, so there is the slight chance to get to this branch. 
// Double check that the file is orphan (probably from an earlier attempt), then delete it let key = file_name.clone().into(); if adoptee .layers .read(LayerManagerLockHolder::DetachAncestor) .await .contains_key(&key) { // We are supposed to filter out such cases before coming to this function return Err(Error::Prepare(anyhow::anyhow!( "layer file {file_name} already present and inside layer map" ))); } tracing::info!("Deleting orphan layer file to make way for hard linking"); // Delete orphan layer file and try again, to ensure this layer has a well understood source std::fs::remove_file(&adoptee_path) .map_err(|e| Error::launder(e.into(), Error::Prepare))?; std::fs::hard_link(adopted_path, &adoptee_path) .map_err(|e| Error::launder(e.into(), Error::Prepare))?; } Err(e) => { return Err(Error::launder(e.into(), Error::Prepare)); } }; did_hardlink = true; Layer::for_resident(conf, adoptee, adoptee_path, file_name, metadata).drop_eviction_guard() } else { did_hardlink = false; Layer::for_evicted(conf, adoptee, file_name, metadata) }; let layer = match adoptee .remote_client .copy_timeline_layer(adopted, &owned, cancel) .await { Ok(()) => owned, Err(e) => { {
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/span.rs
pageserver/src/tenant/timeline/span.rs
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/offload.rs
pageserver/src/tenant/timeline/offload.rs
//! Offloading of archived timelines: shut the timeline down, delete its local
//! state, and track it only via the tenant's offloaded-timelines map and the
//! tenant manifest in remote storage.

use std::sync::Arc;

use pageserver_api::models::{TenantState, TimelineState};

use super::Timeline;
use super::delete::{DeletionGuard, delete_local_timeline_directory};
use crate::span::debug_assert_current_span_has_tenant_and_timeline_id;
use crate::tenant::remote_timeline_client::ShutdownIfArchivedError;
use crate::tenant::timeline::delete::{TimelineDeleteGuardKind, make_timeline_delete_guard};
use crate::tenant::{
    DeleteTimelineError, OffloadedTimeline, TenantManifestError, TenantShard, TimelineOrOffloaded,
};

/// Failure modes of [`offload_timeline`].
#[derive(thiserror::Error, Debug)]
pub(crate) enum OffloadError {
    /// The tenant is shutting down (or the manifest upload was cancelled).
    #[error("Cancelled")]
    Cancelled,
    /// The timeline (or one of its children) is not archived, so it must not be offloaded.
    #[error("Timeline is not archived")]
    NotArchived,
    /// Another offload or deletion currently holds the delete guard for this timeline.
    #[error("Offload or deletion already in progress")]
    AlreadyInProgress,
    #[error("Unexpected offload error: {0}")]
    Other(anyhow::Error),
}

impl From<TenantManifestError> for OffloadError {
    // Manifest-upload failures map directly onto offload failures: a cancelled
    // upload means the offload was cancelled, anything else is opaque.
    fn from(e: TenantManifestError) -> Self {
        match e {
            TenantManifestError::Cancelled => Self::Cancelled,
            TenantManifestError::RemoteStorage(e) => Self::Other(e),
        }
    }
}

/// Offloads an archived timeline: acquires the timeline's delete guard, shuts the
/// timeline down, deletes its local timeline directory, moves it from the tenant's
/// `timelines` map into `timelines_offloaded`, and finally records the new state in
/// the tenant manifest.
///
/// Errors:
/// - [`OffloadError::NotArchived`] if the timeline is not archived or has
///   unarchived children;
/// - [`OffloadError::AlreadyInProgress`] if a delete/offload already holds the guard;
/// - [`OffloadError::Cancelled`] if the tenant is stopping/broken or the manifest
///   upload was cancelled.
pub(crate) async fn offload_timeline(
    tenant: &TenantShard,
    timeline: &Arc<Timeline>,
) -> Result<(), OffloadError> {
    debug_assert_current_span_has_tenant_and_timeline_id();
    tracing::info!("offloading archived timeline");

    let delete_guard_res = make_timeline_delete_guard(
        tenant,
        timeline.timeline_id,
        TimelineDeleteGuardKind::Offload,
    );
    let (timeline, guard) = match delete_guard_res {
        Ok(timeline_and_guard) => timeline_and_guard,
        Err(DeleteTimelineError::HasChildren(children)) => {
            let is_archived = timeline.is_archived();
            if is_archived == Some(true) {
                tracing::error!("timeline is archived but has non-archived children: {children:?}");
                return Err(OffloadError::NotArchived);
            }
            tracing::info!(
                ?is_archived,
                "timeline is not archived and has unarchived children"
            );
            return Err(OffloadError::NotArchived);
        }
        Err(DeleteTimelineError::AlreadyInProgress(_)) => {
            tracing::info!("timeline offload or deletion already in progress");
            return Err(OffloadError::AlreadyInProgress);
        }
        Err(e) => return Err(OffloadError::Other(anyhow::anyhow!(e))),
    };

    // If the guard resolved to an already-offloaded timeline, there is nothing to do.
    let TimelineOrOffloaded::Timeline(timeline) = timeline else {
        tracing::error!("timeline already offloaded, but given timeline object");
        return Ok(());
    };

    match timeline.remote_client.shutdown_if_archived().await {
        Ok(()) => {}
        Err(ShutdownIfArchivedError::NotInitialized(_)) => {
            // Either the timeline is being deleted, the operation is being retried, or we are shutting down.
            // Don't return cancelled here to keep it idempotent.
        }
        Err(ShutdownIfArchivedError::NotArchived) => return Err(OffloadError::NotArchived),
    }
    timeline.set_state(TimelineState::Stopping);

    // Now that the Timeline is in Stopping state, request all the related tasks to shut down.
    timeline.shutdown(super::ShutdownMode::Reload).await;

    // TODO extend guard mechanism above with method
    // to make deletions possible while offloading is in progress

    let conf = &tenant.conf;
    delete_local_timeline_directory(conf, tenant.tenant_shard_id, &timeline).await;

    let remaining_refcount = remove_timeline_from_tenant(tenant, &timeline, &guard);

    {
        let mut offloaded_timelines = tenant.timelines_offloaded.lock().unwrap();
        if matches!(
            tenant.current_state(),
            TenantState::Stopping { .. } | TenantState::Broken { .. }
        ) {
            // Cancel the operation if the tenant is shutting down. Do this while the
            // timelines_offloaded lock is held to prevent a race with Tenant::shutdown
            // for defusing the lock
            return Err(OffloadError::Cancelled);
        }
        offloaded_timelines.insert(
            timeline.timeline_id,
            Arc::new(
                OffloadedTimeline::from_timeline(&timeline)
                    .expect("we checked above that timeline was ready"),
            ),
        );
    }

    // Last step: mark timeline as offloaded in S3
    // TODO: maybe move this step above, right above deletion of the local timeline directory,
    // then there is no potential race condition where we partially offload a timeline, and
    // at the next restart attach it again.
    // For that to happen, we'd need to make the manifest reflect our *intended* state,
    // not our actual state of offloaded timelines.
    tenant.maybe_upload_tenant_manifest().await?;

    tracing::info!("Timeline offload complete (remaining arc refcount: {remaining_refcount})");

    Ok(())
}

/// It is important that this gets called when DeletionGuard is being held.
/// For more context see comments in [`make_timeline_delete_guard`]
///
/// Returns the strong count of the timeline `Arc`
fn remove_timeline_from_tenant(
    tenant: &TenantShard,
    timeline: &Timeline,
    _: &DeletionGuard, // using it as a witness
) -> usize {
    // Remove the timeline from the map.
    let mut timelines = tenant.timelines.lock().unwrap();
    let children_exist = timelines
        .iter()
        .any(|(_, entry)| entry.get_ancestor_timeline_id() == Some(timeline.timeline_id));
    // XXX this can happen because `branch_timeline` doesn't check `TimelineState::Stopping`.
    // We already deleted the layer files, so it's probably best to panic.
    // (Ideally, above remove_dir_all is atomic so we don't see this timeline after a restart)
    if children_exist {
        panic!("Timeline grew children while we removed layer files");
    }

    let timeline = timelines
        .remove(&timeline.timeline_id)
        .expect("timeline that we were deleting was concurrently removed from 'timelines' map");

    // Clear the compaction queue for this timeline
    tenant
        .scheduled_compaction_tasks
        .lock()
        .unwrap()
        .remove(&timeline.timeline_id);

    Arc::strong_count(&timeline)
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/init.rs
pageserver/src/tenant/timeline/init.rs
//! Timeline startup helpers: scan the local timeline directory and reconcile the
//! findings against the authoritative remote `index_part.json`.

use std::collections::{HashMap, hash_map};
use std::str::FromStr;

use anyhow::Context;
use camino::{Utf8Path, Utf8PathBuf};
use utils::lsn::Lsn;

use crate::is_temporary;
use crate::tenant::ephemeral_file::is_ephemeral_file;
use crate::tenant::remote_timeline_client::index::{IndexPart, LayerFileMetadata};
use crate::tenant::remote_timeline_client::{self};
use crate::tenant::storage_layer::LayerName;

/// Identified files in the timeline directory.
pub(super) enum Discovered {
    /// The only one we care about
    Layer(LayerName, LocalLayerFileMetadata),
    /// Old ephemeral files from previous launches, should be removed
    Ephemeral(String),
    /// Old temporary timeline files, unsure what these really are, should be removed
    Temporary(String),
    /// Temporary on-demand download files, should be removed
    TemporaryDownload(String),
    /// Backup file from previously future layers
    IgnoredBackup(Utf8PathBuf),
    /// Unrecognized, warn about these
    Unknown(String),
}

/// Scans the timeline directory for interesting files.
///
/// Classifies every directory entry into a [`Discovered`] variant: anything that
/// parses as a [`LayerName`] becomes `Layer` (with its local path and size);
/// everything else is bucketed by file-name pattern. Fails on directory-read or
/// metadata I/O errors.
pub(super) fn scan_timeline_dir(path: &Utf8Path) -> anyhow::Result<Vec<Discovered>> {
    let mut ret = Vec::new();

    for direntry in path.read_dir_utf8()? {
        let direntry = direntry?;
        let file_name = direntry.file_name().to_string();

        let discovered = match LayerName::from_str(&file_name) {
            Ok(file_name) => {
                let file_size = direntry.metadata()?.len();
                Discovered::Layer(
                    file_name,
                    LocalLayerFileMetadata::new(direntry.path().to_owned(), file_size),
                )
            }
            Err(_) => {
                if file_name.ends_with(".old") {
                    // ignore these
                    Discovered::IgnoredBackup(direntry.path().to_owned())
                } else if remote_timeline_client::is_temp_download_file(direntry.path()) {
                    Discovered::TemporaryDownload(file_name)
                } else if is_ephemeral_file(&file_name) {
                    Discovered::Ephemeral(file_name)
                } else if is_temporary(direntry.path()) {
                    Discovered::Temporary(file_name)
                } else {
                    Discovered::Unknown(file_name)
                }
            }
        };

        ret.push(discovered);
    }

    Ok(ret)
}

/// Whereas `LayerFileMetadata` describes the metadata we would store in remote storage,
/// this structure extends it with metadata describing the layer's presence in local storage.
#[derive(Clone, Debug)]
pub(super) struct LocalLayerFileMetadata {
    // Size of the layer file on local disk, in bytes.
    pub(super) file_size: u64,
    // Absolute path of the layer file on local disk.
    pub(super) local_path: Utf8PathBuf,
}

impl LocalLayerFileMetadata {
    /// Constructs metadata for a locally-present layer file.
    pub fn new(local_path: Utf8PathBuf, file_size: u64) -> Self {
        Self {
            local_path,
            file_size,
        }
    }
}

/// For a layer that is present in remote metadata, this type describes how to handle
/// it during startup: it is either Resident (and we have some metadata about a local file),
/// or it is Evicted (and we only have remote metadata).
#[derive(Clone, Debug)]
pub(super) enum Decision {
    /// The layer is not present locally.
    Evicted(LayerFileMetadata),
    /// The layer is present locally, and metadata matches: we may hook up this layer to the
    /// existing file in local storage.
    Resident {
        local: LocalLayerFileMetadata,
        remote: LayerFileMetadata,
    },
}

/// A layer needs to be left out of the layer map.
#[derive(Debug)]
pub(super) enum DismissedLayer {
    /// The related layer is in the future compared to disk_consistent_lsn, it must not be loaded.
    Future {
        /// `None` if the layer is only known through [`IndexPart`].
        local: Option<LocalLayerFileMetadata>,
    },
    /// The layer only exists locally.
    ///
    /// In order to make crash safe updates to layer map, we must dismiss layers which are only
    /// found locally or not yet included in the remote `index_part.json`.
    LocalOnly(LocalLayerFileMetadata),

    /// The layer exists in remote storage but the local layer's metadata (e.g. file size)
    /// does not match it
    BadMetadata(LocalLayerFileMetadata),
}

/// Merges local discoveries and remote [`IndexPart`] to a collection of decisions.
///
/// For every layer, returns either a [`Decision`] (Resident when local and remote
/// metadata agree, Evicted when only remote metadata exists) or a
/// [`DismissedLayer`] explaining why the layer is excluded from the layer map.
/// Remote metadata is authoritative: local-only or mismatching files are dismissed,
/// and any layer past `disk_consistent_lsn` is dismissed as `Future`.
pub(super) fn reconcile(
    local_layers: Vec<(LayerName, LocalLayerFileMetadata)>,
    index_part: &IndexPart,
    disk_consistent_lsn: Lsn,
) -> Vec<(LayerName, Result<Decision, DismissedLayer>)> {
    let mut result = Vec::new();

    let mut remote_layers = HashMap::new();

    // Construct Decisions for layers that are found locally, if they're in remote metadata. Otherwise
    // construct DismissedLayers to get rid of them.
    for (layer_name, local_metadata) in local_layers {
        let Some(remote_metadata) = index_part.layer_metadata.get(&layer_name) else {
            result.push((layer_name, Err(DismissedLayer::LocalOnly(local_metadata))));
            continue;
        };

        if remote_metadata.file_size != local_metadata.file_size {
            result.push((layer_name, Err(DismissedLayer::BadMetadata(local_metadata))));
            continue;
        }

        remote_layers.insert(
            layer_name,
            Decision::Resident {
                local: local_metadata,
                remote: remote_metadata.clone(),
            },
        );
    }

    // Construct Decision for layers that were not found locally
    index_part
        .layer_metadata
        .iter()
        .for_each(|(name, metadata)| {
            if let hash_map::Entry::Vacant(entry) = remote_layers.entry(name.clone()) {
                entry.insert(Decision::Evicted(metadata.clone()));
            }
        });

    // For layers that were found in authoritative remote metadata, apply a final check that they are within
    // the disk_consistent_lsn.
    result.extend(remote_layers.into_iter().map(|(name, decision)| {
        if name.is_in_future(disk_consistent_lsn) {
            match decision {
                Decision::Evicted(_remote) => (name, Err(DismissedLayer::Future { local: None })),
                Decision::Resident {
                    local,
                    remote: _remote,
                } => (name, Err(DismissedLayer::Future { local: Some(local) })),
            }
        } else {
            (name, Ok(decision))
        }
    }));

    result
}

/// Removes a single file of the given `kind`, logging before doing so.
pub(super) fn cleanup(path: &Utf8Path, kind: &str) -> anyhow::Result<()> {
    let file_name = path.file_name().expect("must be file path");
    tracing::debug!(kind, ?file_name, "cleaning up");
    std::fs::remove_file(path).with_context(|| format!("failed to remove {kind} at {path}"))
}

/// Removes a local layer file whose size disagrees with the remote metadata
/// (see [`DismissedLayer::BadMetadata`]); the remote copy stays authoritative.
pub(super) fn cleanup_local_file_for_remote(local: &LocalLayerFileMetadata) -> anyhow::Result<()> {
    let local_size = local.file_size;
    let path = &local.local_path;
    let file_name = path.file_name().expect("must be file path");

    tracing::warn!(
        "removing local file {file_name:?} because it has unexpected length {local_size};"
    );

    std::fs::remove_file(path).with_context(|| format!("failed to remove layer at {path}"))
}

/// Removes a layer file that lies beyond `disk_consistent_lsn`
/// (see [`DismissedLayer::Future`]).
pub(super) fn cleanup_future_layer(
    path: &Utf8Path,
    name: &LayerName,
    disk_consistent_lsn: Lsn,
) -> anyhow::Result<()> {
    // future image layers are allowed to be produced always for not yet flushed to disk
    // lsns stored in InMemoryLayer.
    let kind = name.kind();
    tracing::info!("found future {kind} layer {name} disk_consistent_lsn is {disk_consistent_lsn}");
    std::fs::remove_file(path)?;
    Ok(())
}

/// Removes a layer file that exists only locally and not in the remote index
/// (see [`DismissedLayer::LocalOnly`]).
pub(super) fn cleanup_local_only_file(
    name: &LayerName,
    local: &LocalLayerFileMetadata,
) -> anyhow::Result<()> {
    let kind = name.kind();
    tracing::info!(
        "found local-only {kind} layer {name} size {}",
        local.file_size
    );
    std::fs::remove_file(&local.local_path)?;
    Ok(())
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/uninit.rs
pageserver/src/tenant/timeline/uninit.rs
//! Machinery for atomically initializing a new timeline: [`UninitializedTimeline`]
//! cleans up local files on drop, and [`TimelineCreateGuard`] reserves the
//! TimelineId against concurrent creation attempts.

use std::collections::hash_map::Entry;
use std::fs;
use std::future::Future;
use std::sync::Arc;

use anyhow::Context;
use camino::Utf8PathBuf;
use tracing::{error, info, info_span};
use utils::fs_ext;
use utils::id::TimelineId;
use utils::lsn::Lsn;
use utils::sync::gate::GateGuard;

use super::Timeline;
use crate::context::RequestContext;
use crate::import_datadir;
use crate::span::debug_assert_current_span_has_tenant_and_timeline_id;
use crate::tenant::{
    CreateTimelineError, CreateTimelineIdempotency, TenantShard, TimelineOrOffloaded,
};

/// A timeline with some of its files on disk, being initialized.
/// This struct ensures the atomicity of the timeline init: it's either properly created and inserted into pageserver's memory, or
/// its local files are removed. If we crash while this class exists, then the timeline's local
/// state is cleaned up during [`TenantShard::clean_up_timelines`], because the timeline's content isn't in remote storage.
///
/// The caller is responsible for proper timeline data filling before the final init.
#[must_use]
pub struct UninitializedTimeline<'t> {
    pub(crate) owning_tenant: &'t TenantShard,
    timeline_id: TimelineId,
    // The in-memory Timeline plus the guard that reserves its TimelineId;
    // `None` once ownership has been handed off via finish_creation*().
    raw_timeline: Option<(Arc<Timeline>, TimelineCreateGuard)>,
    /// Whether we spawned the inner Timeline's tasks such that we must later shut it down
    /// if aborting the timeline creation
    needs_shutdown: bool,
}

impl<'t> UninitializedTimeline<'t> {
    /// Wraps a (possibly absent) raw timeline and its create guard; no I/O happens here.
    pub(crate) fn new(
        owning_tenant: &'t TenantShard,
        timeline_id: TimelineId,
        raw_timeline: Option<(Arc<Timeline>, TimelineCreateGuard)>,
    ) -> Self {
        Self {
            owning_tenant,
            timeline_id,
            raw_timeline,
            needs_shutdown: false,
        }
    }

    /// When writing data to this timeline during creation, use this wrapper: it will take care of
    /// setup of Timeline tasks required for I/O (flush loop) and making sure they are torn down
    /// later.
    ///
    /// On any error — from the provided closure or from the final freeze-and-flush —
    /// the timeline is aborted (shut down) before the error is returned.
    pub(crate) async fn write<F, Fut>(&mut self, f: F) -> anyhow::Result<()>
    where
        F: FnOnce(Arc<Timeline>) -> Fut,
        Fut: Future<Output = Result<(), CreateTimelineError>>,
    {
        debug_assert_current_span_has_tenant_and_timeline_id();

        // Remember that we did I/O (spawned the flush loop), so that we can check we shut it down on drop
        self.needs_shutdown = true;

        let timeline = self.raw_timeline()?;

        // Spawn flush loop so that the Timeline is ready to accept writes
        timeline.maybe_spawn_flush_loop();

        // Invoke the provided function, which will write some data into the new timeline
        if let Err(e) = f(timeline.clone()).await {
            self.abort().await;
            return Err(e.into());
        }

        // Flush the underlying timeline's ephemeral layers to disk
        if let Err(e) = timeline
            .freeze_and_flush()
            .await
            .context("Failed to flush after timeline creation writes")
        {
            self.abort().await;
            return Err(e);
        }

        Ok(())
    }

    /// Hard-shuts down the inner Timeline (if any) so its files can be cleaned up on drop.
    pub(crate) async fn abort(&self) {
        if let Some((raw_timeline, _)) = self.raw_timeline.as_ref() {
            raw_timeline.shutdown(super::ShutdownMode::Hard).await;
        }
    }

    /// Finish timeline creation: insert it into the Tenant's timelines map
    ///
    /// This function launches the flush loop if not already done.
    ///
    /// The caller is responsible for activating the timeline (function `.activate()`).
    pub(crate) async fn finish_creation(mut self) -> anyhow::Result<Arc<Timeline>> {
        let timeline_id = self.timeline_id;
        let tenant_shard_id = self.owning_tenant.tenant_shard_id;

        if self.raw_timeline.is_none() {
            self.abort().await;

            return Err(anyhow::anyhow!(
                "No timeline for initialization found for {tenant_shard_id}/{timeline_id}"
            ));
        }

        // Check that the caller initialized disk_consistent_lsn
        let new_disk_consistent_lsn = self
            .raw_timeline
            .as_ref()
            .expect("checked above")
            .0
            .get_disk_consistent_lsn();

        if !new_disk_consistent_lsn.is_valid() {
            self.abort().await;

            return Err(anyhow::anyhow!(
                "new timeline {tenant_shard_id}/{timeline_id} has invalid disk_consistent_lsn"
            ));
        }

        let mut timelines = self.owning_tenant.timelines.lock().unwrap();
        match timelines.entry(timeline_id) {
            Entry::Occupied(_) => {
                // Unexpected, bug in the caller. Tenant is responsible for preventing concurrent creation of the same timeline.
                //
                // We do not call Self::abort here. Because we don't cleanly shut down our Timeline, [`Self::drop`] should
                // skip trying to delete the timeline directory too.
                anyhow::bail!(
                    "Found freshly initialized timeline {tenant_shard_id}/{timeline_id} in the tenant map"
                )
            }
            Entry::Vacant(v) => {
                // after taking here should be no fallible operations, because the drop guard will not
                // cleanup after and would block for example the tenant deletion
                let (new_timeline, _create_guard) =
                    self.raw_timeline.take().expect("already checked");

                v.insert(Arc::clone(&new_timeline));

                new_timeline.maybe_spawn_flush_loop();

                Ok(new_timeline)
            }
        }
    }

    /// Takes ownership of the raw timeline and its create guard out of `self`,
    /// leaving the drop-cleanup defused. Panics if there is no raw timeline.
    pub(crate) fn finish_creation_myself(&mut self) -> (Arc<Timeline>, TimelineCreateGuard) {
        self.raw_timeline.take().expect("already checked")
    }

    /// Prepares timeline data by loading it from the basebackup archive.
    pub(crate) async fn import_basebackup_from_tar(
        mut self,
        tenant: Arc<TenantShard>,
        copyin_read: &mut (impl tokio::io::AsyncRead + Send + Sync + Unpin),
        base_lsn: Lsn,
        broker_client: storage_broker::BrokerClientChannel,
        ctx: &RequestContext,
    ) -> anyhow::Result<Arc<Timeline>> {
        self.write(|raw_timeline| async move {
            import_datadir::import_basebackup_from_tar(&raw_timeline, copyin_read, base_lsn, ctx)
                .await
                .context("Failed to import basebackup")
                .map_err(CreateTimelineError::Other)?;

            fail::fail_point!("before-checkpoint-new-timeline", |_| {
                Err(CreateTimelineError::Other(anyhow::anyhow!(
                    "failpoint before-checkpoint-new-timeline"
                )))
            });

            Ok(())
        })
        .await?;

        // All the data has been imported. Insert the Timeline into the tenant's timelines map
        let tl = self.finish_creation().await?;
        tl.activate(tenant, broker_client, None, ctx);
        Ok(tl)
    }

    /// Borrows the inner Timeline, or errors if it has already been taken / was never set.
    pub(crate) fn raw_timeline(&self) -> anyhow::Result<&Arc<Timeline>> {
        Ok(&self
            .raw_timeline
            .as_ref()
            .with_context(|| {
                format!(
                    "No raw timeline {}/{} found",
                    self.owning_tenant.tenant_shard_id, self.timeline_id
                )
            })?
            .0)
    }
}

impl Drop for UninitializedTimeline<'_> {
    // If creation never completed, remove the timeline's local directory — but only
    // when the inner Timeline was either never started or has been fully shut down.
    fn drop(&mut self) {
        if let Some((timeline, create_guard)) = self.raw_timeline.take() {
            let _entered = info_span!("drop_uninitialized_timeline", tenant_id = %self.owning_tenant.tenant_shard_id.tenant_id, shard_id = %self.owning_tenant.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id).entered();
            if self.needs_shutdown && !timeline.gate.close_complete() {
                // This should not happen: caller should call [`Self::abort`] on failures
                tracing::warn!(
                    "Timeline not shut down after initialization failure, cannot clean up files"
                );
            } else {
                // This is unusual, but can happen harmlessly if the pageserver is stopped while
                // creating a timeline.
                info!("Timeline got dropped without initializing, cleaning its files");
                cleanup_timeline_directory(create_guard);
            }
        }
    }
}

/// Best-effort removal of an uninitialized timeline's directory; errors are logged,
/// not propagated. Dropping the guard afterwards releases the TimelineId reservation.
pub(crate) fn cleanup_timeline_directory(create_guard: TimelineCreateGuard) {
    let timeline_path = &create_guard.timeline_path;
    match fs_ext::ignore_absent_files(|| fs::remove_dir_all(timeline_path)) {
        Ok(()) => {
            info!("Timeline dir {timeline_path:?} removed successfully")
        }
        Err(e) => {
            error!("Failed to clean up uninitialized timeline directory {timeline_path:?}: {e:?}")
        }
    }
    // Having cleaned up, we can release this TimelineId in `[TenantShard::timelines_creating]` to allow other
    // timeline creation attempts under this TimelineId to proceed
    drop(create_guard);
}

/// A guard for timeline creations in process: as long as this object exists, the timeline ID
/// is kept in `[TenantShard::timelines_creating]` to exclude concurrent attempts to create the same timeline.
#[must_use]
pub(crate) struct TimelineCreateGuard {
    // Keeps the tenant gate open for the duration of the creation.
    pub(crate) _tenant_gate_guard: GateGuard,
    pub(crate) owning_tenant: Arc<TenantShard>,
    pub(crate) timeline_id: TimelineId,
    pub(crate) timeline_path: Utf8PathBuf,
    pub(crate) idempotency: CreateTimelineIdempotency,
}

/// Errors when acquiring exclusive access to a timeline ID for creation
#[derive(thiserror::Error, Debug)]
pub(crate) enum TimelineExclusionError {
    #[error("Already exists")]
    AlreadyExists {
        existing: TimelineOrOffloaded,
        arg: CreateTimelineIdempotency,
    },
    #[error("Already creating")]
    AlreadyCreating,
    #[error("Shutting down")]
    ShuttingDown,

    // e.g. I/O errors, or some failure deep in postgres initdb
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

impl TimelineCreateGuard {
    /// Reserves `timeline_id` for creation: fails if the timeline already exists
    /// (or is offloaded, unless `allow_offloaded`), is already being created, or the
    /// tenant is shutting down. On success the id is inserted into
    /// `timelines_creating`; the matching `Drop` removes it.
    pub(crate) fn new(
        owning_tenant: &Arc<TenantShard>,
        timeline_id: TimelineId,
        timeline_path: Utf8PathBuf,
        idempotency: CreateTimelineIdempotency,
        allow_offloaded: bool,
    ) -> Result<Self, TimelineExclusionError> {
        let _tenant_gate_guard = owning_tenant
            .gate
            .enter()
            .map_err(|_| TimelineExclusionError::ShuttingDown)?;

        // Lock order: this is the only place we take both locks. During drop() we only
        // lock creating_timelines
        let timelines = owning_tenant.timelines.lock().unwrap();
        let timelines_offloaded = owning_tenant.timelines_offloaded.lock().unwrap();
        let mut creating_timelines: std::sync::MutexGuard<
            '_,
            std::collections::HashSet<TimelineId>,
        > = owning_tenant.timelines_creating.lock().unwrap();

        if let Some(existing) = timelines.get(&timeline_id) {
            return Err(TimelineExclusionError::AlreadyExists {
                existing: TimelineOrOffloaded::Timeline(existing.clone()),
                arg: idempotency,
            });
        }
        if !allow_offloaded {
            if let Some(existing) = timelines_offloaded.get(&timeline_id) {
                return Err(TimelineExclusionError::AlreadyExists {
                    existing: TimelineOrOffloaded::Offloaded(existing.clone()),
                    arg: idempotency,
                });
            }
        }
        if creating_timelines.contains(&timeline_id) {
            return Err(TimelineExclusionError::AlreadyCreating);
        }
        creating_timelines.insert(timeline_id);
        drop(creating_timelines);
        drop(timelines_offloaded);
        drop(timelines);
        Ok(Self {
            _tenant_gate_guard,
            owning_tenant: Arc::clone(owning_tenant),
            timeline_id,
            timeline_path,
            idempotency,
        })
    }
}

impl Drop for TimelineCreateGuard {
    // Releases the TimelineId reservation taken in `new`.
    fn drop(&mut self) {
        self.owning_tenant
            .timelines_creating
            .lock()
            .unwrap()
            .remove(&self.timeline_id);
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/layer_manager.rs
pageserver/src/tenant/timeline/layer_manager.rs
use std::collections::HashMap; use std::mem::ManuallyDrop; use std::ops::{Deref, DerefMut}; use std::sync::Arc; use std::time::Duration; use anyhow::{Context, bail, ensure}; use itertools::Itertools; use pageserver_api::keyspace::KeySpace; use pageserver_api::shard::TenantShardId; use tokio_util::sync::CancellationToken; use tracing::trace; use utils::id::TimelineId; use utils::lsn::{AtomicLsn, Lsn}; use super::{LayerFringe, ReadableLayer, TimelineWriterState}; use crate::config::PageServerConf; use crate::context::RequestContext; use crate::metrics::TimelineMetrics; use crate::tenant::layer_map::{BatchedUpdates, LayerMap, SearchResult}; use crate::tenant::storage_layer::{ AsLayerDesc, InMemoryLayer, Layer, LayerVisibilityHint, PersistentLayerDesc, PersistentLayerKey, ReadableLayerWeak, ResidentLayer, }; /// Warn if the lock was held for longer than this threshold. /// It's very generous and we should bring this value down over time. const LAYER_MANAGER_LOCK_WARN_THRESHOLD: Duration = Duration::from_secs(5); const LAYER_MANAGER_LOCK_READ_WARN_THRESHOLD: Duration = Duration::from_secs(30); /// Describes the operation that is holding the layer manager lock #[derive(Debug, Clone, Copy, strum_macros::Display)] #[strum(serialize_all = "kebab_case")] pub(crate) enum LayerManagerLockHolder { GetLayerMapInfo, GenerateHeatmap, GetPage, Init, LoadLayerMap, GetLayerForWrite, TryFreezeLayer, FlushFrozenLayer, FlushLoop, Compaction, GarbageCollection, Shutdown, ImportPgData, DetachAncestor, Eviction, ComputeImageConsistentLsn, #[cfg(test)] Testing, } /// Wrapper for the layer manager that tracks the amount of time during which /// it was held under read or write lock #[derive(Default)] pub(crate) struct LockedLayerManager { locked: tokio::sync::RwLock<LayerManager>, } pub(crate) struct LayerManagerReadGuard<'a> { guard: ManuallyDrop<tokio::sync::RwLockReadGuard<'a, LayerManager>>, acquired_at: std::time::Instant, holder: LayerManagerLockHolder, } pub(crate) struct 
LayerManagerWriteGuard<'a> { guard: ManuallyDrop<tokio::sync::RwLockWriteGuard<'a, LayerManager>>, acquired_at: std::time::Instant, holder: LayerManagerLockHolder, } impl Drop for LayerManagerReadGuard<'_> { fn drop(&mut self) { // Drop the lock first, before potentially warning if it was held for too long. // SAFETY: ManuallyDrop in Drop implementation unsafe { ManuallyDrop::drop(&mut self.guard) }; let held_for = self.acquired_at.elapsed(); if held_for >= LAYER_MANAGER_LOCK_READ_WARN_THRESHOLD { tracing::warn!( holder=%self.holder, "Layer manager read lock held for {}s", held_for.as_secs_f64(), ); } } } impl Drop for LayerManagerWriteGuard<'_> { fn drop(&mut self) { // Drop the lock first, before potentially warning if it was held for too long. // SAFETY: ManuallyDrop in Drop implementation unsafe { ManuallyDrop::drop(&mut self.guard) }; let held_for = self.acquired_at.elapsed(); if held_for >= LAYER_MANAGER_LOCK_WARN_THRESHOLD { tracing::warn!( holder=%self.holder, "Layer manager write lock held for {}s", held_for.as_secs_f64(), ); } } } impl Deref for LayerManagerReadGuard<'_> { type Target = LayerManager; fn deref(&self) -> &Self::Target { self.guard.deref() } } impl Deref for LayerManagerWriteGuard<'_> { type Target = LayerManager; fn deref(&self) -> &Self::Target { self.guard.deref() } } impl DerefMut for LayerManagerWriteGuard<'_> { fn deref_mut(&mut self) -> &mut Self::Target { self.guard.deref_mut() } } impl LockedLayerManager { pub(crate) async fn read(&self, holder: LayerManagerLockHolder) -> LayerManagerReadGuard { let guard = ManuallyDrop::new(self.locked.read().await); LayerManagerReadGuard { guard, acquired_at: std::time::Instant::now(), holder, } } pub(crate) fn try_read( &self, holder: LayerManagerLockHolder, ) -> Result<LayerManagerReadGuard, tokio::sync::TryLockError> { let guard = ManuallyDrop::new(self.locked.try_read()?); Ok(LayerManagerReadGuard { guard, acquired_at: std::time::Instant::now(), holder, }) } pub(crate) async fn write(&self, 
holder: LayerManagerLockHolder) -> LayerManagerWriteGuard { let guard = ManuallyDrop::new(self.locked.write().await); LayerManagerWriteGuard { guard, acquired_at: std::time::Instant::now(), holder, } } pub(crate) fn try_write( &self, holder: LayerManagerLockHolder, ) -> Result<LayerManagerWriteGuard, tokio::sync::TryLockError> { let guard = ManuallyDrop::new(self.locked.try_write()?); Ok(LayerManagerWriteGuard { guard, acquired_at: std::time::Instant::now(), holder, }) } } /// Provides semantic APIs to manipulate the layer map. pub(crate) enum LayerManager { /// Open as in not shutdown layer manager; we still have in-memory layers and we can manipulate /// the layers. Open(OpenLayerManager), /// Shutdown layer manager where there are no more in-memory layers and persistent layers are /// read-only. Closed { layers: HashMap<PersistentLayerKey, Layer>, }, } impl Default for LayerManager { fn default() -> Self { LayerManager::Open(OpenLayerManager::default()) } } impl LayerManager { fn upgrade(&self, weak: ReadableLayerWeak) -> ReadableLayer { match weak { ReadableLayerWeak::PersistentLayer(desc) => { ReadableLayer::PersistentLayer(self.get_from_desc(&desc)) } ReadableLayerWeak::InMemoryLayer(desc) => { let inmem = self .layer_map() .expect("no concurrent shutdown") .in_memory_layer(&desc); ReadableLayer::InMemoryLayer(inmem) } } } pub(crate) fn get_from_key(&self, key: &PersistentLayerKey) -> Layer { // The assumption for the `expect()` is that all code maintains the following invariant: // A layer's descriptor is present in the LayerMap => the LayerFileManager contains a layer for the descriptor. 
self.try_get_from_key(key) .with_context(|| format!("get layer from key: {key}")) .expect("not found") .clone() } pub(crate) fn try_get_from_key(&self, key: &PersistentLayerKey) -> Option<&Layer> { self.layers().get(key) } pub(crate) fn get_from_desc(&self, desc: &PersistentLayerDesc) -> Layer { self.get_from_key(&desc.key()) } /// Get an immutable reference to the layer map. /// /// We expect users only to be able to get an immutable layer map. If users want to make modifications, /// they should use the below semantic APIs. This design makes us step closer to immutable storage state. pub(crate) fn layer_map(&self) -> Result<&LayerMap, Shutdown> { use LayerManager::*; match self { Open(OpenLayerManager { layer_map, .. }) => Ok(layer_map), Closed { .. } => Err(Shutdown), } } pub(crate) fn open_mut(&mut self) -> Result<&mut OpenLayerManager, Shutdown> { use LayerManager::*; match self { Open(open) => Ok(open), Closed { .. } => Err(Shutdown), } } /// LayerManager shutdown. The in-memory layers do cleanup on drop, so we must drop them in /// order to allow shutdown to complete. /// /// If there was a want to flush in-memory layers, it must have happened earlier. pub(crate) fn shutdown(&mut self, writer_state: &mut Option<TimelineWriterState>) { use LayerManager::*; match self { Open(OpenLayerManager { layer_map, layer_fmgr: LayerFileManager(hashmap), }) => { // NB: no need to decrement layer metrics; metrics are removed on timeline shutdown. let open = layer_map.open_layer.take(); let frozen = layer_map.frozen_layers.len(); let taken_writer_state = writer_state.take(); tracing::info!(open = open.is_some(), frozen, "dropped inmemory layers"); let layers = std::mem::take(hashmap); *self = Closed { layers }; assert_eq!(open.is_some(), taken_writer_state.is_some()); } Closed { .. 
} => { tracing::debug!("ignoring multiple shutdowns on layer manager") } } } /// Sum up the historic layer sizes pub(crate) fn layer_size_sum(&self) -> u64 { self.layers() .values() .map(|l| l.layer_desc().file_size) .sum() } pub(crate) fn likely_resident_layers(&self) -> impl Iterator<Item = &'_ Layer> + '_ { self.layers().values().filter(|l| l.is_likely_resident()) } pub(crate) fn visible_layers(&self) -> impl Iterator<Item = &'_ Layer> + '_ { self.layers() .values() .filter(|l| l.visibility() == LayerVisibilityHint::Visible) } pub(crate) fn contains(&self, layer: &Layer) -> bool { self.contains_key(&layer.layer_desc().key()) } pub(crate) fn contains_key(&self, key: &PersistentLayerKey) -> bool { self.layers().contains_key(key) } pub(crate) fn all_persistent_layers(&self) -> Vec<PersistentLayerKey> { self.layers().keys().cloned().collect_vec() } /// Update the [`LayerFringe`] of a read request /// /// Take a key space at a given LSN and query the layer map below each range /// of the key space to find the next layers to visit. pub(crate) fn update_search_fringe( &self, keyspace: &KeySpace, cont_lsn: Lsn, fringe: &mut LayerFringe, ) -> Result<(), Shutdown> { let map = self.layer_map()?; for range in keyspace.ranges.iter() { let results = map.range_search(range.clone(), cont_lsn); results .found .into_iter() .map(|(SearchResult { layer, lsn_floor }, keyspace_accum)| { ( self.upgrade(layer), keyspace_accum.to_keyspace(), lsn_floor..cont_lsn, ) }) .for_each(|(layer, keyspace, lsn_range)| fringe.update(layer, keyspace, lsn_range)); } Ok(()) } fn layers(&self) -> &HashMap<PersistentLayerKey, Layer> { use LayerManager::*; match self { Open(OpenLayerManager { layer_fmgr, .. 
}) => &layer_fmgr.0, Closed { layers } => layers, } } } #[derive(Default)] pub(crate) struct OpenLayerManager { layer_map: LayerMap, layer_fmgr: LayerFileManager<Layer>, } impl std::fmt::Debug for OpenLayerManager { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("OpenLayerManager") .field("layer_count", &self.layer_fmgr.0.len()) .finish() } } #[derive(Debug, thiserror::Error)] #[error("layer manager has been shutdown")] pub(crate) struct Shutdown; impl OpenLayerManager { /// Called from `load_layer_map`. Initialize the layer manager with: /// 1. all on-disk layers /// 2. next open layer (with disk disk_consistent_lsn LSN) pub(crate) fn initialize_local_layers(&mut self, layers: Vec<Layer>, next_open_layer_at: Lsn) { let mut updates = self.layer_map.batch_update(); for layer in layers { Self::insert_historic_layer(layer, &mut updates, &mut self.layer_fmgr); } updates.flush(); self.layer_map.next_open_layer_at = Some(next_open_layer_at); } /// Initialize when creating a new timeline, called in `init_empty_layer_map`. pub(crate) fn initialize_empty(&mut self, next_open_layer_at: Lsn) { self.layer_map.next_open_layer_at = Some(next_open_layer_at); } /// Open a new writable layer to append data if there is no open layer, otherwise return the /// current open layer, called within `get_layer_for_write`. #[allow(clippy::too_many_arguments)] pub(crate) async fn get_layer_for_write( &mut self, lsn: Lsn, conf: &'static PageServerConf, timeline_id: TimelineId, tenant_shard_id: TenantShardId, gate: &utils::sync::gate::Gate, cancel: &CancellationToken, ctx: &RequestContext, ) -> anyhow::Result<Arc<InMemoryLayer>> { ensure!(lsn.is_aligned()); // Do we have a layer open for writing already? 
let layer = if let Some(open_layer) = &self.layer_map.open_layer { if open_layer.get_lsn_range().start > lsn { bail!( "unexpected open layer in the future: open layers starts at {}, write lsn {}", open_layer.get_lsn_range().start, lsn ); } Arc::clone(open_layer) } else { // No writeable layer yet. Create one. let start_lsn = self .layer_map .next_open_layer_at .context("No next open layer found")?; trace!( "creating in-memory layer at {}/{} for record at {}", timeline_id, start_lsn, lsn ); let new_layer = InMemoryLayer::create( conf, timeline_id, tenant_shard_id, start_lsn, gate, cancel, ctx, ) .await?; let layer = Arc::new(new_layer); self.layer_map.open_layer = Some(layer.clone()); self.layer_map.next_open_layer_at = None; layer }; Ok(layer) } /// Tries to freeze an open layer and also manages clearing the TimelineWriterState. /// /// Returns true if anything was frozen. pub(super) async fn try_freeze_in_memory_layer( &mut self, lsn: Lsn, last_freeze_at: &AtomicLsn, write_lock: &mut tokio::sync::MutexGuard<'_, Option<TimelineWriterState>>, metrics: &TimelineMetrics, ) -> bool { let Lsn(last_record_lsn) = lsn; let end_lsn = Lsn(last_record_lsn + 1); let froze = if let Some(open_layer) = &self.layer_map.open_layer { let open_layer_rc = Arc::clone(open_layer); open_layer.freeze(end_lsn).await; // Increment the frozen layer metrics. This is decremented in `finish_flush_l0_layer()`. // TODO: It would be nicer to do this via `InMemoryLayer::drop()`, but it requires a // reference to the timeline metrics. Other methods use a metrics borrow as well. metrics.inc_frozen_layer(open_layer); // The layer is no longer open, update the layer map to reflect this. // We will replace it with on-disk historics below. 
self.layer_map.frozen_layers.push_back(open_layer_rc); self.layer_map.open_layer = None; self.layer_map.next_open_layer_at = Some(end_lsn); true } else { false }; // Even if there was no layer to freeze, advance last_freeze_at to last_record_lsn+1: this // accounts for regions in the LSN range where we might have ingested no data due to sharding. last_freeze_at.store(end_lsn); // the writer state must no longer have a reference to the frozen layer let taken = write_lock.take(); assert_eq!( froze, taken.is_some(), "should only had frozen a layer when TimelineWriterState existed" ); froze } /// Add image layers to the layer map, called from [`super::Timeline::create_image_layers`]. pub(crate) fn track_new_image_layers( &mut self, image_layers: &[ResidentLayer], metrics: &TimelineMetrics, ) { let mut updates = self.layer_map.batch_update(); for layer in image_layers { Self::insert_historic_layer(layer.as_ref().clone(), &mut updates, &mut self.layer_fmgr); // record these here instead of Layer::finish_creating because otherwise partial // failure with create_image_layers would balloon up the physical size gauge. downside // is that all layers need to be created before metrics are updated. metrics.record_new_file_metrics(layer.layer_desc().file_size); } updates.flush(); } /// Flush a frozen layer and add the written delta layer to the layer map. pub(crate) fn finish_flush_l0_layer( &mut self, delta_layer: Option<&ResidentLayer>, frozen_layer_for_check: &Arc<InMemoryLayer>, metrics: &TimelineMetrics, ) { let inmem = self .layer_map .frozen_layers .pop_front() .expect("there must be a inmem layer to flush"); metrics.dec_frozen_layer(&inmem); // Only one task may call this function at a time (for this // timeline). If two tasks tried to flush the same frozen // layer to disk at the same time, that would not work. 
assert_eq!(Arc::as_ptr(&inmem), Arc::as_ptr(frozen_layer_for_check)); if let Some(l) = delta_layer { let mut updates = self.layer_map.batch_update(); Self::insert_historic_layer(l.as_ref().clone(), &mut updates, &mut self.layer_fmgr); metrics.record_new_file_metrics(l.layer_desc().file_size); updates.flush(); } } /// Called when compaction is completed. pub(crate) fn finish_compact_l0( &mut self, compact_from: &[Layer], compact_to: &[ResidentLayer], metrics: &TimelineMetrics, ) { let mut updates = self.layer_map.batch_update(); for l in compact_to { Self::insert_historic_layer(l.as_ref().clone(), &mut updates, &mut self.layer_fmgr); metrics.record_new_file_metrics(l.layer_desc().file_size); } for l in compact_from { Self::delete_historic_layer(l, &mut updates, &mut self.layer_fmgr); } updates.flush(); } /// Called when a GC-compaction is completed. pub(crate) fn finish_gc_compaction( &mut self, compact_from: &[Layer], compact_to: &[ResidentLayer], metrics: &TimelineMetrics, ) { // gc-compaction could contain layer rewrites. We need to delete the old layers and insert the new ones. // Match the old layers with the new layers let mut add_layers = HashMap::new(); let mut rewrite_layers = HashMap::new(); let mut drop_layers = HashMap::new(); for layer in compact_from { drop_layers.insert(layer.layer_desc().key(), layer.clone()); } for layer in compact_to { if let Some(old_layer) = drop_layers.remove(&layer.layer_desc().key()) { rewrite_layers.insert(layer.layer_desc().key(), (old_layer.clone(), layer.clone())); } else { add_layers.insert(layer.layer_desc().key(), layer.clone()); } } let add_layers = add_layers.values().cloned().collect::<Vec<_>>(); let drop_layers = drop_layers.values().cloned().collect::<Vec<_>>(); let rewrite_layers = rewrite_layers.values().cloned().collect::<Vec<_>>(); self.rewrite_layers_inner(&rewrite_layers, &drop_layers, &add_layers, metrics); } /// Called post-compaction when some previous generation image layers were trimmed. 
    pub fn rewrite_layers(
        &mut self,
        rewrite_layers: &[(Layer, ResidentLayer)],
        drop_layers: &[Layer],
        metrics: &TimelineMetrics,
    ) {
        self.rewrite_layers_inner(rewrite_layers, drop_layers, &[], metrics);
    }

    /// Apply a batch of layer-map changes as one atomic batched update:
    /// replace each `(old, new)` pair in `rewrite_layers` (same key/LSN range,
    /// different on-disk file), remove `drop_layers`, and insert `add_layers`.
    ///
    /// Physical-size metrics are recorded for every newly inserted file.
    fn rewrite_layers_inner(
        &mut self,
        rewrite_layers: &[(Layer, ResidentLayer)],
        drop_layers: &[Layer],
        add_layers: &[ResidentLayer],
        metrics: &TimelineMetrics,
    ) {
        // All mutations below are collected into a single batched update and
        // only become visible at the final flush().
        let mut updates = self.layer_map.batch_update();
        for (old_layer, new_layer) in rewrite_layers {
            // A rewrite must cover exactly the same key and LSN ranges as the
            // layer it replaces; debug-only because callers uphold this.
            debug_assert_eq!(
                old_layer.layer_desc().key_range,
                new_layer.layer_desc().key_range
            );
            debug_assert_eq!(
                old_layer.layer_desc().lsn_range,
                new_layer.layer_desc().lsn_range
            );

            // Transfer visibility hint from old to new layer, since the new layer covers the same key space. This is not guaranteed to
            // be accurate (as the new layer may cover a different subset of the key range), but is a sensible default, and prevents
            // always marking rewritten layers as visible.
            new_layer.as_ref().set_visibility(old_layer.visibility());

            // Safety: we may never rewrite the same file in-place. Callers are responsible
            // for ensuring that they only rewrite layers after something changes the path,
            // such as an increment in the generation number.
            assert_ne!(old_layer.local_path(), new_layer.local_path());

            // Order matters within the batch: drop the old entry before
            // inserting its replacement under the same key.
            Self::delete_historic_layer(old_layer, &mut updates, &mut self.layer_fmgr);

            Self::insert_historic_layer(
                new_layer.as_ref().clone(),
                &mut updates,
                &mut self.layer_fmgr,
            );

            metrics.record_new_file_metrics(new_layer.layer_desc().file_size);
        }
        for l in drop_layers {
            Self::delete_historic_layer(l, &mut updates, &mut self.layer_fmgr);
        }
        for l in add_layers {
            Self::insert_historic_layer(l.as_ref().clone(), &mut updates, &mut self.layer_fmgr);
            metrics.record_new_file_metrics(l.layer_desc().file_size);
        }
        updates.flush();
    }

    /// Called when garbage collect has selected the layers to be removed.
    pub(crate) fn finish_gc_timeline(&mut self, gc_layers: &[Layer]) {
        // Remove every GC-selected layer in one batched layer-map update.
        let mut updates = self.layer_map.batch_update();
        for doomed_layer in gc_layers {
            Self::delete_historic_layer(doomed_layer, &mut updates, &mut self.layer_fmgr);
        }
        updates.flush()
    }

    /// Test-only: insert a layer directly, bypassing the normal flush/compaction paths.
    #[cfg(test)]
    pub(crate) fn force_insert_layer(&mut self, layer: ResidentLayer) {
        let mut updates = self.layer_map.batch_update();
        Self::insert_historic_layer(layer.as_ref().clone(), &mut updates, &mut self.layer_fmgr);
        updates.flush()
    }

    /// Helper function to insert a layer into the layer map and file manager.
    fn insert_historic_layer(
        layer: Layer,
        updates: &mut BatchedUpdates<'_>,
        mapping: &mut LayerFileManager<Layer>,
    ) {
        updates.insert_historic(layer.layer_desc().clone());
        mapping.insert(layer);
    }

    /// Removes the layer from local FS (if present) and from memory.
    /// Remote storage is not affected by this operation.
    fn delete_historic_layer(
        // we cannot remove layers otherwise, since gc and compaction will race
        layer: &Layer,
        updates: &mut BatchedUpdates<'_>,
        mapping: &mut LayerFileManager<Layer>,
    ) {
        let desc = layer.layer_desc();

        // TODO Removing from the bottom of the layer map is expensive.
        //      Maybe instead discard all layer map historic versions that
        //      won't be needed for page reconstruction for this timeline,
        //      and mark what we can't delete yet as deleted from the layer
        //      map index without actually rebuilding the index.
        updates.remove_historic(desc);
        mapping.remove(layer);
        // `delete_on_drop` presumably defers local-file removal until the last
        // reference to the layer goes away — TODO confirm against Layer's impl.
        layer.delete_on_drop();
    }

    /// Test-only: place an in-memory layer directly into the layer map.
    /// Frozen layers must be appended in LSN order (asserted below).
    #[cfg(test)]
    pub(crate) fn force_insert_in_memory_layer(&mut self, layer: Arc<InMemoryLayer>) {
        use pageserver_api::models::InMemoryLayerInfo;

        match layer.info() {
            InMemoryLayerInfo::Open { .. } => {
                // Only one open layer may exist at a time.
                assert!(self.layer_map.open_layer.is_none());
                self.layer_map.open_layer = Some(layer);
            }
            InMemoryLayerInfo::Frozen { lsn_start, .. } => {
                if let Some(last) = self.layer_map.frozen_layers.back() {
                    assert!(last.get_lsn_range().end <= lsn_start);
                }

                self.layer_map.frozen_layers.push_back(layer);
            }
        }
    }
}

/// Map of persistent-layer keys to layer objects; the in-memory index of which
/// layer files this timeline knows about.
pub(crate) struct LayerFileManager<T>(HashMap<PersistentLayerKey, T>);

impl<T> Default for LayerFileManager<T> {
    fn default() -> Self {
        Self(HashMap::default())
    }
}

impl<T: AsLayerDesc + Clone> LayerFileManager<T> {
    /// Insert a layer under its descriptor key.
    /// Overwriting an existing entry is a bug; panics in debug builds only.
    pub(crate) fn insert(&mut self, layer: T) {
        let present = self.0.insert(layer.layer_desc().key(), layer.clone());
        if present.is_some() && cfg!(debug_assertions) {
            panic!("overwriting a layer: {:?}", layer.layer_desc())
        }
    }

    /// Remove a layer by its descriptor key.
    /// Removing a missing entry is a bug; panics in debug builds only.
    pub(crate) fn remove(&mut self, layer: &T) {
        let present = self.0.remove(&layer.layer_desc().key());
        if present.is_none() && cfg!(debug_assertions) {
            panic!(
                "removing layer that is not present in layer mapping: {:?}",
                layer.layer_desc()
            )
        }
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/eviction_task.rs
pageserver/src/tenant/timeline/eviction_task.rs
//! The per-timeline layer eviction task, which evicts data which has not been accessed for more //! than a given threshold. //! //! Data includes all kinds of caches, namely: //! - (in-memory layers) //! - on-demand downloaded layer files on disk //! - (cached layer file pages) //! - derived data from layer file contents, namely: //! - initial logical size //! - partitioning //! - (other currently missing unknowns) //! //! Items with parentheses are not (yet) touched by this task. //! //! See write-up on restart on-demand download spike: <https://gist.github.com/problame/2265bf7b8dc398be834abfead36c76b5> use std::collections::HashMap; use std::ops::ControlFlow; use std::sync::Arc; use std::time::{Duration, SystemTime}; use pageserver_api::models::{EvictionPolicy, EvictionPolicyLayerAccessThreshold}; use tokio::time::Instant; use tokio_util::sync::CancellationToken; use tracing::{Instrument, debug, info, info_span, instrument, warn}; use utils::completion; use utils::sync::gate::GateGuard; use super::Timeline; use crate::context::{DownloadBehavior, RequestContext}; use crate::pgdatadir_mapping::CollectKeySpaceError; use crate::task_mgr::{self, BACKGROUND_RUNTIME, TaskKind}; use crate::tenant::size::CalculateSyntheticSizeError; use crate::tenant::storage_layer::LayerVisibilityHint; use crate::tenant::tasks::{BackgroundLoopKind, BackgroundLoopSemaphorePermit, sleep_random}; use crate::tenant::timeline::EvictionError; use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; use crate::tenant::{LogicalSizeCalculationCause, TenantShard}; #[derive(Default)] pub struct EvictionTaskTimelineState { last_layer_access_imitation: Option<tokio::time::Instant>, } #[derive(Default)] pub struct EvictionTaskTenantState { last_layer_access_imitation: Option<Instant>, } impl Timeline { pub(super) fn launch_eviction_task( self: &Arc<Self>, parent: Arc<TenantShard>, background_tasks_can_start: Option<&completion::Barrier>, ) { let self_clone = Arc::clone(self); let 
background_tasks_can_start = background_tasks_can_start.cloned(); task_mgr::spawn( BACKGROUND_RUNTIME.handle(), TaskKind::Eviction, self.tenant_shard_id, Some(self.timeline_id), &format!( "layer eviction for {}/{}", self.tenant_shard_id, self.timeline_id ), async move { tokio::select! { _ = self_clone.cancel.cancelled() => { return Ok(()); } _ = completion::Barrier::maybe_wait(background_tasks_can_start) => {} }; self_clone.eviction_task(parent).await; Ok(()) }, ); } #[instrument(skip_all, fields(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id))] async fn eviction_task(self: Arc<Self>, tenant: Arc<TenantShard>) { // acquire the gate guard only once within a useful span let Ok(guard) = self.gate.enter() else { return; }; { let policy = self.get_eviction_policy(); let period = match policy { EvictionPolicy::LayerAccessThreshold(lat) => lat.period, EvictionPolicy::OnlyImitiate(lat) => lat.period, EvictionPolicy::NoEviction => Duration::from_secs(10), }; if sleep_random(period, &self.cancel).await.is_err() { return; } } let ctx = RequestContext::new(TaskKind::Eviction, DownloadBehavior::Warn) .with_scope_timeline(&self); loop { let policy = self.get_eviction_policy(); let cf = self .eviction_iteration(&tenant, &policy, &self.cancel, &guard, &ctx) .await; match cf { ControlFlow::Break(()) => break, ControlFlow::Continue(sleep_until) => { if tokio::time::timeout_at(sleep_until, self.cancel.cancelled()) .await .is_ok() { break; } } } } } #[instrument(skip_all, fields(policy_kind = policy.discriminant_str()))] async fn eviction_iteration( self: &Arc<Self>, tenant: &TenantShard, policy: &EvictionPolicy, cancel: &CancellationToken, gate: &GateGuard, ctx: &RequestContext, ) -> ControlFlow<(), Instant> { debug!("eviction iteration: {policy:?}"); let start = Instant::now(); let (period, threshold) = match policy { EvictionPolicy::NoEviction => { // check again in 10 seconds; XXX config watch mechanism 
return ControlFlow::Continue(Instant::now() + Duration::from_secs(10)); } EvictionPolicy::LayerAccessThreshold(p) => { match self .eviction_iteration_threshold(tenant, p, cancel, gate, ctx) .await { ControlFlow::Break(()) => return ControlFlow::Break(()), ControlFlow::Continue(()) => (), } (p.period, p.threshold) } EvictionPolicy::OnlyImitiate(p) => { if self .imitiate_only(tenant, p, cancel, gate, ctx) .await .is_break() { return ControlFlow::Break(()); } (p.period, p.threshold) } }; let elapsed = start.elapsed(); crate::tenant::tasks::warn_when_period_overrun( elapsed, period, BackgroundLoopKind::Eviction, ); // FIXME: if we were to mix policies on a pageserver, we would have no way to sense this. I // don't think that is a relevant fear however, and regardless the imitation should be the // most costly part. crate::metrics::EVICTION_ITERATION_DURATION .get_metric_with_label_values(&[ &format!("{}", period.as_secs()), &format!("{}", threshold.as_secs()), ]) .unwrap() .observe(elapsed.as_secs_f64()); ControlFlow::Continue(start + period) } async fn eviction_iteration_threshold( self: &Arc<Self>, tenant: &TenantShard, p: &EvictionPolicyLayerAccessThreshold, cancel: &CancellationToken, gate: &GateGuard, ctx: &RequestContext, ) -> ControlFlow<()> { let now = SystemTime::now(); let permit = self.acquire_imitation_permit(cancel, ctx).await?; self.imitate_layer_accesses(tenant, p, cancel, gate, permit, ctx) .await?; #[derive(Debug, Default)] struct EvictionStats { candidates: usize, evicted: usize, errors: usize, not_evictable: usize, timeouts: usize, #[allow(dead_code)] skipped_for_shutdown: usize, } let mut stats = EvictionStats::default(); // Gather layers for eviction. // NB: all the checks can be invalidated as soon as we release the layer map lock. // We don't want to hold the layer map lock during eviction. // So, we just need to deal with this. 
let mut js = tokio::task::JoinSet::new(); { let guard = self.layers.read(LayerManagerLockHolder::Eviction).await; guard .likely_resident_layers() .filter(|layer| { let last_activity_ts = layer.latest_activity(); let no_activity_for = match now.duration_since(last_activity_ts) { Ok(d) => d, Err(_e) => { // We reach here if `now` < `last_activity_ts`, which can legitimately // happen if there is an access between us getting `now`, and us getting // the access stats from the layer. // // The other reason why it can happen is system clock skew because // SystemTime::now() is not monotonic, so, even if there is no access // to the layer after we get `now` at the beginning of this function, // it could be that `now` < `last_activity_ts`. // // To distinguish the cases, we would need to record `Instant`s in the // access stats (i.e., monotonic timestamps), but then, the timestamps // values in the access stats would need to be `Instant`'s, and hence // they would be meaningless outside of the pageserver process. // At the time of writing, the trade-off is that access stats are more // valuable than detecting clock skew. return false; } }; match layer.visibility() { LayerVisibilityHint::Visible => { // Usual case: a visible layer might be read any time, and we will keep it // resident until it hits our configured TTL threshold. no_activity_for > p.threshold } LayerVisibilityHint::Covered => { // Covered layers: this is probably a layer that was recently covered by // an image layer during compaction. 
We don't evict it immediately, but // it doesn't stay resident for the full `threshold`: we just keep it // for a shorter time in case // - it is used for Timestamp->LSN lookups // - a new branch is created in recent history which will read this layer no_activity_for > p.period } } }) .cloned() .for_each(|layer| { js.spawn(async move { layer .evict_and_wait(std::time::Duration::from_secs(5)) .await }); stats.candidates += 1; }); }; let join_all = async move { while let Some(next) = js.join_next().await { match next { Ok(Ok(())) => stats.evicted += 1, Ok(Err(EvictionError::NotFound | EvictionError::Downloaded)) => { stats.not_evictable += 1; } Ok(Err(EvictionError::Timeout)) => { stats.timeouts += 1; } Err(je) if je.is_cancelled() => unreachable!("not used"), Err(je) if je.is_panic() => { /* already logged */ stats.errors += 1; } Err(je) => tracing::error!("unknown JoinError: {je:?}"), } } stats }; tokio::select! { stats = join_all => { if stats.candidates == stats.not_evictable { debug!(stats=?stats, "eviction iteration complete"); } else if stats.errors > 0 || stats.not_evictable > 0 || stats.timeouts > 0 { // reminder: timeouts are not eviction cancellations warn!(stats=?stats, "eviction iteration complete"); } else { info!(stats=?stats, "eviction iteration complete"); } } _ = cancel.cancelled() => { // just drop the joinset to "abort" } } ControlFlow::Continue(()) } /// Like `eviction_iteration_threshold`, but without any eviction. Eviction will be done by /// disk usage based eviction task. 
    async fn imitiate_only(
        self: &Arc<Self>,
        tenant: &TenantShard,
        p: &EvictionPolicyLayerAccessThreshold,
        cancel: &CancellationToken,
        gate: &GateGuard,
        ctx: &RequestContext,
    ) -> ControlFlow<()> {
        // `?` on ControlFlow short-circuits with Break if the permit
        // acquisition was cancelled.
        let permit = self.acquire_imitation_permit(cancel, ctx).await?;

        self.imitate_layer_accesses(tenant, p, cancel, gate, permit, ctx)
            .await
    }

    /// Acquire the background-loop concurrency permit for eviction, racing the
    /// acquisition against both the caller's cancellation token and this
    /// timeline's own; returns Break if either fires first.
    async fn acquire_imitation_permit(
        &self,
        cancel: &CancellationToken,
        ctx: &RequestContext,
    ) -> ControlFlow<(), BackgroundLoopSemaphorePermit<'static>> {
        let acquire_permit =
            crate::tenant::tasks::acquire_concurrency_permit(BackgroundLoopKind::Eviction, ctx);

        tokio::select! {
            permit = acquire_permit => ControlFlow::Continue(permit),
            _ = cancel.cancelled() => ControlFlow::Break(()),
            _ = self.cancel.cancelled() => ControlFlow::Break(()),
        }
    }

    /// If we evict layers but keep cached values derived from those layers, then
    /// we face a storm of on-demand downloads after pageserver restart.
    /// The reason is that the restart empties the caches, and so, the values
    /// need to be re-computed by accessing layers, which we evicted while the
    /// caches were filled.
    ///
    /// Solutions here would be one of the following:
    /// 1. Have a persistent cache.
    /// 2. Count every access to a cached value to the access stats of all layers
    ///    that were accessed to compute the value in the first place.
    /// 3. Invalidate the caches at a period of < p.threshold/2, so that the values
    ///    get re-computed from layers, thereby counting towards layer access stats.
    /// 4. Make the eviction task imitate the layer accesses that typically hit caches.
/// /// We follow approach (4) here because in Neon prod deployment: /// - page cache is quite small => high churn => low hit rate /// => eviction gets correct access stats /// - value-level caches such as logical size & repatition have a high hit rate, /// especially for inactive tenants /// => eviction sees zero accesses for these /// => they cause the on-demand download storm on pageserver restart /// /// We should probably move to persistent caches in the future, or avoid /// having inactive tenants attached to pageserver in the first place. #[instrument(skip_all)] async fn imitate_layer_accesses( &self, tenant: &TenantShard, p: &EvictionPolicyLayerAccessThreshold, cancel: &CancellationToken, gate: &GateGuard, permit: BackgroundLoopSemaphorePermit<'static>, ctx: &RequestContext, ) -> ControlFlow<()> { if !self.tenant_shard_id.is_shard_zero() { // Shards !=0 do not maintain accurate relation sizes, and do not need to calculate logical size // for consumption metrics (consumption metrics are only sent from shard 0). We may therefore // skip imitating logical size accesses for eviction purposes. return ControlFlow::Continue(()); } let mut state = self.eviction_task_timeline_state.lock().await; // Only do the imitate_layer accesses approximately as often as the threshold. A little // more frequently, to avoid this period racing with the threshold/period-th eviction iteration. let inter_imitate_period = p.threshold.checked_sub(p.period).unwrap_or(p.threshold); match state.last_layer_access_imitation { Some(ts) if ts.elapsed() < inter_imitate_period => { /* no need to run */ } _ => { self.imitate_timeline_cached_layer_accesses(gate, ctx).await; state.last_layer_access_imitation = Some(tokio::time::Instant::now()) } } drop(state); if cancel.is_cancelled() { return ControlFlow::Break(()); } // This task is timeline-scoped, but the synthetic size calculation is tenant-scoped. // Make one of the tenant's timelines draw the short straw and run the calculation. 
// The others wait until the calculation is done so that they take into account the // imitated accesses that the winner made. let (mut state, _permit) = { if let Ok(locked) = tenant.eviction_task_tenant_state.try_lock() { (locked, permit) } else { // we might need to wait for a long time here in case of pathological synthetic // size calculation performance drop(permit); let locked = tokio::select! { locked = tenant.eviction_task_tenant_state.lock() => locked, _ = self.cancel.cancelled() => { return ControlFlow::Break(()) }, _ = cancel.cancelled() => { return ControlFlow::Break(()) } }; // then reacquire -- this will be bad if there is a lot of traffic, but because we // released the permit, the overall latency will be much better. let permit = self.acquire_imitation_permit(cancel, ctx).await?; (locked, permit) } }; match state.last_layer_access_imitation { Some(ts) if ts.elapsed() < inter_imitate_period => { /* no need to run */ } _ => { self.imitate_synthetic_size_calculation_worker(tenant, cancel, ctx) .await; state.last_layer_access_imitation = Some(tokio::time::Instant::now()); } } drop(state); if cancel.is_cancelled() { return ControlFlow::Break(()); } ControlFlow::Continue(()) } /// Recompute the values which would cause on-demand downloads during restart. #[instrument(skip_all)] async fn imitate_timeline_cached_layer_accesses( &self, guard: &GateGuard, ctx: &RequestContext, ) { let lsn = self.get_last_record_lsn(); // imitiate on-restart initial logical size let size = self .calculate_logical_size( lsn, LogicalSizeCalculationCause::EvictionTaskImitation, guard, ctx, ) .instrument(info_span!("calculate_logical_size")) .await; match &size { Ok(_size) => { // good, don't log it to avoid confusion } Err(_) => { // we have known issues for which we already log this on consumption metrics, // gc, and compaction. leave logging out for now. 
// // https://github.com/neondatabase/neon/issues/2539 } } // imitiate repartiting on first compactation if let Err(e) = self .collect_keyspace(lsn, ctx) .instrument(info_span!("collect_keyspace")) .await { // if this failed, we probably failed logical size because these use the same keys if size.is_err() { // ignore, see above comment } else { match e { CollectKeySpaceError::Cancelled => { // Shutting down, ignore } err => { warn!( "failed to collect keyspace but succeeded in calculating logical size: {err:#}" ); } } } } } // Imitate the synthetic size calculation done by the consumption_metrics module. #[instrument(skip_all)] async fn imitate_synthetic_size_calculation_worker( &self, tenant: &TenantShard, cancel: &CancellationToken, ctx: &RequestContext, ) { if self.conf.metric_collection_endpoint.is_none() { // We don't start the consumption metrics task if this is not set in the config. // So, no need to imitate the accesses in that case. return; } // The consumption metrics are collected on a per-tenant basis, by a single // global background loop. // It limits the number of synthetic size calculations using the global // `concurrent_tenant_size_logical_size_queries` semaphore to not overload // the pageserver. (size calculation is somewhat expensive in terms of CPU and IOs). // // If we used that same semaphore here, then we'd compete for the // same permits, which may impact timeliness of consumption metrics. // That is a no-go, as consumption metrics are much more important // than what we do here. // // So, we have a separate semaphore, initialized to the same // number of permits as the `concurrent_tenant_size_logical_size_queries`. // In the worst, we would have twice the amount of concurrenct size calculations. // But in practice, the `p.threshold` >> `consumption metric interval`, and // we spread out the eviction task using `random_init_delay`. // So, the chance of the worst case is quite low in practice. 
// It runs as a per-tenant task, but the eviction_task.rs is per-timeline. // So, we must coordinate with other with other eviction tasks of this tenant. let limit = self .conf .eviction_task_immitated_concurrent_logical_size_queries .inner(); let mut throwaway_cache = HashMap::new(); let gather = crate::tenant::size::gather_inputs( tenant, limit, None, &mut throwaway_cache, LogicalSizeCalculationCause::EvictionTaskImitation, cancel, ctx, ) .instrument(info_span!("gather_inputs")); tokio::select! { _ = cancel.cancelled() => {} gather_result = gather => { match gather_result { Ok(_) => {}, // It can happen sometimes that we hit this instead of the cancellation token firing above Err(CalculateSyntheticSizeError::Cancelled) => {} Err(e) => { // We don't care about the result, but, if it failed, we should log it, // since consumption metric might be hitting the cached value and // thus not encountering this error. warn!("failed to imitate synthetic size calculation accesses: {e:#}") } } } } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/handle.rs
pageserver/src/tenant/timeline/handle.rs
//! A cache for [`crate::tenant::mgr`]+`Tenant::get_timeline`+`Timeline::gate.enter()`. //! //! # Motivation //! //! On a single page service connection, we're typically serving a single TenantTimelineId. //! //! Without sharding, there is a single Timeline object to which we dispatch //! all requests. For example, a getpage request gets dispatched to the //! Timeline::get method of the Timeline object that represents the //! (tenant,timeline) of that connection. //! //! With sharding, for each request that comes in on the connection, //! we first have to perform shard routing based on the requested key (=~ page number). //! The result of shard routing is a Timeline object. //! We then dispatch the request to that Timeline object. //! //! Regardless of whether the tenant is sharded or not, we want to ensure that //! we hold the Timeline gate open while we're invoking the method on the //! Timeline object. //! //! We want to avoid the overhead of doing, for each incoming request, //! - tenant manager lookup (global rwlock + btreemap lookup for shard routing) //! - cloning the `Arc<Timeline>` out of the tenant manager so we can //! release the mgr rwlock before doing any request processing work //! - re-entering the Timeline gate for each Timeline method invocation. //! //! Regardless of how we accomplish the above, it should not //! prevent the Timeline from shutting down promptly. //! //! //! # Design //! //! ## Data Structures //! //! There are two concepts expressed as associated types in the `Types` trait: //! - `TenantManager`: the thing that performs the expensive work. It produces //! a `Timeline` object, which is the other associated type. //! - `Timeline`: the item that we cache for fast (TenantTimelineId,ShardSelector) lookup. //! //! There are three user-facing data structures exposed by this module: //! - `PerTimelineState`: a struct embedded into each Timeline struct. Lifetime == Timeline lifetime. //! 
- `Cache`: a struct private to each connection handler; Lifetime == connection lifetime. //! - `Handle`: a smart pointer that derefs to the Types::Timeline. //! - `WeakHandle`: downgrade of a `Handle` that does not keep the gate open, but allows //! trying to ugprade back to a `Handle`. If successful, a re-upgraded Handle will always //! point to the same cached `Types::Timeline`. Upgrades never invoke the `TenantManager`. //! //! Internally, there is 0 or 1 `HandleInner` per `(Cache,Timeline)`. //! Since Cache:Connection is 1:1, there is 0 or 1 `HandleInner` per `(Connection,Timeline)`. //! //! The `HandleInner` is allocated as a `Arc<Mutex<HandleInner>>` and //! referenced weakly and strongly from various places which we are now illustrating. //! For brevity, we will omit the `Arc<Mutex<>>` part in the following and instead //! use `strong ref` and `weak ref` when referring to the `Arc<Mutex<HandleInner>>` //! or `Weak<Mutex<HandleInner>>`, respectively. //! //! - The `Handle` is a strong ref. //! - The `WeakHandle` is a weak ref. //! - The `PerTimelineState` contains a `HashMap<CacheId, strong ref>`. //! - The `Cache` is a `HashMap<unique identifier for the shard, weak ref>`. //! //! Lifetimes: //! - `WeakHandle` and `Handle`: single pagestream request. //! - `Cache`: single page service connection. //! - `PerTimelineState`: lifetime of the Timeline object (i.e., i.e., till `Timeline::shutdown`). //! //! ## Request Handling Flow (= filling and using the `Cache``) //! //! To dispatch a request, the page service connection calls `Cache::get`. //! //! A cache miss means we call Types::TenantManager::resolve for shard routing, //! cloning the `Arc<Timeline>` out of it, and entering the gate. The result of //! resolve() is the object we want to cache, and return `Handle`s to for subseqent `Cache::get` calls. //! //! We wrap the object returned from resolve() in an `Arc` and store that inside the //! `Arc<Mutex<HandleInner>>>`. 
A weak ref to the HandleInner is stored in the `Cache` //! and a strong ref in the `PerTimelineState`. //! Another strong ref is returned wrapped in a `Handle`. //! //! For subsequent requests, `Cache::get` will perform a "fast path" shard routing //! and find the weak ref in the cache. //! We upgrade the weak ref to a strong ref and return it wrapped in a `Handle`. //! //! The pagestream processing is pipelined and involves a batching step. //! While a request is batching, the `Handle` is downgraded to a `WeakHandle`. //! When the batch is ready to be executed, the `WeakHandle` is upgraded back to a `Handle` //! and the request handler dispatches the request to the right `<Handle as Deref<Target = Timeline>>::$request_method`. //! It then drops the `Handle`, and thus the `Arc<Mutex<HandleInner>>` inside it. //! //! # Performance //! //! Remember from the introductory section: //! //! > We want to avoid the overhead of doing, for each incoming request, //! > - tenant manager lookup (global rwlock + btreemap lookup for shard routing) //! > - cloning the `Arc<Timeline>` out of the tenant manager so we can //! > release the mgr rwlock before doing any request processing work //! > - re-entering the Timeline gate for each Timeline method invocation. //! //! All of these boil down to some state that is either globally shared among all shards //! or state shared among all tasks that serve a particular timeline. //! It is either protected by RwLock or manipulated via atomics. //! Even atomics are costly when shared across multiple cores. //! So, we want to avoid any permanent need for coordination between page_service tasks. //! //! The solution is to add indirection: we wrap the Types::Timeline object that is //! returned by Types::TenantManager into an Arc that is rivate to the `HandleInner` //! and hence to the single Cache / page_service connection. //! (Review the "Data Structures" section if that is unclear to you.) //! //! //! 
When upgrading a `WeakHandle`, we upgrade its weak to a strong ref (of the `Mutex<HandleInner>`), //! lock the mutex, take out a clone of the `Arc<Types::Timeline>`, and drop the Mutex. //! The Mutex is not contended because it is private to the connection. //! And again, the `Arc<Types::Timeline>` clone is cheap because that wrapper //! Arc's refcounts are private to the connection. //! //! Downgrading drops these two Arcs, which again, manipulates refcounts that are private to the connection. //! //! //! # Shutdown //! //! The attentive reader may have noticed the following reference cycle around the `Arc<Timeline>`: //! //! ```text //! Timeline --owns--> PerTimelineState --strong--> HandleInner --strong--> Types::Timeline --strong--> Timeline //! ``` //! //! Further, there is this cycle: //! //! ```text //! Timeline --owns--> PerTimelineState --strong--> HandleInner --strong--> Types::Timeline --strong--> GateGuard --keepalive--> Timeline //! ``` //! //! The former cycle is a memory leak if not broken. //! The latter cycle further prevents the Timeline from shutting down //! because we certainly won't drop the Timeline while the GateGuard is alive. //! Preventing shutdown is the whole point of this handle/cache system, //! but when the Timeline needs to shut down, we need to break the cycle. //! //! The cycle is broken by either //! - Timeline shutdown (=> `PerTimelineState::shutdown`) //! - Connection shutdown (=> dropping the `Cache`). //! //! Both transition the `HandleInner` from [`HandleInner::Open`] to //! [`HandleInner::ShutDown`], which drops the only long-lived //! `Arc<Types::Timeline>`. Once the last short-lived Arc<Types::Timeline> //! is dropped, the `Types::Timeline` gets dropped and thereby //! the `GateGuard` and the `Arc<Timeline>` that it stores, //! thereby breaking both cycles. //! //! `PerTimelineState::shutdown` drops all the `HandleInners` it contains, //! thereby breaking the cycle. //! 
It also initiates draining of already existing `Handle`s by //! poisoning things so that no new `HandleInner`'s can be added //! to the `PerTimelineState`, which will make subsequent `Cache::get` fail. //! //! Concurrently existing / already upgraded `Handle`s will extend the //! lifetime of the `Arc<Mutex<HandleInner>>` and hence cycles. //! However, since `Handle`s are short-lived and new `Handle`s are not //! handed out from `Cache::get` or `WeakHandle::upgrade` after //! `PerTimelineState::shutdown`, that extension of the cycle is bounded. //! //! Concurrently existing `WeakHandle`s will fail to `upgrade()`: //! while they will succeed in upgrading `Weak<Mutex<HandleInner>>`, //! they will find the inner in state `HandleInner::ShutDown` state where the //! `Arc<GateGuard>` and Timeline has already been dropped. //! //! Dropping the `Cache` undoes the registration of this `Cache`'s //! `HandleInner`s from all the `PerTimelineState`s, i.e., it //! removes the strong ref to each of its `HandleInner`s //! from all the `PerTimelineState`. //! //! # Locking Rules //! //! To prevent deadlocks we: //! //! 1. Only ever hold one of the locks at a time. //! 2. Don't add more than one Drop impl that locks on the //! cycles above. //! //! As per (2), that impl is in `Drop for Cache`. //! //! # Fast Path for Shard Routing //! //! The `Cache` has a fast path for shard routing to avoid calling into //! the tenant manager for every request. //! //! The `Cache` maintains a hash map of `ShardTimelineId` to `WeakHandle`s. //! //! The current implementation uses the first entry in the hash map //! to determine the `ShardParameters` and derive the correct //! `ShardIndex` for the requested key. //! //! It then looks up the hash map for that `ShardTimelineId := {ShardIndex,TimelineId}`. //! //! If the lookup is successful and the `WeakHandle` can be upgraded, //! it's a hit. //! //! ## Cache invalidation //! //! 
The insight is that cache invalidation is sufficient and most efficiently if done lazily. //! The only reasons why an entry in the cache can become stale are: //! 1. The `PerTimelineState` / Timeline is shutting down e.g. because the shard is //! being detached, timeline or shard deleted, or pageserver is shutting down. //! 2. We're doing a shard split and new traffic should be routed to the child shards. //! //! Regarding (1), we will eventually fail to upgrade the `WeakHandle` once the //! timeline has shut down, and when that happens, we remove the entry from the cache. //! //! Regarding (2), the insight is that it is toally fine to keep dispatching requests //! to the parent shard during a shard split. Eventually, the shard split task will //! shut down the parent => case (1). use std::collections::HashMap; use std::collections::hash_map; use std::sync::Arc; use std::sync::Mutex; use std::sync::Weak; use std::time::Duration; use pageserver_api::shard::ShardIdentity; use tracing::{instrument, trace}; use utils::id::TimelineId; use utils::shard::{ShardIndex, ShardNumber}; use crate::page_service::GetActiveTimelineError; use crate::tenant::GetTimelineError; use crate::tenant::mgr::{GetActiveTenantError, ShardSelector}; pub(crate) trait Types: Sized { type TenantManager: TenantManager<Self> + Sized; type Timeline: Timeline<Self> + Sized; } /// Uniquely identifies a [`Cache`] instance over the lifetime of the process. /// Required so [`Cache::drop`] can take out the handles from the [`PerTimelineState`]. /// Alternative to this would be to allocate [`Cache`] in a `Box` and identify it by the pointer. #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] struct CacheId(u64); impl CacheId { fn next() -> Self { static NEXT_ID: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(1); let id = NEXT_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed); if id == 0 { panic!("CacheId::new() returned 0, overflow"); } Self(id) } } /// See module-level comment. 
pub(crate) struct Cache<T: Types> { id: CacheId, map: Map<T>, } type Map<T> = HashMap<ShardTimelineId, WeakHandle<T>>; impl<T: Types> Default for Cache<T> { fn default() -> Self { Self { id: CacheId::next(), map: Default::default(), } } } #[derive(PartialEq, Eq, Debug, Hash, Clone, Copy)] pub(crate) struct ShardTimelineId { pub(crate) shard_index: ShardIndex, pub(crate) timeline_id: TimelineId, } /// See module-level comment. pub(crate) struct Handle<T: Types> { inner: Arc<Mutex<HandleInner<T>>>, open: Arc<T::Timeline>, } pub(crate) struct WeakHandle<T: Types> { inner: Weak<Mutex<HandleInner<T>>>, } enum HandleInner<T: Types> { Open(Arc<T::Timeline>), ShutDown, } /// Embedded in each [`Types::Timeline`] as the anchor for the only long-lived strong ref to `HandleInner`. /// /// See module-level comment for details. pub struct PerTimelineState<T: Types> { // None = shutting down #[allow(clippy::type_complexity)] handles: Mutex<Option<HashMap<CacheId, Arc<Mutex<HandleInner<T>>>>>>, } impl<T: Types> Default for PerTimelineState<T> { fn default() -> Self { Self { handles: Mutex::new(Some(Default::default())), } } } /// Abstract view of [`crate::tenant::mgr`], for testability. pub(crate) trait TenantManager<T: Types> { /// Invoked by [`Cache::get`] to resolve a [`ShardTimelineId`] to a [`Types::Timeline`]. async fn resolve( &self, timeline_id: TimelineId, shard_selector: ShardSelector, ) -> Result<T::Timeline, GetActiveTimelineError>; } /// Abstract view of an [`Arc<Timeline>`], for testability. pub(crate) trait Timeline<T: Types> { fn shard_timeline_id(&self) -> ShardTimelineId; fn get_shard_identity(&self) -> &ShardIdentity; fn per_timeline_state(&self) -> &PerTimelineState<T>; } /// Internal type used in [`Cache::get`]. enum RoutingResult<T: Types> { FastPath(Handle<T>), SlowPath(ShardTimelineId), NeedConsultTenantManager, } impl<T: Types> Cache<T> { /* BEGIN_HADRON */ /// A wrapper of do_get to resolve the tenant shard for a get page request. 
#[instrument(level = "trace", skip_all)] pub(crate) async fn get( &mut self, timeline_id: TimelineId, shard_selector: ShardSelector, tenant_manager: &T::TenantManager, ) -> Result<Handle<T>, GetActiveTimelineError> { const GET_MAX_RETRIES: usize = 10; const RETRY_BACKOFF: Duration = Duration::from_millis(100); let mut attempt = 0; loop { attempt += 1; match self .do_get(timeline_id, shard_selector, tenant_manager) .await { Ok(handle) => return Ok(handle), Err( e @ GetActiveTimelineError::Tenant(GetActiveTenantError::WaitForActiveTimeout { .. }), ) => { // Retry on tenant manager error to handle tenant split more gracefully if attempt < GET_MAX_RETRIES { tokio::time::sleep(RETRY_BACKOFF).await; continue; } else { tracing::info!( "Failed to resolve tenant shard after {} attempts: {:?}", GET_MAX_RETRIES, e ); return Err(e); } } Err(err) => return Err(err), } } } /* END_HADRON */ /// See module-level comment for details. /// /// Does NOT check for the shutdown state of [`Types::Timeline`]. /// Instead, the methods of [`Types::Timeline`] that are invoked through /// the [`Handle`] are responsible for checking these conditions /// and if so, return an error that causes the page service to /// close the connection. 
#[instrument(level = "trace", skip_all)] async fn do_get( &mut self, timeline_id: TimelineId, shard_selector: ShardSelector, tenant_manager: &T::TenantManager, ) -> Result<Handle<T>, GetActiveTimelineError> { // terminates because when every iteration we remove an element from the map let miss: ShardSelector = loop { let routing_state = self.shard_routing(timeline_id, shard_selector); match routing_state { RoutingResult::FastPath(handle) => return Ok(handle), RoutingResult::SlowPath(key) => match self.map.get(&key) { Some(cached) => match cached.upgrade() { Ok(upgraded) => return Ok(upgraded), Err(HandleUpgradeError::ShutDown) => { // TODO: dedup with shard_routing() trace!("handle cache stale"); self.map.remove(&key).unwrap(); continue; } }, None => break ShardSelector::Known(key.shard_index), }, RoutingResult::NeedConsultTenantManager => break shard_selector, } }; self.get_miss(timeline_id, miss, tenant_manager).await } #[inline(always)] fn shard_routing( &mut self, timeline_id: TimelineId, shard_selector: ShardSelector, ) -> RoutingResult<T> { loop { // terminates because when every iteration we remove an element from the map let Some((first_key, first_handle)) = self.map.iter().next() else { return RoutingResult::NeedConsultTenantManager; }; let Ok(first_handle) = first_handle.upgrade() else { // TODO: dedup with get() trace!("handle cache stale"); let first_key_owned = *first_key; self.map.remove(&first_key_owned).unwrap(); continue; }; let first_handle_shard_identity = first_handle.get_shard_identity(); let make_shard_index = |shard_num: ShardNumber| ShardIndex { shard_number: shard_num, shard_count: first_handle_shard_identity.count, }; let need_idx = match shard_selector { ShardSelector::Page(key) => { make_shard_index(first_handle_shard_identity.get_shard_number(&key)) } ShardSelector::Zero => make_shard_index(ShardNumber(0)), ShardSelector::Known(shard_idx) => shard_idx, }; let need_shard_timeline_id = ShardTimelineId { shard_index: need_idx, timeline_id, 
}; let first_handle_shard_timeline_id = ShardTimelineId { shard_index: first_handle_shard_identity.shard_index(), timeline_id: first_handle.shard_timeline_id().timeline_id, }; if need_shard_timeline_id == first_handle_shard_timeline_id { return RoutingResult::FastPath(first_handle); } else { return RoutingResult::SlowPath(need_shard_timeline_id); } } } #[instrument(level = "trace", skip_all)] #[inline(always)] async fn get_miss( &mut self, timeline_id: TimelineId, shard_selector: ShardSelector, tenant_manager: &T::TenantManager, ) -> Result<Handle<T>, GetActiveTimelineError> { let timeline = tenant_manager.resolve(timeline_id, shard_selector).await?; let key = timeline.shard_timeline_id(); match &shard_selector { ShardSelector::Zero => assert_eq!(key.shard_index.shard_number, ShardNumber(0)), ShardSelector::Page(_) => (), // gotta trust tenant_manager ShardSelector::Known(idx) => assert_eq!(idx, &key.shard_index), } trace!("creating new HandleInner"); let timeline = Arc::new(timeline); let handle_inner_arc = Arc::new(Mutex::new(HandleInner::Open(Arc::clone(&timeline)))); let handle_weak = WeakHandle { inner: Arc::downgrade(&handle_inner_arc), }; let handle = handle_weak .upgrade() .ok() .expect("we just created it and it's not linked anywhere yet"); let mut lock_guard = timeline .per_timeline_state() .handles .lock() .expect("mutex poisoned"); let Some(per_timeline_state) = &mut *lock_guard else { return Err(GetActiveTimelineError::Timeline( GetTimelineError::ShuttingDown, )); }; let replaced = per_timeline_state.insert(self.id, Arc::clone(&handle_inner_arc)); assert!(replaced.is_none(), "some earlier code left a stale handle"); match self.map.entry(key) { hash_map::Entry::Occupied(_o) => { // This cannot not happen because // 1. we're the _miss_ handle, i.e., `self.map` didn't contain an entry and // 2. we were holding &mut self during .resolve().await above, so, no other thread can have inserted a handle // while we were waiting for the tenant manager. 
unreachable!() } hash_map::Entry::Vacant(v) => { v.insert(handle_weak); } } Ok(handle) } } pub(crate) enum HandleUpgradeError { ShutDown, } impl<T: Types> WeakHandle<T> { pub(crate) fn upgrade(&self) -> Result<Handle<T>, HandleUpgradeError> { let Some(inner) = Weak::upgrade(&self.inner) else { return Err(HandleUpgradeError::ShutDown); }; let lock_guard = inner.lock().expect("poisoned"); match &*lock_guard { HandleInner::Open(open) => { let open = Arc::clone(open); drop(lock_guard); Ok(Handle { open, inner }) } HandleInner::ShutDown => Err(HandleUpgradeError::ShutDown), } } pub(crate) fn is_same_handle_as(&self, other: &WeakHandle<T>) -> bool { Weak::ptr_eq(&self.inner, &other.inner) } } impl<T: Types> std::ops::Deref for Handle<T> { type Target = T::Timeline; fn deref(&self) -> &Self::Target { &self.open } } impl<T: Types> Handle<T> { pub(crate) fn downgrade(&self) -> WeakHandle<T> { WeakHandle { inner: Arc::downgrade(&self.inner), } } } impl<T: Types> PerTimelineState<T> { /// After this method returns, [`Cache::get`] will never again return a [`Handle`] /// to the [`Types::Timeline`] that embeds this per-timeline state. /// Even if [`TenantManager::resolve`] would still resolve to it. /// /// Already-alive [`Handle`]s for will remain open, usable, and keeping the [`Types::Timeline`] alive. /// That's ok because they're short-lived. See module-level comment for details. #[instrument(level = "trace", skip_all)] pub(super) fn shutdown(&self) { let handles = self .handles .lock() .expect("mutex poisoned") // NB: this .take() sets locked to None. // That's what makes future `Cache::get` misses fail. // Cache hits are taken care of below. .take(); let Some(handles) = handles else { trace!("already shut down"); return; }; for handle_inner_arc in handles.values() { // Make hits fail. 
let mut lock_guard = handle_inner_arc.lock().expect("poisoned"); lock_guard.shutdown(); } drop(handles); } } // When dropping a [`Cache`], prune its handles in the [`PerTimelineState`] to break the reference cycle. impl<T: Types> Drop for Cache<T> { fn drop(&mut self) { for ( _, WeakHandle { inner: handle_inner_weak, }, ) in self.map.drain() { let Some(handle_inner_arc) = handle_inner_weak.upgrade() else { continue; }; let Some(handle_timeline) = handle_inner_arc // locking rules: drop lock before acquiring other lock below .lock() .expect("poisoned") .shutdown() else { // Concurrent PerTimelineState::shutdown. continue; }; // Clean up per_timeline_state so the HandleInner allocation can be dropped. let per_timeline_state = handle_timeline.per_timeline_state(); let mut handles_lock_guard = per_timeline_state.handles.lock().expect("mutex poisoned"); let Some(handles) = &mut *handles_lock_guard else { continue; }; let Some(removed_handle_inner_arc) = handles.remove(&self.id) else { // Concurrent PerTimelineState::shutdown. continue; }; drop(handles_lock_guard); // locking rules! assert!(Arc::ptr_eq(&removed_handle_inner_arc, &handle_inner_arc)); } } } impl<T: Types> HandleInner<T> { fn shutdown(&mut self) -> Option<Arc<T::Timeline>> { match std::mem::replace(self, HandleInner::ShutDown) { HandleInner::Open(timeline) => Some(timeline), HandleInner::ShutDown => { // Duplicate shutdowns are possible because both Cache::drop and PerTimelineState::shutdown // may do it concurrently, but locking rules disallow holding per-timeline-state lock and // the handle lock at the same time. 
None } } } } #[cfg(test)] mod tests { use std::sync::Weak; use pageserver_api::key::{DBDIR_KEY, Key, rel_block_to_key}; use pageserver_api::models::ShardParameters; use pageserver_api::reltag::RelTag; use pageserver_api::shard::DEFAULT_STRIPE_SIZE; use utils::id::TenantId; use utils::shard::{ShardCount, TenantShardId}; use utils::sync::gate::GateGuard; use super::*; const FOREVER: std::time::Duration = std::time::Duration::from_secs(u64::MAX); #[derive(Debug)] struct TestTypes; impl Types for TestTypes { type TenantManager = StubManager; type Timeline = Entered; } struct StubManager { shards: Vec<Arc<StubTimeline>>, } struct StubTimeline { gate: utils::sync::gate::Gate, id: TimelineId, shard: ShardIdentity, per_timeline_state: PerTimelineState<TestTypes>, myself: Weak<StubTimeline>, } struct Entered { timeline: Arc<StubTimeline>, #[allow(dead_code)] // it's stored here to keep the gate open gate_guard: Arc<GateGuard>, } impl StubTimeline { fn getpage(&self) { // do nothing } } impl Timeline<TestTypes> for Entered { fn shard_timeline_id(&self) -> ShardTimelineId { ShardTimelineId { shard_index: self.shard.shard_index(), timeline_id: self.id, } } fn get_shard_identity(&self) -> &ShardIdentity { &self.shard } fn per_timeline_state(&self) -> &PerTimelineState<TestTypes> { &self.per_timeline_state } } impl TenantManager<TestTypes> for StubManager { async fn resolve( &self, timeline_id: TimelineId, shard_selector: ShardSelector, ) -> Result<Entered, GetActiveTimelineError> { fn enter_gate( timeline: &StubTimeline, ) -> Result<Arc<GateGuard>, GetActiveTimelineError> { Ok(Arc::new(timeline.gate.enter().map_err(|_| { GetActiveTimelineError::Timeline(GetTimelineError::ShuttingDown) })?)) } for timeline in &self.shards { if timeline.id == timeline_id { match &shard_selector { ShardSelector::Zero if timeline.shard.is_shard_zero() => { return Ok(Entered { timeline: Arc::clone(timeline), gate_guard: enter_gate(timeline)?, }); } ShardSelector::Zero => continue, 
ShardSelector::Page(key) if timeline.shard.is_key_local(key) => { return Ok(Entered { timeline: Arc::clone(timeline), gate_guard: enter_gate(timeline)?, }); } ShardSelector::Page(_) => continue, ShardSelector::Known(idx) if idx == &timeline.shard.shard_index() => { return Ok(Entered { timeline: Arc::clone(timeline), gate_guard: enter_gate(timeline)?, }); } ShardSelector::Known(_) => continue, } } } Err(GetActiveTimelineError::Timeline( GetTimelineError::NotFound { tenant_id: TenantShardId::unsharded(TenantId::from([0; 16])), timeline_id, }, )) } } impl std::ops::Deref for Entered { type Target = StubTimeline; fn deref(&self) -> &Self::Target { &self.timeline } } #[tokio::test(start_paused = true)] async fn test_timeline_shutdown() { crate::tenant::harness::setup_logging(); let timeline_id = TimelineId::generate(); let shard0 = Arc::new_cyclic(|myself| StubTimeline { gate: Default::default(), id: timeline_id, shard: ShardIdentity::unsharded(), per_timeline_state: PerTimelineState::default(), myself: myself.clone(), }); let mgr = StubManager { shards: vec![shard0.clone()], }; let key = DBDIR_KEY; let mut cache = Cache::<TestTypes>::default(); // // fill the cache // let handle: Handle<_> = cache .get(timeline_id, ShardSelector::Page(key), &mgr) .await .expect("we have the timeline"); assert!(Weak::ptr_eq(&handle.myself, &shard0.myself)); assert_eq!(cache.map.len(), 1); drop(handle); // // demonstrate that Handle holds up gate closure // but shutdown prevents new handles from being handed out // tokio::select! 
{ _ = shard0.gate.close() => { panic!("cache and per-timeline handler state keep cache open"); } _ = tokio::time::sleep(FOREVER) => { // NB: first poll of close() makes it enter closing state } } let handle = cache .get(timeline_id, ShardSelector::Page(key), &mgr) .await .expect("we have the timeline"); assert!(Weak::ptr_eq(&handle.myself, &shard0.myself)); // SHUTDOWN shard0.per_timeline_state.shutdown(); // keeping handle alive across shutdown assert_eq!( cache.map.len(), 1, "this is an implementation detail but worth pointing out: we can't clear the cache from shutdown(), it's cleared on first access after" ); // this handle is perfectly usable handle.getpage(); cache .get(timeline_id, ShardSelector::Page(key), &mgr) .await .err() .expect("documented behavior: can't get new handle after shutdown, even if there is an alive Handle"); assert_eq!( cache.map.len(), 0, "first access after shutdown cleans up the Weak's from the cache" ); tokio::select! { _ = shard0.gate.close() => { panic!("handle is keeping gate open"); } _ = tokio::time::sleep(FOREVER) => { } } drop(handle); // closing gate succeeds after dropping handle tokio::select! { _ = shard0.gate.close() => { } _ = tokio::time::sleep(FOREVER) => { panic!("handle is dropped, no other gate holders exist") } } // map gets cleaned on next lookup cache
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/delete.rs
pageserver/src/tenant/timeline/delete.rs
use std::ops::{Deref, DerefMut}; use std::sync::Arc; use anyhow::Context; use pageserver_api::models::TimelineState; use pageserver_api::shard::TenantShardId; use remote_storage::DownloadError; use tokio::sync::OwnedMutexGuard; use tracing::{Instrument, error, info, info_span, instrument}; use utils::id::TimelineId; use utils::{crashsafe, fs_ext, pausable_failpoint}; use crate::config::PageServerConf; use crate::context::RequestContext; use crate::task_mgr::{self, TaskKind}; use crate::tenant::metadata::TimelineMetadata; use crate::tenant::remote_timeline_client::{ PersistIndexPartWithDeletedFlagError, RemoteTimelineClient, }; use crate::tenant::{ CreateTimelineCause, DeleteTimelineError, MaybeDeletedIndexPart, TenantManifestError, TenantShard, Timeline, TimelineOrOffloaded, }; use crate::virtual_file::MaybeFatalIo; /// Mark timeline as deleted in S3 so we won't pick it up next time /// during attach or pageserver restart. /// See comment in persist_index_part_with_deleted_flag. async fn set_deleted_in_remote_index( remote_client: &Arc<RemoteTimelineClient>, ) -> Result<(), DeleteTimelineError> { let res = remote_client.persist_index_part_with_deleted_flag().await; match res { // If we (now, or already) marked it successfully as deleted, we can proceed Ok(()) | Err(PersistIndexPartWithDeletedFlagError::AlreadyDeleted(_)) => (), // Bail out otherwise // // AlreadyInProgress shouldn't happen, because the 'delete_lock' prevents // two tasks from performing the deletion at the same time. The first task // that starts deletion should run it to completion. Err(e @ PersistIndexPartWithDeletedFlagError::AlreadyInProgress(_)) | Err(e @ PersistIndexPartWithDeletedFlagError::Other(_)) => { return Err(DeleteTimelineError::Other(anyhow::anyhow!(e))); } } Ok(()) } /// Grab the compaction and gc locks, and actually perform the deletion. /// /// The locks prevent GC or compaction from running at the same time. 
/// The background tasks do not
/// register themselves with the timeline it's operating on, so it might still be running even
/// though we called `shutdown_tasks`.
///
/// Note that there are still other race conditions between
/// GC, compaction and timeline deletion. See
/// <https://github.com/neondatabase/neon/issues/2671>
///
/// No timeout here, GC & Compaction should be responsive to the
/// `TimelineState::Stopping` change.
// pub(super): documentation link
pub(super) async fn delete_local_timeline_directory(
    conf: &PageServerConf,
    tenant_shard_id: TenantShardId,
    timeline: &Timeline,
) {
    // Always ensure the lock order is compaction -> gc.
    let compaction_lock = timeline.compaction_lock.lock();
    let _compaction_lock = crate::timed(
        compaction_lock,
        "acquires compaction lock",
        std::time::Duration::from_secs(5),
    )
    .await;

    let gc_lock = timeline.gc_lock.lock();
    let _gc_lock = crate::timed(
        gc_lock,
        "acquires gc lock",
        std::time::Duration::from_secs(5),
    )
    .await;

    // NB: storage_sync upload tasks that reference these layers have been cancelled
    // by the caller.

    let local_timeline_directory = conf.timeline_path(&tenant_shard_id, &timeline.timeline_id);

    // NB: This need not be atomic because the deleted flag in the IndexPart
    // will be observed during tenant/timeline load. The deletion will be resumed there.
    //
    // ErrorKind::NotFound can happen e.g. if we race with tenant detach, because,
    // no locks are shared.
    tokio::fs::remove_dir_all(local_timeline_directory)
        .await
        .or_else(fs_ext::ignore_not_found)
        .fatal_err("removing timeline directory");

    // Make sure previous deletions are ordered before mark removal.
    // Otherwise there is no guarantee that they reach the disk before mark deletion.
    // So its possible for mark to reach disk first and for other deletions
    // to be reordered later and thus missed if a crash occurs.
    // Note that we don't need to sync after mark file is removed
    // because we can tolerate the case when mark file reappears on startup.
    let timeline_path = conf.timelines_path(&tenant_shard_id);
    crashsafe::fsync_async(timeline_path)
        .await
        .fatal_err("fsync after removing timeline directory");

    info!("finished deleting layer files, releasing locks");
}

/// It is important that this gets called when DeletionGuard is being held.
/// For more context see comments in [`make_timeline_delete_guard`]
///
/// Removes the timeline from the in-memory maps of the tenant
/// (`timelines`, `timelines_offloaded`, or `timelines_importing`, depending on
/// which variant of [`TimelineOrOffloaded`] is being deleted).
async fn remove_maybe_offloaded_timeline_from_tenant(
    tenant: &TenantShard,
    timeline: &TimelineOrOffloaded,
    _: &DeletionGuard, // using it as a witness
) -> anyhow::Result<()> {
    // Remove the timeline from the map.
    // This observes the locking order between timelines and timelines_offloaded
    let mut timelines = tenant.timelines.lock().unwrap();
    let mut timelines_offloaded = tenant.timelines_offloaded.lock().unwrap();
    let mut timelines_importing = tenant.timelines_importing.lock().unwrap();
    let offloaded_children_exist = timelines_offloaded
        .iter()
        .any(|(_, entry)| entry.ancestor_timeline_id == Some(timeline.timeline_id()));
    let children_exist = timelines
        .iter()
        .any(|(_, entry)| entry.get_ancestor_timeline_id() == Some(timeline.timeline_id()));
    // XXX this can happen because of race conditions with branch creation.
    // We already deleted the remote layer files, so it's probably best to panic.
    if children_exist || offloaded_children_exist {
        panic!("Timeline grew children while we removed layer files");
    }

    match timeline {
        TimelineOrOffloaded::Timeline(timeline) => {
            timelines.remove(&timeline.timeline_id).expect(
                "timeline that we were deleting was concurrently removed from 'timelines' map",
            );
            tenant
                .scheduled_compaction_tasks
                .lock()
                .unwrap()
                .remove(&timeline.timeline_id);
        }
        TimelineOrOffloaded::Offloaded(timeline) => {
            let offloaded_timeline = timelines_offloaded
                .remove(&timeline.timeline_id)
                .expect("timeline that we were deleting was concurrently removed from 'timelines_offloaded' map");
            offloaded_timeline.delete_from_ancestor_with_timelines(&timelines);
        }
        TimelineOrOffloaded::Importing(importing) => {
            timelines_importing.remove(&importing.timeline.timeline_id);
        }
    }

    drop(timelines_importing);
    drop(timelines_offloaded);
    drop(timelines);

    Ok(())
}

/// Orchestrates timeline shut down of all timeline tasks, removes its in-memory structures,
/// and deletes its data from both disk and s3.
/// The sequence of steps:
/// 1. Set deleted_at in remote index part.
/// 2. Create local mark file.
/// 3. Delete local files except metadata (it is simpler this way, to be able to reuse timeline initialization code that expects metadata)
/// 4. Delete remote layers
/// 5. Delete index part
/// 6. Delete meta, timeline directory
/// 7. Delete mark file
///
/// It is resumable from any step in case a crash/restart occurs.
/// There are two entrypoints to the process:
/// 1. [`DeleteTimelineFlow::run`] this is the main one called by a management api handler.
/// 2. [`DeleteTimelineFlow::resume_deletion`] is called during restarts when local metadata is still present
///    and we possibly need to continue deletion of remote files.
///
/// Note the only other place that messes around timeline delete mark is the logic that scans directory with timelines during tenant load.
#[derive(Default)]
pub enum DeleteTimelineFlow {
    #[default]
    NotStarted,
    InProgress,
    Finished,
}

impl DeleteTimelineFlow {
    // These steps are run in the context of management api request handler.
    // Long running steps are continued to run in the background.
    // NB: If this fails half-way through, and is retried, the retry will go through
    // all the same steps again. Make sure the code here is idempotent, and don't
    // error out if some of the shutdown tasks have already been completed!
    #[instrument(skip_all)]
    pub async fn run(
        tenant: &Arc<TenantShard>,
        timeline_id: TimelineId,
    ) -> Result<(), DeleteTimelineError> {
        super::debug_assert_current_span_has_tenant_and_timeline_id();

        let (timeline, mut guard) =
            make_timeline_delete_guard(tenant, timeline_id, TimelineDeleteGuardKind::Delete)?;

        guard.mark_in_progress()?;

        // Now that the Timeline is in Stopping state, request all the related tasks to shut down.
        // TODO(vlad): shut down imported timeline here
        match &timeline {
            TimelineOrOffloaded::Timeline(timeline) => {
                timeline.shutdown(super::ShutdownMode::Hard).await;
            }
            TimelineOrOffloaded::Importing(importing) => {
                importing.shutdown().await;
            }
            TimelineOrOffloaded::Offloaded(_offloaded) => {
                // Nothing to shut down in this case
            }
        }

        tenant.gc_block.before_delete(&timeline.timeline_id());

        fail::fail_point!("timeline-delete-before-index-deleted-at", |_| {
            Err(anyhow::anyhow!(
                "failpoint: timeline-delete-before-index-deleted-at"
            ))?
        });

        // Obtain a remote client for the timeline: either the existing one, or
        // (when the timeline has none, e.g. it is offloaded) build a fresh one and
        // initialize its upload queue from the remote index.
        let remote_client = match timeline.maybe_remote_client() {
            Some(remote_client) => remote_client,
            None => {
                let remote_client = tenant
                    .build_timeline_client(timeline.timeline_id(), tenant.remote_storage.clone());
                let result = match remote_client
                    .download_index_file(&tenant.cancel)
                    .instrument(info_span!("download_index_file"))
                    .await
                {
                    Ok(r) => r,
                    Err(DownloadError::NotFound) => {
                        // Deletion is already complete.
                        // As we came here, we will need to remove the timeline from the tenant though.
                        tracing::info!("Timeline already deleted in remote storage");
                        if let TimelineOrOffloaded::Offloaded(_) = &timeline {
                            // We only support this for offloaded timelines, as we don't know which state non-offloaded timelines are in.
                            tracing::info!(
                                "Timeline with gone index part is offloaded timeline. Removing from tenant."
                            );
                            remove_maybe_offloaded_timeline_from_tenant(tenant, &timeline, &guard)
                                .await?;
                        }
                        return Ok(());
                    }
                    Err(e) => {
                        return Err(DeleteTimelineError::Other(anyhow::anyhow!(
                            "error: {:?}",
                            e
                        )));
                    }
                };
                let index_part = match result {
                    MaybeDeletedIndexPart::Deleted(p) => {
                        tracing::info!("Timeline already set as deleted in remote index");
                        p
                    }
                    MaybeDeletedIndexPart::IndexPart(p) => p,
                };
                let remote_client = Arc::new(remote_client);
                remote_client
                    .init_upload_queue(&index_part)
                    .map_err(DeleteTimelineError::Other)?;
                remote_client.shutdown().await;
                remote_client
            }
        };
        set_deleted_in_remote_index(&remote_client).await?;

        fail::fail_point!("timeline-delete-before-schedule", |_| {
            Err(anyhow::anyhow!(
                "failpoint: timeline-delete-before-schedule"
            ))?
        });

        Self::schedule_background(
            guard,
            tenant.conf,
            Arc::clone(tenant),
            timeline,
            remote_client,
        );

        Ok(())
    }

    /// Transitions the flow to `InProgress`. Errors only if deletion already
    /// reached `Finished` (that would be a bug — finished deletions remove the
    /// timeline from the tenant maps).
    fn mark_in_progress(&mut self) -> anyhow::Result<()> {
        match self {
            Self::Finished => anyhow::bail!("Bug. Is in finished state"),
            Self::InProgress { .. } => { /* We're in a retry */ }
            Self::NotStarted => { /* Fresh start */ }
        }

        *self = Self::InProgress;

        Ok(())
    }

    /// Shortcut to create Timeline in stopping state and spawn deletion task.
    #[instrument(skip_all, fields(%timeline_id))]
    pub(crate) async fn resume_deletion(
        tenant: Arc<TenantShard>,
        timeline_id: TimelineId,
        local_metadata: &TimelineMetadata,
        remote_client: RemoteTimelineClient,
        ctx: &RequestContext,
    ) -> anyhow::Result<()> {
        // Note: here we even skip populating layer map. Timeline is essentially uninitialized.
        // RemoteTimelineClient is the only functioning part.
        let (timeline, _timeline_ctx) = tenant
            .create_timeline_struct(
                timeline_id,
                local_metadata,
                None, // Ancestor is not needed for deletion.
                None, // Previous heatmap is not needed for deletion
                tenant.get_timeline_resources_for(remote_client),
                // Important. We dont pass ancestor above because it can be missing.
                // Thus we need to skip the validation here.
                CreateTimelineCause::Delete,
                crate::tenant::CreateTimelineIdempotency::FailWithConflict, // doesn't matter what we put here
                None, // doesn't matter what we put here
                None, // doesn't matter what we put here
                None, // doesn't matter what we put here
                ctx,
            )
            .context("create_timeline_struct")?;

        let mut guard = DeletionGuard(
            Arc::clone(&timeline.delete_progress)
                .try_lock_owned()
                .expect("cannot happen because we're the only owner"),
        );

        // We need to do this because when console retries delete request we shouldn't answer with 404
        // because 404 means successful deletion.
        {
            let mut locked = tenant.timelines.lock().unwrap();
            locked.insert(timeline_id, Arc::clone(&timeline));
        }

        guard.mark_in_progress()?;

        let remote_client = timeline.remote_client.clone();
        let timeline = TimelineOrOffloaded::Timeline(timeline);
        Self::schedule_background(guard, tenant.conf, tenant, timeline, remote_client);

        Ok(())
    }

    /// Spawns the long-running part of deletion ([`Self::background`]) on the
    /// background runtime, while holding the deletion guard and a tenant gate guard.
    fn schedule_background(
        guard: DeletionGuard,
        conf: &'static PageServerConf,
        tenant: Arc<TenantShard>,
        timeline: TimelineOrOffloaded,
        remote_client: Arc<RemoteTimelineClient>,
    ) {
        let tenant_shard_id = timeline.tenant_shard_id();
        let timeline_id = timeline.timeline_id();

        // Take a tenant gate guard, because timeline deletion needs access to the tenant to update its manifest.
        let Ok(tenant_guard) = tenant.gate.enter() else {
            // It is safe to simply skip here, because we only schedule background work once the timeline is durably marked for deletion.
            info!("Tenant is shutting down, timeline deletion will be resumed when it next starts");
            return;
        };

        task_mgr::spawn(
            task_mgr::BACKGROUND_RUNTIME.handle(),
            TaskKind::TimelineDeletionWorker,
            tenant_shard_id,
            Some(timeline_id),
            "timeline_delete",
            async move {
                let _guard = tenant_guard;

                if let Err(err) =
                    Self::background(guard, conf, &tenant, &timeline, remote_client).await
                {
                    // Only log as an error if it's not a cancellation.
                    if matches!(err, DeleteTimelineError::Cancelled) {
                        info!("Shutdown during timeline deletion");
                    } else {
                        error!("Error: {err:#}");
                    }
                    if let TimelineOrOffloaded::Timeline(timeline) = timeline {
                        timeline.set_broken(format!("{err:#}"))
                    }
                };
                Ok(())
            }
            .instrument(tracing::info_span!(parent: None, "delete_timeline", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),timeline_id=%timeline_id)),
        );
    }

    /// The resumable tail of deletion: remove local files, delete remote data,
    /// drop the timeline from tenant maps, and update the tenant manifest.
    async fn background(
        mut guard: DeletionGuard,
        conf: &PageServerConf,
        tenant: &TenantShard,
        timeline: &TimelineOrOffloaded,
        remote_client: Arc<RemoteTimelineClient>,
    ) -> Result<(), DeleteTimelineError> {
        fail::fail_point!("timeline-delete-before-rm", |_| {
            Err(anyhow::anyhow!("failpoint: timeline-delete-before-rm"))?
        });

        match timeline {
            TimelineOrOffloaded::Timeline(timeline) => {
                delete_local_timeline_directory(conf, tenant.tenant_shard_id, timeline).await;
            }
            TimelineOrOffloaded::Importing(importing) => {
                delete_local_timeline_directory(conf, tenant.tenant_shard_id, &importing.timeline)
                    .await;
            }
            TimelineOrOffloaded::Offloaded(_offloaded) => {
                // Offloaded timelines have no local state
                // TODO: once we persist offloaded information, delete the timeline from there, too
            }
        }

        fail::fail_point!("timeline-delete-after-rm", |_| {
            Err(anyhow::anyhow!("failpoint: timeline-delete-after-rm"))?
        });

        remote_client.delete_all().await?;

        pausable_failpoint!("in_progress_delete");

        remove_maybe_offloaded_timeline_from_tenant(tenant, timeline, &guard).await?;

        // This is susceptible to race conditions, i.e. we won't continue deletions if there is a crash
        // between the deletion of the index-part.json and reaching of this code.
        // So indeed, the tenant manifest might refer to an offloaded timeline which has already been deleted.
        // However, we handle this case in tenant loading code so the next time we attach, the issue is
        // resolved.
        tenant
            .maybe_upload_tenant_manifest()
            .await
            .map_err(|err| match err {
                TenantManifestError::Cancelled => DeleteTimelineError::Cancelled,
                err => DeleteTimelineError::Other(err.into()),
            })?;

        *guard = Self::Finished;

        Ok(())
    }

    pub(crate) fn is_not_started(&self) -> bool {
        matches!(self, Self::NotStarted)
    }
}

/// Which operation the delete guard is being taken for: timeline offload also
/// uses the deletion guard, but with slightly different child-timeline checks.
#[derive(Copy, Clone, PartialEq, Eq)]
pub(super) enum TimelineDeleteGuardKind {
    Offload,
    Delete,
}

/// Looks up the timeline (active, offloaded, or importing), verifies it has no
/// children, and acquires the per-timeline deletion lock.
pub(super) fn make_timeline_delete_guard(
    tenant: &TenantShard,
    timeline_id: TimelineId,
    guard_kind: TimelineDeleteGuardKind,
) -> Result<(TimelineOrOffloaded, DeletionGuard), DeleteTimelineError> {
    // Note the interaction between this guard and deletion guard.
    // Here we attempt to lock deletion guard when we're holding a lock on timelines.
    // This is important because when you take into account `remove_timeline_from_tenant`
    // we remove timeline from memory when we still hold the deletion guard.
    // So here when timeline deletion is finished timeline wont be present in timelines map at all
    // which makes the following sequence impossible:
    // T1: get preempted right before the try_lock on `Timeline::delete_progress`
    // T2: do a full deletion, acquire and drop `Timeline::delete_progress`
    // T1: acquire deletion lock, do another `DeleteTimelineFlow::run`
    // For more context see this discussion: `https://github.com/neondatabase/neon/pull/4552#discussion_r1253437346`
    let timelines = tenant.timelines.lock().unwrap();
    let timelines_offloaded = tenant.timelines_offloaded.lock().unwrap();
    let timelines_importing = tenant.timelines_importing.lock().unwrap();

    let timeline = match timelines.get(&timeline_id) {
        Some(t) => TimelineOrOffloaded::Timeline(Arc::clone(t)),
        None => match timelines_offloaded.get(&timeline_id) {
            Some(t) => TimelineOrOffloaded::Offloaded(Arc::clone(t)),
            None => match timelines_importing.get(&timeline_id) {
                Some(t) => TimelineOrOffloaded::Importing(Arc::clone(t)),
                None => return Err(DeleteTimelineError::NotFound),
            },
        },
    };

    // Ensure that there are no child timelines, because we are about to remove files,
    // which will break child branches
    let mut children = Vec::new();
    if guard_kind == TimelineDeleteGuardKind::Delete {
        children.extend(timelines_offloaded.iter().filter_map(|(id, entry)| {
            (entry.ancestor_timeline_id == Some(timeline_id)).then_some(*id)
        }));
    }
    children.extend(timelines.iter().filter_map(|(id, entry)| {
        (entry.get_ancestor_timeline_id() == Some(timeline_id)).then_some(*id)
    }));

    if !children.is_empty() {
        return Err(DeleteTimelineError::HasChildren(children));
    }

    // Note that using try_lock here is important to avoid a deadlock.
    // Here we take lock on timelines and then the deletion guard.
    // At the end of the operation we're holding the guard and need to lock timelines map
    // to remove the timeline from it.
    // Always if you have two locks that are taken in different order this can result in a deadlock.

    let delete_progress = Arc::clone(timeline.delete_progress());
    let delete_lock_guard = match delete_progress.try_lock_owned() {
        Ok(guard) => DeletionGuard(guard),
        Err(_) => {
            // Unfortunately if lock fails arc is consumed.
            return Err(DeleteTimelineError::AlreadyInProgress(Arc::clone(
                timeline.delete_progress(),
            )));
        }
    };

    if guard_kind == TimelineDeleteGuardKind::Delete {
        if let TimelineOrOffloaded::Timeline(timeline) = &timeline {
            timeline.set_state(TimelineState::Stopping);
        }
    }

    Ok((timeline, delete_lock_guard))
}

/// Witness that the per-timeline deletion mutex is held; dereferences to the
/// [`DeleteTimelineFlow`] state it protects.
pub(super) struct DeletionGuard(OwnedMutexGuard<DeleteTimelineFlow>);

impl Deref for DeletionGuard {
    type Target = DeleteTimelineFlow;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for DeletionGuard {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs
pageserver/src/tenant/timeline/walreceiver/connection_manager.rs
//! WAL receiver logic that ensures the pageserver gets connected to safekeeper,
//! that contains the latest WAL to stream and this connection does not go stale.
//!
//! To achieve that, a storage broker is used: safekeepers propagate their timelines' state in it,
//! the manager subscribes for changes and accumulates those to query the one with the biggest Lsn for connection.
//! Current connection state is tracked too, to ensure it's not getting stale.
//!
//! After every connection or storage broker update fetched, the state gets updated correspondingly and rechecked for the new connection leader,
//! then a (re)connection happens, if necessary.
//! Only WAL streaming task expects to be finished, other loops (storage broker, connection management) never exit unless cancelled explicitly via the dedicated channel.

use std::collections::HashMap;
use std::num::NonZeroU64;
use std::ops::ControlFlow;
use std::sync::Arc;
use std::time::Duration;

use anyhow::Context;
use chrono::{NaiveDateTime, Utc};
use pageserver_api::models::TimelineState;
use postgres_connection::PgConnectionConfig;
use storage_broker::proto::{
    FilterTenantTimelineId, MessageType, SafekeeperDiscoveryRequest, SafekeeperDiscoveryResponse,
    SubscribeByFilterRequest, TenantTimelineId as ProtoTenantTimelineId, TypeSubscription,
    TypedMessage,
};
use storage_broker::{BrokerClientChannel, Code, Streaming};
use tokio_util::sync::CancellationToken;
use tracing::*;
use utils::backoff::{
    DEFAULT_BASE_BACKOFF_SECONDS, DEFAULT_MAX_BACKOFF_SECONDS, exponential_backoff,
};
use utils::id::{NodeId, TenantTimelineId};
use utils::lsn::Lsn;
use utils::postgres_client::{ConnectionConfigArgs, wal_stream_connection_config};

use super::walreceiver_connection::{WalConnectionStatus, WalReceiverError};
use super::{TaskEvent, TaskHandle, TaskStateUpdate, WalReceiverConf};
use crate::context::{DownloadBehavior, RequestContext};
use crate::metrics::{
    WALRECEIVER_ACTIVE_MANAGERS, WALRECEIVER_BROKER_UPDATES, WALRECEIVER_CANDIDATES_ADDED,
    WALRECEIVER_CANDIDATES_REMOVED, WALRECEIVER_SWITCHES,
};
use crate::task_mgr::TaskKind;
use crate::tenant::{Timeline, debug_assert_current_span_has_tenant_and_timeline_id};

/// Marker error: the manager loop was cancelled (or the timeline reached a terminal state).
pub(crate) struct Cancelled;

/// Attempts to subscribe for timeline updates, pushed by safekeepers into the broker.
/// Based on the updates, decides whether to start, keep or stop a WAL receiver task.
/// If storage broker subscription is cancelled, exits.
///
/// # Cancel-Safety
///
/// Not cancellation-safe. Use `cancel` token to request cancellation.
pub(super) async fn connection_manager_loop_step(
    broker_client: &mut BrokerClientChannel,
    connection_manager_state: &mut ConnectionManagerState,
    ctx: &RequestContext,
    cancel: &CancellationToken,
    manager_status: &std::sync::RwLock<Option<ConnectionManagerStatus>>,
) -> Result<(), Cancelled> {
    // Wait for the timeline to become active before doing any work;
    // bail out on cancellation or a terminal timeline state.
    match tokio::select! {
        _ = cancel.cancelled() => { return Err(Cancelled); },
        st = connection_manager_state.timeline.wait_to_become_active(ctx) => { st }
    } {
        Ok(()) => {}
        Err(new_state) => {
            debug!(
                ?new_state,
                "state changed, stopping wal connection manager loop"
            );
            return Err(Cancelled);
        }
    }

    WALRECEIVER_ACTIVE_MANAGERS.inc();
    scopeguard::defer! {
        WALRECEIVER_ACTIVE_MANAGERS.dec();
    }

    let id = TenantTimelineId {
        tenant_id: connection_manager_state.timeline.tenant_shard_id.tenant_id,
        timeline_id: connection_manager_state.timeline.timeline_id,
    };

    let mut timeline_state_updates = connection_manager_state
        .timeline
        .subscribe_for_state_updates();

    let mut wait_lsn_status = connection_manager_state
        .timeline
        .subscribe_for_wait_lsn_updates();

    // TODO: create a separate config option for discovery request interval
    let discovery_request_interval = connection_manager_state.conf.lagging_wal_timeout;
    let mut last_discovery_ts: Option<std::time::Instant> = None;

    // Subscribe to the broker updates. Stream shares underlying TCP connection
    // with other streams on this client (other connection managers). When
    // object goes out of scope, stream finishes in drop() automatically.
    let mut broker_subscription = subscribe_for_timeline_updates(broker_client, id, cancel).await?;
    let mut broker_reset_interval = tokio::time::interval(tokio::time::Duration::from_secs(30));
    debug!("Subscribed for broker timeline updates");

    loop {
        let time_until_next_retry = connection_manager_state.time_until_next_retry();
        let any_activity = connection_manager_state.wal_connection.is_some()
            || !connection_manager_state.wal_stream_candidates.is_empty();

        // These things are happening concurrently:
        //
        // - cancellation request
        // - keep receiving WAL on the current connection
        //   - if the shared state says we need to change connection, disconnect and return
        //   - this runs in a separate task and we receive updates via a watch channel
        // - change connection if the rules decide so, or if the current connection dies
        // - receive updates from broker
        //   - this might change the current desired connection
        // - timeline state changes to something that does not allow walreceiver to run concurrently
        // - if there's no connection and no candidates, try to send a discovery request

        // NB: make sure each of the select expressions are cancellation-safe
        // (no need for arms to be cancellation-safe).
        tokio::select! {
            _ = cancel.cancelled() => { return Err(Cancelled); }

            // Progress/end events from the currently running WAL receiver task, if any.
            Some(wal_connection_update) = async {
                match connection_manager_state.wal_connection.as_mut() {
                    Some(wal_connection) => Some(wal_connection.connection_task.next_task_event().await),
                    None => None,
                }
            } => {
                let wal_connection = connection_manager_state.wal_connection.as_mut()
                    .expect("Should have a connection, as checked by the corresponding select! guard");
                match wal_connection_update {
                    TaskEvent::Update(TaskStateUpdate::Started) => {},
                    TaskEvent::Update(TaskStateUpdate::Progress(new_status)) => {
                        if new_status.has_processed_wal {
                            // We have advanced last_record_lsn by processing the WAL received
                            // from this safekeeper. This is good enough to clean unsuccessful
                            // retries history and allow reconnecting to this safekeeper without
                            // sleeping for a long time.
                            connection_manager_state.wal_connection_retries.remove(&wal_connection.sk_id);
                        }
                        wal_connection.status = new_status;
                    }
                    TaskEvent::End(walreceiver_task_result) => {
                        match walreceiver_task_result {
                            Ok(()) => debug!("WAL receiving task finished"),
                            Err(e) => error!("wal receiver task finished with an error: {e:?}"),
                        }
                        connection_manager_state.drop_old_connection(false).await;
                    },
                }
            },

            // Got a new update from the broker
            broker_update = broker_subscription.message() /* TODO: review cancellation-safety */ => {
                match broker_update {
                    Ok(Some(broker_update)) => {
                        broker_reset_interval.reset();
                        connection_manager_state.register_timeline_update(broker_update);
                    },
                    Err(status) => {
                        match status.code() {
                            Code::Unknown if status.message().contains("stream closed because of a broken pipe") || status.message().contains("connection reset") || status.message().contains("error reading a body from connection") => {
                                // tonic's error handling doesn't provide a clear code for disconnections: we get
                                // "h2 protocol error: error reading a body from connection: stream closed because of a broken pipe"
                                // => https://github.com/neondatabase/neon/issues/9562
                                info!("broker disconnected: {status}");
                            },
                            _ => {
                                warn!("broker subscription failed: {status}");
                            }
                        }
                        return Ok(());
                    }
                    Ok(None) => {
                        error!("broker subscription stream ended");  // can't happen
                        return Ok(());
                    }
                }
            },

            // If we've not received any updates from the broker in a while, are waiting for WAL
            // and have no safekeeper connection or connection candidates, then it might be that
            // the broker subscription is wedged. Drop the current subscription and re-subscribe
            // with the goal of unblocking it.
            _ = broker_reset_interval.tick() => {
                let awaiting_lsn = wait_lsn_status.borrow().is_some();
                let no_candidates = connection_manager_state.wal_stream_candidates.is_empty();
                let no_connection = connection_manager_state.wal_connection.is_none();

                if awaiting_lsn && no_candidates && no_connection {
                    tracing::info!("No broker updates received for a while, but waiting for WAL. Re-setting stream ...");
                    broker_subscription = subscribe_for_timeline_updates(broker_client, id, cancel).await?;
                }
            },

            // Watch for the timeline leaving the Active state.
            new_event = async {
                // Reminder: this match arm needs to be cancellation-safe.
                loop {
                    if connection_manager_state.timeline.current_state() == TimelineState::Loading {
                        warn!("wal connection manager should only be launched after timeline has become active");
                    }
                    match timeline_state_updates.changed().await {
                        Ok(()) => {
                            let new_state = connection_manager_state.timeline.current_state();
                            match new_state {
                                // we're already active as walreceiver, no need to reactivate
                                TimelineState::Active => continue,
                                TimelineState::Broken { .. } | TimelineState::Stopping => {
                                    debug!("timeline entered terminal state {new_state:?}, stopping wal connection manager loop");
                                    return ControlFlow::Break(());
                                }
                                TimelineState::Loading => {
                                    warn!("timeline transitioned back to Loading state, that should not happen");
                                    return ControlFlow::Continue(());
                                }
                            }
                        }
                        Err(_sender_dropped_error) => return ControlFlow::Break(()),
                    }
                }
            } => match new_event {
                ControlFlow::Continue(()) => {
                    return Ok(());
                }
                ControlFlow::Break(()) => {
                    debug!("Timeline is no longer active, stopping wal connection manager loop");
                    return Err(Cancelled);
                }
            },

            // Wake up when the retry backoff for some candidate has elapsed.
            Some(()) = async {
                match time_until_next_retry {
                    Some(sleep_time) => {
                        tokio::time::sleep(sleep_time).await;
                        Some(())
                    },
                    None => {
                        debug!("No candidates to retry, waiting indefinitely for the broker events");
                        None
                    }
                }
            } => debug!("Waking up for the next retry after waiting for {time_until_next_retry:?}"),

            Some(()) = async {
                // Reminder: this match arm needs to be cancellation-safe.
                // Calculating time needed to wait until sending the next discovery request.
                // Current implementation is conservative and sends discovery requests only when there are no candidates.

                if any_activity {
                    // No need to send discovery requests if there is an active connection or candidates.
                    return None;
                }

                // Waiting for an active wait_lsn request.
                while wait_lsn_status.borrow().is_none() {
                    if wait_lsn_status.changed().await.is_err() {
                        // wait_lsn_status channel was closed, exiting
                        warn!("wait_lsn_status channel was closed in connection_manager_loop_step");
                        return None;
                    }
                }

                // All preconditions met, preparing to send a discovery request.
                let now = std::time::Instant::now();
                let next_discovery_ts = last_discovery_ts
                    .map(|ts| ts + discovery_request_interval)
                    .unwrap_or_else(|| now);

                if next_discovery_ts > now {
                    // Prevent sending discovery requests too frequently.
                    tokio::time::sleep(next_discovery_ts - now).await;
                }

                let tenant_timeline_id = Some(ProtoTenantTimelineId {
                    tenant_id: id.tenant_id.as_ref().to_owned(),
                    timeline_id: id.timeline_id.as_ref().to_owned(),
                });
                let request = SafekeeperDiscoveryRequest { tenant_timeline_id };
                let msg = TypedMessage {
                    r#type: MessageType::SafekeeperDiscoveryRequest as i32,
                    safekeeper_timeline_info: None,
                    safekeeper_discovery_request: Some(request),
                    safekeeper_discovery_response: None,
                };

                last_discovery_ts = Some(std::time::Instant::now());
                info!("No active connection and no candidates, sending discovery request to the broker");

                // Cancellation safety: we want to send a message to the broker, but publish_one()
                // function can get cancelled by the other select! arm. This is absolutely fine, because
                // we just want to receive broker updates and discovery is not important if we already
                // receive updates.
                //
                // It is possible that `last_discovery_ts` will be updated, but the message will not be sent.
                // This is totally fine because of the reason above.

                // This is a fire-and-forget request, we don't care about the response
                let _ = broker_client.publish_one(msg).await;
                debug!("Discovery request sent to the broker");
                None
            } => {}
        }

        if let Some(new_candidate) = connection_manager_state.next_connection_candidate() {
            info!("Switching to new connection candidate: {new_candidate:?}");
            connection_manager_state
                .change_connection(new_candidate, ctx)
                .await
        }

        *manager_status.write().unwrap() = Some(connection_manager_state.manager_status());
    }
}

/// Endlessly try to subscribe for broker updates for a given timeline.
async fn subscribe_for_timeline_updates(
    broker_client: &mut BrokerClientChannel,
    id: TenantTimelineId,
    cancel: &CancellationToken,
) -> Result<Streaming<TypedMessage>, Cancelled> {
    let mut attempt = 0;
    loop {
        exponential_backoff(
            attempt,
            DEFAULT_BASE_BACKOFF_SECONDS,
            DEFAULT_MAX_BACKOFF_SECONDS,
            cancel,
        )
        .await;
        attempt += 1;

        // subscribe to the specific timeline
        let request = SubscribeByFilterRequest {
            types: vec![
                TypeSubscription {
                    r#type: MessageType::SafekeeperTimelineInfo as i32,
                },
                TypeSubscription {
                    r#type: MessageType::SafekeeperDiscoveryResponse as i32,
                },
            ],
            tenant_timeline_id: Some(FilterTenantTimelineId {
                enabled: true,
                tenant_timeline_id: Some(ProtoTenantTimelineId {
                    tenant_id: id.tenant_id.as_ref().to_owned(),
                    timeline_id: id.timeline_id.as_ref().to_owned(),
                }),
            }),
        };

        match {
            tokio::select! {
                r = broker_client.subscribe_by_filter(request) => { r }
                _ = cancel.cancelled() => { return Err(Cancelled); }
            }
        } {
            Ok(resp) => {
                return Ok(resp.into_inner());
            }
            Err(e) => {
                // Safekeeper nodes can stop pushing timeline updates to the broker, when no new writes happen and
                // entire WAL is streamed. Keep this noticeable with logging, but do not warn/error.
                info!(
                    "Attempt #{attempt}, failed to subscribe for timeline {id} updates in broker: {e:#}"
                );
                continue;
            }
        }
    }
}

// Backoff parameters for re-connection attempts to the same safekeeper.
const WALCONNECTION_RETRY_MIN_BACKOFF_SECONDS: f64 = 0.1;
const WALCONNECTION_RETRY_MAX_BACKOFF_SECONDS: f64 = 15.0;
const WALCONNECTION_RETRY_BACKOFF_MULTIPLIER: f64 = 1.5;

/// All data that's needed to run endless broker loop and keep the WAL streaming connection alive, if possible.
pub(super) struct ConnectionManagerState {
    id: TenantTimelineId,
    /// Use pageserver data about the timeline to filter out some of the safekeepers.
    timeline: Arc<Timeline>,
    /// Child token of [`super::WalReceiver::cancel`], inherited to all tasks we spawn.
    cancel: CancellationToken,
    conf: WalReceiverConf,
    /// Current connection to safekeeper for WAL streaming.
    wal_connection: Option<WalConnection>,
    /// Info about retries and unsuccessful attempts to connect to safekeepers.
    wal_connection_retries: HashMap<NodeId, RetryInfo>,
    /// Data about all timelines, available for connection, fetched from storage broker, grouped by their corresponding safekeeper node id.
    wal_stream_candidates: HashMap<NodeId, BrokerSkTimeline>,
}

/// An information about connection manager's current connection and connection candidates.
#[derive(Debug, Clone)]
pub struct ConnectionManagerStatus {
    existing_connection: Option<WalConnectionStatus>,
    wal_stream_candidates: HashMap<NodeId, BrokerSkTimeline>,
}

impl ConnectionManagerStatus {
    /// Generates a string, describing current connection status in a form, suitable for logging.
    pub fn to_human_readable_string(&self) -> String {
        let mut resulting_string = String::new();
        match &self.existing_connection {
            Some(connection) => {
                if connection.has_processed_wal {
                    resulting_string.push_str(&format!(
                        " (update {}): streaming WAL from node {}, ",
                        connection.latest_wal_update.format("%Y-%m-%d %H:%M:%S"),
                        connection.node,
                    ));
                    match (connection.streaming_lsn, connection.commit_lsn) {
                        (None, None) => resulting_string.push_str("no streaming data"),
                        (None, Some(commit_lsn)) => {
                            resulting_string.push_str(&format!("commit Lsn: {commit_lsn}"))
                        }
                        (Some(streaming_lsn), None) => {
                            resulting_string.push_str(&format!("streaming Lsn: {streaming_lsn}"))
                        }
                        (Some(streaming_lsn), Some(commit_lsn)) => resulting_string.push_str(
                            &format!("commit|streaming Lsn: {commit_lsn}|{streaming_lsn}"),
                        ),
                    }
                } else if connection.is_connected {
                    resulting_string.push_str(&format!(
                        " (update {}): connecting to node {}",
                        connection
                            .latest_connection_update
                            .format("%Y-%m-%d %H:%M:%S"),
                        connection.node,
                    ));
                } else {
                    resulting_string.push_str(&format!(
                        " (update {}): initializing node {} connection",
                        connection
                            .latest_connection_update
                            .format("%Y-%m-%d %H:%M:%S"),
                        connection.node,
                    ));
                }
            }
            None => resulting_string.push_str(": disconnected"),
        }

        resulting_string.push_str(", safekeeper candidates (id|update_time|commit_lsn): [");
        let mut candidates = self.wal_stream_candidates.iter().peekable();
        while let Some((node_id, candidate_info)) = candidates.next() {
            resulting_string.push_str(&format!(
                "({}|{}|{})",
                node_id,
                candidate_info.latest_update.format("%H:%M:%S"),
                Lsn(candidate_info.timeline.commit_lsn)
            ));
            if candidates.peek().is_some() {
                resulting_string.push_str(", ");
            }
        }
        resulting_string.push(']');

        resulting_string
    }
}

/// Current connection data.
#[derive(Debug)]
struct WalConnection {
    /// Time when the connection was initiated.
    started_at: NaiveDateTime,
    /// Current safekeeper pageserver is connected to for WAL streaming.
    sk_id: NodeId,
    /// Availability zone of the safekeeper.
    availability_zone: Option<String>,
    /// Status of the connection.
    status: WalConnectionStatus,
    /// WAL streaming task handle.
    connection_task: TaskHandle<WalConnectionStatus>,
    /// Have we discovered that other safekeeper has more recent WAL than we do?
    discovered_new_wal: Option<NewCommittedWAL>,
}

/// Notion of a new committed WAL, which exists on other safekeeper.
#[derive(Debug, Clone, Copy)]
struct NewCommittedWAL {
    /// LSN of the new committed WAL.
    lsn: Lsn,
    /// When we discovered that the new committed WAL exists on other safekeeper.
    discovered_at: NaiveDateTime,
}

/// Per-safekeeper exponential-backoff bookkeeping for reconnect attempts.
#[derive(Debug, Clone, Copy)]
struct RetryInfo {
    next_retry_at: Option<NaiveDateTime>,
    retry_duration_seconds: f64,
}

/// Data about the timeline to connect to, received from the broker.
#[derive(Debug, Clone)]
struct BrokerSkTimeline {
    timeline: SafekeeperDiscoveryResponse,
    /// Time at which the data was fetched from the broker last time, to track the stale data.
    latest_update: NaiveDateTime,
}

impl ConnectionManagerState {
    /// Creates a fresh state with no connection, no candidates, and no retry history.
    pub(super) fn new(
        timeline: Arc<Timeline>,
        conf: WalReceiverConf,
        cancel: CancellationToken,
    ) -> Self {
        let id = TenantTimelineId {
            tenant_id: timeline.tenant_shard_id.tenant_id,
            timeline_id: timeline.timeline_id,
        };
        Self {
            id,
            timeline,
            cancel,
            conf,
            wal_connection: None,
            wal_stream_candidates: HashMap::new(),
            wal_connection_retries: HashMap::new(),
        }
    }

    /// Spawns a task tied to this manager's cancellation token, returning its handle.
    fn spawn<Fut>(
        &self,
        task: impl FnOnce(
            tokio::sync::watch::Sender<TaskStateUpdate<WalConnectionStatus>>,
            CancellationToken,
        ) -> Fut
        + Send
        + 'static,
    ) -> TaskHandle<WalConnectionStatus>
    where
        Fut: std::future::Future<Output = anyhow::Result<()>> + Send,
    {
        // TODO: get rid of TaskHandle
        super::TaskHandle::spawn(&self.cancel, task)
    }

    /// Shuts down the current connection (if any) and immediately starts another one with the given connection string.
    async fn change_connection(&mut self, new_sk: NewWalConnectionCandidate, ctx: &RequestContext) {
        // Count the switch, labelled by the reason we decided to move.
        WALRECEIVER_SWITCHES
            .with_label_values(&[new_sk.reason.name()])
            .inc();

        // Tear down the old connection first (waits for the old task to shut down).
        self.drop_old_connection(true).await;

        // Copy everything the spawned task needs, since the closure is 'static + move.
        let node_id = new_sk.safekeeper_id;
        let connect_timeout = self.conf.wal_connect_timeout;
        let ingest_batch_size = self.conf.ingest_batch_size;
        let protocol = self.conf.protocol;
        let validate_wal_contiguity = self.conf.validate_wal_contiguity;
        let timeline = Arc::clone(&self.timeline);
        let ctx = ctx.detached_child(
            TaskKind::WalReceiverConnectionHandler,
            DownloadBehavior::Download,
        );

        let span = info_span!("connection", %node_id);
        let connection_handle = self.spawn(move |events_sender, cancellation| {
            async move {
                debug_assert_current_span_has_tenant_and_timeline_id();

                let res = super::walreceiver_connection::handle_walreceiver_connection(
                    timeline,
                    protocol,
                    new_sk.wal_source_connconf,
                    events_sender,
                    cancellation.clone(),
                    connect_timeout,
                    ctx,
                    node_id,
                    ingest_batch_size,
                    validate_wal_contiguity,
                )
                .await;

                // Map "expected" terminations to Ok so the task framework does not
                // log them as failures; only genuine errors propagate as Err.
                match res {
                    Ok(()) => Ok(()),
                    Err(e) => {
                        match e {
                            WalReceiverError::SuccessfulCompletion(msg) => {
                                info!("walreceiver connection handling ended with success: {msg}");
                                Ok(())
                            }
                            WalReceiverError::ExpectedSafekeeperError(e) => {
                                info!("walreceiver connection handling ended: {e}");
                                Ok(())
                            }
                            WalReceiverError::ClosedGate => {
                                info!(
                                    "walreceiver connection handling ended because of closed gate"
                                );
                                Ok(())
                            }
                            WalReceiverError::Cancelled => Ok(()),
                            WalReceiverError::Other(e) => {
                                // give out an error to have task_mgr give it a really verbose logging
                                if cancellation.is_cancelled() {
                                    // Ideally we would learn about this via some path other than Other, but
                                    // that requires refactoring all the intermediate layers of ingest code
                                    // that only emit anyhow::Error
                                    Ok(())
                                } else {
                                    Err(e).context("walreceiver connection handling failure")
                                }
                            }
                        }
                    }
                }
            }
            .instrument(span)
        });

        // Record the new connection; status starts as "not yet connected" and is
        // updated by the spawned task through the events channel.
        let now = Utc::now().naive_utc();
        self.wal_connection = Some(WalConnection {
            started_at: now,
            sk_id: new_sk.safekeeper_id,
            availability_zone: new_sk.availability_zone,
            status: WalConnectionStatus {
                is_connected: false,
                has_processed_wal: false,
                latest_connection_update: now,
                latest_wal_update: now,
                streaming_lsn: None,
                commit_lsn: None,
                node: node_id,
            },
            connection_task: connection_handle,
            discovered_new_wal: None,
        });
    }

    /// Drops the current connection (if any) and updates retry timeout for the next
    /// connection attempt to the same safekeeper.
    ///
    /// # Cancel-Safety
    ///
    /// Not cancellation-safe.
    async fn drop_old_connection(&mut self, needs_shutdown: bool) {
        let wal_connection = match self.wal_connection.take() {
            Some(wal_connection) => wal_connection,
            None => return,
        };

        if needs_shutdown {
            wal_connection
                .connection_task
                .shutdown()
                // This here is why this function isn't cancellation-safe.
                // If we got cancelled here, then self.wal_connection is already None and we lose track of the task.
                // Even if our caller diligently calls Self::shutdown(), it will find a self.wal_connection=None
                // and thus be ineffective.
                .await;
        }

        let retry = self
            .wal_connection_retries
            .entry(wal_connection.sk_id)
            .or_insert(RetryInfo {
                next_retry_at: None,
                retry_duration_seconds: WALCONNECTION_RETRY_MIN_BACKOFF_SECONDS,
            });

        let now = Utc::now().naive_utc();

        // Schedule the next retry attempt. We want to have exponential backoff for connection attempts,
        // and we add backoff to the time when we started the connection attempt. If the connection
        // was active for a long time, then next_retry_at will be in the past.
        retry.next_retry_at =
            wal_connection
                .started_at
                .checked_add_signed(chrono::Duration::milliseconds(
                    (retry.retry_duration_seconds * 1000.0) as i64,
                ));

        if let Some(next) = &retry.next_retry_at {
            if next > &now {
                info!(
                    "Next connection retry to {:?} is at {}",
                    wal_connection.sk_id, next
                );
            }
        }

        // Grow the backoff for the next attempt, clamped to [min, max].
        let next_retry_duration =
            retry.retry_duration_seconds * WALCONNECTION_RETRY_BACKOFF_MULTIPLIER;
        // Clamp the next retry duration to the maximum allowed.
        let next_retry_duration = next_retry_duration.min(WALCONNECTION_RETRY_MAX_BACKOFF_SECONDS);
        // Clamp the next retry duration to the minimum allowed.
        let next_retry_duration = next_retry_duration.max(WALCONNECTION_RETRY_MIN_BACKOFF_SECONDS);
        retry.retry_duration_seconds = next_retry_duration;
    }

    /// Returns time needed to wait to have a new candidate for WAL streaming.
    fn time_until_next_retry(&self) -> Option<Duration> {
        let now = Utc::now().naive_utc();

        // Earliest future retry deadline across all safekeepers; None if no
        // retry is pending in the future.
        let next_retry_at = self
            .wal_connection_retries
            .values()
            .filter_map(|retry| retry.next_retry_at)
            .filter(|next_retry_at| next_retry_at > &now)
            .min()?;

        (next_retry_at - now).to_std().ok()
    }

    /// Adds another broker timeline into the state, if its more recent than the one already added there for the same key.
    fn register_timeline_update(&mut self, typed_msg: TypedMessage) {
        let mut is_discovery = false;
        // Normalize both broker message kinds into a SafekeeperDiscoveryResponse.
        let timeline_update = match typed_msg.r#type() {
            MessageType::SafekeeperTimelineInfo => {
                let info = match typed_msg.safekeeper_timeline_info {
                    Some(info) => info,
                    None => {
                        warn!("bad proto message from broker: no safekeeper_timeline_info");
                        return;
                    }
                };
                SafekeeperDiscoveryResponse {
                    safekeeper_id: info.safekeeper_id,
                    tenant_timeline_id: info.tenant_timeline_id,
                    commit_lsn: info.commit_lsn,
                    safekeeper_connstr: info.safekeeper_connstr,
                    availability_zone: info.availability_zone,
                    standby_horizon: info.standby_horizon,
                }
            }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/walreceiver/walreceiver_connection.rs
pageserver/src/tenant/timeline/walreceiver/walreceiver_connection.rs
//! Actual Postgres connection handler to stream WAL to the server. use std::error::Error; use std::pin::pin; use std::str::FromStr; use std::sync::Arc; use std::time::{Duration, SystemTime}; use anyhow::{Context, anyhow}; use bytes::BytesMut; use chrono::{NaiveDateTime, Utc}; use fail::fail_point; use futures::StreamExt; use postgres_backend::is_expected_io_error; use postgres_connection::PgConnectionConfig; use postgres_ffi::WAL_SEGMENT_SIZE; use postgres_ffi::v14::xlog_utils::normalize_lsn; use postgres_ffi::waldecoder::WalDecodeError; use postgres_protocol::message::backend::ReplicationMessage; use postgres_types::PgLsn; use tokio::sync::watch; use tokio::{select, time}; use tokio_postgres::error::SqlState; use tokio_postgres::replication::ReplicationStream; use tokio_postgres::{Client, SimpleQueryMessage, SimpleQueryRow}; use tokio_util::sync::CancellationToken; use tracing::{Instrument, debug, error, info, trace, warn}; use utils::critical_timeline; use utils::id::NodeId; use utils::lsn::Lsn; use utils::pageserver_feedback::PageserverFeedback; use utils::postgres_client::PostgresClientProtocol; use utils::sync::gate::GateError; use wal_decoder::models::{FlushUncommittedRecords, InterpretedWalRecords}; use wal_decoder::wire_format::FromWireFormat; use super::TaskStateUpdate; use crate::context::RequestContext; use crate::metrics::{LIVE_CONNECTIONS, WAL_INGEST, WALRECEIVER_STARTED_CONNECTIONS}; use crate::pgdatadir_mapping::DatadirModification; use crate::task_mgr::{TaskKind, WALRECEIVER_RUNTIME}; use crate::tenant::{ Timeline, WalReceiverInfo, debug_assert_current_span_has_tenant_and_timeline_id, }; use crate::walingest::WalIngest; /// Status of the connection. #[derive(Debug, Clone, Copy)] pub(super) struct WalConnectionStatus { /// If we were able to initiate a postgres connection, this means that safekeeper process is at least running. 
pub is_connected: bool, /// Defines a healthy connection as one on which pageserver received WAL from safekeeper /// and is able to process it in walingest without errors. pub has_processed_wal: bool, /// Connection establishment time or the timestamp of a latest connection message received. pub latest_connection_update: NaiveDateTime, /// Time of the latest WAL message received. pub latest_wal_update: NaiveDateTime, /// Latest WAL update contained WAL up to this LSN. Next WAL message with start from that LSN. pub streaming_lsn: Option<Lsn>, /// Latest commit_lsn received from the safekeeper. Can be zero if no message has been received yet. pub commit_lsn: Option<Lsn>, /// The node it is connected to pub node: NodeId, } pub(super) enum WalReceiverError { /// An error of a type that does not indicate an issue, e.g. a connection closing ExpectedSafekeeperError(tokio_postgres::Error), /// An "error" message that carries a SUCCESSFUL_COMPLETION status code. Carries /// the message part of the original postgres error SuccessfulCompletion(String), /// Generic error Other(anyhow::Error), ClosedGate, Cancelled, } impl From<tokio_postgres::Error> for WalReceiverError { fn from(err: tokio_postgres::Error) -> Self { if let Some(dberror) = err.as_db_error().filter(|db_error| { db_error.code() == &SqlState::SUCCESSFUL_COMPLETION && db_error.message().contains("ending streaming") }) { // Strip the outer DbError, which carries a misleading "error" severity Self::SuccessfulCompletion(dberror.message().to_string()) } else if err.is_closed() || err .source() .and_then(|source| source.downcast_ref::<std::io::Error>()) .map(is_expected_io_error) .unwrap_or(false) { Self::ExpectedSafekeeperError(err) } else { Self::Other(anyhow::Error::new(err)) } } } impl From<anyhow::Error> for WalReceiverError { fn from(err: anyhow::Error) -> Self { Self::Other(err) } } impl From<WalDecodeError> for WalReceiverError { fn from(err: WalDecodeError) -> Self { Self::Other(anyhow::Error::new(err)) } } 
/// Open a connection to the given safekeeper and receive WAL, sending back progress /// messages as we go. #[allow(clippy::too_many_arguments)] pub(super) async fn handle_walreceiver_connection( timeline: Arc<Timeline>, protocol: PostgresClientProtocol, wal_source_connconf: PgConnectionConfig, events_sender: watch::Sender<TaskStateUpdate<WalConnectionStatus>>, cancellation: CancellationToken, connect_timeout: Duration, ctx: RequestContext, safekeeper_node: NodeId, ingest_batch_size: u64, validate_wal_contiguity: bool, ) -> Result<(), WalReceiverError> { debug_assert_current_span_has_tenant_and_timeline_id(); // prevent timeline shutdown from finishing until we have exited let _guard = timeline.gate.enter().map_err(|e| match e { GateError::GateClosed => WalReceiverError::ClosedGate, })?; // This function spawns a side-car task (WalReceiverConnectionPoller). // Get its gate guard now as well. let poller_guard = timeline.gate.enter().map_err(|e| match e { GateError::GateClosed => WalReceiverError::ClosedGate, })?; WALRECEIVER_STARTED_CONNECTIONS.inc(); // Connect to the database in replication mode. info!("connecting to {wal_source_connconf:?}"); let (replication_client, connection) = { let mut config = wal_source_connconf.to_tokio_postgres_config(); config.application_name(format!("pageserver-{}", timeline.conf.id.0).as_str()); config.replication_mode(tokio_postgres::config::ReplicationMode::Physical); match time::timeout(connect_timeout, config.connect(tokio_postgres::NoTls)).await { Ok(client_and_conn) => client_and_conn?, Err(_elapsed) => { // Timing out to connect to a safekeeper node could happen long time, due to // many reasons that pageserver cannot control. // Do not produce an error, but make it visible, that timeouts happen by logging the `event. 
info!( "Timed out while waiting {connect_timeout:?} for walreceiver connection to open" ); return Ok(()); } } }; debug!("connected!"); let mut connection_status = WalConnectionStatus { is_connected: true, has_processed_wal: false, latest_connection_update: Utc::now().naive_utc(), latest_wal_update: Utc::now().naive_utc(), streaming_lsn: None, commit_lsn: None, node: safekeeper_node, }; if let Err(e) = events_sender.send(TaskStateUpdate::Progress(connection_status)) { warn!( "Wal connection event listener dropped right after connection init, aborting the connection: {e}" ); return Ok(()); } // The connection object performs the actual communication with the database, // so spawn it off to run on its own. It shouldn't outlive this function, but, // due to lack of async drop, we can't enforce that. However, we ensure that // 1. it is sensitive to `cancellation` and // 2. holds the Timeline gate open so that after timeline shutdown, // we know this task is gone. let _connection_ctx = ctx.detached_child( TaskKind::WalReceiverConnectionPoller, ctx.download_behavior(), ); let connection_cancellation = cancellation.clone(); WALRECEIVER_RUNTIME.spawn( async move { debug_assert_current_span_has_tenant_and_timeline_id(); select! { connection_result = connection => match connection_result { Ok(()) => debug!("Walreceiver db connection closed"), Err(connection_error) => { match WalReceiverError::from(connection_error) { WalReceiverError::ExpectedSafekeeperError(_) => { // silence, because most likely we've already exited the outer call // with a similar error. 
}, WalReceiverError::SuccessfulCompletion(_) => {} WalReceiverError::Cancelled => { debug!("Connection cancelled") } WalReceiverError::ClosedGate => { // doesn't happen at runtime } WalReceiverError::Other(err) => { warn!("Connection aborted: {err:#}") } } } }, _ = connection_cancellation.cancelled() => debug!("Connection cancelled"), } drop(poller_guard); } // Enrich the log lines emitted by this closure with meaningful context. // TODO: technically, this task outlives the surrounding function, so, the // spans won't be properly nested. .instrument(tracing::info_span!("poller")), ); let _guard = LIVE_CONNECTIONS .with_label_values(&["wal_receiver"]) .guard(); let identify = identify_system(&replication_client).await?; info!("{identify:?}"); let end_of_wal = Lsn::from(u64::from(identify.xlogpos)); let mut caught_up = false; connection_status.latest_connection_update = Utc::now().naive_utc(); connection_status.latest_wal_update = Utc::now().naive_utc(); connection_status.commit_lsn = Some(end_of_wal); if let Err(e) = events_sender.send(TaskStateUpdate::Progress(connection_status)) { warn!( "Wal connection event listener dropped after IDENTIFY_SYSTEM, aborting the connection: {e}" ); return Ok(()); } // // Start streaming the WAL, from where we left off previously. // // If we had previously received WAL up to some point in the middle of a WAL record, we // better start from the end of last full WAL record, not in the middle of one. let mut last_rec_lsn = timeline.get_last_record_lsn(); let mut startpoint = last_rec_lsn; if startpoint == Lsn(0) { return Err(WalReceiverError::Other(anyhow!("No previous WAL position"))); } // There might be some padding after the last full record, skip it. startpoint += startpoint.calc_padding(8u32); // If the starting point is at a WAL page boundary, skip past the page header. We don't need the page headers // for anything, and in some corner cases, the compute node might have never generated the WAL for page headers //. 
That happens if you create a branch at page boundary: the start point of the branch is at the page boundary, // but when the compute node first starts on the branch, we normalize the first REDO position to just after the page // header (see generate_pg_control()), so the WAL for the page header is never streamed from the compute node // to the safekeepers. startpoint = normalize_lsn(startpoint, WAL_SEGMENT_SIZE); info!( "last_record_lsn {last_rec_lsn} starting replication from {startpoint}, safekeeper is at {end_of_wal}..." ); let query = format!("START_REPLICATION PHYSICAL {startpoint}"); let copy_stream = replication_client.copy_both_simple(&query).await?; let mut physical_stream = pin!(ReplicationStream::new(copy_stream)); let mut walingest = WalIngest::new(timeline.as_ref(), startpoint, &ctx) .await .map_err(|e| match e.kind { crate::walingest::WalIngestErrorKind::Cancelled => WalReceiverError::Cancelled, _ => WalReceiverError::Other(e.into()), })?; let (format, compression) = match protocol { PostgresClientProtocol::Interpreted { format, compression, } => (format, compression), PostgresClientProtocol::Vanilla => { return Err(WalReceiverError::Other(anyhow!( "Vanilla WAL receiver protocol is no longer supported for ingest" ))); } }; let mut expected_wal_start = startpoint; while let Some(replication_message) = { select! { biased; _ = cancellation.cancelled() => { debug!("walreceiver interrupted"); None } replication_message = physical_stream.next() => replication_message, } } { let replication_message = replication_message?; let now = Utc::now().naive_utc(); let last_rec_lsn_before_msg = last_rec_lsn; // Update the connection status before processing the message. If the message processing // fails (e.g. in walingest), we still want to know latests LSNs from the safekeeper. 
match &replication_message { ReplicationMessage::PrimaryKeepAlive(keepalive) => { connection_status.latest_connection_update = now; connection_status.commit_lsn = Some(Lsn::from(keepalive.wal_end())); } ReplicationMessage::RawInterpretedWalRecords(raw) => { connection_status.latest_connection_update = now; if !raw.data().is_empty() { connection_status.latest_wal_update = now; } connection_status.commit_lsn = Some(Lsn::from(raw.commit_lsn())); connection_status.streaming_lsn = Some(Lsn::from(raw.streaming_lsn())); } &_ => {} }; if let Err(e) = events_sender.send(TaskStateUpdate::Progress(connection_status)) { warn!("Wal connection event listener dropped, aborting the connection: {e}"); return Ok(()); } let status_update = match replication_message { ReplicationMessage::RawInterpretedWalRecords(raw) => { WAL_INGEST.bytes_received.inc_by(raw.data().len() as u64); let mut uncommitted_records = 0; // This is the end LSN of the raw WAL from which the records // were interpreted. let streaming_lsn = Lsn::from(raw.streaming_lsn()); let batch = InterpretedWalRecords::from_wire(raw.data(), format, compression) .await .with_context(|| { anyhow::anyhow!( "Failed to deserialize interpreted records ending at LSN {streaming_lsn}" ) })?; // Guard against WAL gaps. If the start LSN of the PG WAL section // from which the interpreted records were extracted, doesn't match // the end of the previous batch (or the starting point for the first batch), // then kill this WAL receiver connection and start a new one. 
if validate_wal_contiguity { if let Some(raw_wal_start_lsn) = batch.raw_wal_start_lsn { match raw_wal_start_lsn.cmp(&expected_wal_start) { std::cmp::Ordering::Greater => { let msg = format!( "Gap in streamed WAL: [{expected_wal_start}, {raw_wal_start_lsn}" ); critical_timeline!( timeline.tenant_shard_id, timeline.timeline_id, Some(&timeline.corruption_detected), "{msg}" ); return Err(WalReceiverError::Other(anyhow!(msg))); } std::cmp::Ordering::Less => { // Other shards are reading WAL behind us. // This is valid, but check that we received records // that we haven't seen before. if let Some(first_rec) = batch.records.first() { if first_rec.next_record_lsn < last_rec_lsn { let msg = format!( "Received record with next_record_lsn multiple times ({} < {})", first_rec.next_record_lsn, expected_wal_start ); critical_timeline!( timeline.tenant_shard_id, timeline.timeline_id, Some(&timeline.corruption_detected), "{msg}" ); return Err(WalReceiverError::Other(anyhow!(msg))); } } } std::cmp::Ordering::Equal => {} } } } let InterpretedWalRecords { records, next_record_lsn, raw_wal_start_lsn: _, } = batch; tracing::debug!( "Received WAL up to {} with next_record_lsn={}", streaming_lsn, next_record_lsn ); // We start the modification at 0 because each interpreted record // advances it to its end LSN. 0 is just an initialization placeholder. 
let mut modification = timeline.begin_modification(Lsn(0)); async fn commit( modification: &mut DatadirModification<'_>, ctx: &RequestContext, uncommitted: &mut u64, ) -> anyhow::Result<()> { let stats = modification.stats(); modification.commit(ctx).await?; WAL_INGEST.records_committed.inc_by(*uncommitted); WAL_INGEST.inc_values_committed(&stats); *uncommitted = 0; Ok(()) } if !records.is_empty() { timeline .metrics .wal_records_received .inc_by(records.len() as u64); } for interpreted in records { if matches!(interpreted.flush_uncommitted, FlushUncommittedRecords::Yes) && uncommitted_records > 0 { commit(&mut modification, &ctx, &mut uncommitted_records).await?; } let local_next_record_lsn = interpreted.next_record_lsn; if interpreted.is_observed() { WAL_INGEST.records_observed.inc(); } walingest .ingest_record(interpreted, &mut modification, &ctx) .await .with_context(|| { format!("could not ingest record at {local_next_record_lsn}") }) .inspect_err(|err| { // TODO: we can't differentiate cancellation errors with // anyhow::Error, so just ignore it if we're cancelled. if !cancellation.is_cancelled() && !timeline.is_stopping() { critical_timeline!( timeline.tenant_shard_id, timeline.timeline_id, Some(&timeline.corruption_detected), "{err:?}" ); } })?; uncommitted_records += 1; // FIXME: this cannot be made pausable_failpoint without fixing the // failpoint library; in tests, the added amount of debugging will cause us // to timeout the tests. fail_point!("walreceiver-after-ingest"); // Commit every ingest_batch_size records. Even if we filtered out // all records, we still need to call commit to advance the LSN. if uncommitted_records >= ingest_batch_size || modification.approx_pending_bytes() > DatadirModification::MAX_PENDING_BYTES { commit(&mut modification, &ctx, &mut uncommitted_records).await?; } } // Records might have been filtered out on the safekeeper side, but we still // need to advance last record LSN on all shards. 
If we've not ingested the latest // record, then set the LSN of the modification past it. This way all shards // advance their last record LSN at the same time. let needs_last_record_lsn_advance = if next_record_lsn > modification.get_lsn() { modification.set_lsn(next_record_lsn).unwrap(); true } else { false }; if uncommitted_records > 0 || needs_last_record_lsn_advance { // Commit any uncommitted records commit(&mut modification, &ctx, &mut uncommitted_records).await?; } if !caught_up && streaming_lsn >= end_of_wal { info!("caught up at LSN {streaming_lsn}"); caught_up = true; } tracing::debug!( "Ingested WAL up to {streaming_lsn}. Last record LSN is {}", timeline.get_last_record_lsn() ); last_rec_lsn = next_record_lsn; expected_wal_start = streaming_lsn; Some(streaming_lsn) } ReplicationMessage::PrimaryKeepAlive(keepalive) => { let wal_end = keepalive.wal_end(); let timestamp = keepalive.timestamp(); let reply_requested = keepalive.reply() != 0; trace!( "received PrimaryKeepAlive(wal_end: {wal_end}, timestamp: {timestamp:?} reply: {reply_requested})" ); if reply_requested { Some(last_rec_lsn) } else { None } } _ => None, }; if !connection_status.has_processed_wal && last_rec_lsn > last_rec_lsn_before_msg { // We have successfully processed at least one WAL record. connection_status.has_processed_wal = true; if let Err(e) = events_sender.send(TaskStateUpdate::Progress(connection_status)) { warn!("Wal connection event listener dropped, aborting the connection: {e}"); return Ok(()); } } if let Some(last_lsn) = status_update { let timeline_remote_consistent_lsn = timeline .get_remote_consistent_lsn_visible() .unwrap_or(Lsn(0)); // The last LSN we processed. It is not guaranteed to survive pageserver crash. 
let last_received_lsn = last_lsn; // `disk_consistent_lsn` is the LSN at which page server guarantees local persistence of all received data let disk_consistent_lsn = timeline.get_disk_consistent_lsn(); // The last LSN that is synced to remote storage and is guaranteed to survive pageserver crash // Used by safekeepers to remove WAL preceding `remote_consistent_lsn`. let remote_consistent_lsn = timeline_remote_consistent_lsn; let ts = SystemTime::now(); // Update the status about what we just received. This is shown in the mgmt API. let last_received_wal = WalReceiverInfo { wal_source_connconf: wal_source_connconf.clone(), last_received_msg_lsn: last_lsn, last_received_msg_ts: ts .duration_since(SystemTime::UNIX_EPOCH) .expect("Received message time should be before UNIX EPOCH!") .as_micros(), }; *timeline.last_received_wal.lock().unwrap() = Some(last_received_wal); // Send the replication feedback message. // Regular standby_status_update fields are put into this message. let current_timeline_size = if timeline.tenant_shard_id.is_shard_zero() { timeline .get_current_logical_size( crate::tenant::timeline::GetLogicalSizePriority::User, &ctx, ) // FIXME: https://github.com/neondatabase/neon/issues/5963 .size_dont_care_about_accuracy() } else { // Non-zero shards send zero for logical size. The safekeeper will ignore // this number. This is because in a sharded tenant, only shard zero maintains // accurate logical size. 
0 }; let status_update = PageserverFeedback { current_timeline_size, last_received_lsn, disk_consistent_lsn, remote_consistent_lsn, replytime: ts, shard_number: timeline.tenant_shard_id.shard_number.0 as u32, corruption_detected: timeline .corruption_detected .load(std::sync::atomic::Ordering::Relaxed), }; debug!("neon_status_update {status_update:?}"); let mut data = BytesMut::new(); status_update.serialize(&mut data); physical_stream .as_mut() .zenith_status_update(data.len() as u64, &data) .await?; } } Ok(()) } /// Data returned from the postgres `IDENTIFY_SYSTEM` command /// /// See the [postgres docs] for more details. /// /// [postgres docs]: https://www.postgresql.org/docs/current/protocol-replication.html #[derive(Debug)] // As of nightly 2021-09-11, fields that are only read by the type's `Debug` impl still count as // unused. Relevant issue: https://github.com/rust-lang/rust/issues/88900 #[allow(dead_code)] struct IdentifySystem { systemid: u64, timeline: u32, xlogpos: PgLsn, dbname: Option<String>, } /// There was a problem parsing the response to /// a postgres IDENTIFY_SYSTEM command. #[derive(Debug, thiserror::Error)] #[error("IDENTIFY_SYSTEM parse error")] struct IdentifyError; /// Run the postgres `IDENTIFY_SYSTEM` command async fn identify_system(client: &Client) -> anyhow::Result<IdentifySystem> { let query_str = "IDENTIFY_SYSTEM"; let response = client.simple_query(query_str).await?; // get(N) from row, then parse it as some destination type. fn get_parse<T>(row: &SimpleQueryRow, idx: usize) -> Result<T, IdentifyError> where T: FromStr, { let val = row.get(idx).ok_or(IdentifyError)?; val.parse::<T>().or(Err(IdentifyError)) } // extract the row contents into an IdentifySystem struct. // written as a closure so I can use ? for Option here. 
if let Some(SimpleQueryMessage::Row(first_row)) = response.first() { Ok(IdentifySystem { systemid: get_parse(first_row, 0)?, timeline: get_parse(first_row, 1)?, xlogpos: get_parse(first_row, 2)?, dbname: get_parse(first_row, 3).ok(), }) } else { Err(IdentifyError.into()) } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/import_pgdata/importbucket_client.rs
pageserver/src/tenant/timeline/import_pgdata/importbucket_client.rs
use std::ops::Bound; use std::sync::Arc; use anyhow::Context; use bytes::Bytes; use postgres_ffi::{ControlFileData, PgMajorVersion}; use remote_storage::{ Download, DownloadError, DownloadKind, DownloadOpts, GenericRemoteStorage, Listing, ListingObject, RemotePath, RemoteStorageConfig, }; use serde::de::DeserializeOwned; use tokio_util::sync::CancellationToken; use tracing::{debug, info, instrument}; use utils::lsn::Lsn; use super::index_part_format; use crate::assert_u64_eq_usize::U64IsUsize; use crate::config::PageServerConf; pub async fn new( conf: &'static PageServerConf, location: &index_part_format::Location, cancel: CancellationToken, ) -> Result<RemoteStorageWrapper, anyhow::Error> { // Downloads should be reasonably sized. We do ranged reads for relblock raw data // and full reads for SLRU segments which are bounded by Postgres. let timeout = RemoteStorageConfig::DEFAULT_TIMEOUT; let location_storage = match location { #[cfg(feature = "testing")] index_part_format::Location::LocalFs { path } => { GenericRemoteStorage::LocalFs(remote_storage::LocalFs::new(path.clone(), timeout)?) } index_part_format::Location::AwsS3 { region, bucket, key, } => { // TODO: think about security implications of letting the client specify the bucket & prefix. // It's the most flexible right now, but, possibly we want to move bucket name into PS conf // and force the timeline_id into the prefix? GenericRemoteStorage::AwsS3(Arc::new( remote_storage::S3Bucket::new( &remote_storage::S3Config { bucket_name: bucket.clone(), prefix_in_bucket: Some(key.clone()), bucket_region: region.clone(), endpoint: conf .import_pgdata_aws_endpoint_url .clone() .map(|url| url.to_string()), // by specifying None here, remote_storage/aws-sdk-rust will infer from env // This matches the default import job concurrency. This is managed // separately from the usual S3 client, but the concern here is bandwidth // usage. 
concurrency_limit: 128.try_into().unwrap(), max_keys_per_list_response: Some(1000), upload_storage_class: None, // irrelevant }, timeout, ) .await .context("setup s3 bucket")?, )) } }; let storage_wrapper = RemoteStorageWrapper::new(location_storage, cancel); Ok(storage_wrapper) } /// Wrap [`remote_storage`] APIs to make it look a bit more like a filesystem API /// such as [`tokio::fs`], which was used in the original implementation of the import code. #[derive(Clone)] pub struct RemoteStorageWrapper { storage: GenericRemoteStorage, cancel: CancellationToken, } impl RemoteStorageWrapper { pub fn new(storage: GenericRemoteStorage, cancel: CancellationToken) -> Self { Self { storage, cancel } } #[instrument(level = tracing::Level::DEBUG, skip_all, fields(%path))] pub async fn listfilesindir( &self, path: &RemotePath, ) -> Result<Vec<(RemotePath, usize)>, DownloadError> { assert!( path.object_name().is_some(), "must specify dirname, without trailing slash" ); let path = path.add_trailing_slash(); let res = crate::tenant::remote_timeline_client::download::download_retry_forever( || async { let Listing { keys, prefixes: _ } = self .storage .list( Some(&path), remote_storage::ListingMode::WithDelimiter, None, &self.cancel, ) .await?; let res = keys .into_iter() .map(|ListingObject { key, size, .. 
}| (key, size.into_usize())) .collect(); Ok(res) }, &format!("listfilesindir {path:?}"), &self.cancel, ) .await; debug!(?res, "returning"); res } #[instrument(level = tracing::Level::DEBUG, skip_all, fields(%path))] pub async fn listdir(&self, path: &RemotePath) -> Result<Vec<RemotePath>, DownloadError> { assert!( path.object_name().is_some(), "must specify dirname, without trailing slash" ); let path = path.add_trailing_slash(); let res = crate::tenant::remote_timeline_client::download::download_retry_forever( || async { let Listing { keys, prefixes } = self .storage .list( Some(&path), remote_storage::ListingMode::WithDelimiter, None, &self.cancel, ) .await?; let res = keys .into_iter() .map(|ListingObject { key, .. }| key) .chain(prefixes.into_iter()) .collect(); Ok(res) }, &format!("listdir {path:?}"), &self.cancel, ) .await; debug!(?res, "returning"); res } #[instrument(level = tracing::Level::DEBUG, skip_all, fields(%path))] pub async fn get(&self, path: &RemotePath) -> Result<Bytes, DownloadError> { let res = crate::tenant::remote_timeline_client::download::download_retry_forever( || async { let Download { download_stream, .. } = self .storage .download(path, &DownloadOpts::default(), &self.cancel) .await?; let mut reader = tokio_util::io::StreamReader::new(download_stream); // XXX optimize this, can we get the capacity hint from somewhere? 
let mut buf = Vec::new(); tokio::io::copy_buf(&mut reader, &mut buf).await?; Ok(Bytes::from(buf)) }, &format!("download {path:?}"), &self.cancel, ) .await; debug!(len = res.as_ref().ok().map(|buf| buf.len()), "done"); res } #[instrument(level = tracing::Level::DEBUG, skip_all, fields(%path))] pub async fn get_json<T: DeserializeOwned>( &self, path: &RemotePath, ) -> Result<Option<T>, DownloadError> { let buf = match self.get(path).await { Ok(buf) => buf, Err(DownloadError::NotFound) => return Ok(None), Err(err) => return Err(err), }; let res = serde_json::from_slice(&buf) .context("serialize") // TODO: own error type .map_err(DownloadError::Other)?; Ok(Some(res)) } #[instrument(level = tracing::Level::DEBUG, skip_all, fields(%path))] pub async fn get_range( &self, path: &RemotePath, start_inclusive: u64, end_exclusive: u64, ) -> Result<Vec<u8>, DownloadError> { let len = end_exclusive .checked_sub(start_inclusive) .unwrap() .into_usize(); let res = crate::tenant::remote_timeline_client::download::download_retry_forever( || async { let Download { download_stream, .. 
} = self .storage .download( path, &DownloadOpts { kind: DownloadKind::Large, etag: None, byte_start: Bound::Included(start_inclusive), byte_end: Bound::Excluded(end_exclusive), version_id: None, }, &self.cancel) .await?; let mut reader = tokio_util::io::StreamReader::new(download_stream); let mut buf = Vec::with_capacity(len); tokio::io::copy_buf(&mut reader, &mut buf).await?; Ok(buf) }, &format!("download range len=0x{len:x} [0x{start_inclusive:x},0x{end_exclusive:x}) from {path:?}"), &self.cancel, ) .await; debug!(len = res.as_ref().ok().map(|buf| buf.len()), "done"); res } pub fn pgdata(&self) -> RemotePath { RemotePath::from_string("pgdata").unwrap() } pub async fn get_control_file(&self) -> Result<ControlFile, anyhow::Error> { let control_file_path = self.pgdata().join("global/pg_control"); info!("get control file from {control_file_path}"); let control_file_buf = self.get(&control_file_path).await?; ControlFile::new(control_file_buf) } } pub struct ControlFile { control_file_data: ControlFileData, control_file_buf: Bytes, } impl ControlFile { pub(crate) fn new(control_file_buf: Bytes) -> Result<Self, anyhow::Error> { // XXX ControlFileData is version-specific, we're always using v14 here. v17 had changes. 
let control_file_data = ControlFileData::decode(&control_file_buf)?; let control_file = ControlFile { control_file_data, control_file_buf, }; control_file.try_pg_version()?; // so that we can offer infallible pg_version() Ok(control_file) } pub(crate) fn base_lsn(&self) -> Lsn { Lsn(self.control_file_data.checkPoint).align() } pub(crate) fn pg_version(&self) -> PgMajorVersion { self.try_pg_version() .expect("prepare() checks that try_pg_version doesn't error") } pub(crate) fn control_file_data(&self) -> &ControlFileData { &self.control_file_data } pub(crate) fn control_file_buf(&self) -> &Bytes { &self.control_file_buf } fn try_pg_version(&self) -> anyhow::Result<PgMajorVersion> { Ok(match self.control_file_data.catalog_version_no { // thesea are from catversion.h 202107181 => PgMajorVersion::PG14, 202209061 => PgMajorVersion::PG15, 202307071 => PgMajorVersion::PG16, 202406281 => PgMajorVersion::PG17, catversion => { anyhow::bail!("unrecognized catalog version {catversion}") } }) } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/import_pgdata/flow.rs
pageserver/src/tenant/timeline/import_pgdata/flow.rs
//! Import a PGDATA directory into an empty root timeline. //! //! This module is adapted hackathon code by Heikki and Stas. //! Other code in the parent module was written by Christian as part of a customer PoC. //! //! The hackathon code was producing image layer files as a free-standing program. //! //! It has been modified to //! - run inside a running Pageserver, within the proper lifecycles of Timeline -> Tenant(Shard) //! - => sharding-awareness: produce image layers with only the data relevant for this shard //! - => S3 as the source for the PGDATA instead of local filesystem //! //! TODOs before productionization: //! - ChunkProcessingJob should cut up an ImportJob to hit exactly target image layer size. //! //! An incomplete set of TODOs from the Hackathon: //! - version-specific CheckPointData (=> pgv abstraction, already exists for regular walingest) use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::num::NonZeroUsize; use std::ops::Range; use std::sync::Arc; use anyhow::ensure; use bytes::Bytes; use futures::stream::FuturesOrdered; use itertools::Itertools; use pageserver_api::config::TimelineImportConfig; use pageserver_api::key::{ CHECKPOINT_KEY, CONTROLFILE_KEY, DBDIR_KEY, Key, TWOPHASEDIR_KEY, rel_block_to_key, rel_dir_to_key, rel_size_to_key, relmap_file_key, slru_block_to_key, slru_dir_to_key, slru_segment_size_to_key, }; use pageserver_api::keyspace::{ShardedRange, singleton_range}; use pageserver_api::models::{ShardImportProgress, ShardImportProgressV1, ShardImportStatus}; use pageserver_api::reltag::{RelTag, SlruKind}; use pageserver_api::shard::ShardIdentity; use postgres_ffi::BLCKSZ; use postgres_ffi::relfile_utils::parse_relfilename; use remote_storage::RemotePath; use tokio::sync::Semaphore; use tokio_stream::StreamExt; use tracing::{debug, instrument}; use utils::bin_ser::BeSer; use utils::lsn::Lsn; use utils::pausable_failpoint; use super::Timeline; use super::importbucket_client::{ControlFile, RemoteStorageWrapper}; 
use crate::assert_u64_eq_usize::UsizeIsU64; use crate::context::{DownloadBehavior, RequestContext}; use crate::controller_upcall_client::{StorageControllerUpcallApi, StorageControllerUpcallClient}; use crate::pgdatadir_mapping::{ DbDirectory, RelDirectory, SlruSegmentDirectory, TwoPhaseDirectory, }; use crate::task_mgr::TaskKind; use crate::tenant::storage_layer::{AsLayerDesc, ImageLayerWriter, Layer}; use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; pub async fn run( timeline: Arc<Timeline>, control_file: ControlFile, storage: RemoteStorageWrapper, import_progress: Option<ShardImportProgress>, ctx: &RequestContext, ) -> anyhow::Result<()> { // Match how we run the import based on the progress version. // If there's no import progress, it means that this is a new import // and we can use whichever version we want. match import_progress { Some(ShardImportProgress::V1(progress)) => { run_v1(timeline, control_file, storage, Some(progress), ctx).await } None => run_v1(timeline, control_file, storage, None, ctx).await, } } async fn run_v1( timeline: Arc<Timeline>, control_file: ControlFile, storage: RemoteStorageWrapper, import_progress: Option<ShardImportProgressV1>, ctx: &RequestContext, ) -> anyhow::Result<()> { let planner = Planner { control_file, storage: storage.clone(), shard: timeline.shard_identity, tasks: Vec::default(), }; // Use the job size limit encoded in the progress if we are resuming an import. // This ensures that imports have stable plans even if the pageserver config changes. 
let import_config = { match &import_progress { Some(progress) => { let base = &timeline.conf.timeline_import_config; TimelineImportConfig { import_job_soft_size_limit: NonZeroUsize::new(progress.job_soft_size_limit) .unwrap(), import_job_concurrency: base.import_job_concurrency, import_job_checkpoint_threshold: base.import_job_checkpoint_threshold, import_job_max_byte_range_size: base.import_job_max_byte_range_size, } } None => timeline.conf.timeline_import_config.clone(), } }; let plan = planner.plan(&import_config).await?; // Hash the plan and compare with the hash of the plan we got back from the storage controller. // If the two match, it means that the planning stage had the same output. // // This is not intended to be a cryptographically secure hash. const SEED: u64 = 42; let mut hasher = twox_hash::XxHash64::with_seed(SEED); plan.hash(&mut hasher); let plan_hash = hasher.finish(); if let Some(progress) = &import_progress { // Handle collisions on jobs of unequal length if progress.jobs != plan.jobs.len() { anyhow::bail!("Import plan job length does not match storcon metadata") } if plan_hash != progress.import_plan_hash { anyhow::bail!("Import plan does not match storcon metadata"); } } pausable_failpoint!("import-timeline-pre-execute-pausable"); let jobs_count = import_progress.as_ref().map(|p| p.jobs); let start_from_job_idx = import_progress.map(|progress| progress.completed); tracing::info!( start_from_job_idx=?start_from_job_idx, jobs=?jobs_count, "Executing import plan" ); plan.execute(timeline, start_from_job_idx, plan_hash, &import_config, ctx) .await } struct Planner { control_file: ControlFile, storage: RemoteStorageWrapper, shard: ShardIdentity, tasks: Vec<AnyImportTask>, } #[derive(Hash)] struct Plan { jobs: Vec<ChunkProcessingJob>, // Included here such that it ends up in the hash for the plan shard: ShardIdentity, } impl Planner { /// Creates an import plan /// /// This function is and must remain pure: given the same input, it will generate 
the same import plan. async fn plan(mut self, import_config: &TimelineImportConfig) -> anyhow::Result<Plan> { let pgdata_lsn = Lsn(self.control_file.control_file_data().checkPoint).align(); anyhow::ensure!(pgdata_lsn.is_valid()); let datadir = PgDataDir::new(&self.storage).await?; // Import dbdir (00:00:00 keyspace) // This is just constructed here, but will be written to the image layer in the first call to import_db() let dbdir_buf = Bytes::from(DbDirectory::ser(&DbDirectory { dbdirs: datadir .dbs .iter() .map(|db| ((db.spcnode, db.dboid), true)) .collect(), })?); self.tasks .push(ImportSingleKeyTask::new(DBDIR_KEY, dbdir_buf).into()); // Import databases (00:spcnode:dbnode keyspace for each db) for db in datadir.dbs { self.import_db(&db).await?; } // Import SLRUs if self.shard.is_shard_zero() { // pg_xact (01:00 keyspace) self.import_slru(SlruKind::Clog, &self.storage.pgdata().join("pg_xact")) .await?; // pg_multixact/members (01:01 keyspace) self.import_slru( SlruKind::MultiXactMembers, &self.storage.pgdata().join("pg_multixact/members"), ) .await?; // pg_multixact/offsets (01:02 keyspace) self.import_slru( SlruKind::MultiXactOffsets, &self.storage.pgdata().join("pg_multixact/offsets"), ) .await?; } // Import pg_twophase. // TODO: as empty let twophasedir_buf = TwoPhaseDirectory::ser(&TwoPhaseDirectory { xids: HashSet::new(), })?; self.tasks .push(AnyImportTask::SingleKey(ImportSingleKeyTask::new( TWOPHASEDIR_KEY, Bytes::from(twophasedir_buf), ))); // Controlfile, checkpoint self.tasks .push(AnyImportTask::SingleKey(ImportSingleKeyTask::new( CONTROLFILE_KEY, self.control_file.control_file_buf().clone(), ))); let checkpoint_buf = self .control_file .control_file_data() .checkPointCopy .encode()?; self.tasks .push(AnyImportTask::SingleKey(ImportSingleKeyTask::new( CHECKPOINT_KEY, checkpoint_buf, ))); // Sort the tasks by the key ranges they handle. // The plan being generated here needs to be stable across invocations // of this method. 
self.tasks.sort_by_key(|task| match task { AnyImportTask::SingleKey(key) => (key.key, key.key.next()), AnyImportTask::RelBlocks(rel_blocks) => { (rel_blocks.key_range.start, rel_blocks.key_range.end) } AnyImportTask::SlruBlocks(slru_blocks) => { (slru_blocks.key_range.start, slru_blocks.key_range.end) } }); // Assigns parts of key space to later parallel jobs // Note: The image layers produced here may have gaps, meaning, // there is not an image for each key in the layer's key range. // The read path stops traversal at the first image layer, regardless // of whether a base image has been found for a key or not. // (Concept of sparse image layers doesn't exist.) // This behavior is exactly right for the base image layers we're producing here. // But, since no other place in the code currently produces image layers with gaps, // it seems noteworthy. let mut last_end_key = Key::MIN; let mut current_chunk = Vec::new(); let mut current_chunk_size: usize = 0; let mut jobs = Vec::new(); for task in std::mem::take(&mut self.tasks).into_iter() { let task_size = task.total_size(&self.shard); let projected_chunk_size = current_chunk_size.saturating_add(task_size); if projected_chunk_size > import_config.import_job_soft_size_limit.into() { let key_range = last_end_key..task.key_range().start; jobs.push(ChunkProcessingJob::new( key_range.clone(), std::mem::take(&mut current_chunk), pgdata_lsn, )); last_end_key = key_range.end; current_chunk_size = 0; } current_chunk_size = current_chunk_size.saturating_add(task_size); current_chunk.push(task); } jobs.push(ChunkProcessingJob::new( last_end_key..Key::MAX, current_chunk, pgdata_lsn, )); Ok(Plan { jobs, shard: self.shard, }) } #[instrument(level = tracing::Level::DEBUG, skip_all, fields(dboid=%db.dboid, tablespace=%db.spcnode, path=%db.path))] async fn import_db(&mut self, db: &PgDataDirDb) -> anyhow::Result<()> { debug!("start"); scopeguard::defer! 
{ debug!("return"); } // Import relmap (00:spcnode:dbnode:00:*:00) let relmap_key = relmap_file_key(db.spcnode, db.dboid); debug!("Constructing relmap entry, key {relmap_key}"); let relmap_path = db.path.join("pg_filenode.map"); let relmap_buf = self.storage.get(&relmap_path).await?; self.tasks .push(AnyImportTask::SingleKey(ImportSingleKeyTask::new( relmap_key, relmap_buf, ))); // Import reldir (00:spcnode:dbnode:00:*:01) let reldir_key = rel_dir_to_key(db.spcnode, db.dboid); debug!("Constructing reldirs entry, key {reldir_key}"); let reldir_buf = RelDirectory::ser(&RelDirectory { rels: db .files .iter() .map(|f| (f.rel_tag.relnode, f.rel_tag.forknum)) .collect(), })?; self.tasks .push(AnyImportTask::SingleKey(ImportSingleKeyTask::new( reldir_key, Bytes::from(reldir_buf), ))); // Import data (00:spcnode:dbnode:reloid:fork:blk) and set sizes for each last // segment in a given relation (00:spcnode:dbnode:reloid:fork:ff) for file in &db.files { debug!(%file.path, %file.filesize, "importing file"); let len = file.filesize; ensure!(len % 8192 == 0); let start_blk: u32 = file.segno * (1024 * 1024 * 1024 / 8192); let start_key = rel_block_to_key(file.rel_tag, start_blk); let end_key = rel_block_to_key(file.rel_tag, start_blk + (len / 8192) as u32); self.tasks .push(AnyImportTask::RelBlocks(ImportRelBlocksTask::new( self.shard, start_key..end_key, &file.path, self.storage.clone(), ))); // Set relsize for the last segment (00:spcnode:dbnode:reloid:fork:ff) if let Some(nblocks) = file.nblocks { let size_key = rel_size_to_key(file.rel_tag); //debug!("Setting relation size (path={path}, rel_tag={rel_tag}, segno={segno}) to {nblocks}, key {size_key}"); let buf = nblocks.to_le_bytes(); self.tasks .push(AnyImportTask::SingleKey(ImportSingleKeyTask::new( size_key, Bytes::from(buf.to_vec()), ))); } } Ok(()) } async fn import_slru(&mut self, kind: SlruKind, path: &RemotePath) -> anyhow::Result<()> { assert!(self.shard.is_shard_zero()); let segments = 
self.storage.listfilesindir(path).await?; let segments: Vec<(String, u32, usize)> = segments .into_iter() .filter_map(|(path, size)| { let filename = path.object_name()?; let segno = u32::from_str_radix(filename, 16).ok()?; Some((filename.to_string(), segno, size)) }) .collect(); // Write SlruDir let slrudir_key = slru_dir_to_key(kind); let segnos: HashSet<u32> = segments .iter() .map(|(_path, segno, _size)| *segno) .collect(); let slrudir = SlruSegmentDirectory { segments: segnos }; let slrudir_buf = SlruSegmentDirectory::ser(&slrudir)?; self.tasks .push(AnyImportTask::SingleKey(ImportSingleKeyTask::new( slrudir_key, Bytes::from(slrudir_buf), ))); for (segpath, segno, size) in segments { // SlruSegBlocks for each segment let p = path.join(&segpath); let file_size = size; ensure!(file_size % 8192 == 0); let nblocks = u32::try_from(file_size / 8192)?; let start_key = slru_block_to_key(kind, segno, 0); let end_key = slru_block_to_key(kind, segno, nblocks); debug!(%p, segno=%segno, %size, %start_key, %end_key, "scheduling SLRU segment"); self.tasks .push(AnyImportTask::SlruBlocks(ImportSlruBlocksTask::new( start_key..end_key, &p, self.storage.clone(), ))); // Followed by SlruSegSize let segsize_key = slru_segment_size_to_key(kind, segno); let segsize_buf = nblocks.to_le_bytes(); self.tasks .push(AnyImportTask::SingleKey(ImportSingleKeyTask::new( segsize_key, Bytes::copy_from_slice(&segsize_buf), ))); } Ok(()) } } impl Plan { async fn execute( self, timeline: Arc<Timeline>, start_after_job_idx: Option<usize>, import_plan_hash: u64, import_config: &TimelineImportConfig, ctx: &RequestContext, ) -> anyhow::Result<()> { let storcon_client = StorageControllerUpcallClient::new(timeline.conf, &timeline.cancel); let mut work = FuturesOrdered::new(); let semaphore = Arc::new(Semaphore::new(import_config.import_job_concurrency.into())); let jobs_in_plan = self.jobs.len(); let mut jobs = self .jobs .into_iter() .enumerate() .map(|(idx, job)| (idx + 1, job)) .filter(|(idx, _job)| 
{ // Filter out any jobs that have been done already if let Some(start_after) = start_after_job_idx { *idx > start_after } else { true } }) .peekable(); let mut last_completed_job_idx = start_after_job_idx.unwrap_or(0); let checkpoint_every: usize = import_config.import_job_checkpoint_threshold.into(); let max_byte_range_size: usize = import_config.import_job_max_byte_range_size.into(); // Run import jobs concurrently up to the limit specified by the pageserver configuration. // Note that we process completed futures in the oreder of insertion. This will be the // building block for resuming imports across pageserver restarts or tenant migrations. while last_completed_job_idx < jobs_in_plan { tokio::select! { permit = semaphore.clone().acquire_owned(), if jobs.peek().is_some() => { let permit = permit.expect("never closed"); let (job_idx, job) = jobs.next().expect("we peeked"); let job_timeline = timeline.clone(); let ctx = ctx.detached_child(TaskKind::ImportPgdata, DownloadBehavior::Error); work.push_back(tokio::task::spawn(async move { let _permit = permit; let res = job.run(job_timeline, max_byte_range_size, &ctx).await; (job_idx, res) })); }, maybe_complete_job_idx = work.next() => { pausable_failpoint!("import-task-complete-pausable"); match maybe_complete_job_idx { Some(Ok((job_idx, res))) => { assert!(last_completed_job_idx.checked_add(1).unwrap() == job_idx); res?; last_completed_job_idx = job_idx; if last_completed_job_idx % checkpoint_every == 0 { tracing::info!(last_completed_job_idx, jobs=%jobs_in_plan, "Checkpointing import status"); let progress = ShardImportProgressV1 { jobs: jobs_in_plan, completed: last_completed_job_idx, import_plan_hash, job_soft_size_limit: import_config.import_job_soft_size_limit.into(), }; timeline.remote_client.schedule_index_upload_for_file_changes()?; timeline.remote_client.wait_completion().await?; storcon_client.put_timeline_import_status( timeline.tenant_shard_id, timeline.timeline_id, timeline.generation, 
ShardImportStatus::InProgress(Some(ShardImportProgress::V1(progress))) ) .await .map_err(|_err| { anyhow::anyhow!("Shut down while putting timeline import status") })?; } }, Some(Err(_)) => { anyhow::bail!( "import job panicked or cancelled" ); } None => {} } } } } Ok(()) } } // // dbdir iteration tools // struct PgDataDir { pub dbs: Vec<PgDataDirDb>, // spcnode, dboid, path } struct PgDataDirDb { pub spcnode: u32, pub dboid: u32, pub path: RemotePath, pub files: Vec<PgDataDirDbFile>, } struct PgDataDirDbFile { pub path: RemotePath, pub rel_tag: RelTag, pub segno: u32, pub filesize: usize, // Cummulative size of the given fork, set only for the last segment of that fork pub nblocks: Option<usize>, } impl PgDataDir { async fn new(storage: &RemoteStorageWrapper) -> anyhow::Result<Self> { let datadir_path = storage.pgdata(); // Import ordinary databases, DEFAULTTABLESPACE_OID is smaller than GLOBALTABLESPACE_OID, so import them first // Traverse database in increasing oid order let basedir = &datadir_path.join("base"); let db_oids: Vec<_> = storage .listdir(basedir) .await? 
.into_iter() .filter_map(|path| path.object_name().and_then(|name| name.parse::<u32>().ok())) .sorted() .collect(); debug!(?db_oids, "found databases"); let mut databases = Vec::new(); for dboid in db_oids { databases.push( PgDataDirDb::new( storage, &basedir.join(dboid.to_string()), postgres_ffi_types::constants::DEFAULTTABLESPACE_OID, dboid, &datadir_path, ) .await?, ); } // special case for global catalogs databases.push( PgDataDirDb::new( storage, &datadir_path.join("global"), postgres_ffi_types::constants::GLOBALTABLESPACE_OID, 0, &datadir_path, ) .await?, ); databases.sort_by_key(|db| (db.spcnode, db.dboid)); Ok(Self { dbs: databases }) } } impl PgDataDirDb { #[instrument(level = tracing::Level::DEBUG, skip_all, fields(%dboid, %db_path))] async fn new( storage: &RemoteStorageWrapper, db_path: &RemotePath, spcnode: u32, dboid: u32, datadir_path: &RemotePath, ) -> anyhow::Result<Self> { let mut files: Vec<PgDataDirDbFile> = storage .listfilesindir(db_path) .await? .into_iter() .filter_map(|(path, size)| { debug!(%path, %size, "found file in dbdir"); path.object_name().and_then(|name| { // returns (relnode, forknum, segno) parse_relfilename(name).ok().map(|x| (size, x)) }) }) .sorted_by_key(|(_, relfilename)| *relfilename) .map(|(filesize, (relnode, forknum, segno))| { let rel_tag = RelTag { spcnode, dbnode: dboid, relnode, forknum, }; let path = datadir_path.join(rel_tag.to_segfile_name(segno)); anyhow::ensure!(filesize % BLCKSZ as usize == 0); let nblocks = filesize / BLCKSZ as usize; Ok(PgDataDirDbFile { path, filesize, rel_tag, segno, nblocks: Some(nblocks), // first non-cummulative sizes }) }) .collect::<anyhow::Result<_, _>>()?; // Set cummulative sizes. Do all of that math here, so that later we could easier // parallelize over segments and know with which segments we need to write relsize // entry. 
let mut cumulative_nblocks: usize = 0; let mut prev_rel_tag: Option<RelTag> = None; for i in 0..files.len() { if prev_rel_tag == Some(files[i].rel_tag) { cumulative_nblocks += files[i].nblocks.unwrap(); } else { cumulative_nblocks = files[i].nblocks.unwrap(); } files[i].nblocks = if i == files.len() - 1 || files[i + 1].rel_tag != files[i].rel_tag { Some(cumulative_nblocks) } else { None }; prev_rel_tag = Some(files[i].rel_tag); } Ok(PgDataDirDb { files, path: db_path.clone(), spcnode, dboid, }) } } trait ImportTask { fn key_range(&self) -> Range<Key>; fn total_size(&self, shard_identity: &ShardIdentity) -> usize { let range = ShardedRange::new(self.key_range(), shard_identity); let page_count = range.page_count(); if page_count == u32::MAX { tracing::warn!( "Import task has non contiguous key range: {}..{}", self.key_range().start, self.key_range().end ); // Tasks should operate on contiguous ranges. It is unexpected for // ranges to violate this assumption. Calling code handles this by mapping // any task on a non contiguous range to its own image layer. usize::MAX } else { page_count as usize * 8192 } } async fn doit( self, layer_writer: &mut ImageLayerWriter, max_byte_range_size: usize, ctx: &RequestContext, ) -> anyhow::Result<usize>; } struct ImportSingleKeyTask { key: Key, buf: Bytes, } impl Hash for ImportSingleKeyTask { fn hash<H: Hasher>(&self, state: &mut H) { let ImportSingleKeyTask { key, buf } = self; key.hash(state); // The key value might not have a stable binary representation. // For instance, the db directory uses an unstable hash-map. // To work around this we are a bit lax here and only hash the // size of the buffer which must be consistent. 
buf.len().hash(state); } } impl ImportSingleKeyTask { fn new(key: Key, buf: Bytes) -> Self { ImportSingleKeyTask { key, buf } } } impl ImportTask for ImportSingleKeyTask { fn key_range(&self) -> Range<Key> { singleton_range(self.key) } async fn doit( self, layer_writer: &mut ImageLayerWriter, _max_byte_range_size: usize, ctx: &RequestContext, ) -> anyhow::Result<usize> { layer_writer.put_image(self.key, self.buf, ctx).await?; Ok(1) } } struct ImportRelBlocksTask { shard_identity: ShardIdentity, key_range: Range<Key>, path: RemotePath, storage: RemoteStorageWrapper, } impl Hash for ImportRelBlocksTask { fn hash<H: Hasher>(&self, state: &mut H) { let ImportRelBlocksTask { shard_identity: _, key_range, path, storage: _, } = self; key_range.hash(state); path.hash(state); } } impl ImportRelBlocksTask { fn new( shard_identity: ShardIdentity, key_range: Range<Key>, path: &RemotePath, storage: RemoteStorageWrapper, ) -> Self { ImportRelBlocksTask { shard_identity, key_range, path: path.clone(), storage, } } } impl ImportTask for ImportRelBlocksTask { fn key_range(&self) -> Range<Key> { self.key_range.clone() } #[instrument(level = tracing::Level::DEBUG, skip_all, fields(%self.path))] async fn doit( self, layer_writer: &mut ImageLayerWriter, max_byte_range_size: usize, ctx: &RequestContext, ) -> anyhow::Result<usize> { debug!("Importing relation file"); let (rel_tag, start_blk) = self.key_range.start.to_rel_block()?; let (rel_tag_end, end_blk) = self.key_range.end.to_rel_block()?; assert_eq!(rel_tag, rel_tag_end); let ranges = (start_blk..end_blk) .enumerate() .filter_map(|(i, blknum)| { let key = rel_block_to_key(rel_tag, blknum); if self.shard_identity.is_key_disposable(&key) { return None; } let file_offset = i.checked_mul(8192).unwrap(); Some(( vec![key], file_offset, file_offset.checked_add(8192).unwrap(), )) }) .coalesce(|(mut acc, acc_start, acc_end), (mut key, start, end)| { assert_eq!(key.len(), 1); assert!(!acc.is_empty()); assert!(acc_end > acc_start); if acc_end 
== start && end - acc_start <= max_byte_range_size { acc.push(key.pop().unwrap()); Ok((acc, acc_start, end)) } else { Err(((acc, acc_start, acc_end), (key, start, end))) } }); let mut nimages = 0; for (keys, range_start, range_end) in ranges { let range_buf = self .storage .get_range(&self.path, range_start.into_u64(), range_end.into_u64()) .await?; let mut buf = Bytes::from(range_buf); for key in keys { // The writer buffers writes internally let image = buf.split_to(8192); layer_writer.put_image(key, image, ctx).await?; nimages += 1; } } Ok(nimages) } } struct ImportSlruBlocksTask { key_range: Range<Key>, path: RemotePath, storage: RemoteStorageWrapper, } impl Hash for ImportSlruBlocksTask { fn hash<H: Hasher>(&self, state: &mut H) { let ImportSlruBlocksTask { key_range, path, storage: _, } = self; key_range.hash(state); path.hash(state); } } impl ImportSlruBlocksTask { fn new(key_range: Range<Key>, path: &RemotePath, storage: RemoteStorageWrapper) -> Self { ImportSlruBlocksTask { key_range, path: path.clone(), storage, } } } impl ImportTask for ImportSlruBlocksTask { fn key_range(&self) -> Range<Key> { self.key_range.clone() } async fn doit( self, layer_writer: &mut ImageLayerWriter, _max_byte_range_size: usize, ctx: &RequestContext, ) -> anyhow::Result<usize> { debug!("Importing SLRU segment file {}", self.path); let buf = self.storage.get(&self.path).await?; // TODO(vlad): Does timestamp to LSN work for imported timelines? // Probably not since we don't append the `xact_time` to it as in // [`WalIngest::ingest_xact_record`]. 
let (kind, segno, start_blk) = self.key_range.start.to_slru_block()?; let (_kind, _segno, end_blk) = self.key_range.end.to_slru_block()?; let mut blknum = start_blk; let mut nimages = 0; let mut file_offset = 0; while blknum < end_blk { let key = slru_block_to_key(kind, segno, blknum); let buf = &buf[file_offset..(file_offset + 8192)]; file_offset += 8192; layer_writer .put_image(key, Bytes::copy_from_slice(buf), ctx) .await?; nimages += 1; blknum += 1; } Ok(nimages) } } #[derive(Hash)] enum AnyImportTask { SingleKey(ImportSingleKeyTask), RelBlocks(ImportRelBlocksTask), SlruBlocks(ImportSlruBlocksTask), } impl ImportTask for AnyImportTask { fn key_range(&self) -> Range<Key> { match self { Self::SingleKey(t) => t.key_range(), Self::RelBlocks(t) => t.key_range(), Self::SlruBlocks(t) => t.key_range(), } } /// returns the number of images put into the `layer_writer` async fn doit( self, layer_writer: &mut ImageLayerWriter, max_byte_range_size: usize, ctx: &RequestContext, ) -> anyhow::Result<usize> {
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/import_pgdata/index_part_format.rs
pageserver/src/tenant/timeline/import_pgdata/index_part_format.rs
#[cfg(feature = "testing")]
use camino::Utf8PathBuf;
use serde::{Deserialize, Serialize};

/// Versioned root of the pgdata-import state persisted in the index part.
///
/// New versions get a new variant; readers match on the version explicitly.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub enum Root {
    V1(V1),
}

/// Version 1 of the import state: the import is either still running or finished.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub enum V1 {
    InProgress(InProgress),
    Done(Done),
}

/// Opaque key used to recognize a repeated import request.
///
/// `#[serde(transparent)]`: serializes as the bare wrapped string.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(transparent)]
pub struct IdempotencyKey(String);

impl IdempotencyKey {
    /// Wraps an arbitrary string as an idempotency key.
    pub fn new(s: String) -> Self {
        IdempotencyKey(s)
    }
}

/// State of an import that has started but not yet completed.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct InProgress {
    pub idempotency_key: IdempotencyKey,
    pub location: Location,
    pub started_at: chrono::NaiveDateTime,
}

/// State of a completed import.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct Done {
    pub idempotency_key: IdempotencyKey,
    pub started_at: chrono::NaiveDateTime,
    pub finished_at: chrono::NaiveDateTime,
}

/// Where the source pgdata lives.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub enum Location {
    /// Local filesystem source; only available in test builds.
    #[cfg(feature = "testing")]
    LocalFs { path: Utf8PathBuf },
    AwsS3 {
        region: String,
        bucket: String,
        key: String,
    },
}

impl Root {
    /// True iff the import has run to completion.
    pub fn is_done(&self) -> bool {
        matches!(self, Root::V1(V1::Done(_)))
    }

    /// The idempotency key, independent of version and completion state.
    pub fn idempotency_key(&self) -> &IdempotencyKey {
        match self {
            Root::V1(V1::InProgress(state)) => &state.idempotency_key,
            Root::V1(V1::Done(state)) => &state.idempotency_key,
        }
    }

    /// When the import was started, independent of completion state.
    pub fn started_at(&self) -> &chrono::NaiveDateTime {
        match self {
            Root::V1(V1::InProgress(state)) => &state.started_at,
            Root::V1(V1::Done(state)) => &state.started_at,
        }
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/tenant/timeline/import_pgdata/importbucket_format.rs
pageserver/src/tenant/timeline/import_pgdata/importbucket_format.rs
use serde::{Deserialize, Serialize};

/// JSON status object exchanged via the import bucket.
///
/// NOTE(review): only the completion flag is modeled so far; presumably this
/// mirrors a richer status document written by the pgdata exporter — confirm
/// against the writer before extending.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)]
pub struct PgdataStatus {
    /// Whether the operation this status describes has completed.
    pub done: bool,
    // TODO: remaining fields
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/consumption_metrics/disk_cache.rs
pageserver/src/consumption_metrics/disk_cache.rs
use std::sync::Arc; use anyhow::Context; use camino::{Utf8Path, Utf8PathBuf}; use super::{NewMetricsRoot, NewRawMetric, RawMetric}; use crate::consumption_metrics::NewMetricsRefRoot; pub(super) fn read_metrics_from_serde_value( json_value: serde_json::Value, ) -> anyhow::Result<Vec<NewRawMetric>> { if NewMetricsRoot::is_v2_metrics(&json_value) { let root = serde_json::from_value::<NewMetricsRoot>(json_value)?; Ok(root.metrics) } else { let all_metrics = serde_json::from_value::<Vec<RawMetric>>(json_value)?; let all_metrics = all_metrics .into_iter() .map(|(key, (event_type, value))| NewRawMetric { key, kind: event_type, value, }) .collect(); Ok(all_metrics) } } pub(super) async fn read_metrics_from_disk( path: Arc<Utf8PathBuf>, ) -> anyhow::Result<Vec<NewRawMetric>> { // do not add context to each error, callsite will log with full path let span = tracing::Span::current(); tokio::task::spawn_blocking(move || { let _e = span.entered(); if let Some(parent) = path.parent() { if let Err(e) = scan_and_delete_with_same_prefix(&path) { tracing::info!("failed to cleanup temporary files in {parent:?}: {e:#}"); } } let mut file = std::fs::File::open(&*path)?; let reader = std::io::BufReader::new(&mut file); let json_value = serde_json::from_reader::<_, serde_json::Value>(reader)?; read_metrics_from_serde_value(json_value) }) .await .context("read metrics join error") .and_then(|x| x) } fn scan_and_delete_with_same_prefix(path: &Utf8Path) -> std::io::Result<()> { let it = std::fs::read_dir(path.parent().expect("caller checked"))?; let prefix = path.file_name().expect("caller checked").to_string(); for entry in it { let entry = entry?; if !entry.metadata()?.is_file() { continue; } let file_name = entry.file_name(); if path.file_name().unwrap() == file_name { // do not remove our actual file continue; } let file_name = file_name.to_string_lossy(); if !file_name.starts_with(&*prefix) { continue; } let path = entry.path(); if let Err(e) = std::fs::remove_file(&path) { 
tracing::warn!("cleaning up old tempfile {file_name:?} failed: {e:#}"); } else { tracing::info!("cleaned up old tempfile {file_name:?}"); } } Ok(()) } pub(super) async fn flush_metrics_to_disk( current_metrics: &Arc<Vec<NewRawMetric>>, path: &Arc<Utf8PathBuf>, ) -> anyhow::Result<()> { use std::io::Write; anyhow::ensure!(path.parent().is_some(), "path must have parent: {path:?}"); anyhow::ensure!( path.file_name().is_some(), "path must have filename: {path:?}" ); let span = tracing::Span::current(); tokio::task::spawn_blocking({ let current_metrics = current_metrics.clone(); let path = path.clone(); move || { let _e = span.entered(); let parent = path.parent().expect("existence checked"); let file_name = path.file_name().expect("existence checked"); let mut tempfile = camino_tempfile::Builder::new() .prefix(file_name) .suffix(".tmp") .tempfile_in(parent)?; tracing::debug!("using tempfile {:?}", tempfile.path()); // write out all of the raw metrics, to be read out later on restart as cached values { let mut writer = std::io::BufWriter::new(&mut tempfile); serde_json::to_writer( &mut writer, &NewMetricsRefRoot::new(current_metrics.as_ref()), ) .context("serialize metrics")?; writer .into_inner() .map_err(|_| anyhow::anyhow!("flushing metrics failed"))?; } tempfile.flush()?; tempfile.as_file().sync_all()?; fail::fail_point!("before-persist-last-metrics-collected"); drop(tempfile.persist(&*path).map_err(|e| e.error)?); let f = std::fs::File::open(path.parent().unwrap())?; f.sync_all()?; anyhow::Ok(()) } }) .await .with_context(|| format!("write metrics to {path:?} join error")) .and_then(|x| x.with_context(|| format!("write metrics to {path:?}"))) }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/consumption_metrics/upload.rs
pageserver/src/consumption_metrics/upload.rs
use std::error::Error as _; use std::time::SystemTime; use chrono::{DateTime, Utc}; use consumption_metrics::{CHUNK_SIZE, Event, EventChunk, IdempotencyKey}; use remote_storage::{GenericRemoteStorage, RemotePath}; use tokio::io::AsyncWriteExt; use tokio_util::sync::CancellationToken; use tracing::Instrument; use utils::id::{TenantId, TimelineId}; use super::metrics::Name; use super::{Cache, MetricsKey, NewRawMetric, RawMetric}; /// How the metrics from pageserver are identified. #[derive(serde::Serialize, serde::Deserialize, Debug, Clone, Copy, PartialEq)] struct Ids { pub(super) tenant_id: TenantId, #[serde(skip_serializing_if = "Option::is_none")] pub(super) timeline_id: Option<TimelineId>, } /// Serialize and write metrics to an HTTP endpoint #[tracing::instrument(skip_all, fields(metrics_total = %metrics.len()))] pub(super) async fn upload_metrics_http( client: &reqwest::Client, metric_collection_endpoint: &reqwest::Url, cancel: &CancellationToken, metrics: &[NewRawMetric], cached_metrics: &mut Cache, idempotency_keys: &[IdempotencyKey<'_>], ) -> anyhow::Result<()> { let mut uploaded = 0; let mut failed = 0; let started_at = std::time::Instant::now(); let mut iter = serialize_in_chunks(CHUNK_SIZE, metrics, idempotency_keys); while let Some(res) = iter.next() { let (chunk, body) = res?; let event_bytes = body.len(); let is_last = iter.len() == 0; let res = upload(client, metric_collection_endpoint, body, cancel, is_last) .instrument(tracing::info_span!( "upload", %event_bytes, uploaded, total = metrics.len(), )) .await; match res { Ok(()) => { for item in chunk { cached_metrics.insert(item.key, item.clone()); } uploaded += chunk.len(); } Err(_) => { // failure(s) have already been logged // // however this is an inconsistency: if we crash here, we will start with the // values as uploaded. in practice, the rejections no longer happen. 
failed += chunk.len(); } } } let elapsed = started_at.elapsed(); tracing::info!( uploaded, failed, elapsed_ms = elapsed.as_millis(), "done sending metrics" ); Ok(()) } /// Serialize and write metrics to a remote storage object #[tracing::instrument(skip_all, fields(metrics_total = %metrics.len()))] pub(super) async fn upload_metrics_bucket( client: &GenericRemoteStorage, cancel: &CancellationToken, node_id: &str, metrics: &[NewRawMetric], idempotency_keys: &[IdempotencyKey<'_>], ) -> anyhow::Result<()> { if metrics.is_empty() { // Skip uploads if we have no metrics, so that readers don't have to handle the edge case // of an empty object. return Ok(()); } // Compose object path let datetime: DateTime<Utc> = SystemTime::now().into(); let ts_prefix = datetime.format("year=%Y/month=%m/day=%d/hour=%H/%H:%M:%SZ"); let path = RemotePath::from_string(&format!("{ts_prefix}_{node_id}.ndjson.gz"))?; // Set up a gzip writer into a buffer let mut compressed_bytes: Vec<u8> = Vec::new(); let compressed_writer = std::io::Cursor::new(&mut compressed_bytes); let mut gzip_writer = async_compression::tokio::write::GzipEncoder::new(compressed_writer); // Serialize and write into compressed buffer let started_at = std::time::Instant::now(); for res in serialize_in_chunks_ndjson(CHUNK_SIZE, metrics, idempotency_keys) { let (_chunk, body) = res?; gzip_writer.write_all(&body).await?; } gzip_writer.flush().await?; gzip_writer.shutdown().await?; let compressed_length = compressed_bytes.len(); // Write to remote storage client .upload_storage_object( futures::stream::once(futures::future::ready(Ok(compressed_bytes.into()))), compressed_length, &path, cancel, ) .await?; let elapsed = started_at.elapsed(); tracing::info!( compressed_length, elapsed_ms = elapsed.as_millis(), "write metrics bucket at {path}", ); Ok(()) } /// Serializes the input metrics as JSON in chunks of chunk_size. 
The provided /// idempotency keys are injected into the corresponding metric events (reused /// across different metrics sinks), and must have the same length as input. fn serialize_in_chunks<'a>( chunk_size: usize, input: &'a [NewRawMetric], idempotency_keys: &'a [IdempotencyKey<'a>], ) -> impl ExactSizeIterator<Item = Result<(&'a [NewRawMetric], bytes::Bytes), serde_json::Error>> + 'a { use bytes::BufMut; assert_eq!(input.len(), idempotency_keys.len()); struct Iter<'a> { inner: std::slice::Chunks<'a, NewRawMetric>, idempotency_keys: std::slice::Iter<'a, IdempotencyKey<'a>>, chunk_size: usize, // write to a BytesMut so that we can cheaply clone the frozen Bytes for retries buffer: bytes::BytesMut, // chunk amount of events are reused to produce the serialized document scratch: Vec<Event<Ids, Name>>, } impl<'a> Iterator for Iter<'a> { type Item = Result<(&'a [NewRawMetric], bytes::Bytes), serde_json::Error>; fn next(&mut self) -> Option<Self::Item> { let chunk = self.inner.next()?; if self.scratch.is_empty() { // first round: create events with N strings self.scratch.extend( chunk .iter() .zip(&mut self.idempotency_keys) .map(|(raw_metric, key)| raw_metric.as_event(key)), ); } else { // next rounds: update_in_place to reuse allocations assert_eq!(self.scratch.len(), self.chunk_size); itertools::izip!(self.scratch.iter_mut(), chunk, &mut self.idempotency_keys) .for_each(|(slot, raw_metric, key)| raw_metric.update_in_place(slot, key)); } let res = serde_json::to_writer( (&mut self.buffer).writer(), &EventChunk { events: (&self.scratch[..chunk.len()]).into(), }, ); match res { Ok(()) => Some(Ok((chunk, self.buffer.split().freeze()))), Err(e) => Some(Err(e)), } } fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } } impl ExactSizeIterator for Iter<'_> {} let buffer = bytes::BytesMut::new(); let inner = input.chunks(chunk_size); let idempotency_keys = idempotency_keys.iter(); let scratch = Vec::new(); Iter { inner, idempotency_keys, chunk_size, 
buffer, scratch, } } /// Serializes the input metrics as NDJSON in chunks of chunk_size. Each event /// is serialized as a separate JSON object on its own line. The provided /// idempotency keys are injected into the corresponding metric events (reused /// across different metrics sinks), and must have the same length as input. fn serialize_in_chunks_ndjson<'a>( chunk_size: usize, input: &'a [NewRawMetric], idempotency_keys: &'a [IdempotencyKey<'a>], ) -> impl ExactSizeIterator<Item = Result<(&'a [NewRawMetric], bytes::Bytes), serde_json::Error>> + 'a { use bytes::BufMut; assert_eq!(input.len(), idempotency_keys.len()); struct Iter<'a> { inner: std::slice::Chunks<'a, NewRawMetric>, idempotency_keys: std::slice::Iter<'a, IdempotencyKey<'a>>, chunk_size: usize, // write to a BytesMut so that we can cheaply clone the frozen Bytes for retries buffer: bytes::BytesMut, // chunk amount of events are reused to produce the serialized document scratch: Vec<Event<Ids, Name>>, } impl<'a> Iterator for Iter<'a> { type Item = Result<(&'a [NewRawMetric], bytes::Bytes), serde_json::Error>; fn next(&mut self) -> Option<Self::Item> { let chunk = self.inner.next()?; if self.scratch.is_empty() { // first round: create events with N strings self.scratch.extend( chunk .iter() .zip(&mut self.idempotency_keys) .map(|(raw_metric, key)| raw_metric.as_event(key)), ); } else { // next rounds: update_in_place to reuse allocations assert_eq!(self.scratch.len(), self.chunk_size); itertools::izip!(self.scratch.iter_mut(), chunk, &mut self.idempotency_keys) .for_each(|(slot, raw_metric, key)| raw_metric.update_in_place(slot, key)); } // Serialize each event as NDJSON (one JSON object per line) for event in self.scratch[..chunk.len()].iter() { let res = serde_json::to_writer((&mut self.buffer).writer(), event); if let Err(e) = res { return Some(Err(e)); } // Add newline after each event to follow NDJSON format self.buffer.put_u8(b'\n'); } Some(Ok((chunk, self.buffer.split().freeze()))) } fn 
size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } } impl ExactSizeIterator for Iter<'_> {} let buffer = bytes::BytesMut::new(); let inner = input.chunks(chunk_size); let idempotency_keys = idempotency_keys.iter(); let scratch = Vec::new(); Iter { inner, idempotency_keys, chunk_size, buffer, scratch, } } trait RawMetricExt { fn as_event(&self, key: &IdempotencyKey<'_>) -> Event<Ids, Name>; fn update_in_place(&self, event: &mut Event<Ids, Name>, key: &IdempotencyKey<'_>); } impl RawMetricExt for RawMetric { fn as_event(&self, key: &IdempotencyKey<'_>) -> Event<Ids, Name> { let MetricsKey { metric, tenant_id, timeline_id, } = self.0; let (kind, value) = self.1; Event { kind, metric, idempotency_key: key.to_string(), value, extra: Ids { tenant_id, timeline_id, }, } } fn update_in_place(&self, event: &mut Event<Ids, Name>, key: &IdempotencyKey<'_>) { use std::fmt::Write; let MetricsKey { metric, tenant_id, timeline_id, } = self.0; let (kind, value) = self.1; *event = Event { kind, metric, idempotency_key: { event.idempotency_key.clear(); write!(event.idempotency_key, "{key}").unwrap(); std::mem::take(&mut event.idempotency_key) }, value, extra: Ids { tenant_id, timeline_id, }, }; } } impl RawMetricExt for NewRawMetric { fn as_event(&self, key: &IdempotencyKey<'_>) -> Event<Ids, Name> { let MetricsKey { metric, tenant_id, timeline_id, } = self.key; let kind = self.kind; let value = self.value; Event { kind, metric, idempotency_key: key.to_string(), value, extra: Ids { tenant_id, timeline_id, }, } } fn update_in_place(&self, event: &mut Event<Ids, Name>, key: &IdempotencyKey<'_>) { use std::fmt::Write; let MetricsKey { metric, tenant_id, timeline_id, } = self.key; let kind = self.kind; let value = self.value; *event = Event { kind, metric, idempotency_key: { event.idempotency_key.clear(); write!(event.idempotency_key, "{key}").unwrap(); std::mem::take(&mut event.idempotency_key) }, value, extra: Ids { tenant_id, timeline_id, }, }; } } pub(crate) trait 
KeyGen<'a> { fn generate(&self) -> IdempotencyKey<'a>; } impl<'a> KeyGen<'a> for &'a str { fn generate(&self) -> IdempotencyKey<'a> { IdempotencyKey::generate(self) } } enum UploadError { Rejected(reqwest::StatusCode), Reqwest(reqwest::Error), Cancelled, } impl std::fmt::Debug for UploadError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // use same impl because backoff::retry will log this using both std::fmt::Display::fmt(self, f) } } impl std::fmt::Display for UploadError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { use UploadError::*; match self { Rejected(code) => write!(f, "server rejected the metrics with {code}"), Reqwest(e) => write!( f, "request failed: {e}{}", e.source().map(|e| format!(": {e}")).unwrap_or_default() ), Cancelled => write!(f, "cancelled"), } } } impl UploadError { fn is_reject(&self) -> bool { matches!(self, UploadError::Rejected(_)) } } // this is consumed by the test verifiers static LAST_IN_BATCH: reqwest::header::HeaderName = reqwest::header::HeaderName::from_static("pageserver-metrics-last-upload-in-batch"); async fn upload( client: &reqwest::Client, metric_collection_endpoint: &reqwest::Url, body: bytes::Bytes, cancel: &CancellationToken, is_last: bool, ) -> Result<(), UploadError> { let warn_after = 3; let max_attempts = 10; // this is used only with tests so far let last_value = if is_last { "true" } else { "false" }; let res = utils::backoff::retry( || async { let res = client .post(metric_collection_endpoint.clone()) .header(reqwest::header::CONTENT_TYPE, "application/json") .header(LAST_IN_BATCH.clone(), last_value) .body(body.clone()) .send() .await; let res = res.and_then(|res| res.error_for_status()); // 10 redirects are normally allowed, so we don't need worry about 3xx match res { Ok(_response) => Ok(()), Err(e) => { let status = e.status().filter(|s| s.is_client_error()); if let Some(status) = status { // rejection used to be a thing when the server could reject a // 
whole batch of metrics if one metric was bad. Err(UploadError::Rejected(status)) } else { Err(UploadError::Reqwest(e)) } } } }, UploadError::is_reject, warn_after, max_attempts, "upload consumption_metrics", cancel, ) .await .ok_or_else(|| UploadError::Cancelled) .and_then(|x| x); match &res { Ok(_) => {} Err(e) if e.is_reject() => { // permanent errors currently do not get logged by backoff::retry // display alternate has no effect, but keeping it here for easier pattern matching. tracing::error!("failed to upload metrics: {e:#}"); } Err(_) => { // these have been logged already } } res } #[cfg(test)] mod tests { use chrono::{DateTime, Utc}; use once_cell::sync::Lazy; use super::*; use crate::consumption_metrics::NewMetricsRefRoot; use crate::consumption_metrics::disk_cache::read_metrics_from_serde_value; #[test] fn chunked_serialization() { let examples = metric_samples(); assert!(examples.len() > 1); let now = Utc::now(); let idempotency_keys = (0..examples.len()) .map(|i| FixedGen::new(now, "1", i as u16).generate()) .collect::<Vec<_>>(); // need to use Event here because serde_json::Value uses default hashmap, not linked // hashmap #[derive(serde::Deserialize)] struct EventChunk { events: Vec<Event<Ids, Name>>, } let correct = serialize_in_chunks(examples.len(), &examples, &idempotency_keys) .map(|res| res.unwrap().1) .flat_map(|body| serde_json::from_slice::<EventChunk>(&body).unwrap().events) .collect::<Vec<_>>(); for chunk_size in 1..examples.len() { let actual = serialize_in_chunks(chunk_size, &examples, &idempotency_keys) .map(|res| res.unwrap().1) .flat_map(|body| serde_json::from_slice::<EventChunk>(&body).unwrap().events) .collect::<Vec<_>>(); // if these are equal, it means that multi-chunking version works as well assert_eq!(correct, actual); } } #[test] fn chunked_serialization_ndjson() { let examples = metric_samples(); assert!(examples.len() > 1); let now = Utc::now(); let idempotency_keys = (0..examples.len()) .map(|i| FixedGen::new(now, "1", i 
as u16).generate()) .collect::<Vec<_>>(); // Parse NDJSON format - each line is a separate JSON object let parse_ndjson = |body: &[u8]| -> Vec<Event<Ids, Name>> { let body_str = std::str::from_utf8(body).unwrap(); body_str .trim_end_matches('\n') .lines() .filter(|line| !line.is_empty()) .map(|line| serde_json::from_str::<Event<Ids, Name>>(line).unwrap()) .collect() }; let correct = serialize_in_chunks_ndjson(examples.len(), &examples, &idempotency_keys) .map(|res| res.unwrap().1) .flat_map(|body| parse_ndjson(&body)) .collect::<Vec<_>>(); for chunk_size in 1..examples.len() { let actual = serialize_in_chunks_ndjson(chunk_size, &examples, &idempotency_keys) .map(|res| res.unwrap().1) .flat_map(|body| parse_ndjson(&body)) .collect::<Vec<_>>(); // if these are equal, it means that multi-chunking version works as well assert_eq!(correct, actual); } } #[derive(Clone, Copy)] struct FixedGen<'a>(chrono::DateTime<chrono::Utc>, &'a str, u16); impl<'a> FixedGen<'a> { fn new(now: chrono::DateTime<chrono::Utc>, node_id: &'a str, nonce: u16) -> Self { FixedGen(now, node_id, nonce) } } impl<'a> KeyGen<'a> for FixedGen<'a> { fn generate(&self) -> IdempotencyKey<'a> { IdempotencyKey::for_tests(self.0, self.1, self.2) } } static SAMPLES_NOW: Lazy<DateTime<Utc>> = Lazy::new(|| { DateTime::parse_from_rfc3339("2023-09-15T00:00:00.123456789Z") .unwrap() .into() }); #[test] fn metric_image_stability() { // it is important that these strings stay as they are let examples = [ ( line!(), r#"{"type":"absolute","time":"2023-09-15T00:00:00.123456789Z","metric":"written_size","idempotency_key":"2023-09-15 00:00:00.123456789 UTC-1-0000","value":0,"tenant_id":"00000000000000000000000000000000","timeline_id":"ffffffffffffffffffffffffffffffff"}"#, ), ( line!(), r#"{"type":"incremental","start_time":"2023-09-14T00:00:00.123456789Z","stop_time":"2023-09-15T00:00:00.123456789Z","metric":"written_data_bytes_delta","idempotency_key":"2023-09-15 00:00:00.123456789 
UTC-1-0000","value":0,"tenant_id":"00000000000000000000000000000000","timeline_id":"ffffffffffffffffffffffffffffffff"}"#, ), ( line!(), r#"{"type":"absolute","time":"2023-09-15T00:00:00.123456789Z","metric":"written_size_since_parent","idempotency_key":"2023-09-15 00:00:00.123456789 UTC-1-0000","value":0,"tenant_id":"00000000000000000000000000000000","timeline_id":"ffffffffffffffffffffffffffffffff"}"#, ), ( line!(), r#"{"type":"absolute","time":"2023-09-15T00:00:00.123456789Z","metric":"pitr_history_size_since_parent","idempotency_key":"2023-09-15 00:00:00.123456789 UTC-1-0000","value":0,"tenant_id":"00000000000000000000000000000000","timeline_id":"ffffffffffffffffffffffffffffffff"}"#, ), ( line!(), r#"{"type":"absolute","time":"2023-09-15T00:00:00.123456789Z","metric":"timeline_logical_size","idempotency_key":"2023-09-15 00:00:00.123456789 UTC-1-0000","value":0,"tenant_id":"00000000000000000000000000000000","timeline_id":"ffffffffffffffffffffffffffffffff"}"#, ), ( line!(), r#"{"type":"absolute","time":"2023-09-15T00:00:00.123456789Z","metric":"remote_storage_size","idempotency_key":"2023-09-15 00:00:00.123456789 UTC-1-0000","value":0,"tenant_id":"00000000000000000000000000000000"}"#, ), ( line!(), r#"{"type":"absolute","time":"2023-09-15T00:00:00.123456789Z","metric":"synthetic_storage_size","idempotency_key":"2023-09-15 00:00:00.123456789 UTC-1-0000","value":1,"tenant_id":"00000000000000000000000000000000"}"#, ), ]; let idempotency_key = consumption_metrics::IdempotencyKey::for_tests(*SAMPLES_NOW, "1", 0); let examples = examples.into_iter().zip(metric_samples()); for ((line, expected), item) in examples { let e = consumption_metrics::Event { kind: item.kind, metric: item.key.metric, idempotency_key: idempotency_key.to_string(), value: item.value, extra: Ids { tenant_id: item.key.tenant_id, timeline_id: item.key.timeline_id, }, }; let actual = serde_json::to_string(&e).unwrap(); assert_eq!( expected, actual, "example for {:?} from line {line}", item.kind ); } } 
#[test] fn disk_format_upgrade() { let old_samples_json = serde_json::to_value(metric_samples_old()).unwrap(); let new_samples = serde_json::to_value(NewMetricsRefRoot::new(metric_samples().as_ref())).unwrap(); let upgraded_samples = read_metrics_from_serde_value(old_samples_json).unwrap(); let new_samples = read_metrics_from_serde_value(new_samples).unwrap(); assert_eq!(upgraded_samples, new_samples); } fn metric_samples_old() -> [RawMetric; 7] { let tenant_id = TenantId::from_array([0; 16]); let timeline_id = TimelineId::from_array([0xff; 16]); let before = DateTime::parse_from_rfc3339("2023-09-14T00:00:00.123456789Z") .unwrap() .into(); let [now, before] = [*SAMPLES_NOW, before]; super::super::metrics::metric_examples_old(tenant_id, timeline_id, now, before) } fn metric_samples() -> [NewRawMetric; 7] { let tenant_id = TenantId::from_array([0; 16]); let timeline_id = TimelineId::from_array([0xff; 16]); let before = DateTime::parse_from_rfc3339("2023-09-14T00:00:00.123456789Z") .unwrap() .into(); let [now, before] = [*SAMPLES_NOW, before]; super::super::metrics::metric_examples(tenant_id, timeline_id, now, before) } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/consumption_metrics/metrics.rs
pageserver/src/consumption_metrics/metrics.rs
use std::sync::Arc; use std::time::SystemTime; use chrono::{DateTime, Utc}; use consumption_metrics::EventType; use futures::stream::StreamExt; use utils::id::{TenantId, TimelineId}; use utils::lsn::Lsn; use super::{Cache, NewRawMetric}; use crate::context::RequestContext; use crate::tenant::mgr::TenantManager; use crate::tenant::timeline::logical_size::CurrentLogicalSize; /// Name of the metric, used by `MetricsKey` factory methods and `deserialize_cached_events` /// instead of static str. // Do not rename any of these without first consulting with data team and partner // management. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] pub(super) enum Name { /// Timeline last_record_lsn, absolute. #[serde(rename = "written_size")] WrittenSize, /// Timeline last_record_lsn, incremental #[serde(rename = "written_data_bytes_delta")] WrittenSizeDelta, /// Written bytes only on this timeline (not including ancestors): /// written_size - ancestor_lsn /// /// On the root branch, this is equivalent to `written_size`. #[serde(rename = "written_size_since_parent")] WrittenSizeSinceParent, /// PITR history size only on this timeline (not including ancestors): /// last_record_lsn - max(pitr_cutoff, ancestor_lsn). /// /// On the root branch, this is its entire PITR history size. Not emitted if GC hasn't computed /// the PITR cutoff yet. 0 if PITR is disabled. #[serde(rename = "pitr_history_size_since_parent")] PitrHistorySizeSinceParent, /// Timeline logical size #[serde(rename = "timeline_logical_size")] LogicalSize, /// Tenant remote size #[serde(rename = "remote_storage_size")] RemoteSize, /// Tenant synthetic size #[serde(rename = "synthetic_storage_size")] SyntheticSize, } /// Key that uniquely identifies the object this metric describes. /// /// This is a denormalization done at the MetricsKey const methods; these should not be constructed /// elsewhere. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] pub(crate) struct MetricsKey { pub(super) tenant_id: TenantId, #[serde(skip_serializing_if = "Option::is_none")] pub(super) timeline_id: Option<TimelineId>, pub(super) metric: Name, } impl MetricsKey { const fn absolute_values(self) -> AbsoluteValueFactory { AbsoluteValueFactory(self) } const fn incremental_values(self) -> IncrementalValueFactory { IncrementalValueFactory(self) } } /// Helper type which each individual metric kind can return to produce only absolute values. struct AbsoluteValueFactory(MetricsKey); impl AbsoluteValueFactory { #[cfg(test)] const fn at_old_format(self, time: DateTime<Utc>, val: u64) -> super::RawMetric { let key = self.0; (key, (EventType::Absolute { time }, val)) } const fn at(self, time: DateTime<Utc>, val: u64) -> NewRawMetric { let key = self.0; NewRawMetric { key, kind: EventType::Absolute { time }, value: val, } } fn key(&self) -> &MetricsKey { &self.0 } } /// Helper type which each individual metric kind can return to produce only incremental values. 
struct IncrementalValueFactory(MetricsKey); impl IncrementalValueFactory { #[allow(clippy::wrong_self_convention)] const fn from_until( self, prev_end: DateTime<Utc>, up_to: DateTime<Utc>, val: u64, ) -> NewRawMetric { let key = self.0; // cannot assert prev_end < up_to because these are realtime clock based let when = EventType::Incremental { start_time: prev_end, stop_time: up_to, }; NewRawMetric { key, kind: when, value: val, } } #[allow(clippy::wrong_self_convention)] #[cfg(test)] const fn from_until_old_format( self, prev_end: DateTime<Utc>, up_to: DateTime<Utc>, val: u64, ) -> super::RawMetric { let key = self.0; // cannot assert prev_end < up_to because these are realtime clock based let when = EventType::Incremental { start_time: prev_end, stop_time: up_to, }; (key, (when, val)) } fn key(&self) -> &MetricsKey { &self.0 } } // the static part of a MetricsKey impl MetricsKey { /// Absolute value of [`Timeline::get_last_record_lsn`]. /// /// [`Timeline::get_last_record_lsn`]: crate::tenant::Timeline::get_last_record_lsn const fn written_size(tenant_id: TenantId, timeline_id: TimelineId) -> AbsoluteValueFactory { MetricsKey { tenant_id, timeline_id: Some(timeline_id), metric: Name::WrittenSize, } .absolute_values() } /// Values will be the difference of the latest [`MetricsKey::written_size`] to what we /// previously sent, starting from the previously sent incremental time range ending at the /// latest absolute measurement. const fn written_size_delta( tenant_id: TenantId, timeline_id: TimelineId, ) -> IncrementalValueFactory { MetricsKey { tenant_id, timeline_id: Some(timeline_id), metric: Name::WrittenSizeDelta, } .incremental_values() } /// `written_size` - `ancestor_lsn`. const fn written_size_since_parent( tenant_id: TenantId, timeline_id: TimelineId, ) -> AbsoluteValueFactory { MetricsKey { tenant_id, timeline_id: Some(timeline_id), metric: Name::WrittenSizeSinceParent, } .absolute_values() } /// `written_size` - max(`pitr_cutoff`, `ancestor_lsn`). 
const fn pitr_history_size_since_parent( tenant_id: TenantId, timeline_id: TimelineId, ) -> AbsoluteValueFactory { MetricsKey { tenant_id, timeline_id: Some(timeline_id), metric: Name::PitrHistorySizeSinceParent, } .absolute_values() } /// Exact [`Timeline::get_current_logical_size`]. /// /// [`Timeline::get_current_logical_size`]: crate::tenant::Timeline::get_current_logical_size const fn timeline_logical_size( tenant_id: TenantId, timeline_id: TimelineId, ) -> AbsoluteValueFactory { MetricsKey { tenant_id, timeline_id: Some(timeline_id), metric: Name::LogicalSize, } .absolute_values() } /// [`TenantShard::remote_size`] /// /// [`TenantShard::remote_size`]: crate::tenant::TenantShard::remote_size const fn remote_storage_size(tenant_id: TenantId) -> AbsoluteValueFactory { MetricsKey { tenant_id, timeline_id: None, metric: Name::RemoteSize, } .absolute_values() } /// [`TenantShard::cached_synthetic_size`] as refreshed by [`calculate_synthetic_size_worker`]. /// /// [`TenantShard::cached_synthetic_size`]: crate::tenant::TenantShard::cached_synthetic_size /// [`calculate_synthetic_size_worker`]: super::calculate_synthetic_size_worker const fn synthetic_size(tenant_id: TenantId) -> AbsoluteValueFactory { MetricsKey { tenant_id, timeline_id: None, metric: Name::SyntheticSize, } .absolute_values() } } pub(super) async fn collect_all_metrics( tenant_manager: &Arc<TenantManager>, cached_metrics: &Cache, ctx: &RequestContext, ) -> Vec<NewRawMetric> { use pageserver_api::models::TenantState; let started_at = std::time::Instant::now(); let tenants = match tenant_manager.list_tenants() { Ok(tenants) => tenants, Err(err) => { tracing::error!("failed to list tenants: {:?}", err); return vec![]; } }; let tenants = futures::stream::iter(tenants).filter_map(|(id, state, _)| async move { if state != TenantState::Active || !id.is_shard_zero() { None } else { tenant_manager .get_attached_tenant_shard(id) .ok() .map(|tenant| (id.tenant_id, tenant)) } }); let res = collect(tenants, 
cached_metrics, ctx).await; tracing::info!( elapsed_ms = started_at.elapsed().as_millis(), total = res.len(), "collected metrics" ); res } async fn collect<S>(tenants: S, cache: &Cache, ctx: &RequestContext) -> Vec<NewRawMetric> where S: futures::stream::Stream<Item = (TenantId, Arc<crate::tenant::TenantShard>)>, { let mut current_metrics: Vec<NewRawMetric> = Vec::new(); let mut tenants = std::pin::pin!(tenants); while let Some((tenant_id, tenant)) = tenants.next().await { let timelines = tenant.list_timelines(); for timeline in timelines { let timeline_id = timeline.timeline_id; match TimelineSnapshot::collect(&timeline, ctx) { Ok(Some(snap)) => { snap.to_metrics( tenant_id, timeline_id, Utc::now(), &mut current_metrics, cache, ); } Ok(None) => {} Err(e) => { tracing::error!( "failed to get metrics values for tenant {tenant_id} timeline {}: {e:#?}", timeline.timeline_id ); continue; } } } let snap = TenantSnapshot::collect(&tenant); snap.to_metrics(tenant_id, Utc::now(), cache, &mut current_metrics); } current_metrics } /// In-between abstraction to allow testing metrics without actual Tenants. struct TenantSnapshot { remote_size: u64, synthetic_size: u64, } impl TenantSnapshot { /// Collect tenant status to have metrics created out of it. 
fn collect(t: &Arc<crate::tenant::TenantShard>) -> Self { TenantSnapshot { remote_size: t.remote_size(), // Note that this metric is calculated in a separate bgworker // Here we only use cached value, which may lag behind the real latest one synthetic_size: t.cached_synthetic_size(), } } fn to_metrics( &self, tenant_id: TenantId, now: DateTime<Utc>, cached: &Cache, metrics: &mut Vec<NewRawMetric>, ) { let remote_size = MetricsKey::remote_storage_size(tenant_id).at(now, self.remote_size); let synthetic_size = { let factory = MetricsKey::synthetic_size(tenant_id); let mut synthetic_size = self.synthetic_size; if synthetic_size == 0 { if let Some(item) = cached.get(factory.key()) { // use the latest value from previous session, TODO: check generation number synthetic_size = item.value; } } if synthetic_size != 0 { // only send non-zeroes because otherwise these show up as errors in logs Some(factory.at(now, synthetic_size)) } else { None } }; metrics.extend([Some(remote_size), synthetic_size].into_iter().flatten()); } } /// Internal type to make timeline metric production testable. /// /// As this value type contains all of the information needed from a timeline to produce the /// metrics, it can easily be created with different values in test. struct TimelineSnapshot { loaded_at: (Lsn, SystemTime), last_record_lsn: Lsn, ancestor_lsn: Lsn, current_exact_logical_size: Option<u64>, /// Whether PITR is enabled (pitr_interval > 0). pitr_enabled: bool, /// The PITR cutoff LSN. None if not yet initialized. If PITR is disabled, this is approximately /// Some(last_record_lsn), but may lag behind it since it's computed periodically. pitr_cutoff: Option<Lsn>, } impl TimelineSnapshot { /// Collect the metrics from an actual timeline. /// /// Fails currently only when [`Timeline::get_current_logical_size`] fails. 
/// /// [`Timeline::get_current_logical_size`]: crate::tenant::Timeline::get_current_logical_size fn collect( t: &Arc<crate::tenant::Timeline>, ctx: &RequestContext, ) -> anyhow::Result<Option<Self>> { if !t.is_active() { // no collection for broken or stopping needed, we will still keep the cached values // though at the caller. Ok(None) } else { let loaded_at = t.loaded_at; let last_record_lsn = t.get_last_record_lsn(); let ancestor_lsn = t.get_ancestor_lsn(); let pitr_enabled = !t.get_pitr_interval().is_zero(); let pitr_cutoff = t.gc_info.read().unwrap().cutoffs.time; let current_exact_logical_size = { let span = tracing::info_span!("collect_metrics_iteration", tenant_id = %t.tenant_shard_id.tenant_id, timeline_id = %t.timeline_id); let size = span.in_scope(|| { t.get_current_logical_size( crate::tenant::timeline::GetLogicalSizePriority::Background, ctx, ) }); match size { // Only send timeline logical size when it is fully calculated. CurrentLogicalSize::Exact(ref size) => Some(size.into()), CurrentLogicalSize::Approximate(_) => None, } }; Ok(Some(TimelineSnapshot { loaded_at, last_record_lsn, ancestor_lsn, current_exact_logical_size, pitr_enabled, pitr_cutoff, })) } } /// Produce the timeline consumption metrics into the `metrics` argument. fn to_metrics( &self, tenant_id: TenantId, timeline_id: TimelineId, now: DateTime<Utc>, metrics: &mut Vec<NewRawMetric>, cache: &Cache, ) { let timeline_written_size = u64::from(self.last_record_lsn); let written_size_delta_key = MetricsKey::written_size_delta(tenant_id, timeline_id); let last_stop_time = cache.get(written_size_delta_key.key()).map(|item| { item.kind .incremental_timerange() .expect("never create EventType::Absolute for written_size_delta") .end }); let written_size_now = MetricsKey::written_size(tenant_id, timeline_id).at(now, timeline_written_size); // by default, use the last sent written_size as the basis for // calculating the delta. if we don't yet have one, use the load time value. 
let prev: (DateTime<Utc>, u64) = cache .get(&written_size_now.key) .map(|item| { // use the prev time from our last incremental update, or default to latest // absolute update on the first round. let prev_at = item .kind .absolute_time() .expect("never create EventType::Incremental for written_size"); let prev_at = last_stop_time.unwrap_or(prev_at); (*prev_at, item.value) }) .unwrap_or_else(|| { // if we don't have a previous point of comparison, compare to the load time // lsn. let (disk_consistent_lsn, loaded_at) = &self.loaded_at; (DateTime::from(*loaded_at), disk_consistent_lsn.0) }); let up_to = now; let written_size_last = written_size_now.value.max(prev.1); // don't regress if let Some(delta) = written_size_now.value.checked_sub(prev.1) { let key_value = written_size_delta_key.from_until(prev.0, up_to, delta); // written_size_delta metrics.push(key_value); // written_size metrics.push(written_size_now); } else { // the cached value was ahead of us, report zero until we've caught up metrics.push(written_size_delta_key.from_until(prev.0, up_to, 0)); // the cached value was ahead of us, report the same until we've caught up metrics.push(NewRawMetric { key: written_size_now.key, kind: written_size_now.kind, value: prev.1, }); } // Compute the branch-local written size. let written_size_since_parent_key = MetricsKey::written_size_since_parent(tenant_id, timeline_id); metrics.push( written_size_since_parent_key .at(now, written_size_last.saturating_sub(self.ancestor_lsn.0)), ); // Compute the branch-local PITR history size. Not emitted if GC hasn't yet computed the // PITR cutoff. 0 if PITR is disabled. 
let pitr_history_size_since_parent_key = MetricsKey::pitr_history_size_since_parent(tenant_id, timeline_id); if !self.pitr_enabled { metrics.push(pitr_history_size_since_parent_key.at(now, 0)); } else if let Some(pitr_cutoff) = self.pitr_cutoff { metrics.push(pitr_history_size_since_parent_key.at( now, written_size_last.saturating_sub(pitr_cutoff.max(self.ancestor_lsn).0), )); } { let factory = MetricsKey::timeline_logical_size(tenant_id, timeline_id); let current_or_previous = self .current_exact_logical_size .or_else(|| cache.get(factory.key()).map(|item| item.value)); if let Some(size) = current_or_previous { metrics.push(factory.at(now, size)); } } } } #[cfg(test)] mod tests; #[cfg(test)] pub(crate) use tests::{metric_examples, metric_examples_old};
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/consumption_metrics/metrics/tests.rs
pageserver/src/consumption_metrics/metrics/tests.rs
use std::collections::HashMap; use super::*; use crate::consumption_metrics::RawMetric; #[test] fn startup_collected_timeline_metrics_before_advancing() { let tenant_id = TenantId::generate(); let timeline_id = TimelineId::generate(); let mut metrics = Vec::new(); let cache = HashMap::new(); let initdb_lsn = Lsn(0x10000); let pitr_cutoff = Lsn(0x11000); let disk_consistent_lsn = Lsn(initdb_lsn.0 * 2); let logical_size = 0x42000; let snap = TimelineSnapshot { loaded_at: (disk_consistent_lsn, SystemTime::now()), last_record_lsn: disk_consistent_lsn, ancestor_lsn: Lsn(0), current_exact_logical_size: Some(logical_size), pitr_enabled: true, pitr_cutoff: Some(pitr_cutoff), }; let now = DateTime::<Utc>::from(SystemTime::now()); snap.to_metrics(tenant_id, timeline_id, now, &mut metrics, &cache); assert_eq!( metrics, &[ MetricsKey::written_size_delta(tenant_id, timeline_id).from_until( snap.loaded_at.1.into(), now, 0 ), MetricsKey::written_size(tenant_id, timeline_id).at(now, disk_consistent_lsn.0), MetricsKey::written_size_since_parent(tenant_id, timeline_id) .at(now, disk_consistent_lsn.0), MetricsKey::pitr_history_size_since_parent(tenant_id, timeline_id) .at(now, disk_consistent_lsn.0 - pitr_cutoff.0), MetricsKey::timeline_logical_size(tenant_id, timeline_id).at(now, logical_size) ] ); } #[test] fn startup_collected_timeline_metrics_second_round() { let tenant_id = TenantId::generate(); let timeline_id = TimelineId::generate(); let [now, before, init] = time_backwards(); let now = DateTime::<Utc>::from(now); let before = DateTime::<Utc>::from(before); let initdb_lsn = Lsn(0x10000); let pitr_cutoff = Lsn(0x11000); let disk_consistent_lsn = Lsn(initdb_lsn.0 * 2); let logical_size = 0x42000; let mut metrics = Vec::new(); let cache = HashMap::from([MetricsKey::written_size(tenant_id, timeline_id) .at(before, disk_consistent_lsn.0) .to_kv_pair()]); let snap = TimelineSnapshot { loaded_at: (disk_consistent_lsn, init), last_record_lsn: disk_consistent_lsn, ancestor_lsn: 
Lsn(0), current_exact_logical_size: Some(logical_size), pitr_enabled: true, pitr_cutoff: Some(pitr_cutoff), }; snap.to_metrics(tenant_id, timeline_id, now, &mut metrics, &cache); assert_eq!( metrics, &[ MetricsKey::written_size_delta(tenant_id, timeline_id).from_until(before, now, 0), MetricsKey::written_size(tenant_id, timeline_id).at(now, disk_consistent_lsn.0), MetricsKey::written_size_since_parent(tenant_id, timeline_id) .at(now, disk_consistent_lsn.0), MetricsKey::pitr_history_size_since_parent(tenant_id, timeline_id) .at(now, disk_consistent_lsn.0 - pitr_cutoff.0), MetricsKey::timeline_logical_size(tenant_id, timeline_id).at(now, logical_size) ] ); } #[test] fn startup_collected_timeline_metrics_nth_round_at_same_lsn() { let tenant_id = TenantId::generate(); let timeline_id = TimelineId::generate(); let [now, just_before, before, init] = time_backwards(); let now = DateTime::<Utc>::from(now); let just_before = DateTime::<Utc>::from(just_before); let before = DateTime::<Utc>::from(before); let initdb_lsn = Lsn(0x10000); let pitr_cutoff = Lsn(0x11000); let disk_consistent_lsn = Lsn(initdb_lsn.0 * 2); let logical_size = 0x42000; let mut metrics = Vec::new(); let cache = HashMap::from([ // at t=before was the last time the last_record_lsn changed MetricsKey::written_size(tenant_id, timeline_id) .at(before, disk_consistent_lsn.0) .to_kv_pair(), // end time of this event is used for the next ones MetricsKey::written_size_delta(tenant_id, timeline_id) .from_until(before, just_before, 0) .to_kv_pair(), ]); let snap = TimelineSnapshot { loaded_at: (disk_consistent_lsn, init), last_record_lsn: disk_consistent_lsn, ancestor_lsn: Lsn(0), current_exact_logical_size: Some(logical_size), pitr_enabled: true, pitr_cutoff: Some(pitr_cutoff), }; snap.to_metrics(tenant_id, timeline_id, now, &mut metrics, &cache); assert_eq!( metrics, &[ MetricsKey::written_size_delta(tenant_id, timeline_id).from_until(just_before, now, 0), MetricsKey::written_size(tenant_id, timeline_id).at(now, 
disk_consistent_lsn.0), MetricsKey::written_size_since_parent(tenant_id, timeline_id) .at(now, disk_consistent_lsn.0), MetricsKey::pitr_history_size_since_parent(tenant_id, timeline_id) .at(now, disk_consistent_lsn.0 - pitr_cutoff.0), MetricsKey::timeline_logical_size(tenant_id, timeline_id).at(now, logical_size) ] ); } /// Tests that written sizes do not regress across restarts. #[test] fn post_restart_written_sizes_with_rolled_back_last_record_lsn() { let tenant_id = TenantId::generate(); let timeline_id = TimelineId::generate(); let [later, now, at_restart] = time_backwards(); // FIXME: tests would be so much easier if we did not need to juggle back and forth // SystemTime and DateTime::<Utc> ... Could do the conversion only at upload time? let now = DateTime::<Utc>::from(now); let later = DateTime::<Utc>::from(later); let before_restart = at_restart - std::time::Duration::from_secs(5 * 60); let way_before = before_restart - std::time::Duration::from_secs(10 * 60); let before_restart = DateTime::<Utc>::from(before_restart); let way_before = DateTime::<Utc>::from(way_before); let snap = TimelineSnapshot { loaded_at: (Lsn(50), at_restart), last_record_lsn: Lsn(50), ancestor_lsn: Lsn(0), current_exact_logical_size: None, pitr_enabled: true, pitr_cutoff: Some(Lsn(20)), }; let mut cache = HashMap::from([ MetricsKey::written_size(tenant_id, timeline_id) .at(before_restart, 100) .to_kv_pair(), MetricsKey::written_size_delta(tenant_id, timeline_id) .from_until( way_before, before_restart, // not taken into account, but the timestamps are important 999_999_999, ) .to_kv_pair(), ]); let mut metrics = Vec::new(); snap.to_metrics(tenant_id, timeline_id, now, &mut metrics, &cache); assert_eq!( metrics, &[ MetricsKey::written_size_delta(tenant_id, timeline_id).from_until( before_restart, now, 0 ), MetricsKey::written_size(tenant_id, timeline_id).at(now, 100), MetricsKey::written_size_since_parent(tenant_id, timeline_id).at(now, 100), 
MetricsKey::pitr_history_size_since_parent(tenant_id, timeline_id).at(now, 80), ] ); // now if we cache these metrics, and re-run while "still in recovery" cache.extend(metrics.drain(..).map(|x| x.to_kv_pair())); // "still in recovery", because our snapshot did not change snap.to_metrics(tenant_id, timeline_id, later, &mut metrics, &cache); assert_eq!( metrics, &[ MetricsKey::written_size_delta(tenant_id, timeline_id).from_until(now, later, 0), MetricsKey::written_size(tenant_id, timeline_id).at(later, 100), MetricsKey::written_size_since_parent(tenant_id, timeline_id).at(later, 100), MetricsKey::pitr_history_size_since_parent(tenant_id, timeline_id).at(later, 80), ] ); } /// Tests that written sizes do not regress across restarts, even on child branches. #[test] fn post_restart_written_sizes_with_rolled_back_last_record_lsn_and_ancestor_lsn() { let tenant_id = TenantId::generate(); let timeline_id = TimelineId::generate(); let [later, now, at_restart] = time_backwards(); // FIXME: tests would be so much easier if we did not need to juggle back and forth // SystemTime and DateTime::<Utc> ... Could do the conversion only at upload time? 
let now = DateTime::<Utc>::from(now); let later = DateTime::<Utc>::from(later); let before_restart = at_restart - std::time::Duration::from_secs(5 * 60); let way_before = before_restart - std::time::Duration::from_secs(10 * 60); let before_restart = DateTime::<Utc>::from(before_restart); let way_before = DateTime::<Utc>::from(way_before); let snap = TimelineSnapshot { loaded_at: (Lsn(50), at_restart), last_record_lsn: Lsn(50), ancestor_lsn: Lsn(40), current_exact_logical_size: None, pitr_enabled: true, pitr_cutoff: Some(Lsn(20)), }; let mut cache = HashMap::from([ MetricsKey::written_size(tenant_id, timeline_id) .at(before_restart, 100) .to_kv_pair(), MetricsKey::written_size_delta(tenant_id, timeline_id) .from_until( way_before, before_restart, // not taken into account, but the timestamps are important 999_999_999, ) .to_kv_pair(), ]); let mut metrics = Vec::new(); snap.to_metrics(tenant_id, timeline_id, now, &mut metrics, &cache); assert_eq!( metrics, &[ MetricsKey::written_size_delta(tenant_id, timeline_id).from_until( before_restart, now, 0 ), MetricsKey::written_size(tenant_id, timeline_id).at(now, 100), MetricsKey::written_size_since_parent(tenant_id, timeline_id).at(now, 60), MetricsKey::pitr_history_size_since_parent(tenant_id, timeline_id).at(now, 60), ] ); // now if we cache these metrics, and re-run while "still in recovery" cache.extend(metrics.drain(..).map(|x| x.to_kv_pair())); // "still in recovery", because our snapshot did not change snap.to_metrics(tenant_id, timeline_id, later, &mut metrics, &cache); assert_eq!( metrics, &[ MetricsKey::written_size_delta(tenant_id, timeline_id).from_until(now, later, 0), MetricsKey::written_size(tenant_id, timeline_id).at(later, 100), MetricsKey::written_size_since_parent(tenant_id, timeline_id).at(later, 60), MetricsKey::pitr_history_size_since_parent(tenant_id, timeline_id).at(later, 60), ] ); } /// Tests that written sizes do not regress across restarts, even on child branches and /// with a PITR cutoff after 
the branch point. #[test] fn post_restart_written_sizes_with_rolled_back_last_record_lsn_and_ancestor_lsn_and_pitr_cutoff() { let tenant_id = TenantId::generate(); let timeline_id = TimelineId::generate(); let [later, now, at_restart] = time_backwards(); // FIXME: tests would be so much easier if we did not need to juggle back and forth // SystemTime and DateTime::<Utc> ... Could do the conversion only at upload time? let now = DateTime::<Utc>::from(now); let later = DateTime::<Utc>::from(later); let before_restart = at_restart - std::time::Duration::from_secs(5 * 60); let way_before = before_restart - std::time::Duration::from_secs(10 * 60); let before_restart = DateTime::<Utc>::from(before_restart); let way_before = DateTime::<Utc>::from(way_before); let snap = TimelineSnapshot { loaded_at: (Lsn(50), at_restart), last_record_lsn: Lsn(50), ancestor_lsn: Lsn(30), current_exact_logical_size: None, pitr_enabled: true, pitr_cutoff: Some(Lsn(40)), }; let mut cache = HashMap::from([ MetricsKey::written_size(tenant_id, timeline_id) .at(before_restart, 100) .to_kv_pair(), MetricsKey::written_size_delta(tenant_id, timeline_id) .from_until( way_before, before_restart, // not taken into account, but the timestamps are important 999_999_999, ) .to_kv_pair(), ]); let mut metrics = Vec::new(); snap.to_metrics(tenant_id, timeline_id, now, &mut metrics, &cache); assert_eq!( metrics, &[ MetricsKey::written_size_delta(tenant_id, timeline_id).from_until( before_restart, now, 0 ), MetricsKey::written_size(tenant_id, timeline_id).at(now, 100), MetricsKey::written_size_since_parent(tenant_id, timeline_id).at(now, 70), MetricsKey::pitr_history_size_since_parent(tenant_id, timeline_id).at(now, 60), ] ); // now if we cache these metrics, and re-run while "still in recovery" cache.extend(metrics.drain(..).map(|x| x.to_kv_pair())); // "still in recovery", because our snapshot did not change snap.to_metrics(tenant_id, timeline_id, later, &mut metrics, &cache); assert_eq!( metrics, &[ 
MetricsKey::written_size_delta(tenant_id, timeline_id).from_until(now, later, 0), MetricsKey::written_size(tenant_id, timeline_id).at(later, 100), MetricsKey::written_size_since_parent(tenant_id, timeline_id).at(later, 70), MetricsKey::pitr_history_size_since_parent(tenant_id, timeline_id).at(later, 60), ] ); } #[test] fn post_restart_current_exact_logical_size_uses_cached() { let tenant_id = TenantId::generate(); let timeline_id = TimelineId::generate(); let [now, at_restart] = time_backwards(); let now = DateTime::<Utc>::from(now); let before_restart = at_restart - std::time::Duration::from_secs(5 * 60); let before_restart = DateTime::<Utc>::from(before_restart); let snap = TimelineSnapshot { loaded_at: (Lsn(50), at_restart), last_record_lsn: Lsn(50), ancestor_lsn: Lsn(0), current_exact_logical_size: None, pitr_enabled: true, pitr_cutoff: None, }; let cache = HashMap::from([MetricsKey::timeline_logical_size(tenant_id, timeline_id) .at(before_restart, 100) .to_kv_pair()]); let mut metrics = Vec::new(); snap.to_metrics(tenant_id, timeline_id, now, &mut metrics, &cache); metrics.retain(|item| item.key.metric == Name::LogicalSize); assert_eq!( metrics, &[MetricsKey::timeline_logical_size(tenant_id, timeline_id).at(now, 100)] ); } #[test] fn post_restart_synthetic_size_uses_cached_if_available() { let tenant_id = TenantId::generate(); let ts = TenantSnapshot { remote_size: 1000, // not yet calculated synthetic_size: 0, }; let now = SystemTime::now(); let before_restart = DateTime::<Utc>::from(now - std::time::Duration::from_secs(5 * 60)); let now = DateTime::<Utc>::from(now); let cached = HashMap::from([MetricsKey::synthetic_size(tenant_id) .at(before_restart, 1000) .to_kv_pair()]); let mut metrics = Vec::new(); ts.to_metrics(tenant_id, now, &cached, &mut metrics); assert_eq!( metrics, &[ MetricsKey::remote_storage_size(tenant_id).at(now, 1000), MetricsKey::synthetic_size(tenant_id).at(now, 1000), ] ); } #[test] fn 
post_restart_synthetic_size_is_not_sent_when_not_cached() { let tenant_id = TenantId::generate(); let ts = TenantSnapshot { remote_size: 1000, // not yet calculated synthetic_size: 0, }; let now = SystemTime::now(); let now = DateTime::<Utc>::from(now); let cached = HashMap::new(); let mut metrics = Vec::new(); ts.to_metrics(tenant_id, now, &cached, &mut metrics); assert_eq!( metrics, &[ MetricsKey::remote_storage_size(tenant_id).at(now, 1000), // no synthetic size here ] ); } fn time_backwards<const N: usize>() -> [std::time::SystemTime; N] { let mut times = [std::time::SystemTime::UNIX_EPOCH; N]; times[0] = std::time::SystemTime::now(); for behind in 1..N { times[behind] = times[0] - std::time::Duration::from_secs(behind as u64); } times } /// Tests that disabled PITR history does not yield any history size, even when the PITR cutoff /// indicates otherwise. #[test] fn pitr_disabled_yields_no_history_size() { let tenant_id = TenantId::generate(); let timeline_id = TimelineId::generate(); let mut metrics = Vec::new(); let cache = HashMap::new(); let initdb_lsn = Lsn(0x10000); let pitr_cutoff = Lsn(0x11000); let disk_consistent_lsn = Lsn(initdb_lsn.0 * 2); let snap = TimelineSnapshot { loaded_at: (disk_consistent_lsn, SystemTime::now()), last_record_lsn: disk_consistent_lsn, ancestor_lsn: Lsn(0), current_exact_logical_size: None, pitr_enabled: false, pitr_cutoff: Some(pitr_cutoff), }; let now = DateTime::<Utc>::from(SystemTime::now()); snap.to_metrics(tenant_id, timeline_id, now, &mut metrics, &cache); assert_eq!( metrics, &[ MetricsKey::written_size_delta(tenant_id, timeline_id).from_until( snap.loaded_at.1.into(), now, 0 ), MetricsKey::written_size(tenant_id, timeline_id).at(now, disk_consistent_lsn.0), MetricsKey::written_size_since_parent(tenant_id, timeline_id) .at(now, disk_consistent_lsn.0), MetricsKey::pitr_history_size_since_parent(tenant_id, timeline_id).at(now, 0), ] ); } /// Tests that uninitialized PITR cutoff does not emit any history size metric at 
all. #[test] fn pitr_uninitialized_does_not_emit_history_size() { let tenant_id = TenantId::generate(); let timeline_id = TimelineId::generate(); let mut metrics = Vec::new(); let cache = HashMap::new(); let initdb_lsn = Lsn(0x10000); let disk_consistent_lsn = Lsn(initdb_lsn.0 * 2); let snap = TimelineSnapshot { loaded_at: (disk_consistent_lsn, SystemTime::now()), last_record_lsn: disk_consistent_lsn, ancestor_lsn: Lsn(0), current_exact_logical_size: None, pitr_enabled: true, pitr_cutoff: None, }; let now = DateTime::<Utc>::from(SystemTime::now()); snap.to_metrics(tenant_id, timeline_id, now, &mut metrics, &cache); assert_eq!( metrics, &[ MetricsKey::written_size_delta(tenant_id, timeline_id).from_until( snap.loaded_at.1.into(), now, 0 ), MetricsKey::written_size(tenant_id, timeline_id).at(now, disk_consistent_lsn.0), MetricsKey::written_size_since_parent(tenant_id, timeline_id) .at(now, disk_consistent_lsn.0), ] ); } pub(crate) const fn metric_examples_old( tenant_id: TenantId, timeline_id: TimelineId, now: DateTime<Utc>, before: DateTime<Utc>, ) -> [RawMetric; 7] { [ MetricsKey::written_size(tenant_id, timeline_id).at_old_format(now, 0), MetricsKey::written_size_delta(tenant_id, timeline_id) .from_until_old_format(before, now, 0), MetricsKey::written_size_since_parent(tenant_id, timeline_id).at_old_format(now, 0), MetricsKey::pitr_history_size_since_parent(tenant_id, timeline_id).at_old_format(now, 0), MetricsKey::timeline_logical_size(tenant_id, timeline_id).at_old_format(now, 0), MetricsKey::remote_storage_size(tenant_id).at_old_format(now, 0), MetricsKey::synthetic_size(tenant_id).at_old_format(now, 1), ] } pub(crate) const fn metric_examples( tenant_id: TenantId, timeline_id: TimelineId, now: DateTime<Utc>, before: DateTime<Utc>, ) -> [NewRawMetric; 7] { [ MetricsKey::written_size(tenant_id, timeline_id).at(now, 0), MetricsKey::written_size_delta(tenant_id, timeline_id).from_until(before, now, 0), MetricsKey::written_size_since_parent(tenant_id, 
timeline_id).at(now, 0), MetricsKey::pitr_history_size_since_parent(tenant_id, timeline_id).at(now, 0), MetricsKey::timeline_logical_size(tenant_id, timeline_id).at(now, 0), MetricsKey::remote_storage_size(tenant_id).at(now, 0), MetricsKey::synthetic_size(tenant_id).at(now, 1), ] }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/deletion_queue/list_writer.rs
pageserver/src/deletion_queue/list_writer.rs
//! The list writer is the first stage in the deletion queue. It accumulates //! layers to delete, and periodically writes out these layers into a persistent //! DeletionList. //! //! The purpose of writing DeletionLists is to decouple the decision to //! delete an object from the validation required to execute it: even if //! validation is not possible, e.g. due to a control plane outage, we can //! still persist our intent to delete an object, in a way that would //! survive a restart. //! //! DeletionLists are passed onwards to the Validator. use std::collections::HashMap; use std::fs::create_dir_all; use std::time::Duration; use pageserver_api::shard::TenantShardId; use regex::Regex; use remote_storage::RemotePath; use tokio_util::sync::CancellationToken; use tracing::{debug, info, warn}; use utils::generation::Generation; use utils::id::TimelineId; use super::{DeletionHeader, DeletionList, FlushOp, ValidatorQueueMessage}; use crate::config::PageServerConf; use crate::deletion_queue::TEMP_SUFFIX; use crate::metrics; use crate::tenant::remote_timeline_client::{LayerFileMetadata, remote_layer_path}; use crate::tenant::storage_layer::LayerName; use crate::virtual_file::{MaybeFatalIo, on_fatal_io_error}; // The number of keys in a DeletionList before we will proactively persist it // (without reaching a flush deadline). This aims to deliver objects of the order // of magnitude 1MB when we are under heavy delete load. const DELETION_LIST_TARGET_SIZE: usize = 16384; // Ordinarily, we only flush to DeletionList periodically, to bound the window during // which we might leak objects from not flushing a DeletionList after // the objects are already unlinked from timeline metadata. const FRONTEND_DEFAULT_TIMEOUT: Duration = Duration::from_millis(10000); // If someone is waiting for a flush to DeletionList, only delay a little to accumulate // more objects before doing the flush. 
// When we have flush requests pending, poll the queue more aggressively so that the
// flush completes promptly (vs. the longer FRONTEND_DEFAULT_TIMEOUT idle deadline).
const FRONTEND_FLUSHING_TIMEOUT: Duration = Duration::from_millis(100);

#[derive(Debug)]
pub(super) struct DeletionOp {
    pub(super) tenant_shard_id: TenantShardId,
    pub(super) timeline_id: TimelineId,
    // `layers` and `objects` are both just lists of objects.  `layers` is used if you do not
    // have a config object handy to project it to a remote key, and need the consuming worker
    // to do it for you.
    pub(super) layers: Vec<(LayerName, LayerFileMetadata)>,
    pub(super) objects: Vec<RemotePath>,

    /// The _current_ generation of the Tenant shard attachment in which we are enqueuing
    /// this deletion.
    pub(super) generation: Generation,
}

#[derive(Debug)]
pub(super) struct RecoverOp {
    // Latest known generation for each attached tenant shard, used during recovery to
    // decide whether previous-generation deletion lists may be treated as current.
    pub(super) attached_tenants: HashMap<TenantShardId, Generation>,
}

/// Messages accepted by the frontend (list-writer) worker of the deletion queue.
#[derive(Debug)]
pub(super) enum ListWriterQueueMessage {
    Delete(DeletionOp),
    // Wait until all prior deletions make it into a persistent DeletionList
    Flush(FlushOp),
    // Wait until all prior deletions have been executed (i.e. objects are actually deleted)
    FlushExecute(FlushOp),
    // Call once after re-attaching to control plane, to notify the deletion queue about
    // latest attached generations & load any saved deletion lists from disk.
    Recover(RecoverOp),
}

pub(super) struct ListWriter {
    conf: &'static PageServerConf,

    // Incoming frontend requests to delete some keys
    rx: tokio::sync::mpsc::UnboundedReceiver<ListWriterQueueMessage>,

    // Outbound requests to the backend to execute deletion lists we have composed.
    tx: tokio::sync::mpsc::Sender<ValidatorQueueMessage>,

    // The list we are currently building, contains a buffer of keys to delete
    // and our next sequence number
    pending: DeletionList,

    // These FlushOps should notify the next time we flush
    pending_flushes: Vec<FlushOp>,

    // Worker loop is torn down when this fires.
    cancel: CancellationToken,

    // Safety guard to do recovery exactly once
    recovered: bool,
}

impl ListWriter {
    // Initially DeletionHeader.validated_sequence is zero.  The place we start our
    // sequence numbers must be higher than that.
    const BASE_SEQUENCE: u64 = 1;

    pub(super) fn new(
        conf: &'static PageServerConf,
        rx: tokio::sync::mpsc::UnboundedReceiver<ListWriterQueueMessage>,
        tx: tokio::sync::mpsc::Sender<ValidatorQueueMessage>,
        cancel: CancellationToken,
    ) -> Self {
        Self {
            pending: DeletionList::new(Self::BASE_SEQUENCE),
            conf,
            rx,
            tx,
            pending_flushes: Vec::new(),
            cancel,
            recovered: false,
        }
    }

    /// Try to flush `list` to persistent storage
    ///
    /// This does not return errors, because on failure to flush we do not lose
    /// any state: flushing will be retried implicitly on the next deadline
    async fn flush(&mut self) {
        if self.pending.is_empty() {
            // Nothing buffered: just wake up any waiters and return.
            for f in self.pending_flushes.drain(..) {
                f.notify();
            }
            return;
        }

        match self.pending.save(self.conf).await {
            Ok(_) => {
                info!(sequence = self.pending.sequence, "Stored deletion list");

                for f in self.pending_flushes.drain(..) {
                    f.notify();
                }

                // Take the list we've accumulated, replace it with a fresh list for the next sequence
                let next_list = DeletionList::new(self.pending.sequence + 1);
                let list = std::mem::replace(&mut self.pending, next_list);

                if let Err(e) = self.tx.send(ValidatorQueueMessage::Delete(list)).await {
                    // This is allowed to fail: it will only happen if the backend worker is shut down,
                    // so we can just drop this on the floor.
                    info!("Deletion list dropped, this is normal during shutdown ({e:#})");
                }
            }
            Err(e) => {
                metrics::DELETION_QUEUE.unexpected_errors.inc();
                warn!(
                    sequence = self.pending.sequence,
                    "Failed to write deletion list, will retry later ({e:#})"
                );
            }
        }
    }

    /// Load the header, to learn the sequence number up to which deletions
    /// have been validated.  We will apply validated=true to DeletionLists
    /// <= this sequence when loading them.
    ///
    /// It is not an error for the header to not exist: we return None, and
    /// the caller should act as if validated_sequence is 0
    async fn load_validated_sequence(&self) -> Result<Option<u64>, anyhow::Error> {
        let header_path = self.conf.deletion_header_path();
        match tokio::fs::read(&header_path).await {
            Ok(header_bytes) => {
                match serde_json::from_slice::<DeletionHeader>(&header_bytes) {
                    Ok(h) => Ok(Some(h.validated_sequence)),
                    Err(e) => {
                        warn!(
                            "Failed to deserialize deletion header, ignoring {header_path}: {e:#}",
                        );
                        // This should never happen unless we make a mistake with our serialization.
                        // Ignoring a deletion header is not consequential for correctness because all deletions
                        // are ultimately allowed to fail: worst case we leak some objects for the scrubber to clean up.
                        metrics::DELETION_QUEUE.unexpected_errors.inc();
                        Ok(None)
                    }
                }
            }
            Err(e) => {
                if e.kind() == std::io::ErrorKind::NotFound {
                    debug!("Deletion header {header_path} not found, first start?");
                    Ok(None)
                } else {
                    // Any other I/O error on the local disk is treated as fatal for the process.
                    on_fatal_io_error(&e, "reading deletion header");
                }
            }
        }
    }

    /// One-shot recovery pass: load the persisted header and any deletion lists left on
    /// disk by a previous process lifetime, and replay them into the validator backend.
    async fn recover(
        &mut self,
        attached_tenants: HashMap<TenantShardId, Generation>,
    ) -> Result<(), anyhow::Error> {
        debug!(
            "recovering with {} attached tenants",
            attached_tenants.len()
        );

        // Load the header
        let validated_sequence = self.load_validated_sequence().await?.unwrap_or(0);

        self.pending.sequence = validated_sequence + 1;

        let deletion_directory = self.conf.deletion_prefix();
        let mut dir = tokio::fs::read_dir(&deletion_directory)
            .await
            .fatal_err("read deletion directory");

        let list_name_pattern =
            Regex::new("(?<sequence>[a-zA-Z0-9]{16})-(?<version>[a-zA-Z0-9]{2}).list").unwrap();

        let temp_extension = format!(".{TEMP_SUFFIX}");
        let header_path = self.conf.deletion_header_path();
        let mut seqs: Vec<u64> = Vec::new();
        while let Some(dentry) = dir.next_entry().await.fatal_err("read deletion dentry") {
            let file_name = dentry.file_name();
            let dentry_str = file_name.to_string_lossy();

            if file_name == header_path.file_name().unwrap_or("") {
                // Don't try and parse the header's name like a list
                continue;
            }

            if dentry_str.ends_with(&temp_extension) {
                // Leftover temp file from an interrupted atomic-rename write: safe to remove.
                info!("Cleaning up temporary file {dentry_str}");
                let absolute_path =
                    deletion_directory.join(dentry.file_name().to_str().expect("non-Unicode path"));
                tokio::fs::remove_file(&absolute_path)
                    .await
                    .fatal_err("delete temp file");

                continue;
            }

            let file_name = dentry.file_name().to_owned();
            let basename = file_name.to_string_lossy();
            let seq_part = if let Some(m) = list_name_pattern.captures(&basename) {
                m.name("sequence")
                    .expect("Non optional group should be present")
                    .as_str()
            } else {
                warn!("Unexpected key in deletion queue: {basename}");
                metrics::DELETION_QUEUE.unexpected_errors.inc();
                continue;
            };
            // List file names encode the sequence number as 16 hex digits.
            let seq: u64 = match u64::from_str_radix(seq_part, 16) {
                Ok(s) => s,
                Err(e) => {
                    warn!("Malformed key '{basename}': {e}");
                    metrics::DELETION_QUEUE.unexpected_errors.inc();
                    continue;
                }
            };
            seqs.push(seq);
        }
        seqs.sort();

        // Start our next deletion list from after the last location validated by
        // previous process lifetime, or after the last location found (it is updated
        // below after enumerating the deletion lists)
        // NOTE(review): this re-assigns the same value computed above; kept for clarity
        // next to the max_list_seq adjustment below.
        self.pending.sequence = validated_sequence + 1;

        if let Some(max_list_seq) = seqs.last() {
            self.pending.sequence = std::cmp::max(self.pending.sequence, max_list_seq + 1);
        }

        for s in seqs {
            let list_path = self.conf.deletion_list_path(s);
            let list_bytes = tokio::fs::read(&list_path)
                .await
                .fatal_err("read deletion list");

            let mut deletion_list = match serde_json::from_slice::<DeletionList>(&list_bytes) {
                Ok(l) => l,
                Err(e) => {
                    // Drop the list on the floor: any objects it referenced will be left behind
                    // for scrubbing to clean up.  This should never happen unless we have a serialization bug.
                    warn!(sequence = s, "Failed to deserialize deletion list: {e}");
                    metrics::DELETION_QUEUE.unexpected_errors.inc();
                    continue;
                }
            };

            if deletion_list.sequence <= validated_sequence {
                // If the deletion list falls below valid_seq, we may assume that it was
                // already validated the last time this pageserver ran.  Otherwise, we still
                // load it, as it may still contain content valid in this generation.
                deletion_list.validated = true;
            } else {
                // Special case optimization: if a tenant is still attached, and no other
                // generation was issued to another node in the interval while we restarted,
                // then we may treat deletion lists from the previous generation as if they
                // belong to our currently attached generation, and proceed to validate & execute.
                for (tenant_shard_id, tenant_list) in &mut deletion_list.tenants {
                    if let Some(attached_gen) = attached_tenants.get(tenant_shard_id) {
                        if attached_gen.previous() == tenant_list.generation {
                            info!(
                                seq=%s, tenant_id=%tenant_shard_id.tenant_id,
                                shard_id=%tenant_shard_id.shard_slug(),
                                old_gen=?tenant_list.generation, new_gen=?attached_gen,
                                "Updating gen on recovered list");
                            tenant_list.generation = *attached_gen;
                        } else {
                            info!(
                                seq=%s, tenant_id=%tenant_shard_id.tenant_id,
                                shard_id=%tenant_shard_id.shard_slug(),
                                old_gen=?tenant_list.generation, new_gen=?attached_gen,
                                "Encountered stale generation on recovered list");
                        }
                    }
                }
            }

            info!(
                validated = deletion_list.validated,
                sequence = deletion_list.sequence,
                "Recovered deletion list"
            );

            // We will drop out of recovery if this fails: it indicates that we are shutting down
            // or the backend has panicked
            metrics::DELETION_QUEUE
                .keys_submitted
                .inc_by(deletion_list.len() as u64);
            self.tx
                .send(ValidatorQueueMessage::Delete(deletion_list))
                .await?;
        }

        info!(next_sequence = self.pending.sequence, "Replay complete");

        Ok(())
    }

    /// This is the front-end ingest, where we bundle up deletion requests into DeletionList
    /// and write them out, for later validation by the backend and execution by the
    /// executor.
    pub(super) async fn background(&mut self) {
        info!("Started deletion frontend worker");

        // Synchronous, but we only do it once per process lifetime so it's tolerable
        if let Err(e) = create_dir_all(self.conf.deletion_prefix()) {
            tracing::error!(
                "Failed to create deletion list directory {}, deletions will not be executed ({e})",
                self.conf.deletion_prefix(),
            );
            metrics::DELETION_QUEUE.unexpected_errors.inc();
            return;
        }

        while !self.cancel.is_cancelled() {
            // Use the short deadline while flush waiters are queued; the longer one when idle.
            let timeout = if self.pending_flushes.is_empty() {
                FRONTEND_DEFAULT_TIMEOUT
            } else {
                FRONTEND_FLUSHING_TIMEOUT
            };

            let msg = match tokio::time::timeout(timeout, self.rx.recv()).await {
                Ok(Some(msg)) => msg,
                Ok(None) => {
                    // Queue sender destroyed, shutting down
                    break;
                }
                Err(_) => {
                    // Hit deadline, flush.
                    self.flush().await;
                    continue;
                }
            };

            match msg {
                ListWriterQueueMessage::Delete(op) => {
                    assert!(
                        self.recovered,
                        "Cannot process deletions before recovery.  This is a bug."
                    );

                    debug!(
                        "Delete: ingesting {} layers, {} other objects",
                        op.layers.len(),
                        op.objects.len()
                    );

                    // Project layer names to full remote paths, then merge in the raw objects.
                    let mut layer_paths = Vec::new();
                    for (layer, meta) in op.layers {
                        layer_paths.push(remote_layer_path(
                            &op.tenant_shard_id.tenant_id,
                            &op.timeline_id,
                            meta.shard,
                            &layer,
                            meta.generation,
                        ));
                    }
                    layer_paths.extend(op.objects);

                    if !self.pending.push(
                        &op.tenant_shard_id,
                        &op.timeline_id,
                        op.generation,
                        &mut layer_paths,
                    ) {
                        // push() refused (e.g. generation conflict): flush to drain the
                        // current list, then retry against the fresh one.
                        self.flush().await;
                        let retry_succeeded = self.pending.push(
                            &op.tenant_shard_id,
                            &op.timeline_id,
                            op.generation,
                            &mut layer_paths,
                        );
                        if !retry_succeeded {
                            // Unexpected: after we flush, we should have
                            // drained self.pending, so a conflict on
                            // generation numbers should be impossible.
                            tracing::error!(
                                "Failed to enqueue deletions, leaking objects.  This is a bug."
                            );
                            metrics::DELETION_QUEUE.unexpected_errors.inc();
                        }
                    }
                }
                ListWriterQueueMessage::Flush(op) => {
                    if self.pending.is_empty() {
                        // Execute immediately
                        debug!("Flush: No pending objects, flushing immediately");
                        op.notify()
                    } else {
                        // Execute next time we flush
                        debug!("Flush: adding to pending flush list for next deadline flush");
                        self.pending_flushes.push(op);
                    }
                }

                ListWriterQueueMessage::FlushExecute(op) => {
                    debug!("FlushExecute: passing through to backend");
                    // We do not flush to a deletion list here: the client sends a Flush before the FlushExecute
                    if let Err(e) = self.tx.send(ValidatorQueueMessage::Flush(op)).await {
                        info!("Can't flush, shutting down ({e})");
                        // Caller will get error when their oneshot sender was dropped.
                    }
                }

                ListWriterQueueMessage::Recover(op) => {
                    if self.recovered {
                        tracing::error!(
                            "Deletion queue recovery called more than once.  This is a bug."
                        );
                        metrics::DELETION_QUEUE.unexpected_errors.inc();
                        // Non-fatal: although this is a bug, since we did recovery at least once we may proceed.
                        continue;
                    }

                    if let Err(e) = self.recover(op.attached_tenants).await {
                        // This should only happen in truly unrecoverable cases, like the recovery finding that the backend
                        // queue receiver has been dropped, or something is critically broken with
                        // the local filesystem holding deletion lists.
                        info!(
                            "Deletion queue recover aborted, deletion queue will not proceed ({e})"
                        );
                        metrics::DELETION_QUEUE.unexpected_errors.inc();
                        return;
                    } else {
                        self.recovered = true;
                    }
                }
            }

            if self.pending.len() > DELETION_LIST_TARGET_SIZE || !self.pending_flushes.is_empty() {
                self.flush().await;
            }
        }
        info!("Deletion queue shut down.");
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/deletion_queue/deleter.rs
pageserver/src/deletion_queue/deleter.rs
//! The deleter is the final stage in the deletion queue.  It accumulates remote
//! paths to delete, and periodically executes them in batches of up to 1000
//! using the DeleteObjects request.
//!
//! Its purpose is to increase efficiency of remote storage I/O by issuing a smaller
//! number of full-sized DeleteObjects requests, rather than a larger number of
//! smaller requests.

use std::time::Duration;

use remote_storage::{GenericRemoteStorage, RemotePath, TimeoutOrCancel};
use tokio_util::sync::CancellationToken;
use tracing::{info, warn};
use utils::{backoff, pausable_failpoint};

use super::{DeletionQueueError, FlushOp};
use crate::metrics;

// Maximum time accumulated keys wait before being executed, even if the batch is not full.
const AUTOFLUSH_INTERVAL: Duration = Duration::from_secs(10);

pub(super) enum DeleterMessage {
    Delete(Vec<RemotePath>),
    Flush(FlushOp),
}

/// Non-persistent deletion queue, for coalescing multiple object deletes into
/// larger DeleteObjects requests.
pub(super) struct Deleter {
    // Accumulate up to 1000 keys for the next deletion operation
    accumulator: Vec<RemotePath>,
    rx: tokio::sync::mpsc::Receiver<DeleterMessage>,

    cancel: CancellationToken,
    remote_storage: GenericRemoteStorage,
}

impl Deleter {
    pub(super) fn new(
        remote_storage: GenericRemoteStorage,
        rx: tokio::sync::mpsc::Receiver<DeleterMessage>,
        cancel: CancellationToken,
    ) -> Self {
        Self {
            remote_storage,
            rx,
            cancel,
            accumulator: Vec::new(),
        }
    }

    /// Wrap the remote `delete_objects` with a failpoint
    ///
    /// Issues one DeleteObjects call for everything currently in `self.accumulator`;
    /// does not clear the accumulator (the caller does that on success).
    async fn remote_delete(&self) -> Result<(), anyhow::Error> {
        // A backoff::retry is used here for two reasons:
        // - To provide a backoff rather than busy-polling the API on errors
        // - To absorb transient 429/503 conditions without hitting our error
        //   logging path for issues deleting objects.
        backoff::retry(
            || async {
                fail::fail_point!("deletion-queue-before-execute", |_| {
                    info!("Skipping execution, failpoint set");

                    metrics::DELETION_QUEUE
                        .remote_errors
                        .with_label_values(&["failpoint"])
                        .inc();
                    Err(anyhow::anyhow!("failpoint: deletion-queue-before-execute"))
                });

                self.remote_storage
                    .delete_objects(&self.accumulator, &self.cancel)
                    .await
            },
            TimeoutOrCancel::caused_by_cancel,
            3,
            10,
            "executing deletion batch",
            &self.cancel,
        )
        .await
        // retry() returns None only on cancellation; map that to a shutdown error.
        .ok_or_else(|| anyhow::anyhow!("Shutting down"))
        .and_then(|x| x)
    }

    /// Block until everything in accumulator has been executed
    async fn flush(&mut self) -> Result<(), DeletionQueueError> {
        while !self.accumulator.is_empty() && !self.cancel.is_cancelled() {
            pausable_failpoint!("deletion-queue-before-execute-pause");
            match self.remote_delete().await {
                Ok(()) => {
                    // Note: we assume that the remote storage layer returns Ok(()) if some
                    // or all of the deleted objects were already gone.
                    metrics::DELETION_QUEUE
                        .keys_executed
                        .inc_by(self.accumulator.len() as u64);
                    info!(
                        "Executed deletion batch {}..{}",
                        self.accumulator
                            .first()
                            .expect("accumulator should be non-empty"),
                        self.accumulator
                            .last()
                            .expect("accumulator should be non-empty"),
                    );
                    self.accumulator.clear();
                }
                Err(e) => {
                    if self.cancel.is_cancelled() {
                        return Err(DeletionQueueError::ShuttingDown);
                    }
                    warn!("DeleteObjects request failed: {e:#}, will continue trying");
                    metrics::DELETION_QUEUE
                        .remote_errors
                        .with_label_values(&["execute"])
                        .inc();
                }
            };
        }
        if self.cancel.is_cancelled() {
            // Expose an error because we may not have actually flushed everything
            Err(DeletionQueueError::ShuttingDown)
        } else {
            Ok(())
        }
    }

    /// Worker loop: receive batches of paths, fill the accumulator up to the backend's
    /// per-request key limit, and flush on capacity, on explicit Flush, or on deadline.
    pub(super) async fn background(&mut self) -> Result<(), DeletionQueueError> {
        let max_keys_per_delete = self.remote_storage.max_keys_per_delete();
        self.accumulator.reserve(max_keys_per_delete);

        loop {
            if self.cancel.is_cancelled() {
                return Err(DeletionQueueError::ShuttingDown);
            }

            let msg = match tokio::time::timeout(AUTOFLUSH_INTERVAL, self.rx.recv()).await {
                Ok(Some(m)) => m,
                Ok(None) => {
                    // All queue senders closed
                    info!("Shutting down");
                    return Err(DeletionQueueError::ShuttingDown);
                }
                Err(_) => {
                    // Timeout, we hit deadline to execute whatever we have in hand.  These functions will
                    // return immediately if no work is pending
                    self.flush().await?;

                    continue;
                }
            };

            match msg {
                DeleterMessage::Delete(mut list) => {
                    // Drain `list` into the accumulator, flushing whenever the accumulator
                    // reaches the per-request key limit.
                    while !list.is_empty() || self.accumulator.len() == max_keys_per_delete {
                        if self.accumulator.len() == max_keys_per_delete {
                            self.flush().await?;
                            // If we have received this number of keys, proceed with attempting to execute
                            assert_eq!(self.accumulator.len(), 0);
                        }

                        let available_slots = max_keys_per_delete - self.accumulator.len();
                        let take_count = std::cmp::min(available_slots, list.len());
                        // NOTE(review): drains from the tail of `list`, so per-batch ordering is
                        // not the arrival order; deletions are unordered so this is harmless.
                        for path in list.drain(list.len() - take_count..) {
                            self.accumulator.push(path);
                        }
                    }
                }
                DeleterMessage::Flush(flush_op) => {
                    // If flush() errors, we drop the flush_op and the caller will get
                    // an error recv()'ing their oneshot channel.
                    self.flush().await?;
                    flush_op.notify();
                }
            }
        }
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/deletion_queue/validator.rs
pageserver/src/deletion_queue/validator.rs
//! The validator is responsible for validating DeletionLists for execution,
//! based on whether the generation in the DeletionList is still the latest
//! generation for a tenant.
//!
//! The purpose of validation is to ensure split-brain safety in the cluster
//! of pageservers: a deletion may only be executed if the tenant generation
//! that originated it is still current.  See docs/rfcs/025-generation-numbers.md
//! The purpose of accumulating lists before validating them is to reduce load
//! on the control plane API by issuing fewer, larger requests.
//!
//! In addition to validating DeletionLists, the validator validates updates to remote_consistent_lsn
//! for timelines: these are logically deletions because the safekeepers use remote_consistent_lsn
//! to decide when old WAL may be removed.
//!
//! Deletions are passed onward to the Deleter.

use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;

use camino::Utf8PathBuf;
use tokio_util::sync::CancellationToken;
use tracing::{debug, info, warn};

use super::deleter::DeleterMessage;
use super::{DeletionHeader, DeletionList, DeletionQueueError, FlushOp, VisibleLsnUpdates};
use crate::config::PageServerConf;
use crate::controller_upcall_client::{RetryForeverError, StorageControllerUpcallApi};
use crate::metrics;
use crate::virtual_file::MaybeFatalIo;

// After this length of time, do any validation work that is pending,
// even if we haven't accumulated many keys to delete.
//
// This also causes updates to remote_consistent_lsn to be validated, even
// if there were no deletions enqueued.
const AUTOFLUSH_INTERVAL: Duration = Duration::from_secs(10);

// If we have received this number of keys, proceed with attempting to execute
const AUTOFLUSH_KEY_COUNT: usize = 16384;

#[derive(Debug)]
pub(super) enum ValidatorQueueMessage {
    Delete(DeletionList),
    Flush(FlushOp),
}

pub(super) struct Validator<C>
where
    C: StorageControllerUpcallApi,
{
    conf: &'static PageServerConf,
    rx: tokio::sync::mpsc::Receiver<ValidatorQueueMessage>,
    tx: tokio::sync::mpsc::Sender<DeleterMessage>,

    // Client for calling into control plane API for validation of deletes
    controller_upcall_client: C,

    // DeletionLists which are waiting generation validation.  Not safe to
    // execute until [`validate`] has processed them.
    pending_lists: Vec<DeletionList>,

    // DeletionLists which have passed validation and are ready to execute.
    validated_lists: Vec<DeletionList>,

    // Sum of all the lengths of lists in pending_lists
    pending_key_count: usize,

    // Lsn validation state: we read projected LSNs and write back visible LSNs
    // after validation.  This is the LSN equivalent of `pending_validation_lists`:
    // it is drained in [`validate`]
    lsn_table: Arc<std::sync::RwLock<VisibleLsnUpdates>>,

    // If we failed to rewrite a deletion list due to local filesystem I/O failure,
    // we must remember that and refuse to advance our persistent validated sequence
    // number past the failure.
    list_write_failed: Option<u64>,

    cancel: CancellationToken,
}

impl<C> Validator<C>
where
    C: StorageControllerUpcallApi,
{
    pub(super) fn new(
        conf: &'static PageServerConf,
        rx: tokio::sync::mpsc::Receiver<ValidatorQueueMessage>,
        tx: tokio::sync::mpsc::Sender<DeleterMessage>,
        controller_upcall_client: C,
        lsn_table: Arc<std::sync::RwLock<VisibleLsnUpdates>>,
        cancel: CancellationToken,
    ) -> Self {
        Self {
            conf,
            rx,
            tx,
            controller_upcall_client,
            lsn_table,
            pending_lists: Vec::new(),
            validated_lists: Vec::new(),
            pending_key_count: 0,
            list_write_failed: None,
            cancel,
        }
    }

    /// Process any outstanding validations of generations of pending LSN updates or pending
    /// DeletionLists.
    ///
    /// Valid LSN updates propagate back to Timelines immediately, valid DeletionLists
    /// go into the queue of ready-to-execute lists.
    async fn validate(&mut self) -> Result<(), DeletionQueueError> {
        // Gather the latest generation we have seen for each tenant across all pending lists.
        let mut tenant_generations = HashMap::new();
        for list in &self.pending_lists {
            for (tenant_id, tenant_list) in &list.tenants {
                // Note: DeletionLists are in logical time order, so generation always
                // goes up.  By doing a simple insert() we will always end up with
                // the latest generation seen for a tenant.
                tenant_generations.insert(*tenant_id, tenant_list.generation);
            }
        }

        // Atomically take the pending LSN updates so that new ones can accumulate
        // while we are talking to the control plane.
        let pending_lsn_updates = {
            let mut lsn_table = self.lsn_table.write().expect("Lock should not be poisoned");
            std::mem::take(&mut *lsn_table)
        };
        for (tenant_id, update) in &pending_lsn_updates.tenants {
            let entry = tenant_generations
                .entry(*tenant_id)
                .or_insert(update.generation);
            if update.generation > *entry {
                *entry = update.generation;
            }
        }

        if tenant_generations.is_empty() {
            // No work to do
            return Ok(());
        }

        let tenants_valid = match self
            .controller_upcall_client
            .validate(tenant_generations.iter().map(|(k, v)| (*k, *v)).collect())
            .await
        {
            Ok(tenants) => tenants,
            Err(RetryForeverError::ShuttingDown) => {
                // The only way a validation call returns an error is when the cancellation token fires
                return Err(DeletionQueueError::ShuttingDown);
            }
        };

        let mut validated_sequence: Option<u64> = None;

        // Apply the validation results to the pending LSN updates
        for (tenant_id, tenant_lsn_state) in pending_lsn_updates.tenants {
            let validated_generation = tenant_generations
                .get(&tenant_id)
                .expect("Map was built from the same keys we're reading");

            let valid = tenants_valid
                .get(&tenant_id)
                .copied()
                // If the tenant was missing from the validation response, it has been deleted.
                // The Timeline that requested the LSN update is probably already torn down,
                // or will be torn down soon.  In this case, drop the update by setting valid=false.
                .unwrap_or(false);

            if valid && *validated_generation == tenant_lsn_state.generation {
                for (timeline_id, pending_lsn) in tenant_lsn_state.timelines {
                    tracing::debug!(
                        %tenant_id,
                        %timeline_id,
                        current = %pending_lsn.result_slot.load(),
                        projected = %pending_lsn.projected,
                        "advancing validated remote_consistent_lsn",
                    );
                    pending_lsn.result_slot.store(pending_lsn.projected);
                }
            } else {
                // If we failed validation, then do not apply any of the projected updates
                info!(
                    "Dropped remote consistent LSN updates for tenant {tenant_id} in stale generation {:?}",
                    tenant_lsn_state.generation
                );
                metrics::DELETION_QUEUE.dropped_lsn_updates.inc();
            }
        }

        // Apply the validation results to the pending deletion lists
        for list in &mut self.pending_lists {
            // Filter the list based on whether the server responded valid: true.
            // If a tenant is omitted in the response, it has been deleted, and we should
            // proceed with deletion.
            let mut mutated = false;
            list.tenants.retain(|tenant_id, tenant| {
                let validated_generation = tenant_generations
                    .get(tenant_id)
                    .expect("Map was built from the same keys we're reading");

                // If the tenant was missing from the validation response, it has been deleted.
                // This means that a deletion is valid, but also redundant since the tenant's
                // objects should have already been deleted.  Treat it as invalid to drop the
                // redundant deletion.
                let valid = tenants_valid.get(tenant_id).copied().unwrap_or(false);

                // A list is valid if it comes from the current _or previous_ generation.
                // - The previous generation case is permitted due to how we store deletion lists locally:
                //   if we see the immediately previous generation in a locally stored deletion list,
                //   it proves that this node's disk was used for both current & previous generations,
                //   and therefore no other node was involved in between: the two generations may be
                //   logically treated as the same.
                // - In that previous generation case, we rewrote it to the current generation
                //   in recover(), so the comparison here is simply an equality.
                let this_list_valid = valid && (tenant.generation == *validated_generation);

                if !this_list_valid {
                    info!("Dropping stale deletions for tenant {tenant_id} in generation {:?}, objects may be leaked", tenant.generation);
                    metrics::DELETION_QUEUE.keys_dropped.inc_by(tenant.len() as u64);
                    mutated = true;
                } else {
                    metrics::DELETION_QUEUE.keys_validated.inc_by(tenant.len() as u64);
                }
                this_list_valid
            });
            list.validated = true;

            if mutated {
                // Save the deletion list if we had to make changes due to stale generations.  The
                // saved list is valid for execution.
                if let Err(e) = list.save(self.conf).await {
                    // Highly unexpected.  Could happen if e.g. disk full.
                    // If we didn't save the trimmed list, it is _not_ valid to execute.
                    warn!("Failed to save modified deletion list {list}: {e:#}");
                    metrics::DELETION_QUEUE.unexpected_errors.inc();

                    // Rather than have a complex retry process, just drop it and leak the objects,
                    // scrubber will clean up eventually.
                    list.tenants.clear(); // Result is a valid-but-empty list, which is a no-op for execution.

                    // We must remember this failure, to prevent later writing out a header that
                    // would imply the unwritable list was valid on disk.
                    if self.list_write_failed.is_none() {
                        self.list_write_failed = Some(list.sequence);
                    }
                }
            }

            validated_sequence = Some(list.sequence);
        }

        if let Some(validated_sequence) = validated_sequence {
            if let Some(list_write_failed) = self.list_write_failed {
                // Rare error case: we failed to write out a deletion list to excise invalid
                // entries, so we cannot advance the header's valid sequence number past that point.
                //
                // In this state we will continue to validate, execute and delete deletion lists,
                // we just cannot update the header.  It should be noticed and fixed by a human due to
                // the nonzero value of our unexpected_errors metric.
                warn!(
                    sequence_number = list_write_failed,
                    "Cannot write header because writing a deletion list failed earlier",
                );
            } else {
                // Write the queue header to record how far validation progressed.  This avoids having
                // to rewrite each DeletionList to set validated=true in it.
                let header = DeletionHeader::new(validated_sequence);

                // Drop result because the validated_sequence is an optimization.  If we fail to save it,
                // then restart, we will drop some deletion lists, creating work for scrubber.
                // The save() function logs a warning on error.
                if let Err(e) = header.save(self.conf).await {
                    warn!("Failed to write deletion queue header: {e:#}");
                    metrics::DELETION_QUEUE.unexpected_errors.inc();
                }
            }
        }

        // Transfer the validated lists to the validated queue, for eventual execution
        self.validated_lists.append(&mut self.pending_lists);

        Ok(())
    }

    /// Remove deletion list files whose keys have all been executed against remote storage.
    async fn cleanup_lists(&mut self, list_paths: Vec<Utf8PathBuf>) {
        for list_path in list_paths {
            debug!("Removing deletion list {list_path}");
            tokio::fs::remove_file(&list_path)
                .await
                .fatal_err("remove deletion list");
        }
    }

    /// Validate all pending lists, hand the validated ones to the Deleter, wait for
    /// execution to finish, then clean up the on-disk list files.
    async fn flush(&mut self) -> Result<(), DeletionQueueError> {
        tracing::debug!("Flushing with {} pending lists", self.pending_lists.len());

        // Issue any required generation validation calls to the control plane
        self.validate().await?;

        // After successful validation, nothing is pending: any lists that
        // made it through validation will be in validated_lists.
        assert!(self.pending_lists.is_empty());
        self.pending_key_count = 0;

        tracing::debug!(
            "Validation complete, have {} validated lists",
            self.validated_lists.len()
        );

        // Return quickly if we have no validated lists to execute.  This avoids flushing the
        // executor when an idle backend hits its autoflush interval
        if self.validated_lists.is_empty() {
            return Ok(());
        }

        // Drain `validated_lists` into the executor
        let mut executing_lists = Vec::new();
        for list in self.validated_lists.drain(..) {
            let list_path = self.conf.deletion_list_path(list.sequence);
            let objects = list.into_remote_paths();
            self.tx
                .send(DeleterMessage::Delete(objects))
                .await
                .map_err(|_| DeletionQueueError::ShuttingDown)?;
            executing_lists.push(list_path);
        }

        self.flush_executor().await?;

        // Erase the deletion lists whose keys have all be deleted from remote storage
        self.cleanup_lists(executing_lists).await;

        Ok(())
    }

    async fn flush_executor(&mut self) -> Result<(), DeletionQueueError> {
        // Flush the executor, so that all the keys referenced by these deletion lists
        // are actually removed from remote storage.  This is a precondition to deleting
        // the deletion lists themselves.
        let (flush_op, rx) = FlushOp::new();
        self.tx
            .send(DeleterMessage::Flush(flush_op))
            .await
            .map_err(|_| DeletionQueueError::ShuttingDown)?;

        rx.await.map_err(|_| DeletionQueueError::ShuttingDown)
    }

    /// Worker loop: accumulate DeletionLists, flushing on explicit request, on key-count
    /// threshold, or on the autoflush deadline.
    pub(super) async fn background(&mut self) {
        tracing::info!("Started deletion backend worker");

        while !self.cancel.is_cancelled() {
            let msg = match tokio::time::timeout(AUTOFLUSH_INTERVAL, self.rx.recv()).await {
                Ok(Some(m)) => m,
                Ok(None) => {
                    // All queue senders closed
                    info!("Shutting down");
                    break;
                }
                Err(_) => {
                    // Timeout, we hit deadline to execute whatever we have in hand.  These functions will
                    // return immediately if no work is pending.
                    match self.flush().await {
                        Ok(()) => {}
                        Err(DeletionQueueError::ShuttingDown) => {
                            // If we are shutting down, then auto-flush can safely be skipped
                        }
                    }

                    continue;
                }
            };

            match msg {
                ValidatorQueueMessage::Delete(list) => {
                    if list.validated {
                        // A pre-validated list may only be seen during recovery, if we are recovering
                        // a DeletionList whose on-disk state has validated=true
                        self.validated_lists.push(list)
                    } else {
                        self.pending_key_count += list.len();
                        self.pending_lists.push(list);
                    }

                    if self.pending_key_count > AUTOFLUSH_KEY_COUNT {
                        match self.flush().await {
                            Ok(()) => {}
                            Err(DeletionQueueError::ShuttingDown) => {
                                // If we are shutting down, then auto-flush can safely be skipped
                            }
                        }
                    }
                }
                ValidatorQueueMessage::Flush(op) => {
                    match self.flush().await {
                        Ok(()) => {
                            op.notify();
                        }
                        Err(DeletionQueueError::ShuttingDown) => {
                            // If we fail due to shutting down, we will just drop `op` to propagate that status.
                        }
                    }
                }
            }
        }
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file/open_options.rs
pageserver/src/virtual_file/open_options.rs
//! Enum-dispatch to the `OpenOptions` type of the respective [`super::IoEngineKind`]; use std::os::fd::OwnedFd; use std::os::unix::fs::OpenOptionsExt; use std::path::Path; use super::io_engine::IoEngine; #[derive(Debug, Clone)] pub struct OpenOptions { /// We keep a copy of the write() flag we pass to the `inner`` `OptionOptions` /// to support [`Self::is_write`]. write: bool, /// We don't expose + pass through a raw `custom_flags()` style API. /// The only custom flag we support is `O_DIRECT`, which we track here /// and map to `custom_flags()` in the [`Self::open`] method. direct: bool, inner: Inner, } #[derive(Debug, Clone)] enum Inner { StdFs(std::fs::OpenOptions), #[cfg(target_os = "linux")] TokioEpollUring(tokio_epoll_uring::ops::open_at::OpenOptions), } impl Default for OpenOptions { fn default() -> Self { let inner = match super::io_engine::get() { IoEngine::NotSet => panic!("io engine not set"), IoEngine::StdFs => Inner::StdFs(std::fs::OpenOptions::new()), #[cfg(target_os = "linux")] IoEngine::TokioEpollUring => { Inner::TokioEpollUring(tokio_epoll_uring::ops::open_at::OpenOptions::new()) } }; Self { write: false, direct: false, inner, } } } impl OpenOptions { pub fn new() -> OpenOptions { Self::default() } pub(super) fn is_write(&self) -> bool { self.write } pub(super) fn is_direct(&self) -> bool { self.direct } pub fn read(mut self, read: bool) -> Self { match &mut self.inner { Inner::StdFs(x) => { let _ = x.read(read); } #[cfg(target_os = "linux")] Inner::TokioEpollUring(x) => { let _ = x.read(read); } } self } pub fn write(mut self, write: bool) -> Self { self.write = write; match &mut self.inner { Inner::StdFs(x) => { let _ = x.write(write); } #[cfg(target_os = "linux")] Inner::TokioEpollUring(x) => { let _ = x.write(write); } } self } pub fn create(mut self, create: bool) -> Self { match &mut self.inner { Inner::StdFs(x) => { let _ = x.create(create); } #[cfg(target_os = "linux")] Inner::TokioEpollUring(x) => { let _ = x.create(create); } } self } 
pub fn create_new(mut self, create_new: bool) -> Self { match &mut self.inner { Inner::StdFs(x) => { let _ = x.create_new(create_new); } #[cfg(target_os = "linux")] Inner::TokioEpollUring(x) => { let _ = x.create_new(create_new); } } self } pub fn truncate(mut self, truncate: bool) -> Self { match &mut self.inner { Inner::StdFs(x) => { let _ = x.truncate(truncate); } #[cfg(target_os = "linux")] Inner::TokioEpollUring(x) => { let _ = x.truncate(truncate); } } self } /// Don't use, `O_APPEND` is not supported. pub fn append(&mut self, _append: bool) { super::io_engine::panic_operation_must_be_idempotent(); } pub(in crate::virtual_file) async fn open(&self, path: &Path) -> std::io::Result<OwnedFd> { #[cfg_attr(not(target_os = "linux"), allow(unused_mut))] let mut custom_flags = 0; if self.direct { #[cfg(target_os = "linux")] { custom_flags |= nix::libc::O_DIRECT; } #[cfg(not(target_os = "linux"))] { // Other platforms may be used for development but don't necessarily have a 1:1 equivalent to Linux's O_DIRECT (macOS!). // Just don't set the flag; to catch alignment bugs typical for O_DIRECT, // we have a runtime validation layer inside `VirtualFile::write_at` and `VirtualFile::read_at`. 
static WARNING: std::sync::Once = std::sync::Once::new(); WARNING.call_once(|| { let span = tracing::info_span!(parent: None, "open_options"); let _enter = span.enter(); tracing::warn!("your platform is not a supported production platform, ignoing request for O_DIRECT; this could hide alignment bugs; this warning is logged once per process"); }); } } match self.inner.clone() { Inner::StdFs(mut x) => x .custom_flags(custom_flags) .open(path) .map(|file| file.into()), #[cfg(target_os = "linux")] Inner::TokioEpollUring(mut x) => { x.custom_flags(custom_flags); let system = super::io_engine::tokio_epoll_uring_ext::thread_local_system().await; let (_, res) = super::io_engine::retry_ecanceled_once((), |()| async { let res = system.open(path, &x).await; ((), res) }) .await; res.map_err(super::io_engine::epoll_uring_error_to_std) } } } pub fn mode(mut self, mode: u32) -> Self { match &mut self.inner { Inner::StdFs(x) => { let _ = x.mode(mode); } #[cfg(target_os = "linux")] Inner::TokioEpollUring(x) => { let _ = x.mode(mode); } } self } pub fn direct(mut self, direct: bool) -> Self { self.direct = direct; self } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file/io_engine.rs
pageserver/src/virtual_file/io_engine.rs
//! [`super::VirtualFile`] supports different IO engines. //! //! The [`IoEngineKind`] enum identifies them. //! //! The choice of IO engine is global. //! Initialize using [`init`]. //! //! Then use [`get`] and [`super::OpenOptions`]. //! //! #[cfg(target_os = "linux")] pub(super) mod tokio_epoll_uring_ext; use tokio_epoll_uring::IoBuf; use tracing::Instrument; pub(crate) use super::api::IoEngineKind; #[derive(Clone, Copy)] #[repr(u8)] pub(crate) enum IoEngine { NotSet, StdFs, #[cfg(target_os = "linux")] TokioEpollUring, } impl From<IoEngineKind> for IoEngine { fn from(value: IoEngineKind) -> Self { match value { IoEngineKind::StdFs => IoEngine::StdFs, #[cfg(target_os = "linux")] IoEngineKind::TokioEpollUring => IoEngine::TokioEpollUring, } } } impl TryFrom<u8> for IoEngine { type Error = u8; fn try_from(value: u8) -> Result<Self, Self::Error> { Ok(match value { v if v == (IoEngine::NotSet as u8) => IoEngine::NotSet, v if v == (IoEngine::StdFs as u8) => IoEngine::StdFs, #[cfg(target_os = "linux")] v if v == (IoEngine::TokioEpollUring as u8) => IoEngine::TokioEpollUring, x => return Err(x), }) } } static IO_ENGINE: AtomicU8 = AtomicU8::new(IoEngine::NotSet as u8); pub(crate) fn set(engine_kind: IoEngineKind) { let engine: IoEngine = engine_kind.into(); IO_ENGINE.store(engine as u8, std::sync::atomic::Ordering::Relaxed); #[cfg(not(test))] { let metric = &crate::metrics::virtual_file_io_engine::KIND; metric.reset(); metric .with_label_values(&[&format!("{engine_kind}")]) .set(1); } } #[cfg(not(test))] pub(super) fn init(engine_kind: IoEngineKind) { set(engine_kind); } /// Longer-term, this API should only be used by [`super::VirtualFile`]. 
pub(crate) fn get() -> IoEngine { let cur = IoEngine::try_from(IO_ENGINE.load(Ordering::Relaxed)).unwrap(); if cfg!(test) { let env_var_name = "NEON_PAGESERVER_UNIT_TEST_VIRTUAL_FILE_IOENGINE"; match cur { IoEngine::NotSet => { let kind = match std::env::var(env_var_name) { Ok(v) => match v.parse::<IoEngineKind>() { Ok(engine_kind) => engine_kind, Err(e) => { panic!( "invalid VirtualFile io engine for env var {env_var_name}: {e:#}: {v:?}" ) } }, Err(std::env::VarError::NotPresent) => { #[cfg(target_os = "linux")] { IoEngineKind::TokioEpollUring } #[cfg(not(target_os = "linux"))] { IoEngineKind::StdFs } } Err(std::env::VarError::NotUnicode(_)) => { panic!("env var {env_var_name} is not unicode"); } }; self::set(kind); self::get() } x => x, } } else { cur } } use std::os::unix::prelude::FileExt; use std::sync::atomic::{AtomicU8, Ordering}; #[cfg(target_os = "linux")] use {std::time::Duration, tracing::info}; use super::owned_buffers_io::io_buf_ext::FullSlice; use super::owned_buffers_io::slice::SliceMutExt; use super::{FileGuard, Metadata}; #[cfg(target_os = "linux")] pub(super) fn epoll_uring_error_to_std( e: tokio_epoll_uring::Error<std::io::Error>, ) -> std::io::Error { match e { tokio_epoll_uring::Error::Op(e) => e, tokio_epoll_uring::Error::System(system) => std::io::Error::other(system), } } impl IoEngine { pub(super) async fn read_at<Buf>( &self, file_guard: FileGuard, offset: u64, mut slice: tokio_epoll_uring::Slice<Buf>, ) -> ( (FileGuard, tokio_epoll_uring::Slice<Buf>), std::io::Result<usize>, ) where Buf: tokio_epoll_uring::IoBufMut + Send, { match self { IoEngine::NotSet => panic!("not initialized"), IoEngine::StdFs => { let rust_slice = slice.as_mut_rust_slice_full_zeroed(); let res = file_guard.with_std_file(|std_file| std_file.read_at(rust_slice, offset)); ((file_guard, slice), res) } #[cfg(target_os = "linux")] IoEngine::TokioEpollUring => { let system = tokio_epoll_uring_ext::thread_local_system().await; let (resources, res) = 
retry_ecanceled_once((file_guard, slice), |(file_guard, slice)| async { system.read(file_guard, offset, slice).await }) .await; (resources, res.map_err(epoll_uring_error_to_std)) } } } pub(super) async fn sync_all(&self, file_guard: FileGuard) -> (FileGuard, std::io::Result<()>) { match self { IoEngine::NotSet => panic!("not initialized"), IoEngine::StdFs => { let res = file_guard.with_std_file(|std_file| std_file.sync_all()); (file_guard, res) } #[cfg(target_os = "linux")] IoEngine::TokioEpollUring => { let system = tokio_epoll_uring_ext::thread_local_system().await; let (resources, res) = retry_ecanceled_once(file_guard, |file_guard| async { system.fsync(file_guard).await }) .await; (resources, res.map_err(epoll_uring_error_to_std)) } } } pub(super) async fn sync_data( &self, file_guard: FileGuard, ) -> (FileGuard, std::io::Result<()>) { match self { IoEngine::NotSet => panic!("not initialized"), IoEngine::StdFs => { let res = file_guard.with_std_file(|std_file| std_file.sync_data()); (file_guard, res) } #[cfg(target_os = "linux")] IoEngine::TokioEpollUring => { let system = tokio_epoll_uring_ext::thread_local_system().await; let (resources, res) = retry_ecanceled_once(file_guard, |file_guard| async { system.fdatasync(file_guard).await }) .await; (resources, res.map_err(epoll_uring_error_to_std)) } } } pub(super) async fn metadata( &self, file_guard: FileGuard, ) -> (FileGuard, std::io::Result<Metadata>) { match self { IoEngine::NotSet => panic!("not initialized"), IoEngine::StdFs => { let res = file_guard.with_std_file(|std_file| std_file.metadata().map(Metadata::from)); (file_guard, res) } #[cfg(target_os = "linux")] IoEngine::TokioEpollUring => { let system = tokio_epoll_uring_ext::thread_local_system().await; let (resources, res) = retry_ecanceled_once(file_guard, |file_guard| async { system.statx(file_guard).await }) .await; ( resources, res.map_err(epoll_uring_error_to_std).map(Metadata::from), ) } } } pub(super) async fn set_len( &self, file_guard: 
FileGuard, len: u64, ) -> (FileGuard, std::io::Result<()>) { match self { IoEngine::NotSet => panic!("not initialized"), IoEngine::StdFs => { let res = file_guard.with_std_file(|std_file| std_file.set_len(len)); (file_guard, res) } #[cfg(target_os = "linux")] IoEngine::TokioEpollUring => { // TODO: ftruncate op for tokio-epoll-uring // Don't forget to use retry_ecanceled_once let res = file_guard.with_std_file(|std_file| std_file.set_len(len)); (file_guard, res) } } } pub(super) async fn write_at<B: IoBuf + Send>( &self, file_guard: FileGuard, offset: u64, buf: FullSlice<B>, ) -> ((FileGuard, FullSlice<B>), std::io::Result<usize>) { match self { IoEngine::NotSet => panic!("not initialized"), IoEngine::StdFs => { let result = file_guard.with_std_file(|std_file| std_file.write_at(&buf, offset)); ((file_guard, buf), result) } #[cfg(target_os = "linux")] IoEngine::TokioEpollUring => { let system = tokio_epoll_uring_ext::thread_local_system().await; let ((file_guard, slice), res) = retry_ecanceled_once( (file_guard, buf.into_raw_slice()), async |(file_guard, buf)| system.write(file_guard, offset, buf).await, ) .await; ( (file_guard, FullSlice::must_new(slice)), res.map_err(epoll_uring_error_to_std), ) } } } /// If we switch a user of [`tokio::fs`] to use [`super::io_engine`], /// they'd start blocking the executor thread if [`IoEngine::StdFs`] is configured /// whereas before the switch to [`super::io_engine`], that wasn't the case. /// This method helps avoid such a regression. /// /// Panics if the `spawn_blocking` fails, see [`tokio::task::JoinError`] for reasons why that can happen. 
pub(crate) async fn spawn_blocking_and_block_on_if_std<Fut, R>(&self, work: Fut) -> R where Fut: 'static + Send + std::future::Future<Output = R>, R: 'static + Send, { match self { IoEngine::NotSet => panic!("not initialized"), IoEngine::StdFs => { let span = tracing::info_span!("spawn_blocking_block_on_if_std"); tokio::task::spawn_blocking({ move || tokio::runtime::Handle::current().block_on(work.instrument(span)) }) .await .expect("failed to join blocking code most likely it panicked, panicking as well") } #[cfg(target_os = "linux")] IoEngine::TokioEpollUring => work.await, } } } /// We observe in tests that stop pageserver with SIGTERM immediately after it was ingesting data, /// occasionally buffered writers fail (and get retried by BufferedWriter) with ECANCELED. /// The problem is believed to be a race condition in how io_uring handles punted async work (io-wq) and signals. /// Investigation ticket: <https://github.com/neondatabase/neon/issues/11446> /// /// This function retries the operation once if it fails with ECANCELED. /// ONLY USE FOR IDEMPOTENT [`super::VirtualFile`] operations. 
#[cfg(target_os = "linux")] pub(super) async fn retry_ecanceled_once<F, Fut, T, V>( resources: T, f: F, ) -> (T, Result<V, tokio_epoll_uring::Error<std::io::Error>>) where F: Fn(T) -> Fut, Fut: std::future::Future<Output = (T, Result<V, tokio_epoll_uring::Error<std::io::Error>>)>, T: Send, V: Send, { let (resources, res) = f(resources).await; let Err(e) = res else { return (resources, res); }; let tokio_epoll_uring::Error::Op(err) = e else { return (resources, Err(e)); }; if err.raw_os_error() != Some(nix::libc::ECANCELED) { return (resources, Err(tokio_epoll_uring::Error::Op(err))); } { static RATE_LIMIT: std::sync::Mutex<utils::rate_limit::RateLimit> = std::sync::Mutex::new(utils::rate_limit::RateLimit::new(Duration::from_secs(1))); let mut guard = RATE_LIMIT.lock().unwrap(); guard.call2(|rate_limit_stats| { info!( %rate_limit_stats, "ECANCELED observed, assuming it is due to a signal being received by the submitting thread, retrying after a delay; this message is rate-limited" ); }); drop(guard); } tokio::time::sleep(Duration::from_millis(100)).await; // something big enough to beat even heavily overcommitted CI runners let (resources, res) = f(resources).await; (resources, res) } pub(super) fn panic_operation_must_be_idempotent() { panic!( "unsupported; io_engine may retry operations internally and thus needs them to be idempotent (retry_ecanceled_once)" ) } pub enum FeatureTestResult { PlatformPreferred(IoEngineKind), Worse { engine: IoEngineKind, remark: String, }, } impl FeatureTestResult { #[cfg(target_os = "linux")] const PLATFORM_PREFERRED: IoEngineKind = IoEngineKind::TokioEpollUring; #[cfg(not(target_os = "linux"))] const PLATFORM_PREFERRED: IoEngineKind = IoEngineKind::StdFs; } impl From<FeatureTestResult> for IoEngineKind { fn from(val: FeatureTestResult) -> Self { match val { FeatureTestResult::PlatformPreferred(e) => e, FeatureTestResult::Worse { engine, .. } => engine, } } } /// Somewhat costly under the hood, do only once. 
/// Panics if we can't set up the feature test. pub fn feature_test() -> anyhow::Result<FeatureTestResult> { std::thread::spawn(|| { #[cfg(not(target_os = "linux"))] { Ok(FeatureTestResult::PlatformPreferred( FeatureTestResult::PLATFORM_PREFERRED, )) } #[cfg(target_os = "linux")] { let rt = tokio::runtime::Builder::new_current_thread() .enable_all() .build() .unwrap(); Ok(match rt.block_on(tokio_epoll_uring::System::launch()) { Ok(_) => FeatureTestResult::PlatformPreferred({ assert!(matches!( IoEngineKind::TokioEpollUring, FeatureTestResult::PLATFORM_PREFERRED )); FeatureTestResult::PLATFORM_PREFERRED }), Err(tokio_epoll_uring::LaunchResult::IoUringBuild(e)) => { let remark = match e.raw_os_error() { Some(nix::libc::EPERM) => { // fall back "creating tokio-epoll-uring fails with EPERM, assuming it's admin-disabled " .to_string() } Some(nix::libc::EFAULT) => { // fail feature test anyhow::bail!( "creating tokio-epoll-uring fails with EFAULT, might have corrupted memory" ); } Some(_) | None => { // fall back format!("creating tokio-epoll-uring fails with error: {e:#}") } }; FeatureTestResult::Worse { engine: IoEngineKind::StdFs, remark, } } }) } }) .join() .unwrap() } /// For use in benchmark binaries only. /// /// Benchmarks which initialize `virtual_file` need to know what engine to use, but we also /// don't want to silently fall back to slower I/O engines in a benchmark: this could waste /// developer time trying to figure out why it's slow. /// /// In practice, this method will either return IoEngineKind::TokioEpollUring, or panic. pub fn io_engine_for_bench() -> IoEngineKind { #[cfg(not(target_os = "linux"))] { panic!("This benchmark does I/O and can only give a representative result on Linux"); } #[cfg(target_os = "linux")] { match feature_test().unwrap() { FeatureTestResult::PlatformPreferred(engine) => engine, FeatureTestResult::Worse { engine: _engine, remark, } => { panic!("This benchmark does I/O can requires the preferred I/O engine: {remark}"); } } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file/temporary.rs
pageserver/src/virtual_file/temporary.rs
use tracing::error;
use utils::sync::gate::GateGuard;

use crate::context::RequestContext;

use super::{
    MaybeFatalIo, VirtualFile,
    owned_buffers_io::{
        io_buf_aligned::IoBufAligned, io_buf_ext::FullSlice, write::OwnedAsyncWriter,
    },
};

/// A wrapper around [`super::VirtualFile`] that deletes the file on drop.
/// For use as a [`OwnedAsyncWriter`] in [`super::owned_buffers_io::write::BufferedWriter`].
#[derive(Debug)]
pub struct TempVirtualFile {
    // `None` only after `disarm_into_inner` or after `Drop` ran; all accessors
    // expect `Some` and panic otherwise.
    inner: Option<Inner>,
}

#[derive(Debug)]
struct Inner {
    file: VirtualFile,
    /// Gate guard is held on as long as we need to do operations in the path (delete on drop)
    _gate_guard: GateGuard,
}

impl OwnedAsyncWriter for TempVirtualFile {
    // Both methods forward to the underlying VirtualFile via Deref.
    fn write_all_at<Buf: IoBufAligned + Send>(
        &self,
        buf: FullSlice<Buf>,
        offset: u64,
        ctx: &RequestContext,
    ) -> impl std::future::Future<Output = (FullSlice<Buf>, std::io::Result<()>)> + Send {
        VirtualFile::write_all_at(self, buf, offset, ctx)
    }

    async fn set_len(&self, len: u64, ctx: &RequestContext) -> std::io::Result<()> {
        VirtualFile::set_len(self, len, ctx).await
    }
}

impl Drop for TempVirtualFile {
    /// Best-effort unlink of the file from disk; no-op if the wrapper was
    /// already disarmed via [`TempVirtualFile::disarm_into_inner`].
    fn drop(&mut self) {
        let Some(Inner { file, _gate_guard }) = self.inner.take() else {
            return;
        };
        let path = file.path();
        // maybe_fatal_err escalates fatal I/O errors; other failures are
        // only logged — the file may already be gone.
        if let Err(e) =
            std::fs::remove_file(path).maybe_fatal_err("failed to remove the virtual file")
        {
            error!(err=%e, path=%path, "failed to remove");
        }
        // Gate guard is released only after the filesystem operation above.
        drop(_gate_guard);
    }
}

impl std::ops::Deref for TempVirtualFile {
    type Target = VirtualFile;

    fn deref(&self) -> &Self::Target {
        &self
            .inner
            .as_ref()
            .expect("only None after into_inner or drop")
            .file
    }
}

impl std::ops::DerefMut for TempVirtualFile {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self
            .inner
            .as_mut()
            .expect("only None after into_inner or drop")
            .file
    }
}

impl TempVirtualFile {
    /// The caller is responsible for ensuring that the path of `virtual_file` is not reused
    /// until after this TempVirtualFile's `Drop` impl has completed.
    /// Failure to do so will result in unlinking of the reused path by the original instance's Drop impl.
    /// The best way to do so is by using a monotonic counter as a disambiguator.
    /// TODO: centralize this disambiguator pattern inside this struct.
    /// => <https://github.com/neondatabase/neon/pull/11549#issuecomment-2824592831>
    pub fn new(virtual_file: VirtualFile, gate_guard: GateGuard) -> Self {
        Self {
            inner: Some(Inner {
                file: virtual_file,
                _gate_guard: gate_guard,
            }),
        }
    }

    /// Dismantle this wrapper and return the underlying [`VirtualFile`].
    /// This disables auto-unlinking functionality that is the essence of this wrapper.
    ///
    /// The gate guard is dropped as well; it is the callers responsibility to ensure filesystem
    /// operations after calls to this functions are still gated by some other gate guard.
    ///
    /// TODO:
    /// - centralize the common usage pattern of callers (sync_all(self), rename(self, dst), sync_all(dst.parent))
    ///   => <https://github.com/neondatabase/neon/pull/11549#issuecomment-2824592831>
    pub fn disarm_into_inner(mut self) -> VirtualFile {
        // Taking `inner` here means the subsequent Drop sees None and skips
        // the unlink.
        self.inner
            .take()
            .expect("only None after into_inner or drop, and we are into_inner, and we consume")
            .file
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file/metadata.rs
pageserver/src/virtual_file/metadata.rs
use std::fs;

/// File metadata, abstracting over the two io engines:
/// std's [`fs::Metadata`] and (on Linux) tokio-epoll-uring's `statx` result.
pub enum Metadata {
    StdFs(fs::Metadata),
    #[cfg(target_os = "linux")]
    // Boxed: the statx struct is large relative to fs::Metadata.
    TokioEpollUring(Box<tokio_epoll_uring::ops::statx::statx>),
}

#[cfg(target_os = "linux")]
impl From<Box<tokio_epoll_uring::ops::statx::statx>> for Metadata {
    fn from(value: Box<tokio_epoll_uring::ops::statx::statx>) -> Self {
        Metadata::TokioEpollUring(value)
    }
}

impl From<std::fs::Metadata> for Metadata {
    fn from(value: std::fs::Metadata) -> Self {
        Metadata::StdFs(value)
    }
}

impl Metadata {
    /// File size in bytes, regardless of which engine produced the metadata.
    pub fn len(&self) -> u64 {
        match self {
            Metadata::StdFs(metadata) => metadata.len(),
            #[cfg(target_os = "linux")]
            Metadata::TokioEpollUring(statx) => statx.stx_size,
        }
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file/owned_buffers_io/slice.rs
pageserver/src/virtual_file/owned_buffers_io/slice.rs
use tokio_epoll_uring::{BoundedBuf, BoundedBufMut, IoBufMut, Slice}; pub(crate) trait SliceMutExt { /// Get a `&mut[0..self.bytes_total()`] slice, for when you need to do borrow-based IO. /// /// See the test case `test_slice_full_zeroed` for the difference to just doing `&slice[..]` fn as_mut_rust_slice_full_zeroed(&mut self) -> &mut [u8]; } impl<B> SliceMutExt for Slice<B> where B: IoBufMut, { #[inline(always)] fn as_mut_rust_slice_full_zeroed(&mut self) -> &mut [u8] { // zero-initialize the uninitialized parts of the buffer so we can create a Rust slice // // SAFETY: we own `slice`, don't write outside the bounds unsafe { let to_init = self.bytes_total() - self.bytes_init(); self.stable_mut_ptr() .add(self.bytes_init()) .write_bytes(0, to_init); self.set_init(self.bytes_total()); }; let bytes_total = self.bytes_total(); &mut self[0..bytes_total] } } #[cfg(test)] mod tests { use std::io::Read; use bytes::Buf; use tokio_epoll_uring::Slice; use super::*; #[test] fn test_slice_full_zeroed() { let make_fake_file = || bytes::BytesMut::from(&b"12345"[..]).reader(); // before we start the test, let's make sure we have a shared understanding of what slice_full does { let buf = Vec::with_capacity(3); let slice: Slice<_> = buf.slice_full(); assert_eq!(slice.bytes_init(), 0); assert_eq!(slice.bytes_total(), 3); let rust_slice = &slice[..]; assert_eq!( rust_slice.len(), 0, "Slice only derefs to a &[u8] of the initialized part" ); } // and also let's establish a shared understanding of .slice() { let buf = Vec::with_capacity(3); let slice: Slice<_> = buf.slice(0..2); assert_eq!(slice.bytes_init(), 0); assert_eq!(slice.bytes_total(), 2); let rust_slice = &slice[..]; assert_eq!( rust_slice.len(), 0, "Slice only derefs to a &[u8] of the initialized part" ); } // the above leads to the easy mistake of using slice[..] 
for borrow-based IO like so: { let buf = Vec::with_capacity(3); let mut slice: Slice<_> = buf.slice_full(); assert_eq!(slice[..].len(), 0); let mut file = make_fake_file(); file.read_exact(&mut slice[..]).unwrap(); // one might think this reads 3 bytes but it reads 0 assert_eq!(&slice[..] as &[u8], &[][..] as &[u8]); } // With owned buffers IO like with VirtualFilem, you could totally // pass in a `Slice` with bytes_init()=0 but bytes_total()=5 // and it will read 5 bytes into the slice, and return a slice that has bytes_init()=5. { // TODO: demo } // // Ok, now that we have a shared understanding let's demo how to use the extension trait. // // slice_full() { let buf = Vec::with_capacity(3); let mut slice: Slice<_> = buf.slice_full(); let rust_slice = slice.as_mut_rust_slice_full_zeroed(); assert_eq!(rust_slice.len(), 3); assert_eq!(rust_slice, &[0, 0, 0]); let mut file = make_fake_file(); file.read_exact(rust_slice).unwrap(); assert_eq!(rust_slice, b"123"); assert_eq!(&slice[..], b"123"); } // .slice(..) { let buf = Vec::with_capacity(3); let mut slice: Slice<_> = buf.slice(0..2); let rust_slice = slice.as_mut_rust_slice_full_zeroed(); assert_eq!(rust_slice.len(), 2); assert_eq!(rust_slice, &[0, 0]); let mut file = make_fake_file(); file.read_exact(rust_slice).unwrap(); assert_eq!(rust_slice, b"12"); assert_eq!(&slice[..], b"12"); } } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer.rs
pageserver/src/virtual_file/owned_buffers_io/aligned_buffer.rs
pub mod alignment; pub mod buffer; pub mod buffer_mut; pub mod raw; pub mod slice; pub use alignment::*; pub use buffer_mut::AlignedBufferMut; pub use slice::AlignedSlice;
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file/owned_buffers_io/io_buf_ext.rs
pageserver/src/virtual_file/owned_buffers_io/io_buf_ext.rs
//! See [`FullSlice`]. use std::ops::{Deref, Range}; use bytes::{Bytes, BytesMut}; use tokio_epoll_uring::{BoundedBuf, IoBuf, Slice}; use super::write::CheapCloneForRead; use crate::virtual_file::{IoBuffer, IoBufferMut}; /// The true owned equivalent for Rust [`slice`]. Use this for the write path. /// /// Unlike [`tokio_epoll_uring::Slice`], which we unfortunately inherited from `tokio-uring`, /// [`FullSlice`] is guaranteed to have all its bytes initialized. This means that /// [`<FullSlice as Deref<Target = [u8]>>::len`] is equal to [`Slice::bytes_init`] and [`Slice::bytes_total`]. /// pub struct FullSlice<B> { slice: Slice<B>, } impl<B> FullSlice<B> where B: IoBuf, { pub(crate) fn must_new(slice: Slice<B>) -> Self { assert_eq!(slice.bytes_init(), slice.bytes_total()); FullSlice { slice } } pub(crate) fn into_raw_slice(self) -> Slice<B> { let FullSlice { slice: s } = self; s } } impl<B> Deref for FullSlice<B> where B: IoBuf, { type Target = [u8]; fn deref(&self) -> &[u8] { let rust_slice = &self.slice[..]; assert_eq!(rust_slice.len(), self.slice.bytes_init()); assert_eq!(rust_slice.len(), self.slice.bytes_total()); rust_slice } } impl<B> CheapCloneForRead for FullSlice<B> where B: IoBuf + CheapCloneForRead, { fn cheap_clone(&self) -> Self { let bounds = self.slice.bounds(); let clone = self.slice.get_ref().cheap_clone(); let slice = clone.slice(bounds); Self { slice } } } pub(crate) trait IoBufExt { /// Get a [`FullSlice`] for the entire buffer, i.e., `self[..]` or `self[0..self.len()]`. fn slice_len(self) -> FullSlice<Self> where Self: Sized; } macro_rules! impl_io_buf_ext { ($T:ty) => { impl IoBufExt for $T { #[inline(always)] fn slice_len(self) -> FullSlice<Self> { let len = self.len(); let s = if len == 0 { // `BoundedBuf::slice(0..len)` or `BoundedBuf::slice(..)` has an incorrect assertion, // causing a panic if len == 0. // The Slice::from_buf_bounds has the correct assertion (<= instead of <). 
// => https://github.com/neondatabase/tokio-epoll-uring/issues/46 let slice = self.slice_full(); let mut bounds: Range<_> = slice.bounds(); bounds.end = bounds.start; Slice::from_buf_bounds(slice.into_inner(), bounds) } else { self.slice(0..len) }; FullSlice::must_new(s) } } }; } impl_io_buf_ext!(Bytes); impl_io_buf_ext!(BytesMut); impl_io_buf_ext!(Vec<u8>); impl_io_buf_ext!(IoBufferMut); impl_io_buf_ext!(IoBuffer);
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file/owned_buffers_io/write.rs
pageserver/src/virtual_file/owned_buffers_io/write.rs
mod flush; use bytes::BufMut; pub(crate) use flush::FlushControl; use flush::FlushHandle; pub(crate) use flush::FlushTaskError; use flush::ShutdownRequest; use tokio_epoll_uring::IoBuf; use tokio_util::sync::CancellationToken; use tracing::trace; use super::io_buf_aligned::IoBufAligned; use super::io_buf_aligned::IoBufAlignedMut; use super::io_buf_ext::{FullSlice, IoBufExt}; use crate::context::RequestContext; use crate::virtual_file::UsizeIsU64; use crate::virtual_file::{IoBuffer, IoBufferMut}; pub(crate) trait CheapCloneForRead { /// Returns a cheap clone of the buffer. fn cheap_clone(&self) -> Self; } impl CheapCloneForRead for IoBuffer { fn cheap_clone(&self) -> Self { // Cheap clone over an `Arc`. self.clone() } } /// A trait for doing owned-buffer write IO. /// Think [`tokio::io::AsyncWrite`] but with owned buffers. /// The owned buffers need to be aligned due to Direct IO requirements. pub trait OwnedAsyncWriter { fn write_all_at<Buf: IoBufAligned + Send>( &self, buf: FullSlice<Buf>, offset: u64, ctx: &RequestContext, ) -> impl std::future::Future<Output = (FullSlice<Buf>, std::io::Result<()>)> + Send; fn set_len( &self, len: u64, ctx: &RequestContext, ) -> impl Future<Output = std::io::Result<()>> + Send; } /// A wrapper aorund an [`OwnedAsyncWriter`] that uses a [`Buffer`] to batch /// small writes into larger writes of size [`Buffer::cap`]. /// /// The buffer is flushed if and only if it is full ([`Buffer::pending`] == [`Buffer::cap`]). 
/// This guarantees that writes to the filesystem happen /// - at offsets that are multiples of [`Buffer::cap`] /// - in lengths that are multiples of [`Buffer::cap`] /// /// Above property is useful for Direct IO, where whatever the /// effectively dominating disk-sector/filesystem-block/memory-page size /// determines the requirements on /// - the alignment of the pointer passed to the read/write operation /// - the value of `count` (i.e., the length of the read/write operation) /// which must be a multiple of the dominating sector/block/page size. /// /// See [`BufferedWriter::shutdown`] / [`BufferedWriterShutdownMode`] for different /// ways of dealing with the special case that the buffer is not full by the time /// we are done writing. /// /// The first flush to the underlying `W` happens at offset `start_offset` (arg of [`BufferedWriter::new`]). /// The next flush is to offset `start_offset + Buffer::cap`. The one after at `start_offset + 2 * Buffer::cap` and so on. /// /// TODO: decouple buffer capacity from alignment requirement. /// Right now we assume [`Buffer::cap`] is the alignment requirement, /// but actually [`Buffer::cap`] should only determine how often we flush /// while writing, while a separate alignment requirement argument should /// be passed to determine alignment requirement. This could be used by /// [`BufferedWriterShutdownMode::PadThenTruncate`] to avoid excessive /// padding of zeroes. For example, today, with a capacity of 64KiB, we /// would pad up to 64KiB-1 bytes of zeroes, then truncate off 64KiB-1. /// This is wasteful, e.g., if the alignment requirement is 4KiB, we only /// need to pad & truncate up to 4KiB-1 bytes of zeroes /// // TODO(yuchen): For large write, implementing buffer bypass for aligned parts of the write could be beneficial to throughput, // since we would avoid copying majority of the data into the internal buffer. 
// https://github.com/neondatabase/neon/issues/10101 pub struct BufferedWriter<B: Buffer, W> { /// Clone of the buffer that was last submitted to the flush loop. /// `None` if no flush request has been submitted, Some forever after. pub(super) maybe_flushed: Option<FullSlice<B::IoBuf>>, /// New writes are accumulated here. /// `None` only during submission while we wait for flush loop to accept /// the full dirty buffer in exchange for a clean buffer. /// If that exchange fails with an [`FlushTaskError`], the write path /// bails and leaves this as `None`. /// Subsequent writes will panic if attempted. /// The read path continues to work without error because [`Self::maybe_flushed`] /// and [`Self::bytes_submitted`] are advanced before the flush loop exchange starts, /// so, they will never try to read from [`Self::mutable`] anyway, because it's past /// the [`Self::maybe_flushed`] point. mutable: Option<B>, /// A handle to the background flush task for writting data to disk. flush_handle: FlushHandle<B::IoBuf, W>, /// The number of bytes submitted to the background task. bytes_submitted: u64, } /// How [`BufferedWriter::shutdown`] should deal with pending (=not-yet-flushed) data. /// /// Cf the [`BufferedWriter`] comment's paragraph for context on why we need to think about this. pub enum BufferedWriterShutdownMode { /// Drop pending data, don't write back to file. DropTail, /// Pad the pending data with zeroes (cf [`usize::next_multiple_of`]). ZeroPadToNextMultiple(usize), /// Fill the IO buffer with zeroes, flush to disk, the `ftruncate` the /// file to the exact number of bytes written to [`Self`]. /// /// TODO: see in [`BufferedWriter`] comment about decoupling buffer capacity from alignment requirement. 
PadThenTruncate, } impl<B, Buf, W> BufferedWriter<B, W> where B: IoBufAlignedMut + Buffer<IoBuf = Buf> + Send + 'static, Buf: IoBufAligned + Send + Sync + CheapCloneForRead, W: OwnedAsyncWriter + Send + Sync + 'static + std::fmt::Debug, { /// Creates a new buffered writer. /// /// The `buf_new` function provides a way to initialize the owned buffers used by this writer. pub fn new( writer: W, start_offset: u64, buf_new: impl Fn() -> B, gate_guard: utils::sync::gate::GateGuard, cancel: CancellationToken, ctx: &RequestContext, flush_task_span: tracing::Span, ) -> Self { Self { mutable: Some(buf_new()), maybe_flushed: None, flush_handle: FlushHandle::spawn_new( writer, buf_new(), gate_guard, cancel, ctx.attached_child(), flush_task_span, ), bytes_submitted: start_offset, } } /// Returns the number of bytes submitted to the background flush task. pub fn bytes_submitted(&self) -> u64 { self.bytes_submitted } /// Panics if used after any of the write paths returned an error pub fn inspect_mutable(&self) -> Option<&B> { self.mutable.as_ref() } /// Gets a reference to the maybe flushed read-only buffer. /// Returns `None` if the writer has not submitted any flush request. 
    /// Resolves the partially filled tail of the mutable buffer according to `mode`,
    /// flushes any remaining data, shuts down the background flush task, and returns
    /// the final logical length together with the underlying writer.
    ///
    /// Modes (see the match arms below):
    /// - `DropTail`: discard the pending tail; final length is only what was already submitted.
    /// - `ZeroPadToNextMultiple(m)`: zero-pad the tail up to the next multiple of `m`;
    ///   the padding is included in the final length.
    /// - `PadThenTruncate`: zero-pad up to the buffer's alignment requirement (currently
    ///   equal to buffer capacity) so the last write is aligned, then request a `set_len`
    ///   so the destination is truncated back to the unpadded length.
    #[cfg_attr(target_os = "macos", allow(dead_code))]
    pub async fn shutdown(
        mut self,
        mode: BufferedWriterShutdownMode,
        ctx: &RequestContext,
    ) -> Result<(u64, W), FlushTaskError> {
        let mut mutable = self.mutable.take().expect("must not use after an error");
        let unpadded_pending = mutable.pending();
        // Final logical length of the written data, and the request that the
        // background task executes after the last flush.
        let final_len: u64;
        let shutdown_req;
        match mode {
            BufferedWriterShutdownMode::DropTail => {
                trace!(pending=%mutable.pending(), "dropping pending data");
                drop(mutable);

                final_len = self.bytes_submitted;
                shutdown_req = ShutdownRequest { set_len: None };
            }
            BufferedWriterShutdownMode::ZeroPadToNextMultiple(next_multiple) => {
                let len = mutable.pending();
                let cap = mutable.cap();
                assert!(
                    len <= cap,
                    "buffer impl ensures this, but let's check because the extend_with below would panic if we go beyond"
                );
                let padded_len = len.next_multiple_of(next_multiple);
                assert!(
                    padded_len <= cap,
                    "caller specified a multiple that is larger than the buffer capacity"
                );
                let count = padded_len - len;
                mutable.extend_with(0, count);
                trace!(count, "padding with zeros");
                self.mutable = Some(mutable);

                // Padding counts toward the final length; no truncation needed.
                final_len = self.bytes_submitted + padded_len.into_u64();
                shutdown_req = ShutdownRequest { set_len: None };
            }
            BufferedWriterShutdownMode::PadThenTruncate => {
                let len = mutable.pending();
                let cap = mutable.cap();
                // TODO: see struct comment TODO on decoupling buffer capacity from alignment requirement.
                let alignment_requirement = cap;
                assert!(len <= cap, "buffer impl should ensure this");
                let padding_end_offset = len.next_multiple_of(alignment_requirement);
                assert!(
                    padding_end_offset <= cap,
                    "{padding_end_offset} <= {cap} ({alignment_requirement})"
                );
                let count = padding_end_offset - len;
                mutable.extend_with(0, count);
                trace!(count, "padding with zeros");
                self.mutable = Some(mutable);

                // Note: the padding does NOT count toward the final length;
                // the set_len below truncates it away again.
                final_len = self.bytes_submitted + len.into_u64();
                shutdown_req = ShutdownRequest {
                    // Avoid set_len call if we didn't need to pad anything.
                    set_len: if count > 0 { Some(final_len) } else { None },
                };
            }
        };
        let padded_pending = self.mutable.as_ref().map(|b| b.pending());
        trace!(unpadded_pending, padded_pending, "padding done");
        // DropTail left `mutable` as None; the other modes re-stored it and need a flush.
        if self.mutable.is_some() {
            self.flush(ctx).await?;
        }

        let Self {
            mutable: _,
            maybe_flushed: _,
            mut flush_handle,
            bytes_submitted: _,
        } = self;
        let writer = flush_handle.shutdown(shutdown_req).await?;
        Ok((final_len, writer))
    }
pub(crate) async fn write_buffered_borrowed_controlled( &mut self, mut chunk: &[u8], ctx: &RequestContext, ) -> Result<(usize, Option<FlushControl>), FlushTaskError> { let chunk_len = chunk.len(); let mut control: Option<FlushControl> = None; while !chunk.is_empty() { let buf = self.mutable.as_mut().expect("must not use after an error"); let need = buf.cap() - buf.pending(); let have = chunk.len(); let n = std::cmp::min(need, have); buf.extend_from_slice(&chunk[..n]); chunk = &chunk[n..]; if buf.pending() >= buf.cap() { assert_eq!(buf.pending(), buf.cap()); if let Some(control) = control.take() { control.release().await; } control = self.flush(ctx).await?; } } Ok((chunk_len, control)) } /// This function can only error if the flush task got cancelled. /// In that case, we leave [`Self::mutable`] intentionally as `None`. /// /// The read path continues to function correctly; it can read up to the /// point where it could read before, i.e., including what was in [`Self::mutable`] /// before the call to this function, because that's now stored in [`Self::maybe_flushed`]. /// /// The write path becomes unavailable and will panic if used. /// The only correct solution to retry writes is to discard the entire [`BufferedWriter`], /// which upper layers of pageserver write path currently do not support. /// It is in fact quite hard to reason about what exactly happens in today's code. /// Best case we accumulate junk in the EphemeralFile, worst case is data corruption. #[must_use = "caller must explcitly check the flush control"] async fn flush( &mut self, _ctx: &RequestContext, ) -> Result<Option<FlushControl>, FlushTaskError> { let buf = self.mutable.take().expect("must not use after an error"); let buf_len = buf.pending(); if buf_len == 0 { self.mutable = Some(buf); return Ok(None); } // Prepare the buffer for read while flushing. let slice = buf.flush(); // NB: this assignment also drops thereference to the old buffer, allowing us to re-own & make it mutable below. 
self.maybe_flushed = Some(slice.cheap_clone()); let offset = self.bytes_submitted; self.bytes_submitted += u64::try_from(buf_len).unwrap(); // If we return/panic here or later, we'll leave mutable = None, breaking further // writers, but the read path should still work. let (recycled, flush_control) = self.flush_handle.flush(slice, offset).await?; // The only other place that could hold a reference to the recycled buffer // is in `Self::maybe_flushed`, but we have already replace it with the new buffer. let recycled = Buffer::reuse_after_flush(recycled.into_raw_slice().into_inner()); // We got back some recycled buffer, can open up for more writes again. self.mutable = Some(recycled); Ok(Some(flush_control)) } } /// A [`Buffer`] is used by [`BufferedWriter`] to batch smaller writes into larger ones. pub trait Buffer { type IoBuf: IoBuf; /// Capacity of the buffer. Must not change over the lifetime `self`.` fn cap(&self) -> usize; /// Add data to the buffer. /// Panics if there is not enough room to accomodate `other`'s content, i.e., /// panics if `other.len() > self.cap() - self.pending()`. fn extend_from_slice(&mut self, other: &[u8]); /// Add `count` bytes `val` into `self`. /// Panics if `count > self.cap() - self.pending()`. fn extend_with(&mut self, val: u8, count: usize); /// Number of bytes in the buffer. fn pending(&self) -> usize; /// Turns `self` into a [`FullSlice`] of the pending data /// so we can use [`tokio_epoll_uring`] to write it to disk. fn flush(self) -> FullSlice<Self::IoBuf>; /// After the write to disk is done and we have gotten back the slice, /// [`BufferedWriter`] uses this method to re-use the io buffer. 
impl Buffer for IoBufferMut {
    type IoBuf = IoBuffer;

    fn cap(&self) -> usize {
        self.capacity()
    }

    fn extend_from_slice(&mut self, other: &[u8]) {
        // Enforce the `Buffer` contract (capacity must not change) ourselves:
        // `IoBufferMut::extend_from_slice` would otherwise `reserve` and grow.
        if self.len() + other.len() > self.cap() {
            panic!("Buffer capacity exceeded");
        }

        IoBufferMut::extend_from_slice(self, other);
    }

    fn extend_with(&mut self, val: u8, count: usize) {
        // Same capacity guard as `extend_from_slice`.
        if self.len() + count > self.cap() {
            panic!("Buffer capacity exceeded");
        }

        IoBufferMut::put_bytes(self, val, count);
    }

    fn pending(&self) -> usize {
        self.len()
    }

    fn flush(self) -> FullSlice<Self::IoBuf> {
        // Freeze into the shared (read-only) buffer type; the full length is flushed.
        self.freeze().slice_len()
    }

    /// Caller should make sure that `iobuf` has only one strong reference before invoking this method,
    /// otherwise `into_mut` fails and this panics.
    fn reuse_after_flush(iobuf: Self::IoBuf) -> Self {
        let mut recycled = iobuf
            .into_mut()
            .expect("buffer should only have one strong reference");
        // Reset length to 0 (capacity is retained) so the buffer can be refilled.
        recycled.clear();
        recycled
    }
}
    impl OwnedAsyncWriter for RecorderWriter {
        async fn write_all_at<Buf: IoBufAligned + Send>(
            &self,
            buf: FullSlice<Buf>,
            offset: u64,
            _: &RequestContext,
        ) -> (FullSlice<Buf>, std::io::Result<()>) {
            // Record the written bytes and offset instead of doing any I/O,
            // then hand the buffer back for recycling, like a real writer would.
            self.recording.lock().unwrap().push(Op::Write {
                buf: Vec::from(&buf[..]),
                offset,
            });
            (buf, Ok(()))
        }
        async fn set_len(&self, len: u64, _ctx: &RequestContext) -> std::io::Result<()> {
            // Record the truncation request; always succeeds.
            self.recording.lock().unwrap().push(Op::SetLen { len });
            Ok(())
        }
    }
BufferedWriterShutdownMode::ZeroPadToNextMultiple(2) => { expect.push(Op::Write { offset: expect_next_offset, // it's legitimate for pad-to-next multiple 2 to be < alignment requirement 4 inferred from buffer capacity buf: b"m\0".to_vec(), }); } BufferedWriterShutdownMode::ZeroPadToNextMultiple(_) => unimplemented!(), BufferedWriterShutdownMode::PadThenTruncate => { expect.push(Op::Write { offset: expect_next_offset, buf: b"m\0\0\0".to_vec(), }); expect.push(Op::SetLen { len: 13 }); } } let (_, recorder) = writer.shutdown(mode, ctx).await?; assert_eq!(&*recorder.recording.lock().unwrap(), &expect); Ok(()) } #[tokio::test] async fn test_set_len_is_skipped_if_not_needed() -> anyhow::Result<()> { let ctx = test_ctx(); let ctx = &ctx; let recorder = RecorderWriter::default(); let gate = utils::sync::gate::Gate::default(); let cancel = CancellationToken::new(); let cap = 4; let mut writer = BufferedWriter::<_, RecorderWriter>::new( recorder, 0, || IoBufferMut::with_capacity(cap), gate.enter()?, cancel, ctx, tracing::Span::none(), ); // write a multiple of `cap` writer.write_buffered_borrowed(b"abc", ctx).await?; writer.write_buffered_borrowed(b"defgh", ctx).await?; let (_, recorder) = writer .shutdown(BufferedWriterShutdownMode::PadThenTruncate, ctx) .await?; let expect = { [(0, b"abcd"), (4, b"efgh")] .into_iter() .map(|(offset, v)| Op::Write { offset, buf: v[..].to_vec(), }) .collect::<Vec<_>>() }; assert_eq!( &*recorder.recording.lock().unwrap(), &expect, "set_len should not be called if the buffer is already aligned" ); Ok(()) } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file/owned_buffers_io/io_buf_aligned.rs
pageserver/src/virtual_file/owned_buffers_io/io_buf_aligned.rs
//! Marker traits tagging buffer types whose backing memory satisfies an
//! alignment guarantee (presumably the alignment required for direct I/O —
//! TODO confirm against the callers in `virtual_file`).

use tokio_epoll_uring::{IoBuf, IoBufMut};

use crate::virtual_file::{IoBuffer, IoBufferMut, PageWriteGuardBuf};

/// A marker trait for a mutable aligned buffer type.
pub trait IoBufAlignedMut: IoBufMut {}

/// A marker trait for an aligned buffer type.
pub trait IoBufAligned: IoBuf {}

// NOTE(review): implementing these traits is a promise about the concrete
// type's allocation alignment; nothing here enforces it.
impl IoBufAlignedMut for IoBufferMut {}

impl IoBufAligned for IoBuffer {}

impl IoBufAlignedMut for PageWriteGuardBuf {}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file/owned_buffers_io/write/flush.rs
pageserver/src/virtual_file/owned_buffers_io/write/flush.rs
use std::ops::ControlFlow; use tokio_util::sync::CancellationToken; use tracing::{Instrument, info_span, warn}; use utils::sync::duplex; use super::{Buffer, CheapCloneForRead, OwnedAsyncWriter}; use crate::context::RequestContext; use crate::virtual_file::MaybeFatalIo; use crate::virtual_file::owned_buffers_io::io_buf_aligned::IoBufAligned; use crate::virtual_file::owned_buffers_io::io_buf_ext::FullSlice; /// A handle to the flush task. pub struct FlushHandle<Buf, W> { inner: Option<FlushHandleInner<Buf, W>>, } pub struct FlushHandleInner<Buf, W> { /// A bi-directional channel that sends (buffer, offset) for writes, /// and receives recyled buffer. channel: duplex::mpsc::Duplex<Request<Buf>, FullSlice<Buf>>, /// Join handle for the background flush task. join_handle: tokio::task::JoinHandle<Result<W, FlushTaskError>>, } struct FlushRequest<Buf> { slice: FullSlice<Buf>, offset: u64, #[cfg(test)] ready_to_flush_rx: Option<tokio::sync::oneshot::Receiver<()>>, #[cfg(test)] done_flush_tx: Option<tokio::sync::oneshot::Sender<()>>, } pub struct ShutdownRequest { pub set_len: Option<u64>, } enum Request<Buf> { Flush(FlushRequest<Buf>), Shutdown(ShutdownRequest), } impl<Buf> Request<Buf> { fn op_str(&self) -> &'static str { match self { Request::Flush(_) => "flush", Request::Shutdown(_) => "shutdown", } } } /// Constructs a request and a control object for a new flush operation. #[cfg(not(test))] fn new_flush_op<Buf>(slice: FullSlice<Buf>, offset: u64) -> (FlushRequest<Buf>, FlushControl) { let request = FlushRequest { slice, offset }; let control = FlushControl::untracked(); (request, control) } /// Constructs a request and a control object for a new flush operation. 
impl FlushControl {
    /// Builds the test-mode control: holds both ends of the handshake channels
    /// that `new_flush_op` wired up to the `FlushRequest`.
    #[cfg(test)]
    fn not_started(
        ready_to_flush_tx: tokio::sync::oneshot::Sender<()>,
        done_flush_rx: tokio::sync::oneshot::Receiver<()>,
    ) -> Self {
        FlushControl {
            not_started: FlushNotStarted {
                ready_to_flush_tx,
                done_flush_rx,
            },
        }
    }

    /// Builds the production-mode control, which carries no state.
    #[cfg(not(test))]
    fn untracked() -> Self {
        FlushControl
    }

    /// In tests, turn flush control into a not started state.
    #[cfg(test)]
    pub(crate) fn into_not_started(self) -> FlushNotStarted {
        self.not_started
    }

    /// Release control to the submitted buffer.
    ///
    /// In a `cfg(test)` environment, the buffer is guaranteed to be flushed to
    /// disk once [`FlushControl::release`] finishes execution. In production
    /// this is a no-op.
    pub async fn release(self) {
        #[cfg(test)]
        {
            self.not_started
                .ready_to_flush()
                .wait_until_flush_is_done()
                .await;
        }
    }
}
    /// Submits a buffer to be flushed in the background task.
    ///
    /// Returns a buffer that completed flushing for re-use, length reset to 0,
    /// capacity unchanged. (Stashing the submitted slice for concurrent reads is
    /// the caller's job; see `BufferedWriter::flush` and its `maybe_flushed`.)
    pub async fn flush(
        &mut self,
        slice: FullSlice<Buf>,
        offset: u64,
    ) -> Result<(FullSlice<Buf>, FlushControl), FlushTaskError> {
        let (request, flush_control) = new_flush_op(slice, offset);

        // Submits the buffer to the background task.
        self.send(Request::Flush(request)).await?;

        // Wait for an available buffer from the background flush task.
        // This is the BACKPRESSURE mechanism: if the flush task can't keep up,
        // then the write path will eventually wait for it here.
        let Some(recycled) = self.inner_mut().channel.recv().await else {
            return self.handle_error().await;
        };

        Ok((recycled, flush_control))
    }
    pub async fn shutdown(&mut self, req: ShutdownRequest) -> Result<W, FlushTaskError> {
        self.send(Request::Shutdown(req)).await?;
        self.wait().await
    }

    /// Sends a request to the background task; if the channel is closed, joins
    /// the task to surface the error it must have exited with.
    async fn send(&mut self, request: Request<Buf>) -> Result<(), FlushTaskError> {
        let submit = self.inner_mut().channel.send(request).await;
        if submit.is_err() {
            return self.handle_error().await;
        }
        Ok(())
    }

    /// Joins the flush task and propagates its terminal error.
    async fn handle_error<T>(&mut self) -> Result<T, FlushTaskError> {
        Err(self
            .wait()
            .await
            .expect_err("flush task only disconnects duplex if it exits with an error"))
    }

    /// Closes the send side of the duplex (so the task's recv loop terminates)
    /// and joins the background task, consuming `Self::inner`.
    async fn wait(&mut self) -> Result<W, FlushTaskError> {
        let handle = self
            .inner
            .take()
            .expect("must not use after we returned an error");
        drop(handle.channel.tx);
        handle.join_handle.await.unwrap()
    }

    /// Gets a mutable reference to the inner handle. Panics if [`Self::inner`] is `None`.
    /// This only happens if the handle is used after an error.
    fn inner_mut(&mut self) -> &mut FlushHandleInner<Buf, W> {
        self.inner
            .as_mut()
            .expect("must not use after we returned an error")
    }
/// Terminal error of the background flush task.
#[derive(Debug, thiserror::Error)]
pub enum FlushTaskError {
    /// The task observed its cancellation token fire and bailed out.
    #[error("flush task cancelled")]
    Cancelled,
}

impl FlushTaskError {
    /// Returns true iff the error is due to cancellation (currently the only variant).
    pub fn is_cancel(&self) -> bool {
        match self {
            FlushTaskError::Cancelled => true,
        }
    }
    /// Converts into an `anyhow::Error`, preserving the Display message.
    pub fn into_anyhow(self) -> anyhow::Error {
        match self {
            FlushTaskError::Cancelled => anyhow::anyhow!(self),
        }
    }
}
{ if self.cancel.is_cancelled() { return Err(FlushTaskError::Cancelled); } let result = async { let request: Request<Buf> = request_storage .take().expect( "likely previous invocation of this future didn't get polled to completion", ); match &request { Request::Shutdown(ShutdownRequest { set_len: None }) => { request_storage = Some(request); return ControlFlow::Break(()); }, Request::Flush(_) | Request::Shutdown(ShutdownRequest { set_len: Some(_) }) => { }, } if attempt > 1 { warn!(op=%request.op_str(), "retrying"); } // borrows so we can async move the requests into async block while not moving these borrows here let writer = &self.writer; let request_storage = &mut request_storage; let ctx = &self.ctx; let io_fut = match request { Request::Flush(FlushRequest { slice, offset, #[cfg(test)] ready_to_flush_rx, #[cfg(test)] done_flush_tx }) => futures::future::Either::Left(async move { #[cfg(test)] if let Some(ready_to_flush_rx) = ready_to_flush_rx { { // In test, wait for control to signal that we are ready to flush. if ready_to_flush_rx.await.is_err() { tracing::debug!("control dropped"); } } } let (slice, res) = writer.write_all_at(slice, offset, ctx).await; *request_storage = Some(Request::Flush(FlushRequest { slice, offset, #[cfg(test)] ready_to_flush_rx: None, // the contract is that we notify before first attempt #[cfg(test)] done_flush_tx })); res }), Request::Shutdown(ShutdownRequest { set_len }) => futures::future::Either::Right(async move { let set_len = set_len.expect("we filter out the None case above"); let res = writer.set_len(set_len, ctx).await; *request_storage = Some(Request::Shutdown(ShutdownRequest { set_len: Some(set_len), })); res }), }; // Don't cancel the io_fut by doing tokio::select with self.cancel.cancelled(). // The underlying tokio-epoll-uring slot / kernel operation is still ongoing and occupies resources. // If we retry indefinitely, we'll deplete those resources. 
// Future: teach tokio-epoll-uring io_uring operation cancellation, but still, // wait for cancelled ops to complete and discard their error. let res = io_fut.await; let res = res.maybe_fatal_err("owned_buffers_io flush"); let Err(err) = res else { if attempt > 1 { warn!(op=%op_kind, "retry succeeded"); } return ControlFlow::Break(()); }; warn!(%err, "error flushing buffered writer buffer to disk, retrying after backoff"); utils::backoff::exponential_backoff(attempt, 1.0, 10.0, &self.cancel).await; ControlFlow::Continue(()) } .instrument(info_span!("attempt", %attempt, %op_kind)) .await; match result { ControlFlow::Break(()) => break, ControlFlow::Continue(()) => continue, } } let request = request_storage.expect("loop must have run at least once"); let slice = match request { Request::Flush(FlushRequest { slice, #[cfg(test)] mut done_flush_tx, .. }) => { #[cfg(test)] { // In test, tell control we are done flushing buffer. if done_flush_tx.take().expect("always Some").send(()).is_err() { tracing::debug!("control dropped"); } } slice } Request::Shutdown(_) => { // next iteration will observe recv() returning None continue; } }; // Sends the buffer back to the handle for reuse. The handle is in charged of cleaning the buffer. let send_res = self.channel.send(slice).await; if send_res.is_err() { // Although channel is closed. Still need to finish flushing the remaining buffers. continue; } } Ok(self.writer) } } #[cfg(test)] pub(crate) struct FlushNotStarted { ready_to_flush_tx: tokio::sync::oneshot::Sender<()>, done_flush_rx: tokio::sync::oneshot::Receiver<()>, } #[cfg(test)] pub(crate) struct FlushInProgress { done_flush_rx: tokio::sync::oneshot::Receiver<()>, } #[cfg(test)] pub(crate) struct FlushDone; #[cfg(test)] impl FlushNotStarted { /// Signals the background task the buffer is ready to flush to disk. 
#[cfg(test)]
impl FlushInProgress {
    /// Waits until background flush is done.
    ///
    /// Resolves once the background task sends on the `done_flush_tx` side of
    /// the oneshot, i.e., after the buffer has been handed to the writer.
    pub async fn wait_until_flush_is_done(self) -> FlushDone {
        self.done_flush_rx.await.unwrap();
        FlushDone
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/slice.rs
pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/slice.rs
use std::ops::{Deref, DerefMut};

use super::alignment::{Alignment, ConstAlign};

/// Newtype for an aligned slice.
pub struct AlignedSlice<'a, const N: usize, A: Alignment> {
    /// underlying byte slice
    buf: &'a mut [u8; N],
    /// alignment marker
    _align: A,
}

impl<'a, const N: usize, const A: usize> AlignedSlice<'a, N, ConstAlign<A>> {
    /// Create a new aligned slice from a mutable byte slice. The input must already satisfy the alignment.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `buf` is aligned to `A` bytes. Note that a
    /// violation is caught by the runtime assert below (panic, not UB) — the
    /// `unsafe` marker documents the contract for callers. TODO confirm whether
    /// the assert is meant to stay in release builds.
    pub unsafe fn new_unchecked(buf: &'a mut [u8; N]) -> Self {
        let _align = ConstAlign::<A>;
        assert_eq!(buf.as_ptr().align_offset(_align.align()), 0);
        AlignedSlice { buf, _align }
    }
}

impl<const N: usize, A: Alignment> Deref for AlignedSlice<'_, N, A> {
    type Target = [u8; N];

    fn deref(&self) -> &Self::Target {
        self.buf
    }
}

impl<const N: usize, A: Alignment> DerefMut for AlignedSlice<'_, N, A> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.buf
    }
}

impl<const N: usize, A: Alignment> AsRef<[u8; N]> for AlignedSlice<'_, N, A> {
    fn as_ref(&self) -> &[u8; N] {
        self.buf
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/alignment.rs
pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/alignment.rs
/// Abstraction over how a buffer's alignment is known: either at compile time
/// (`ConstAlign`, via a const generic) or at run time (`RuntimeAlign`, via a field).
pub trait Alignment: std::marker::Unpin + 'static {
    /// Returns the required alignments.
    fn align(&self) -> usize;
}

/// Alignment at compile time, carried in the const generic parameter `A`.
#[derive(Debug, Clone, Copy)]
pub struct ConstAlign<const A: usize>;

/// Alignment at run time, carried as a plain field.
#[derive(Debug, Clone, Copy)]
pub struct RuntimeAlign {
    align: usize,
}

impl<const A: usize> Alignment for ConstAlign<A> {
    fn align(&self) -> usize {
        A
    }
}

impl Alignment for RuntimeAlign {
    fn align(&self) -> usize {
        self.align
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/raw.rs
pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/raw.rs
use core::slice;
use std::alloc::{self, Layout};
use std::cmp;
use std::mem::ManuallyDrop;

use super::alignment::{Alignment, ConstAlign};

/// Owning raw pointer to the aligned allocation.
#[derive(Debug)]
struct AlignedBufferPtr(*mut u8);

// SAFETY: We guarantee no one besides `AlignedBufferPtr` itself has the raw pointer.
unsafe impl Send for AlignedBufferPtr {}

// SAFETY: We guarantee no one besides `AlignedBufferPtr` itself has the raw pointer.
unsafe impl Sync for AlignedBufferPtr {}

/// An aligned buffer type.
#[derive(Debug)]
pub struct RawAlignedBuffer<A: Alignment> {
    // Pointer to the start of the allocation.
    ptr: AlignedBufferPtr,
    // Allocation size in bytes.
    capacity: usize,
    // Number of initialized bytes.
    len: usize,
    // Alignment witness (compile-time or run-time).
    align: A,
}

impl<const A: usize> RawAlignedBuffer<ConstAlign<A>> {
    /// Constructs a new, empty `IoBufferMut` with at least the specified capacity and alignment.
    ///
    /// The buffer will be able to hold at most `capacity` elements.
    /// NOTE(review): the original doc said "will never resize", but `reserve`
    /// below can reallocate — confirm the intended contract.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` _bytes_, or if the following alignment requirement is not met:
    /// * `align` must not be zero,
    ///
    /// * `align` must be a power of two,
    ///
    /// * `capacity`, when rounded up to the nearest multiple of `align`,
    ///   must not overflow isize (i.e., the rounded value must be
    ///   less than or equal to `isize::MAX`).
    pub fn with_capacity(capacity: usize) -> Self {
        let align = ConstAlign::<A>;
        let layout = Layout::from_size_align(capacity, align.align()).expect("Invalid layout");

        // NOTE(review): `alloc::alloc` requires a non-zero-size layout;
        // `with_capacity(0)` would be UB — confirm callers never pass 0.
        // SAFETY: Making an allocation with a sized and aligned layout. The memory is manually freed with the same layout.
        let ptr = unsafe {
            let ptr = alloc::alloc(layout);
            if ptr.is_null() {
                alloc::handle_alloc_error(layout);
            }
            AlignedBufferPtr(ptr)
        };

        RawAlignedBuffer {
            ptr,
            capacity,
            len: 0,
            align,
        }
    }
}

impl<A: Alignment> RawAlignedBuffer<A> {
    /// Returns the total number of bytes the buffer can hold.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Returns the alignment of the buffer.
    #[inline]
    pub fn align(&self) -> usize {
        self.align.align()
    }

    /// Returns the number of bytes in the buffer, also referred to as its 'length'.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Force the length of the buffer to `new_len`.
    ///
    /// # Safety
    ///
    /// The caller must ensure the first `new_len` bytes are initialized and that
    /// `new_len <= capacity` — the bound is only checked via `debug_assert`.
    #[inline]
    pub unsafe fn set_len(&mut self, new_len: usize) {
        debug_assert!(new_len <= self.capacity());
        self.len = new_len;
    }

    #[inline]
    pub fn as_ptr(&self) -> *const u8 {
        self.ptr.0
    }

    #[inline]
    pub fn as_mut_ptr(&mut self) -> *mut u8 {
        self.ptr.0
    }

    /// Extracts a slice containing the entire buffer.
    ///
    /// Equivalent to `&s[..]`.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        // SAFETY: The pointer is valid and `len` bytes are initialized.
        unsafe { slice::from_raw_parts(self.as_ptr(), self.len) }
    }

    /// Extracts a mutable slice of the entire buffer.
    ///
    /// Equivalent to `&mut s[..]`.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        // SAFETY: The pointer is valid and `len` bytes are initialized.
        unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) }
    }

    /// Drops all the contents of the buffer, setting its length to `0`.
    /// (Bytes are plain `u8`s, so no destructors run; capacity is retained.)
    #[inline]
    pub fn clear(&mut self) {
        self.len = 0;
    }

    /// Reserves capacity for at least `additional` more bytes to be inserted
    /// in the given `IoBufferMut`. The collection may reserve more space to
    /// speculatively avoid frequent reallocations. After calling `reserve`,
    /// capacity will be greater than or equal to `self.len() + additional`.
    /// Does nothing if capacity is already sufficient.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` _bytes_.
    pub fn reserve(&mut self, additional: usize) {
        if additional > self.capacity() - self.len() {
            self.reserve_inner(additional);
        }
    }

    // Grow path of `reserve`: reallocates to at least `len + additional`,
    // keeping the same alignment.
    fn reserve_inner(&mut self, additional: usize) {
        let Some(required_cap) = self.len().checked_add(additional) else {
            capacity_overflow()
        };

        let old_capacity = self.capacity();
        let align = self.align();
        // This guarantees exponential growth. The doubling cannot overflow
        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
        let cap = cmp::max(old_capacity * 2, required_cap);

        if !is_valid_alloc(cap) {
            capacity_overflow()
        }
        let new_layout = Layout::from_size_align(cap, self.align()).expect("Invalid layout");

        let old_ptr = self.as_mut_ptr();

        // SAFETY: old allocation was allocated with std::alloc::alloc with the same layout,
        // and we panic on null pointer.
        let (ptr, cap) = unsafe {
            let old_layout = Layout::from_size_align_unchecked(old_capacity, align);
            let ptr = alloc::realloc(old_ptr, old_layout, new_layout.size());
            if ptr.is_null() {
                alloc::handle_alloc_error(new_layout);
            }
            (AlignedBufferPtr(ptr), cap)
        };

        self.ptr = ptr;
        self.capacity = cap;
    }

    /// Shortens the buffer, keeping the first len bytes.
    /// A `len` greater than the current length is a no-op.
    pub fn truncate(&mut self, len: usize) {
        if len > self.len {
            return;
        }
        self.len = len;
    }

    /// Consumes and leaks the `IoBufferMut`, returning a mutable reference to the contents, &'a mut [u8].
    /// The allocation is never freed (`ManuallyDrop` suppresses `Drop`).
    pub fn leak<'a>(self) -> &'a mut [u8] {
        let mut buf = ManuallyDrop::new(self);
        // SAFETY: leaking the buffer as intended.
        unsafe { slice::from_raw_parts_mut(buf.as_mut_ptr(), buf.len) }
    }
}

fn capacity_overflow() -> ! {
    panic!("capacity overflow")
}

// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects.
// * We don't overflow `usize::MAX` and actually allocate too little.
//
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
// an extra guard for this in case we're running on a platform which can use
// all 4GB in user-space, e.g., PAE or x32.
#[inline]
fn is_valid_alloc(alloc_size: usize) -> bool {
    !(usize::BITS < 64 && alloc_size > isize::MAX as usize)
}

impl<A: Alignment> Drop for RawAlignedBuffer<A> {
    fn drop(&mut self) {
        // SAFETY: memory was allocated with std::alloc::alloc with the same layout.
        unsafe {
            alloc::dealloc(
                self.as_mut_ptr(),
                Layout::from_size_align_unchecked(self.capacity, self.align.align()),
            )
        }
    }
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer_mut.rs
pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer_mut.rs
use std::mem::MaybeUninit; use std::ops::{Deref, DerefMut}; use super::alignment::{Alignment, ConstAlign}; use super::buffer::AlignedBuffer; use super::raw::RawAlignedBuffer; /// A mutable aligned buffer type. #[derive(Debug)] pub struct AlignedBufferMut<A: Alignment> { raw: RawAlignedBuffer<A>, } impl<const A: usize> AlignedBufferMut<ConstAlign<A>> { /// Constructs a new, empty `IoBufferMut` with at least the specified capacity and alignment. /// /// The buffer will be able to hold at most `capacity` elements and will never resize. /// /// /// # Panics /// /// Panics if the new capacity exceeds `isize::MAX` _bytes_, or if the following alignment requirement is not met: /// * `align` must not be zero, /// /// * `align` must be a power of two, /// /// * `capacity`, when rounded up to the nearest multiple of `align`, /// must not overflow isize (i.e., the rounded value must be /// less than or equal to `isize::MAX`). pub fn with_capacity(capacity: usize) -> Self { AlignedBufferMut { raw: RawAlignedBuffer::with_capacity(capacity), } } /// Constructs a new `IoBufferMut` with at least the specified capacity and alignment, filled with zeros. pub fn with_capacity_zeroed(capacity: usize) -> Self { use bytes::BufMut; let mut buf = Self::with_capacity(capacity); buf.put_bytes(0, capacity); // SAFETY: `put_bytes` filled the entire buffer. unsafe { buf.set_len(capacity) }; buf } } impl<A: Alignment> AlignedBufferMut<A> { /// Constructs a mutable aligned buffer from raw. pub(super) fn from_raw(raw: RawAlignedBuffer<A>) -> Self { AlignedBufferMut { raw } } /// Returns the total number of bytes the buffer can hold. #[inline] pub fn capacity(&self) -> usize { self.raw.capacity() } /// Returns the alignment of the buffer. #[inline] pub fn align(&self) -> usize { self.raw.align() } /// Returns the number of bytes in the buffer, also referred to as its 'length'. #[inline] pub fn len(&self) -> usize { self.raw.len() } /// Force the length of the buffer to `new_len`. 
#[inline] unsafe fn set_len(&mut self, new_len: usize) { // SAFETY: the caller is unsafe unsafe { self.raw.set_len(new_len) } } #[inline] fn as_ptr(&self) -> *const u8 { self.raw.as_ptr() } #[inline] fn as_mut_ptr(&mut self) -> *mut u8 { self.raw.as_mut_ptr() } /// Extracts a slice containing the entire buffer. /// /// Equivalent to `&s[..]`. #[inline] fn as_slice(&self) -> &[u8] { self.raw.as_slice() } /// Extracts a mutable slice of the entire buffer. /// /// Equivalent to `&mut s[..]`. fn as_mut_slice(&mut self) -> &mut [u8] { self.raw.as_mut_slice() } /// Drops the all the contents of the buffer, setting its length to `0`. #[inline] pub fn clear(&mut self) { self.raw.clear() } /// Reserves capacity for at least `additional` more bytes to be inserted /// in the given `IoBufferMut`. The collection may reserve more space to /// speculatively avoid frequent reallocations. After calling `reserve`, /// capacity will be greater than or equal to `self.len() + additional`. /// Does nothing if capacity is already sufficient. /// /// # Panics /// /// Panics if the new capacity exceeds `isize::MAX` _bytes_. pub fn reserve(&mut self, additional: usize) { self.raw.reserve(additional); } /// Shortens the buffer, keeping the first len bytes. pub fn truncate(&mut self, len: usize) { self.raw.truncate(len); } /// Consumes and leaks the `IoBufferMut`, returning a mutable reference to the contents, &'a mut [u8]. pub fn leak<'a>(self) -> &'a mut [u8] { self.raw.leak() } pub fn freeze(self) -> AlignedBuffer<A> { let len = self.len(); AlignedBuffer::from_raw(self.raw, 0..len) } /// Clones and appends all elements in a slice to the buffer. Reserves additional capacity as needed. #[inline] pub fn extend_from_slice(&mut self, extend: &[u8]) { let cnt = extend.len(); self.reserve(cnt); // SAFETY: we already reserved additional `cnt` bytes, safe to perform memcpy. 
unsafe { let dst = self.spare_capacity_mut(); // Reserved above debug_assert!(dst.len() >= cnt); core::ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr().cast(), cnt); } // SAFETY: We do have at least `cnt` bytes remaining before advance. unsafe { bytes::BufMut::advance_mut(self, cnt); } } /// Returns the remaining spare capacity of the vector as a slice of `MaybeUninit<u8>`. #[inline] fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<u8>] { // SAFETY: we guarantees that the `Self::capacity()` bytes from // `Self::as_mut_ptr()` are allocated. unsafe { let ptr = self.as_mut_ptr().add(self.len()); let len = self.capacity() - self.len(); core::slice::from_raw_parts_mut(ptr.cast(), len) } } } impl<A: Alignment> Deref for AlignedBufferMut<A> { type Target = [u8]; fn deref(&self) -> &Self::Target { self.as_slice() } } impl<A: Alignment> DerefMut for AlignedBufferMut<A> { fn deref_mut(&mut self) -> &mut Self::Target { self.as_mut_slice() } } impl<A: Alignment> AsRef<[u8]> for AlignedBufferMut<A> { fn as_ref(&self) -> &[u8] { self.as_slice() } } impl<A: Alignment> AsMut<[u8]> for AlignedBufferMut<A> { fn as_mut(&mut self) -> &mut [u8] { self.as_mut_slice() } } impl<A: Alignment> PartialEq<[u8]> for AlignedBufferMut<A> { fn eq(&self, other: &[u8]) -> bool { self.as_slice().eq(other) } } /// SAFETY: When advancing the internal cursor, the caller needs to make sure the bytes advcanced past have been initialized. unsafe impl<A: Alignment> bytes::BufMut for AlignedBufferMut<A> { #[inline] fn remaining_mut(&self) -> usize { // Although a `Vec` can have at most isize::MAX bytes, we never want to grow `IoBufferMut`. // Thus, it can have at most `self.capacity` bytes. self.capacity() - self.len() } // SAFETY: Caller needs to make sure the bytes being advanced past have been initialized. 
#[inline] unsafe fn advance_mut(&mut self, cnt: usize) { let len = self.len(); let remaining = self.remaining_mut(); if remaining < cnt { panic_advance(cnt, remaining); } // SAFETY: Addition will not overflow since the sum is at most the capacity. unsafe { self.set_len(len + cnt); } } #[inline] fn chunk_mut(&mut self) -> &mut bytes::buf::UninitSlice { let cap = self.capacity(); let len = self.len(); // SAFETY: Since `self.ptr` is valid for `cap` bytes, `self.ptr.add(len)` must be // valid for `cap - len` bytes. The subtraction will not underflow since // `len <= cap`. unsafe { bytes::buf::UninitSlice::from_raw_parts_mut(self.as_mut_ptr().add(len), cap - len) } } } /// Panic with a nice error message. #[cold] fn panic_advance(idx: usize, len: usize) -> ! { panic!("advance out of bounds: the len is {len} but advancing by {idx}"); } /// Safety: [`AlignedBufferMut`] has exclusive ownership of the io buffer, /// and the underlying pointer remains stable while io-uring is owning the buffer. /// The tokio-epoll-uring crate itself will not resize the buffer and will respect /// [`tokio_epoll_uring::IoBuf::bytes_total`]. unsafe impl<A: Alignment> tokio_epoll_uring::IoBuf for AlignedBufferMut<A> { fn stable_ptr(&self) -> *const u8 { self.as_ptr() } fn bytes_init(&self) -> usize { self.len() } fn bytes_total(&self) -> usize { self.capacity() } } // SAFETY: See above. 
unsafe impl<A: Alignment> tokio_epoll_uring::IoBufMut for AlignedBufferMut<A> { fn stable_mut_ptr(&mut self) -> *mut u8 { self.as_mut_ptr() } unsafe fn set_init(&mut self, init_len: usize) { if self.len() < init_len { // SAFETY: caller function is unsafe unsafe { self.set_len(init_len); } } } } impl<A: Alignment> std::io::Write for AlignedBufferMut<A> { fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> { self.extend_from_slice(buf); Ok(buf.len()) } fn flush(&mut self) -> std::io::Result<()> { Ok(()) } } #[cfg(test)] mod tests { use super::*; const ALIGN: usize = 4 * 1024; type TestIoBufferMut = AlignedBufferMut<ConstAlign<ALIGN>>; #[test] fn test_with_capacity() { let v = TestIoBufferMut::with_capacity(ALIGN * 4); assert_eq!(v.len(), 0); assert_eq!(v.capacity(), ALIGN * 4); assert_eq!(v.align(), ALIGN); assert_eq!(v.as_ptr().align_offset(ALIGN), 0); let v = TestIoBufferMut::with_capacity(ALIGN / 2); assert_eq!(v.len(), 0); assert_eq!(v.capacity(), ALIGN / 2); assert_eq!(v.align(), ALIGN); assert_eq!(v.as_ptr().align_offset(ALIGN), 0); } #[test] fn test_with_capacity_zeroed() { let v = TestIoBufferMut::with_capacity_zeroed(ALIGN); assert_eq!(v.len(), ALIGN); assert_eq!(v.capacity(), ALIGN); assert_eq!(v.align(), ALIGN); assert_eq!(v.as_ptr().align_offset(ALIGN), 0); assert_eq!(&v[..], &[0; ALIGN]) } #[test] fn test_reserve() { use bytes::BufMut; let mut v = TestIoBufferMut::with_capacity(ALIGN); let capacity = v.capacity(); v.reserve(capacity); assert_eq!(v.capacity(), capacity); let data = [b'a'; ALIGN]; v.put(&data[..]); v.reserve(capacity); assert!(v.capacity() >= capacity * 2); assert_eq!(&v[..], &data[..]); let capacity = v.capacity(); v.clear(); v.reserve(capacity); assert_eq!(capacity, v.capacity()); } #[test] fn test_bytes_put() { use bytes::BufMut; let mut v = TestIoBufferMut::with_capacity(ALIGN * 4); let x = [b'a'; ALIGN]; for _ in 0..2 { for _ in 0..4 { v.put(&x[..]); } assert_eq!(v.len(), ALIGN * 4); assert_eq!(v.capacity(), ALIGN * 4); 
assert_eq!(v.align(), ALIGN); assert_eq!(v.as_ptr().align_offset(ALIGN), 0); v.clear() } assert_eq!(v.len(), 0); assert_eq!(v.capacity(), ALIGN * 4); assert_eq!(v.align(), ALIGN); assert_eq!(v.as_ptr().align_offset(ALIGN), 0); } #[test] #[should_panic] fn test_bytes_put_panic() { use bytes::BufMut; const ALIGN: usize = 4 * 1024; let mut v = TestIoBufferMut::with_capacity(ALIGN * 4); let x = [b'a'; ALIGN]; for _ in 0..5 { v.put_slice(&x[..]); } } #[test] fn test_io_buf_put_slice() { use tokio_epoll_uring::BoundedBufMut; const ALIGN: usize = 4 * 1024; let mut v = TestIoBufferMut::with_capacity(ALIGN); let x = [b'a'; ALIGN]; for _ in 0..2 { v.put_slice(&x[..]); assert_eq!(v.len(), ALIGN); assert_eq!(v.capacity(), ALIGN); assert_eq!(v.align(), ALIGN); assert_eq!(v.as_ptr().align_offset(ALIGN), 0); v.clear() } assert_eq!(v.len(), 0); assert_eq!(v.capacity(), ALIGN); assert_eq!(v.align(), ALIGN); assert_eq!(v.as_ptr().align_offset(ALIGN), 0); } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer.rs
pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer.rs
use std::ops::{Deref, Range, RangeBounds}; use std::sync::Arc; use super::alignment::Alignment; use super::raw::RawAlignedBuffer; use super::{AlignedBufferMut, ConstAlign}; /// An shared, immutable aligned buffer type. #[derive(Clone, Debug)] pub struct AlignedBuffer<A: Alignment> { /// Shared raw buffer. raw: Arc<RawAlignedBuffer<A>>, /// Range that specifies the current slice. range: Range<usize>, } impl<A: Alignment> AlignedBuffer<A> { /// Creates an immutable `IoBuffer` from the raw buffer pub(super) fn from_raw(raw: RawAlignedBuffer<A>, range: Range<usize>) -> Self { AlignedBuffer { raw: Arc::new(raw), range, } } /// Returns the number of bytes in the buffer, also referred to as its 'length'. #[inline] pub fn len(&self) -> usize { self.range.len() } /// Returns the alignment of the buffer. #[inline] pub fn align(&self) -> usize { self.raw.align() } #[inline] fn as_ptr(&self) -> *const u8 { // SAFETY: `self.range.start` is guaranteed to be within [0, self.len()). unsafe { self.raw.as_ptr().add(self.range.start) } } /// Extracts a slice containing the entire buffer. /// /// Equivalent to `&s[..]`. #[inline] fn as_slice(&self) -> &[u8] { &self.raw.as_slice()[self.range.start..self.range.end] } /// Returns a slice of self for the index range `[begin..end)`. 
pub fn slice(&self, range: impl RangeBounds<usize>) -> Self { use core::ops::Bound; let len = self.len(); let begin = match range.start_bound() { Bound::Included(&n) => n, Bound::Excluded(&n) => n.checked_add(1).expect("out of range"), Bound::Unbounded => 0, }; let end = match range.end_bound() { Bound::Included(&n) => n.checked_add(1).expect("out of range"), Bound::Excluded(&n) => n, Bound::Unbounded => len, }; assert!( begin <= end, "range start must not be greater than end: {begin:?} <= {end:?}", ); assert!(end <= len, "range end out of bounds: {end:?} <= {len:?}",); let begin = self.range.start + begin; let end = self.range.start + end; AlignedBuffer { raw: Arc::clone(&self.raw), range: begin..end, } } /// Returns the mutable aligned buffer, if the immutable aligned buffer /// has exactly one strong reference. Otherwise returns `None`. pub fn into_mut(self) -> Option<AlignedBufferMut<A>> { let raw = Arc::into_inner(self.raw)?; Some(AlignedBufferMut::from_raw(raw)) } } impl<A: Alignment> Deref for AlignedBuffer<A> { type Target = [u8]; fn deref(&self) -> &Self::Target { self.as_slice() } } impl<A: Alignment> AsRef<[u8]> for AlignedBuffer<A> { fn as_ref(&self) -> &[u8] { self.as_slice() } } impl<A: Alignment> PartialEq<[u8]> for AlignedBuffer<A> { fn eq(&self, other: &[u8]) -> bool { self.as_slice().eq(other) } } impl<const A: usize, const N: usize> From<&[u8; N]> for AlignedBuffer<ConstAlign<A>> { fn from(value: &[u8; N]) -> Self { let mut buf = AlignedBufferMut::with_capacity(N); buf.extend_from_slice(value); buf.freeze() } } /// SAFETY: the underlying buffer references a stable memory region. unsafe impl<A: Alignment> tokio_epoll_uring::IoBuf for AlignedBuffer<A> { fn stable_ptr(&self) -> *const u8 { self.as_ptr() } fn bytes_init(&self) -> usize { self.len() } fn bytes_total(&self) -> usize { self.len() } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/virtual_file/io_engine/tokio_epoll_uring_ext.rs
pageserver/src/virtual_file/io_engine/tokio_epoll_uring_ext.rs
//! Like [`::tokio_epoll_uring::thread_local_system()`], but with pageserver-specific //! handling in case the instance can't launched. //! //! This is primarily necessary due to ENOMEM aka OutOfMemory errors during io_uring creation //! on older kernels, such as some (but not all) older kernels in the Linux 5.10 series. //! See <https://github.com/neondatabase/neon/issues/6373#issuecomment-1905814391> for more details. use std::sync::Arc; use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; use tokio_epoll_uring::{System, SystemHandle}; use tokio_util::sync::CancellationToken; use tracing::{Instrument, error, info, info_span, warn}; use utils::backoff::{DEFAULT_BASE_BACKOFF_SECONDS, DEFAULT_MAX_BACKOFF_SECONDS}; use crate::metrics::tokio_epoll_uring::{self as metrics, THREAD_LOCAL_METRICS_STORAGE}; use crate::virtual_file::on_fatal_io_error; #[derive(Clone)] struct ThreadLocalState(Arc<ThreadLocalStateInner>); struct ThreadLocalStateInner { cell: tokio::sync::OnceCell<SystemHandle<metrics::ThreadLocalMetrics>>, launch_attempts: AtomicU32, /// populated through fetch_add from [`THREAD_LOCAL_STATE_ID`] thread_local_state_id: u64, } impl Drop for ThreadLocalStateInner { fn drop(&mut self) { THREAD_LOCAL_METRICS_STORAGE.remove_system(self.thread_local_state_id); } } impl ThreadLocalState { pub fn new() -> Self { Self(Arc::new(ThreadLocalStateInner { cell: tokio::sync::OnceCell::default(), launch_attempts: AtomicU32::new(0), thread_local_state_id: THREAD_LOCAL_STATE_ID.fetch_add(1, Ordering::Relaxed), })) } pub fn make_id_string(&self) -> String { format!("{}", self.0.thread_local_state_id) } } static THREAD_LOCAL_STATE_ID: AtomicU64 = AtomicU64::new(0); thread_local! { static THREAD_LOCAL: ThreadLocalState = ThreadLocalState::new(); } /// Panics if we cannot [`System::launch`]. 
pub async fn thread_local_system() -> Handle { let fake_cancel = CancellationToken::new(); loop { let thread_local_state = THREAD_LOCAL.with(|arc| arc.clone()); let inner = &thread_local_state.0; let get_or_init_res = inner .cell .get_or_try_init(|| async { let attempt_no = inner .launch_attempts .fetch_add(1, std::sync::atomic::Ordering::Relaxed); let span = info_span!("tokio_epoll_uring_ext::thread_local_system", thread_local=%thread_local_state.make_id_string(), %attempt_no); async { // Rate-limit retries per thread-local. // NB: doesn't yield to executor at attempt_no=0. utils::backoff::exponential_backoff( attempt_no, DEFAULT_BASE_BACKOFF_SECONDS, DEFAULT_MAX_BACKOFF_SECONDS, &fake_cancel, ) .await; let per_system_metrics = metrics::THREAD_LOCAL_METRICS_STORAGE.register_system(inner.thread_local_state_id); let res = System::launch_with_metrics(per_system_metrics) // this might move us to another executor thread => loop outside the get_or_try_init, not inside it .await; match res { Ok(system) => { info!("successfully launched system"); metrics::THREAD_LOCAL_LAUNCH_SUCCESSES.inc(); Ok(system) } Err(tokio_epoll_uring::LaunchResult::IoUringBuild(e)) if e.kind() == std::io::ErrorKind::OutOfMemory => { warn!("not enough locked memory to tokio-epoll-uring, will retry"); info_span!("stats").in_scope(|| { emit_launch_failure_process_stats(); }); metrics::THREAD_LOCAL_LAUNCH_FAILURES.inc(); metrics::THREAD_LOCAL_METRICS_STORAGE.remove_system(inner.thread_local_state_id); Err(()) } // abort the process instead of panicking because pageserver usually becomes half-broken if we panic somewhere. // This is equivalent to a fatal IO error. 
Err(ref e @ tokio_epoll_uring::LaunchResult::IoUringBuild(ref inner)) => { error!(error=%e, "failed to launch thread-local tokio-epoll-uring, this should not happen, aborting process"); info_span!("stats").in_scope(|| { emit_launch_failure_process_stats(); }); on_fatal_io_error(inner, "launch thread-local tokio-epoll-uring"); }, } } .instrument(span) .await }) .await; if get_or_init_res.is_ok() { return Handle(thread_local_state); } } } fn emit_launch_failure_process_stats() { // tokio-epoll-uring stats // vmlck + rlimit // number of threads // rss / system memory usage generally let tokio_epoll_uring::metrics::GlobalMetrics { systems_created, systems_destroyed, } = tokio_epoll_uring::metrics::global(); info!(systems_created, systems_destroyed, "tokio-epoll-uring"); match procfs::process::Process::myself() { Ok(myself) => { match myself.limits() { Ok(limits) => { info!(?limits.max_locked_memory, "/proc/self/limits"); } Err(error) => { info!(%error, "no limit stats due to error"); } } match myself.status() { Ok(status) => { let procfs::process::Status { vmsize, vmlck, vmpin, vmrss, rssanon, rssfile, rssshmem, vmdata, vmstk, vmexe, vmlib, vmpte, threads, .. } = status; info!( vmsize, vmlck, vmpin, vmrss, rssanon, rssfile, rssshmem, vmdata, vmstk, vmexe, vmlib, vmpte, threads, "/proc/self/status" ); } Err(error) => { info!(%error, "no status status due to error"); } } } Err(error) => { info!(%error, "no process stats due to error"); } }; } #[derive(Clone)] pub struct Handle(ThreadLocalState); impl std::ops::Deref for Handle { type Target = SystemHandle<metrics::ThreadLocalMetrics>; fn deref(&self) -> &Self::Target { self.0 .0 .cell .get() .expect("must be already initialized when using this") } }
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/bin/pageserver.rs
pageserver/src/bin/pageserver.rs
#![recursion_limit = "300"] //! Main entry point for the Page Server executable. use std::env; use std::env::{VarError, var}; use std::io::Read; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use anyhow::{Context, anyhow}; use camino::Utf8Path; use clap::{Arg, ArgAction, Command}; use http_utils::tls_certs::ReloadingCertificateResolver; use metrics::launch_timestamp::{LaunchTimestamp, set_launch_timestamp_metric}; use metrics::set_build_info_metric; use nix::sys::socket::{setsockopt, sockopt}; use pageserver::basebackup_cache::BasebackupCache; use pageserver::config::{PageServerConf, PageserverIdentity, ignored_fields}; use pageserver::controller_upcall_client::StorageControllerUpcallClient; use pageserver::deletion_queue::DeletionQueue; use pageserver::disk_usage_eviction_task::{self, launch_disk_usage_global_eviction_task}; use pageserver::feature_resolver::FeatureResolver; use pageserver::metrics::{STARTUP_DURATION, STARTUP_IS_LOADING}; use pageserver::page_service::GrpcPageServiceHandler; use pageserver::task_mgr::{ BACKGROUND_RUNTIME, COMPUTE_REQUEST_RUNTIME, MGMT_REQUEST_RUNTIME, WALRECEIVER_RUNTIME, }; use pageserver::tenant::{TenantSharedResources, mgr, secondary}; use pageserver::{ CancellableTask, ConsumptionMetricsTasks, HttpEndpointListener, HttpsEndpointListener, MetricsCollectionTask, http, page_cache, page_service, task_mgr, virtual_file, }; use postgres_backend::AuthType; use remote_storage::GenericRemoteStorage; use tokio::time::Instant; use tokio_util::sync::CancellationToken; use tracing::*; use tracing_utils::OtelGuard; use utils::auth::{JwtAuth, SwappableJwtAuth}; use utils::crashsafe::syncfs; use utils::logging::TracingErrorLayerEnablement; use utils::metrics_collector::{METRICS_COLLECTION_INTERVAL, METRICS_COLLECTOR}; use utils::sentry_init::init_sentry; use utils::{failpoint_support, logging, project_build_tag, project_git_version, tcp_listener}; project_git_version!(GIT_VERSION); project_build_tag!(BUILD_TAG); 
#[global_allocator] static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; /// Configure jemalloc to profile heap allocations by sampling stack traces every 2 MB (1 << 21). /// This adds roughly 3% overhead for allocations on average, which is acceptable considering /// performance-sensitive code will avoid allocations as far as possible anyway. #[allow(non_upper_case_globals)] #[unsafe(export_name = "malloc_conf")] pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:21\0"; const PID_FILE_NAME: &str = "pageserver.pid"; const FEATURES: &[&str] = &[ #[cfg(feature = "testing")] "testing", ]; fn version() -> String { format!( "{GIT_VERSION} failpoints: {}, features: {:?}", fail::has_failpoints(), FEATURES, ) } fn main() -> anyhow::Result<()> { let launch_ts = Box::leak(Box::new(LaunchTimestamp::generate())); let arg_matches = cli().get_matches(); if arg_matches.get_flag("enabled-features") { println!("{{\"features\": {FEATURES:?} }}"); return Ok(()); } // Initialize up failpoints support let scenario = failpoint_support::init(); let workdir = arg_matches .get_one::<String>("workdir") .map(Utf8Path::new) .unwrap_or_else(|| Utf8Path::new(".neon")); let workdir = workdir .canonicalize_utf8() .with_context(|| format!("Error opening workdir '{workdir}'"))?; let cfg_file_path = workdir.join("pageserver.toml"); let identity_file_path = workdir.join("identity.toml"); // Set CWD to workdir for non-daemon modes env::set_current_dir(&workdir) .with_context(|| format!("Failed to set application's current dir to '{workdir}'"))?; let (conf, ignored) = initialize_config(&identity_file_path, &cfg_file_path, &workdir)?; // Initialize logging. // // It must be initialized before the custom panic hook is installed below. // // Regarding tracing_error enablement: at this time, we only use the // tracing_error crate to debug_assert that log spans contain tenant and timeline ids. 
// See `debug_assert_current_span_has_tenant_and_timeline_id` in the timeline module let tracing_error_layer_enablement = if cfg!(debug_assertions) { TracingErrorLayerEnablement::EnableWithRustLogFilter } else { TracingErrorLayerEnablement::Disabled }; logging::init( conf.log_format, tracing_error_layer_enablement, logging::Output::Stdout, )?; let otel_enablement = match &conf.tracing { Some(cfg) => tracing_utils::OtelEnablement::Enabled { service_name: "pageserver".to_string(), export_config: (&cfg.export_config).into(), }, None => tracing_utils::OtelEnablement::Disabled, }; let otel_guard = tracing_utils::init_performance_tracing(otel_enablement); if otel_guard.is_some() { info!(?conf.tracing, "starting with OTEL tracing enabled"); } // mind the order required here: 1. logging, 2. panic_hook, 3. sentry. // disarming this hook on pageserver, because we never tear down tracing. logging::replace_panic_hook_with_tracing_panic_hook().forget(); // initialize sentry if SENTRY_DSN is provided let _sentry_guard = init_sentry( Some(GIT_VERSION.into()), &[("node_id", &conf.id.to_string())], ); // Warn about ignored config items; see pageserver_api::config::ConfigToml // doc comment for rationale why we prefer this over serde(deny_unknown_fields). { let ignored_fields::Paths { paths } = &ignored; for path in paths { warn!(?path, "ignoring unknown configuration item"); } } // Log configuration items for feature-flag-like config // (maybe we should automate this with a visitor?). info!(?conf.virtual_file_io_engine, "starting with virtual_file IO engine"); info!(?conf.virtual_file_io_mode, "starting with virtual_file IO mode"); info!(?conf.validate_wal_contiguity, "starting with WAL contiguity validation"); info!(?conf.page_service_pipelining, "starting with page service pipelining config"); info!(?conf.get_vectored_concurrent_io, "starting with get_vectored IO concurrency config"); // The tenants directory contains all the pageserver local disk state. 
// Create if not exists and make sure all the contents are durable before proceeding. // Ensuring durability eliminates a whole bug class where we come up after an unclean shutdown. // After unclea shutdown, we don't know if all the filesystem content we can read via syscalls is actually durable or not. // Examples for that: OOM kill, systemd killing us during shutdown, self abort due to unrecoverable IO error. let tenants_path = conf.tenants_path(); { let open = || { nix::dir::Dir::open( tenants_path.as_std_path(), nix::fcntl::OFlag::O_DIRECTORY | nix::fcntl::OFlag::O_RDONLY, nix::sys::stat::Mode::empty(), ) }; let dirfd = match open() { Ok(dirfd) => dirfd, Err(e) => match e { nix::errno::Errno::ENOENT => { utils::crashsafe::create_dir_all(&tenants_path).with_context(|| { format!("Failed to create tenants root dir at '{tenants_path}'") })?; open().context("open tenants dir after creating it")? } e => anyhow::bail!(e), }, }; if conf.no_sync { info!("Skipping syncfs on startup"); } else { let started = Instant::now(); syncfs(dirfd)?; let elapsed = started.elapsed(); info!( elapsed_ms = elapsed.as_millis(), "made tenant directory contents durable" ); } } // Basic initialization of things that don't change after startup tracing::info!("Initializing virtual_file..."); virtual_file::init( conf.max_file_descriptors, conf.virtual_file_io_engine, conf.virtual_file_io_mode, if conf.no_sync { virtual_file::SyncMode::UnsafeNoSync } else { virtual_file::SyncMode::Sync }, ); tracing::info!("Initializing page_cache..."); page_cache::init(conf.page_cache_size); start_pageserver(launch_ts, conf, ignored, otel_guard).context("Failed to start pageserver")?; scenario.teardown(); Ok(()) } fn initialize_config( identity_file_path: &Utf8Path, cfg_file_path: &Utf8Path, workdir: &Utf8Path, ) -> anyhow::Result<(&'static PageServerConf, ignored_fields::Paths)> { // The deployment orchestrator writes out an indentity file containing the node id // for all pageservers. 
This file is the source of truth for the node id. In order // to allow for rolling back pageserver releases, the node id is also included in // the pageserver config that the deployment orchestrator writes to disk for the pageserver. // A rolled back version of the pageserver will get the node id from the pageserver.toml // config file. let identity = match std::fs::File::open(identity_file_path) { Ok(mut f) => { let md = f.metadata().context("stat config file")?; if !md.is_file() { anyhow::bail!( "Pageserver found identity file but it is a dir entry: {identity_file_path}. Aborting start up ..." ); } let mut s = String::new(); f.read_to_string(&mut s).context("read identity file")?; toml_edit::de::from_str::<PageserverIdentity>(&s)? } Err(e) => { anyhow::bail!( "Pageserver could not read identity file: {identity_file_path}: {e}. Aborting start up ..." ); } }; let config_file_contents = std::fs::read_to_string(cfg_file_path).context("read config file from filesystem")?; // Deserialize the config file contents into a ConfigToml. let config_toml: pageserver_api::config::ConfigToml = { let deserializer = toml_edit::de::Deserializer::from_str(&config_file_contents) .context("build toml deserializer")?; let mut path_to_error_track = serde_path_to_error::Track::new(); let deserializer = serde_path_to_error::Deserializer::new(deserializer, &mut path_to_error_track); serde::Deserialize::deserialize(deserializer).context("deserialize config toml")? }; // Find unknown fields by re-serializing the parsed ConfigToml and comparing it to the on-disk file. // Any fields that are only in the on-disk version are unknown. // (The assumption here is that the ConfigToml doesn't to skip_serializing_if.) // (Make sure to read the ConfigToml doc comment on why we only want to warn about, but not fail startup, on unknown fields). 
let ignored = { let ondisk_toml = config_file_contents .parse::<toml_edit::DocumentMut>() .context("parse original config as toml document")?; let parsed_toml = toml_edit::ser::to_document(&config_toml) .context("re-serialize config to toml document")?; pageserver::config::ignored_fields::find(ondisk_toml, parsed_toml) }; // Construct the runtime god object (it's called PageServerConf but actually is just global shared state). let conf = PageServerConf::parse_and_validate(identity.id, config_toml, workdir) .context("runtime-validation of config toml")?; let conf = Box::leak(Box::new(conf)); Ok((conf, ignored)) } struct WaitForPhaseResult<F: std::future::Future + Unpin> { timeout_remaining: Duration, skipped: Option<F>, } /// During startup, we apply a timeout to our waits for readiness, to avoid /// stalling the whole service if one Tenant experiences some problem. Each /// phase may consume some of the timeout: this function returns the updated /// timeout for use in the next call. async fn wait_for_phase<F>(phase: &str, mut fut: F, timeout: Duration) -> WaitForPhaseResult<F> where F: std::future::Future + Unpin, { let initial_t = Instant::now(); let skipped = match tokio::time::timeout(timeout, &mut fut).await { Ok(_) => None, Err(_) => { tracing::info!( timeout_millis = timeout.as_millis(), %phase, "Startup phase timed out, proceeding anyway" ); Some(fut) } }; WaitForPhaseResult { timeout_remaining: timeout .checked_sub(Instant::now().duration_since(initial_t)) .unwrap_or(Duration::ZERO), skipped, } } fn startup_checkpoint(started_at: Instant, phase: &str, human_phase: &str) { let elapsed = started_at.elapsed(); let secs = elapsed.as_secs_f64(); STARTUP_DURATION.with_label_values(&[phase]).set(secs); info!( elapsed_ms = elapsed.as_millis(), "{human_phase} ({secs:.3}s since start)" ) } fn start_pageserver( launch_ts: &'static LaunchTimestamp, conf: &'static PageServerConf, ignored: ignored_fields::Paths, otel_guard: Option<OtelGuard>, ) -> anyhow::Result<()> { // 
Monotonic time for later calculating startup duration let started_startup_at = Instant::now(); // Print version and launch timestamp to the log, // and expose them as prometheus metrics. // A changed version string indicates changed software. // A changed launch timestamp indicates a pageserver restart. info!( "version: {} launch_timestamp: {} build_tag: {}", version(), launch_ts.to_string(), BUILD_TAG, ); info!( "IO buffer alignment: {} bytes", pageserver_api::config::defaults::DEFAULT_IO_BUFFER_ALIGNMENT ); set_build_info_metric(GIT_VERSION, BUILD_TAG); set_launch_timestamp_metric(launch_ts); #[cfg(target_os = "linux")] metrics::register_internal(Box::new(metrics::more_process_metrics::Collector::new())).unwrap(); metrics::register_internal(Box::new( pageserver::metrics::tokio_epoll_uring::Collector::new(), )) .unwrap(); pageserver::preinitialize_metrics(conf, ignored); // If any failpoints were set from FAILPOINTS environment variable, // print them to the log for debugging purposes let failpoints = fail::list(); if !failpoints.is_empty() { info!( "started with failpoints: {}", failpoints .iter() .map(|(name, actions)| format!("{name}={actions}")) .collect::<Vec<String>>() .join(";") ) } // Create and lock PID file. This ensures that there cannot be more than one // pageserver process running at the same time. let lock_file_path = conf.workdir.join(PID_FILE_NAME); info!("Claiming pid file at {lock_file_path:?}..."); let lock_file = utils::pid_file::claim_for_current_process(&lock_file_path).context("claim pid file")?; info!("Claimed pid file at {lock_file_path:?}"); // Ensure that the lock file is held even if the main thread of the process panics. // We need to release the lock file only when the process exits. std::mem::forget(lock_file); // Bind the HTTP, libpq, and gRPC ports early, to error out if they are // already in use. 
info!( "Starting pageserver http handler on {} with auth {:#?}", conf.listen_http_addr, conf.http_auth_type ); let http_listener = tcp_listener::bind(&conf.listen_http_addr)?; let https_listener = match conf.listen_https_addr.as_ref() { Some(https_addr) => { info!( "Starting pageserver https handler on {https_addr} with auth {:#?}", conf.http_auth_type ); Some(tcp_listener::bind(https_addr)?) } None => None, }; info!( "Starting pageserver pg protocol handler on {} with auth {:#?}", conf.listen_pg_addr, conf.pg_auth_type, ); let pageserver_listener = tcp_listener::bind(&conf.listen_pg_addr)?; // Enable SO_KEEPALIVE on the socket, to detect dead connections faster. // These are configured via net.ipv4.tcp_keepalive_* sysctls. // // TODO: also set this on the walreceiver socket, but tokio-postgres doesn't // support enabling keepalives while using the default OS sysctls. setsockopt(&pageserver_listener, sockopt::KeepAlive, &true)?; let mut grpc_listener = None; if let Some(grpc_addr) = &conf.listen_grpc_addr { info!( "Starting pageserver gRPC handler on {grpc_addr} with auth {:#?}", conf.grpc_auth_type ); grpc_listener = Some(tcp_listener::bind(grpc_addr).map_err(|e| anyhow!("{e}"))?); } // Launch broker client // The storage_broker::connect call needs to happen inside a tokio runtime thread. let broker_client = WALRECEIVER_RUNTIME .block_on(async { let tls_config = storage_broker::ClientTlsConfig::new().ca_certificates( conf.ssl_ca_certs .iter() .map(pem::encode) .map(storage_broker::Certificate::from_pem), ); // Note: we do not attempt connecting here (but validate endpoints sanity). 
storage_broker::connect( conf.broker_endpoint.clone(), conf.broker_keepalive_interval, tls_config, ) }) .with_context(|| { format!( "create broker client for uri={:?} keepalive_interval={:?}", &conf.broker_endpoint, conf.broker_keepalive_interval, ) })?; // Initialize authentication for incoming connections let http_auth; let pg_auth; let grpc_auth; if [conf.http_auth_type, conf.pg_auth_type, conf.grpc_auth_type].contains(&AuthType::NeonJWT) { // unwrap is ok because check is performed when creating config, so path is set and exists let key_path = conf.auth_validation_public_key_path.as_ref().unwrap(); info!("Loading public key(s) for verifying JWT tokens from {key_path:?}"); let jwt_auth = JwtAuth::from_key_path(key_path)?; let auth: Arc<SwappableJwtAuth> = Arc::new(SwappableJwtAuth::new(jwt_auth)); http_auth = match conf.http_auth_type { AuthType::Trust => None, AuthType::NeonJWT => Some(auth.clone()), }; pg_auth = match conf.pg_auth_type { AuthType::Trust => None, AuthType::NeonJWT => Some(auth.clone()), }; grpc_auth = match conf.grpc_auth_type { AuthType::Trust => None, AuthType::NeonJWT => Some(auth), }; } else { http_auth = None; pg_auth = None; grpc_auth = None; } let tls_server_config = if conf.listen_https_addr.is_some() || conf.enable_tls_page_service_api { let resolver = BACKGROUND_RUNTIME.block_on(ReloadingCertificateResolver::new( "main", &conf.ssl_key_file, &conf.ssl_cert_file, conf.ssl_cert_reload_period, ))?; let server_config = rustls::ServerConfig::builder() .with_no_client_auth() .with_cert_resolver(resolver); Some(Arc::new(server_config)) } else { None }; match var("NEON_AUTH_TOKEN") { Ok(v) => { info!("Loaded JWT token for authentication with Safekeeper"); pageserver::config::SAFEKEEPER_AUTH_TOKEN .set(Arc::new(v)) .map_err(|_| anyhow!("Could not initialize SAFEKEEPER_AUTH_TOKEN"))?; } Err(VarError::NotPresent) => { info!("No JWT token for authentication with Safekeeper detected"); } Err(e) => return Err(e).with_context( || "Failed to either 
load to detect non-present NEON_AUTH_TOKEN environment variable", ), }; // Top-level cancellation token for the process let shutdown_pageserver = tokio_util::sync::CancellationToken::new(); // Set up remote storage client let remote_storage = BACKGROUND_RUNTIME.block_on(create_remote_storage_client(conf))?; let feature_resolver = create_feature_resolver( conf, shutdown_pageserver.clone(), BACKGROUND_RUNTIME.handle(), )?; // Set up deletion queue let (deletion_queue, deletion_workers) = DeletionQueue::new( remote_storage.clone(), StorageControllerUpcallClient::new(conf, &shutdown_pageserver), conf, ); deletion_workers.spawn_with(BACKGROUND_RUNTIME.handle()); // Up to this point no significant I/O has been done: this should have been fast. Record // duration prior to starting I/O intensive phase of startup. startup_checkpoint(started_startup_at, "initial", "Starting loading tenants"); STARTUP_IS_LOADING.set(1); // Startup staging or optimizing: // // We want to minimize downtime for `page_service` connections, and trying not to overload // BACKGROUND_RUNTIME by doing initial compactions and initial logical sizes at the same time. // // init_done_rx will notify when all initial load operations have completed. // // background_jobs_can_start (same name used to hold off background jobs from starting at // consumer side) will be dropped once we can start the background jobs. Currently it is behind // completing all initial logical size calculations (init_logical_size_done_rx) and a timeout // (background_task_maximum_delay). 
let (init_remote_done_tx, init_remote_done_rx) = utils::completion::channel(); let (init_done_tx, init_done_rx) = utils::completion::channel(); let (background_jobs_can_start, background_jobs_barrier) = utils::completion::channel(); let order = pageserver::InitializationOrder { initial_tenant_load_remote: Some(init_done_tx), initial_tenant_load: Some(init_remote_done_tx), background_jobs_can_start: background_jobs_barrier.clone(), }; info!(config=?conf.l0_flush, "using l0_flush config"); let l0_flush_global_state = pageserver::l0_flush::L0FlushGlobalState::new(conf.l0_flush.clone()); // Scan the local 'tenants/' directory and start loading the tenants let (basebackup_cache, basebackup_prepare_receiver) = BasebackupCache::new( conf.basebackup_cache_dir(), conf.basebackup_cache_config.clone(), ); let deletion_queue_client = deletion_queue.new_client(); let background_purges = mgr::BackgroundPurges::default(); let tenant_manager = mgr::init( conf, background_purges.clone(), TenantSharedResources { broker_client: broker_client.clone(), remote_storage: remote_storage.clone(), deletion_queue_client, l0_flush_global_state, basebackup_cache: Arc::clone(&basebackup_cache), feature_resolver: feature_resolver.clone(), }, shutdown_pageserver.clone(), ); let tenant_manager = Arc::new(tenant_manager); BACKGROUND_RUNTIME.block_on(mgr::init_tenant_mgr(tenant_manager.clone(), order))?; basebackup_cache.spawn_background_task( BACKGROUND_RUNTIME.handle(), basebackup_prepare_receiver, Arc::clone(&tenant_manager), shutdown_pageserver.child_token(), ); BACKGROUND_RUNTIME.spawn({ let shutdown_pageserver = shutdown_pageserver.clone(); let drive_init = async move { // NOTE: unlike many futures in pageserver, this one is cancellation-safe let guard = scopeguard::guard_on_success((), |_| { tracing::info!("Cancelled before initial load completed") }); let timeout = conf.background_task_maximum_delay; let init_remote_done = std::pin::pin!(async { init_remote_done_rx.wait().await; 
startup_checkpoint( started_startup_at, "initial_tenant_load_remote", "Remote part of initial load completed", ); }); let WaitForPhaseResult { timeout_remaining: timeout, skipped: init_remote_skipped, } = wait_for_phase("initial_tenant_load_remote", init_remote_done, timeout).await; let init_load_done = std::pin::pin!(async { init_done_rx.wait().await; startup_checkpoint( started_startup_at, "initial_tenant_load", "Initial load completed", ); STARTUP_IS_LOADING.set(0); }); let WaitForPhaseResult { timeout_remaining: _timeout, skipped: init_load_skipped, } = wait_for_phase("initial_tenant_load", init_load_done, timeout).await; // initial logical sizes can now start, as they were waiting on init_done_rx. scopeguard::ScopeGuard::into_inner(guard); // allow background jobs to start: we either completed prior stages, or they reached timeout // and were skipped. It is important that we do not let them block background jobs indefinitely, // because things like consumption metrics for billing are blocked by this barrier. drop(background_jobs_can_start); startup_checkpoint( started_startup_at, "background_jobs_can_start", "Starting background jobs", ); // We are done. If we skipped any phases due to timeout, run them to completion here so that // they will eventually update their startup_checkpoint, and so that we do not declare the // 'complete' stage until all the other stages are really done. let guard = scopeguard::guard_on_success((), |_| { tracing::info!("Cancelled before waiting for skipped phases done") }); if let Some(f) = init_remote_skipped { f.await; } if let Some(f) = init_load_skipped { f.await; } scopeguard::ScopeGuard::into_inner(guard); startup_checkpoint(started_startup_at, "complete", "Startup complete"); }; async move { let mut drive_init = std::pin::pin!(drive_init); // just race these tasks tokio::select! 
{ _ = shutdown_pageserver.cancelled() => {}, _ = &mut drive_init => {}, } } }); let (secondary_controller, secondary_controller_tasks) = secondary::spawn_tasks( tenant_manager.clone(), remote_storage.clone(), background_jobs_barrier.clone(), shutdown_pageserver.clone(), ); // shared state between the disk-usage backed eviction background task and the http endpoint // that allows triggering disk-usage based eviction manually. note that the http endpoint // is still accessible even if background task is not configured as long as remote storage has // been configured. let disk_usage_eviction_state: Arc<disk_usage_eviction_task::State> = Arc::default(); let disk_usage_eviction_task = launch_disk_usage_global_eviction_task( conf, remote_storage.clone(), disk_usage_eviction_state.clone(), tenant_manager.clone(), background_jobs_barrier.clone(), ); // Start up the service to handle HTTP mgmt API request. We created the // listener earlier already. let (http_endpoint_listener, https_endpoint_listener) = { let _rt_guard = MGMT_REQUEST_RUNTIME.enter(); // for hyper let router_state = Arc::new( http::routes::State::new( conf, tenant_manager.clone(), http_auth.clone(), remote_storage.clone(), broker_client.clone(), disk_usage_eviction_state, deletion_queue.new_client(), secondary_controller, feature_resolver.clone(), ) .context("Failed to initialize router state")?, ); let router = http::make_router(router_state, launch_ts, http_auth.clone())? 
.build() .map_err(|err| anyhow!(err))?; let service = Arc::new(http_utils::RequestServiceBuilder::new(router).map_err(|err| anyhow!(err))?); let http_task = { let server = http_utils::server::Server::new(Arc::clone(&service), http_listener, None)?; let cancel = CancellationToken::new(); let task = MGMT_REQUEST_RUNTIME.spawn(task_mgr::exit_on_panic_or_error( "http endpoint listener", server.serve(cancel.clone()), )); HttpEndpointListener(CancellableTask { task, cancel }) }; let https_task = match https_listener { Some(https_listener) => { let tls_server_config = tls_server_config .clone() .expect("tls_server_config is set earlier if https is enabled"); let tls_acceptor = tokio_rustls::TlsAcceptor::from(tls_server_config); let server = http_utils::server::Server::new(service, https_listener, Some(tls_acceptor))?; let cancel = CancellationToken::new(); let task = MGMT_REQUEST_RUNTIME.spawn(task_mgr::exit_on_panic_or_error( "https endpoint listener", server.serve(cancel.clone()), )); Some(HttpsEndpointListener(CancellableTask { task, cancel })) } None => None, }; (http_task, https_task) }; /* BEGIN_HADRON */ let metrics_collection_task = { let cancel = shutdown_pageserver.child_token(); let task = crate::BACKGROUND_RUNTIME.spawn({ let cancel = cancel.clone(); let background_jobs_barrier = background_jobs_barrier.clone(); async move { if conf.force_metric_collection_on_scrape { return; } // first wait until background jobs are cleared to launch. tokio::select! { _ = cancel.cancelled() => { return; }, _ = background_jobs_barrier.wait() => {} }; let mut interval = tokio::time::interval(METRICS_COLLECTION_INTERVAL); loop { tokio::select! 
{ _ = cancel.cancelled() => { tracing::info!("cancelled metrics collection task, exiting..."); break; }, _ = interval.tick() => {} } tokio::task::spawn_blocking(|| { METRICS_COLLECTOR.run_once(true); }); } } }); MetricsCollectionTask(CancellableTask { task, cancel }) }; /* END_HADRON */ let consumption_metrics_tasks = { let cancel = shutdown_pageserver.child_token(); let task = crate::BACKGROUND_RUNTIME.spawn({ let tenant_manager = tenant_manager.clone(); let cancel = cancel.clone(); async move { // first wait until background jobs are cleared to launch. // // this is because we only process active tenants and timelines, and the // Timeline::get_current_logical_size will spawn the logical size calculation, // which will not be rate-limited. tokio::select! { _ = cancel.cancelled() => { return; }, _ = background_jobs_barrier.wait() => {} }; pageserver::consumption_metrics::run(conf, tenant_manager, cancel).await; } }); ConsumptionMetricsTasks(CancellableTask { task, cancel }) }; // Spawn a task to listen for libpq connections. It will spawn further tasks // for each connection. We created the listener earlier already.
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
true
neondatabase/neon
https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/pageserver/src/bin/test_helper_slow_client_reads.rs
pageserver/src/bin/test_helper_slow_client_reads.rs
use std::io::{Read, Write, stdin, stdout};
use std::time::Duration;

use clap::Parser;
use pageserver_api::pagestream_api::{
    PagestreamFeMessage, PagestreamRequest, PagestreamTestRequest,
};
use utils::id::{TenantId, TimelineId};
use utils::lsn::Lsn;

/// CLI arguments: pageserver connection string plus the tenant/timeline whose
/// pagestream endpoint should be flooded.
#[derive(clap::Parser)]
struct Args {
    connstr: String,
    tenant_id: TenantId,
    timeline_id: TimelineId,
}

/// Test helper that opens a pagestream connection and keeps sending test
/// requests without reading any responses, until a send stalls (the pipe is
/// full). It then writes "R" to stdout to signal the controlling process and
/// blocks until one byte arrives on stdin, at which point it exits.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let Args {
        connstr,
        tenant_id,
        timeline_id,
    } = Args::parse();

    let client = pageserver_client::page_service::Client::new(connstr).await?;
    let client = client.pagestream(tenant_id, timeline_id).await?;
    let (mut sender, _receiver) = client.split();

    eprintln!("filling the pipe");
    // Keep pushing test messages; once a send fails to complete within the
    // timeout we assume the pipe (socket buffers + server-side queue) is full.
    for seq in 1.. {
        let send_fut = sender.send(PagestreamFeMessage::Test(PagestreamTestRequest {
            hdr: PagestreamRequest {
                reqid: 0,
                request_lsn: Lsn(23),
                not_modified_since: Lsn(23),
            },
            batch_key: 42,
            message: format!("message {seq}"),
        }));
        match tokio::time::timeout(Duration::from_secs(10), send_fut).await {
            Ok(sent) => sent?,
            Err(_elapsed) => {
                eprintln!("pipe seems full");
                break;
            }
        }
    }

    // Signal readiness to the controlling process via stdout.
    let written = stdout().write(b"R")?;
    assert_eq!(written, 1);
    stdout().flush()?;

    eprintln!("waiting for signal to tell us to exit");
    let mut signal = [0u8; 1];
    stdin().read_exact(&mut signal)?;
    eprintln!("termination signal received, exiting");

    anyhow::Ok(())
}
rust
Apache-2.0
015b1c7cb3259a6fcd5039bc2bd46a462e163ae8
2026-01-04T15:40:24.223849Z
false