repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/src/pull_timeline.rs | safekeeper/src/pull_timeline.rs | use std::cmp::min;
use std::io::{self, ErrorKind};
use std::ops::RangeInclusive;
use std::sync::Arc;
use anyhow::{Context, Result, anyhow, bail};
use bytes::Bytes;
use camino::Utf8PathBuf;
use chrono::{DateTime, Utc};
use futures::{SinkExt, StreamExt, TryStreamExt};
use http::StatusCode;
use http_utils::error::ApiError;
use postgres_ffi::{PG_TLI, XLogFileName, XLogSegNo};
use remote_storage::GenericRemoteStorage;
use reqwest::Certificate;
use safekeeper_api::models::{PullTimelineRequest, PullTimelineResponse, TimelineStatus};
use safekeeper_api::{Term, membership};
use safekeeper_client::mgmt_api;
use safekeeper_client::mgmt_api::Client;
use serde::Deserialize;
use tokio::fs::OpenOptions;
use tokio::io::AsyncWrite;
use tokio::sync::mpsc;
use tokio::task;
use tokio::time::sleep;
use tokio_tar::{Archive, Builder, Header};
use tokio_util::io::{CopyToBytes, SinkWriter};
use tokio_util::sync::PollSender;
use tracing::{error, info, instrument, warn};
use utils::crashsafe::fsync_async_opt;
use utils::id::{NodeId, TenantTimelineId};
use utils::logging::SecretString;
use utils::lsn::Lsn;
use utils::pausable_failpoint;
use crate::control_file::CONTROL_FILE_NAME;
use crate::state::{EvictionState, TimelinePersistentState};
use crate::timeline::{Timeline, TimelineError, WalResidentTimeline};
use crate::timelines_global_map::{create_temp_timeline_dir, validate_temp_timeline};
use crate::wal_storage::{open_wal_file, wal_file_paths};
use crate::{GlobalTimelines, debug_dump, wal_backup};
/// Stream tar archive of timeline to tx.
///
/// Dispatches to the resident or offloaded implementation depending on whether
/// a WAL residence guard can be obtained. All errors are reported into `tx`
/// (best effort) and logged; the function itself never returns an error.
#[instrument(name = "snapshot", skip_all, fields(ttid = %tli.ttid))]
pub async fn stream_snapshot(
    tli: Arc<Timeline>,
    source: NodeId,
    destination: NodeId,
    tx: mpsc::Sender<Result<Bytes>>,
    storage: Option<Arc<GenericRemoteStorage>>,
) {
    match tli.try_wal_residence_guard().await {
        Err(e) => {
            // Couldn't even determine residence; report and give up.
            tx.send(Err(anyhow!("Error checking residence: {:#}", e)))
                .await
                .ok();
        }
        Ok(maybe_resident_tli) => {
            if let Err(e) = match maybe_resident_tli {
                Some(resident_tli) => {
                    stream_snapshot_resident_guts(
                        resident_tli,
                        source,
                        destination,
                        tx.clone(),
                        storage,
                    )
                    .await
                }
                None => {
                    // Offloaded timelines need remote storage to produce a snapshot.
                    if let Some(storage) = storage {
                        stream_snapshot_offloaded_guts(
                            tli,
                            source,
                            destination,
                            tx.clone(),
                            &storage,
                        )
                        .await
                    } else {
                        tx.send(Err(anyhow!("remote storage not configured")))
                            .await
                            .ok();
                        return;
                    }
                }
            } {
                // Error type/contents don't matter as they can't reach the client
                // (hyper likely doesn't do anything with it), but the http stream will be
                // prematurely terminated. It would be nice to try to send the error in
                // trailers though.
                tx.send(Err(anyhow!("snapshot failed"))).await.ok();
                error!("snapshot failed: {:#}", e);
            }
        }
    }
}
/// State needed while streaming the snapshot.
pub struct SnapshotContext {
    /// The interval of segment numbers. If None, the timeline hasn't had writes yet, so only send the control file
    pub from_to_segno: Option<RangeInclusive<XLogSegNo>>,
    /// Term at snapshot start; re-checked in `finish_snapshot` to detect changes.
    pub term: Term,
    /// Last log term at snapshot start; also re-checked in `finish_snapshot`.
    pub last_log_term: Term,
    /// Flush LSN captured when the snapshot was started.
    pub flush_lsn: Lsn,
    /// WAL segment size of this timeline.
    pub wal_seg_size: usize,
    // used to remove WAL hold off in Drop.
    pub tli: WalResidentTimeline,
}
impl Drop for SnapshotContext {
    fn drop(&mut self) {
        // Drop can't await, so release the WAL-removal hold from a spawned
        // task that clears the flag under the shared-state lock.
        let timeline = self.tli.clone();
        task::spawn(async move {
            timeline.write_shared_state().await.wal_removal_on_hold = false;
        });
    }
}
/// Build a tokio_tar stream that sends encoded bytes into a Bytes channel.
fn prepare_tar_stream(
tx: mpsc::Sender<Result<Bytes>>,
) -> tokio_tar::Builder<impl AsyncWrite + Unpin + Send> {
// tokio-tar wants Write implementor, but we have mpsc tx <Result<Bytes>>;
// use SinkWriter as a Write impl. That is,
// - create Sink from the tx. It returns PollSendError if chan is closed.
let sink = PollSender::new(tx);
// - SinkWriter needs sink error to be io one, map it.
let sink_io_err = sink.sink_map_err(|_| io::Error::from(ErrorKind::BrokenPipe));
// - SinkWriter wants sink type to be just Bytes, not Result<Bytes>, so map
// it with with(). Note that with() accepts async function which we don't
// need and allows the map to fail, which we don't need either, but hence
// two Oks.
let oksink = sink_io_err.with(|b: Bytes| async { io::Result::Ok(Result::Ok(b)) });
// - SinkWriter (not surprisingly) wants sink of &[u8], not bytes, so wrap
// into CopyToBytes. This is a data copy.
let copy_to_bytes = CopyToBytes::new(oksink);
let writer = SinkWriter::new(copy_to_bytes);
let pinned_writer = Box::pin(writer);
// Note that tokio_tar append_* funcs use tokio::io::copy with 8KB buffer
// which is also likely suboptimal.
Builder::new_non_terminated(pinned_writer)
}
/// Implementation of snapshot for an offloaded timeline, only reads control file
pub(crate) async fn stream_snapshot_offloaded_guts(
    tli: Arc<Timeline>,
    source: NodeId,
    destination: NodeId,
    tx: mpsc::Sender<Result<Bytes>>,
    storage: &GenericRemoteStorage,
) -> Result<()> {
    // Wrap the channel into a tar builder, stream the modified control file,
    // then terminate the archive.
    let mut builder = prepare_tar_stream(tx);
    tli.snapshot_offloaded(&mut builder, source, destination, storage)
        .await?;
    builder.finish().await?;
    Ok(())
}
/// Implementation of snapshot for a timeline which is resident (includes some segment data)
///
/// Streams the control file plus the WAL segment range chosen by
/// `start_snapshot`, verifying via `finish_snapshot` that the term did not
/// change mid-stream.
pub async fn stream_snapshot_resident_guts(
    tli: WalResidentTimeline,
    source: NodeId,
    destination: NodeId,
    tx: mpsc::Sender<Result<Bytes>>,
    storage: Option<Arc<GenericRemoteStorage>>,
) -> Result<()> {
    let mut ar = prepare_tar_stream(tx);

    // start_snapshot appends the control file to the archive and records the
    // segment range / term state to stream; WAL removal is put on hold until
    // `bctx` is dropped.
    let bctx = tli
        .start_snapshot(&mut ar, source, destination, storage)
        .await?;
    pausable_failpoint!("sk-snapshot-after-list-pausable");

    if let Some(from_to_segno) = &bctx.from_to_segno {
        let tli_dir = tli.get_timeline_dir();
        info!(
            "sending {} segments [{:#X}-{:#X}], term={}, last_log_term={}, flush_lsn={}",
            from_to_segno.end() - from_to_segno.start() + 1,
            from_to_segno.start(),
            from_to_segno.end(),
            bctx.term,
            bctx.last_log_term,
            bctx.flush_lsn,
        );
        for segno in from_to_segno.clone() {
            let Some((mut sf, is_partial)) =
                open_wal_file(&tli_dir, segno, bctx.wal_seg_size).await?
            else {
                // File is not found
                let (wal_file_path, _wal_file_partial_path) =
                    wal_file_paths(&tli_dir, segno, bctx.wal_seg_size);
                tracing::warn!("couldn't find WAL segment file {wal_file_path}");
                bail!("couldn't find WAL segment file {wal_file_path}")
            };
            let mut wal_file_name = XLogFileName(PG_TLI, segno, bctx.wal_seg_size);
            if is_partial {
                // Partial segments are stored with a ".partial" suffix on
                // disk; keep the same name inside the archive.
                wal_file_name.push_str(".partial");
            }
            ar.append_file(&wal_file_name, &mut sf).await?;
        }
    } else {
        info!("Not including any segments into the snapshot");
    }

    // Do the term check before ar.finish to make archive corrupted in case of
    // term change. Client shouldn't ignore abrupt stream end, but to be sure.
    tli.finish_snapshot(&bctx).await?;
    ar.finish().await?;
    Ok(())
}
impl Timeline {
    /// Simple snapshot for an offloaded timeline: we will only upload a renamed partial segment and
    /// pass a modified control file into the provided tar stream (nothing with data segments on disk, since
    /// we are offloaded and there aren't any)
    ///
    /// `source` / `destination` are the node ids between which the uploaded
    /// partial segment is renamed in remote storage.
    async fn snapshot_offloaded<W: AsyncWrite + Unpin + Send>(
        self: &Arc<Timeline>,
        ar: &mut tokio_tar::Builder<W>,
        source: NodeId,
        destination: NodeId,
        storage: &GenericRemoteStorage,
    ) -> Result<()> {
        // Take initial copy of control file, then release state lock
        let mut control_file = {
            let shared_state = self.write_shared_state().await;
            let control_file = TimelinePersistentState::clone(shared_state.sk.state());

            // Rare race: we got unevicted between entering function and reading control file.
            // We error out and let API caller retry.
            if !matches!(control_file.eviction_state, EvictionState::Offloaded(_)) {
                bail!("Timeline was un-evicted during snapshot, please retry");
            }

            control_file
        };

        // Modify the partial segment of the in-memory copy for the control file to
        // point to the destination safekeeper.
        let replace = control_file
            .partial_backup
            .replace_uploaded_segment(source, destination)?;

        let Some(replace) = replace else {
            // In Manager::ready_for_eviction, we do not permit eviction unless the timeline
            // has a partial segment. It is unexpected for it to be missing here.
            anyhow::bail!("Timeline has no partial segment, cannot generate snapshot");
        };

        tracing::info!("Replacing uploaded partial segment in in-mem control file: {replace:?}");

        // Optimistically try to copy the partial segment to the destination's path: this
        // can fail if the timeline was un-evicted and modified in the background.
        let remote_timeline_path = &self.remote_path;
        wal_backup::copy_partial_segment(
            storage,
            &replace.previous.remote_path(remote_timeline_path),
            &replace.current.remote_path(remote_timeline_path),
        )
        .await?;

        // Since the S3 copy succeeded with the path given in our control file snapshot, and
        // we are sending that snapshot in our response, we are giving the caller a consistent
        // snapshot even if our local Timeline was unevicted or otherwise modified in the meantime.
        let buf = control_file
            .write_to_buf()
            .with_context(|| "failed to serialize control store")?;
        let mut header = Header::new_gnu();
        header.set_size(buf.len().try_into().expect("never breaches u64"));
        ar.append_data(&mut header, CONTROL_FILE_NAME, buf.as_slice())
            .await
            .with_context(|| "failed to append to archive")?;
        Ok(())
    }
}
impl WalResidentTimeline {
    /// Start streaming tar archive with timeline:
    /// 1) stream control file under lock;
    /// 2) hold off WAL removal;
    /// 3) collect SnapshotContext to understand which WAL segments should be
    ///    streamed.
    ///
    /// Snapshot streams data up to flush_lsn. To make this safe, we must check
    /// that term doesn't change during the procedure, or we risk sending mix of
    /// WAL from different histories. Term is remembered in the SnapshotContext
    /// and checked in finish_snapshot. Note that in the last segment some WAL
    /// higher than flush_lsn set here might be streamed; that's fine as long as
    /// terms doesn't change.
    ///
    /// Alternatively we could send only up to commit_lsn to get some valid
    /// state which later will be recovered by compute, in this case term check
    /// is not needed, but we likely don't want that as there might be no
    /// compute which could perform the recovery.
    ///
    /// When returned SnapshotContext is dropped WAL hold is removed.
    async fn start_snapshot<W: AsyncWrite + Unpin + Send>(
        &self,
        ar: &mut tokio_tar::Builder<W>,
        source: NodeId,
        destination: NodeId,
        storage: Option<Arc<GenericRemoteStorage>>,
    ) -> Result<SnapshotContext> {
        // Hold the shared-state write lock for the whole control-file capture
        // and segment-range computation; it is dropped explicitly below.
        let mut shared_state = self.write_shared_state().await;
        let wal_seg_size = shared_state.get_wal_seg_size();

        let mut control_store = TimelinePersistentState::clone(shared_state.sk.state());
        // Modify the partial segment of the in-memory copy for the control file to
        // point to the destination safekeeper.
        let replace = control_store
            .partial_backup
            .replace_uploaded_segment(source, destination)?;

        if let Some(replace) = replace {
            // The deserialized control file has an uploaded partial. We upload a copy
            // of it to object storage for the destination safekeeper and send an updated
            // control file in the snapshot.
            tracing::info!(
                "Replacing uploaded partial segment in in-mem control file: {replace:?}"
            );

            let remote_timeline_path = &self.tli.remote_path;
            wal_backup::copy_partial_segment(
                &*storage.context("remote storage not configured")?,
                &replace.previous.remote_path(remote_timeline_path),
                &replace.current.remote_path(remote_timeline_path),
            )
            .await?;
        }

        let buf = control_store
            .write_to_buf()
            .with_context(|| "failed to serialize control store")?;
        let mut header = Header::new_gnu();
        header.set_size(buf.len().try_into().expect("never breaches u64"));
        ar.append_data(&mut header, CONTROL_FILE_NAME, buf.as_slice())
            .await
            .with_context(|| "failed to append to archive")?;

        // We need to stream since the oldest segment someone (s3 or pageserver)
        // still needs. This duplicates calc_horizon_lsn logic.
        //
        // We know that WAL wasn't removed up to this point because it cannot be
        // removed further than `backup_lsn`. Since we're holding shared_state
        // lock and setting `wal_removal_on_hold` later, it guarantees that WAL
        // won't be removed until we're done.
        let timeline_state = shared_state.sk.state();
        let from_lsn = min(
            timeline_state.remote_consistent_lsn,
            timeline_state.backup_lsn,
        );
        let flush_lsn = shared_state.sk.flush_lsn();
        // An invalid from_lsn means the timeline has seen no writes yet; in
        // that case only the control file (no segments) goes into the snapshot.
        let (send_segments, msg) = if from_lsn == Lsn::INVALID {
            (false, "snapshot is called on uninitialized timeline")
        } else {
            (true, "timeline is initialized")
        };
        tracing::info!(
            remote_consistent_lsn=%timeline_state.remote_consistent_lsn,
            backup_lsn=%timeline_state.backup_lsn,
            %flush_lsn,
            "{msg}"
        );
        let from_segno = from_lsn.segment_number(wal_seg_size);
        let term = shared_state.sk.state().acceptor_state.term;
        let last_log_term = shared_state.sk.last_log_term();
        let upto_segno = flush_lsn.segment_number(wal_seg_size);
        // have some limit on max number of segments as a sanity check
        const MAX_ALLOWED_SEGS: u64 = 1000;
        let num_segs = upto_segno - from_segno + 1;
        if num_segs > MAX_ALLOWED_SEGS {
            bail!(
                "snapshot is called on timeline with {} segments, but the limit is {}",
                num_segs,
                MAX_ALLOWED_SEGS
            );
        }

        // Prevent WAL removal while we're streaming data.
        //
        // Since this a flag, not a counter just bail out if already set; we
        // shouldn't need concurrent snapshotting.
        if shared_state.wal_removal_on_hold {
            bail!("wal_removal_on_hold is already true");
        }
        shared_state.wal_removal_on_hold = true;

        // Drop shared_state to release the lock, before calling wal_residence_guard().
        drop(shared_state);

        let tli_copy = self.wal_residence_guard().await?;
        let from_to_segno = send_segments.then_some(from_segno..=upto_segno);
        let bctx = SnapshotContext {
            from_to_segno,
            term,
            last_log_term,
            flush_lsn,
            wal_seg_size,
            tli: tli_copy,
        };

        Ok(bctx)
    }

    /// Finish snapshotting: check that term(s) hasn't changed.
    ///
    /// Note that WAL gc hold off is removed in Drop of SnapshotContext to not
    /// forget this if snapshotting fails mid the way.
    pub async fn finish_snapshot(&self, bctx: &SnapshotContext) -> Result<()> {
        let shared_state = self.read_shared_state().await;
        let term = shared_state.sk.state().acceptor_state.term;
        let last_log_term = shared_state.sk.last_log_term();
        // There are some cases to relax this check (e.g. last_log_term might
        // change, but as long as older history is strictly part of new that's
        // fine), but there is no need to do it.
        if bctx.term != term || bctx.last_log_term != last_log_term {
            bail!(
                "term(s) changed during snapshot: were term={}, last_log_term={}, now term={}, last_log_term={}",
                bctx.term,
                bctx.last_log_term,
                term,
                last_log_term
            );
        }
        Ok(())
    }
}
/// Response for debug dump request.
#[derive(Debug, Deserialize)]
pub struct DebugDumpResponse {
    /// When the dump started.
    pub start_time: DateTime<Utc>,
    /// When the dump finished.
    pub finish_time: DateTime<Utc>,
    /// Per-timeline dump entries.
    pub timelines: Vec<debug_dump::Timeline>,
    /// Number of timelines included in the dump.
    pub timelines_count: usize,
    /// Safekeeper configuration as reported by the dump.
    pub config: debug_dump::Config,
}
/// Find the most advanced safekeeper and pull timeline from it.
pub async fn handle_request(
request: PullTimelineRequest,
sk_auth_token: Option<SecretString>,
ssl_ca_certs: Vec<Certificate>,
global_timelines: Arc<GlobalTimelines>,
wait_for_peer_timeline_status: bool,
) -> Result<PullTimelineResponse, ApiError> {
if let Some(mconf) = &request.mconf {
let sk_id = global_timelines.get_sk_id();
if !mconf.contains(sk_id) {
return Err(ApiError::BadRequest(anyhow!(
"refused to pull timeline with {mconf}, node {sk_id} is not member of it",
)));
}
}
let existing_tli = global_timelines.get(TenantTimelineId::new(
request.tenant_id,
request.timeline_id,
));
if let Ok(timeline) = existing_tli {
let cur_generation = timeline
.read_shared_state()
.await
.sk
.state()
.mconf
.generation;
info!(
"Timeline {} already exists with generation {cur_generation}",
request.timeline_id,
);
if let Some(mconf) = request.mconf {
timeline
.membership_switch(mconf)
.await
.map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
}
return Ok(PullTimelineResponse {
safekeeper_host: None,
});
}
let mut http_client = reqwest::Client::builder();
for ssl_ca_cert in ssl_ca_certs {
http_client = http_client.add_root_certificate(ssl_ca_cert);
}
let http_client = http_client
.build()
.map_err(|e| ApiError::InternalServerError(e.into()))?;
let http_hosts = request.http_hosts.clone();
// Figure out statuses of potential donors.
let mut statuses = Vec::new();
if !wait_for_peer_timeline_status {
let responses: Vec<Result<TimelineStatus, mgmt_api::Error>> =
futures::future::join_all(http_hosts.iter().map(|url| async {
let cclient = Client::new(http_client.clone(), url.clone(), sk_auth_token.clone());
let resp = cclient
.timeline_status(request.tenant_id, request.timeline_id)
.await?;
let info: TimelineStatus = resp
.json()
.await
.context("Failed to deserialize timeline status")
.map_err(|e| mgmt_api::Error::ReceiveErrorBody(e.to_string()))?;
Ok(info)
}))
.await;
for (i, response) in responses.into_iter().enumerate() {
match response {
Ok(status) => {
if let Some(mconf) = &request.mconf {
if status.mconf.generation > mconf.generation {
// We probably raced with another timeline membership change with higher generation.
// Ignore this request.
return Err(ApiError::Conflict(format!(
"cannot pull timeline with generation {}: timeline {} already exists with generation {} on {}",
mconf.generation,
request.timeline_id,
status.mconf.generation,
http_hosts[i],
)));
}
}
statuses.push((status, i));
}
Err(e) => {
info!("error fetching status from {}: {e}", http_hosts[i]);
}
}
}
// Allow missing responses from up to one safekeeper (say due to downtime)
// e.g. if we created a timeline on PS A and B, with C being offline. Then B goes
// offline and C comes online. Then we want a pull on C with A and B as hosts to work.
let min_required_successful = (http_hosts.len() - 1).max(1);
if statuses.len() < min_required_successful {
return Err(ApiError::InternalServerError(anyhow::anyhow!(
"only got {} successful status responses. required: {min_required_successful}",
statuses.len()
)));
}
} else {
let mut retry = true;
// We must get status from all other peers.
// Otherwise, we may run into split-brain scenario.
while retry {
statuses.clear();
retry = false;
for (i, url) in http_hosts.iter().enumerate() {
let cclient = Client::new(http_client.clone(), url.clone(), sk_auth_token.clone());
match cclient
.timeline_status(request.tenant_id, request.timeline_id)
.await
{
Ok(resp) => {
if resp.status() == StatusCode::NOT_FOUND {
warn!(
"Timeline {} not found on peer SK {}, no need to pull it",
TenantTimelineId::new(request.tenant_id, request.timeline_id),
url
);
return Ok(PullTimelineResponse {
safekeeper_host: None,
});
}
let info: TimelineStatus = resp
.json()
.await
.context("Failed to deserialize timeline status")
.map_err(ApiError::InternalServerError)?;
statuses.push((info, i));
}
Err(e) => {
match e {
// If we get a 404, it means the timeline doesn't exist on this safekeeper.
// We can ignore this error.
mgmt_api::Error::ApiError(status, _)
if status == StatusCode::NOT_FOUND =>
{
warn!(
"Timeline {} not found on peer SK {}, no need to pull it",
TenantTimelineId::new(request.tenant_id, request.timeline_id),
url
);
return Ok(PullTimelineResponse {
safekeeper_host: None,
});
}
_ => {}
}
retry = true;
error!("Failed to get timeline status from {}: {:#}", url, e);
}
}
}
sleep(std::time::Duration::from_millis(100)).await;
}
}
let max_term = statuses
.iter()
.map(|(status, _)| status.acceptor_state.term)
.max()
.unwrap();
// Find the most advanced safekeeper
let (status, i) = statuses
.into_iter()
.max_by_key(|(status, _)| {
(
status.acceptor_state.epoch,
status.flush_lsn,
/* BEGIN_HADRON */
// We need to pull from the SK with the highest term.
// This is because another compute may come online and vote the same highest term again on the other two SKs.
// Then, there will be 2 computes running on the same term.
status.acceptor_state.term,
/* END_HADRON */
status.commit_lsn,
)
})
.unwrap();
let safekeeper_host = http_hosts[i].clone();
assert!(status.tenant_id == request.tenant_id);
assert!(status.timeline_id == request.timeline_id);
// TODO(diko): This is hadron only check to make sure that we pull the timeline
// from the safekeeper with the highest term during timeline restore.
// We could avoid returning the error by calling bump_term after pull_timeline.
// However, this is not a big deal because we retry the pull_timeline requests.
// The check should be removed together with removing custom hadron logic for
// safekeeper restore.
if wait_for_peer_timeline_status && status.acceptor_state.term != max_term {
return Err(ApiError::PreconditionFailed(
format!(
"choosen safekeeper {} has term {}, but the most advanced term is {}",
safekeeper_host, status.acceptor_state.term, max_term
)
.into(),
));
}
match pull_timeline(
status,
safekeeper_host,
sk_auth_token,
http_client,
global_timelines,
request.mconf,
)
.await
{
Ok(resp) => Ok(resp),
Err(e) => {
match e.downcast_ref::<TimelineError>() {
Some(TimelineError::AlreadyExists(_)) => Ok(PullTimelineResponse {
safekeeper_host: None,
}),
Some(TimelineError::Deleted(_)) => Err(ApiError::Conflict(format!(
"Timeline {}/{} deleted",
request.tenant_id, request.timeline_id
))),
Some(TimelineError::CreationInProgress(_)) => {
// We don't return success here because creation might still fail.
Err(ApiError::Conflict("Creation in progress".to_owned()))
}
_ => Err(ApiError::InternalServerError(e)),
}
}
}
}
/// Pull a timeline snapshot from `host` into a temp directory, validate it,
/// and load it into `global_timelines`. Returns the donor host on success.
async fn pull_timeline(
    status: TimelineStatus,
    host: String,
    sk_auth_token: Option<SecretString>,
    http_client: reqwest::Client,
    global_timelines: Arc<GlobalTimelines>,
    mconf: Option<membership::Configuration>,
) -> Result<PullTimelineResponse> {
    let ttid = TenantTimelineId::new(status.tenant_id, status.timeline_id);
    info!(
        "pulling timeline {} from safekeeper {}, commit_lsn={}, flush_lsn={}, term={}, epoch={}",
        ttid,
        host,
        status.commit_lsn,
        status.flush_lsn,
        status.acceptor_state.term,
        status.acceptor_state.epoch
    );

    let conf = &global_timelines.get_global_config();

    let (_tmp_dir, tli_dir_path) = create_temp_timeline_dir(conf, ttid).await?;
    let client = Client::new(http_client, host.clone(), sk_auth_token.clone());
    // Request stream with basebackup archive.
    let bb_resp = client
        .snapshot(status.tenant_id, status.timeline_id, conf.my_id)
        .await?;

    // Make Stream of Bytes from it...
    let bb_stream = bb_resp.bytes_stream().map_err(std::io::Error::other);
    // and turn it into StreamReader implementing AsyncRead.
    let bb_reader = tokio_util::io::StreamReader::new(bb_stream);

    // Extract it on the fly to the disk. We don't use simple unpack() to fsync
    // files.
    let mut entries = Archive::new(bb_reader).entries()?;
    while let Some(base_tar_entry) = entries.next().await {
        let mut entry = base_tar_entry?;
        let header = entry.header();
        let file_path = header.path()?.into_owned();
        match header.entry_type() {
            tokio_tar::EntryType::Regular => {
                let utf8_file_path =
                    Utf8PathBuf::from_path_buf(file_path).expect("non-Unicode path");
                let dst_path = tli_dir_path.join(utf8_file_path);
                let mut f = OpenOptions::new()
                    .create(true)
                    .truncate(true)
                    .write(true)
                    .open(&dst_path)
                    .await?;
                tokio::io::copy(&mut entry, &mut f).await?;
                // fsync the file
                f.sync_all().await?;
            }
            _ => {
                // Only regular files are expected in the snapshot archive.
                bail!(
                    "entry {} in backup tar archive is of unexpected type: {:?}",
                    file_path.display(),
                    header.entry_type()
                );
            }
        }
    }

    // fsync temp timeline directory to remember its contents.
    fsync_async_opt(&tli_dir_path, !conf.no_sync).await?;

    let generation = mconf.as_ref().map(|c| c.generation);

    // Let's create timeline from temp directory and verify that it's correct
    let (commit_lsn, flush_lsn) =
        validate_temp_timeline(conf, ttid, &tli_dir_path, generation).await?;
    info!(
        "finished downloading timeline {}, commit_lsn={}, flush_lsn={}",
        ttid, commit_lsn, flush_lsn
    );
    assert!(status.commit_lsn <= status.flush_lsn);

    // Finally, load the timeline.
    let timeline = global_timelines
        .load_temp_timeline(ttid, &tli_dir_path, generation)
        .await?;

    if let Some(mconf) = mconf {
        // Switch to provided mconf to guarantee that the timeline will not
        // be deleted by request with older generation.
        // The generation might already be higher than the one in mconf, e.g.
        // if another membership_switch request was executed between `load_temp_timeline`
        // and `membership_switch`, but that's totally fine. `membership_switch` will
        // ignore switch to older generation.
        timeline.membership_switch(mconf).await?;
    }

    Ok(PullTimelineResponse {
        safekeeper_host: Some(host),
    })
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/src/recovery.rs | safekeeper/src/recovery.rs | //! This module implements pulling WAL from peer safekeepers if compute can't
//! provide it, i.e. safekeeper lags too much.
use std::fmt;
use std::pin::pin;
use std::time::SystemTime;
use anyhow::{Context, bail};
use futures::StreamExt;
use postgres_protocol::message::backend::ReplicationMessage;
use reqwest::Certificate;
use safekeeper_api::Term;
use safekeeper_api::membership::INVALID_GENERATION;
use safekeeper_api::models::{PeerInfo, TimelineStatus};
use tokio::select;
use tokio::sync::mpsc::{Receiver, Sender, channel};
use tokio::time::{self, Duration, sleep, timeout};
use tokio_postgres::replication::ReplicationStream;
use tokio_postgres::types::PgLsn;
use tracing::*;
use utils::id::NodeId;
use utils::lsn::Lsn;
use utils::postgres_client::{
ConnectionConfigArgs, PostgresClientProtocol, wal_stream_connection_config,
};
use crate::SafeKeeperConf;
use crate::receive_wal::{MSG_QUEUE_SIZE, REPLY_QUEUE_SIZE, WalAcceptor};
use crate::safekeeper::{
AcceptorProposerMessage, AppendRequest, AppendRequestHeader, ProposerAcceptorMessage,
ProposerElected, TermHistory, TermLsn, VoteRequest,
};
use crate::timeline::WalResidentTimeline;
/// Entrypoint for per timeline task which always runs, checking whether
/// recovery for this safekeeper is needed and starting it if so.
#[instrument(name = "recovery", skip_all, fields(ttid = %tli.ttid))]
pub async fn recovery_main(tli: WalResidentTimeline, conf: SafeKeeperConf) {
    info!("started");
    let cancellation = tli.cancel.clone();
    select! {
        // The main loop never returns on its own; only cancellation ends it.
        _ = recovery_main_loop(tli, conf) => { unreachable!() }
        _ = cancellation.cancelled() => {
            info!("stopped");
        }
    }
}
/// Should we start fetching WAL from a peer safekeeper, and if yes, from
/// which? Answer is yes, i.e. .donors is not empty if 1) there is something
/// to fetch, and we can do that without running elections; 2) there is no
/// actively streaming compute, as we don't want to compete with it.
///
/// If donor(s) are chosen, their term is guaranteed to be equal to their
/// last_log_term, so we are sure such a leader ever had been elected.
///
/// All possible donors are returned so that we could keep connection to the
/// current one if it is good even if it slightly lags behind.
///
/// Note that term conditions above might be not met, but safekeepers are
/// still not aligned on last flush_lsn. Generally in this case until
/// elections are run it is not possible to say which safekeeper should
/// recover from which one -- history which would be committed is different
/// depending on assembled quorum (e.g. classic picture 8 from Raft paper).
/// Thus we don't try to predict it here.
async fn recovery_needed(
    tli: &WalResidentTimeline,
    heartbeat_timeout: Duration,
) -> RecoveryNeededInfo {
    let ss = tli.read_shared_state().await;
    let term = ss.sk.state().acceptor_state.term;
    let last_log_term = ss.sk.last_log_term();
    let flush_lsn = ss.sk.flush_lsn();
    // note that peers contain myself, but that's ok -- we are interested only in peers which are strictly ahead of us.
    let mut peers = ss.get_peers(heartbeat_timeout);
    // Sort by <last log term, lsn> pairs.
    peers.sort_by(|p1, p2| {
        let tl1 = TermLsn {
            term: p1.last_log_term,
            lsn: p1.flush_lsn,
        };
        let tl2 = TermLsn {
            term: p2.last_log_term,
            lsn: p2.flush_lsn,
        };
        tl2.cmp(&tl1) // desc
    });
    let num_streaming_computes = tli.get_walreceivers().get_num_streaming();
    let donors = if num_streaming_computes > 0 {
        vec![] // If there is a streaming compute, don't try to recover to not intervene.
    } else {
        peers
            .iter()
            .filter_map(|candidate| {
                // Are we interested in this candidate?
                let candidate_tl = TermLsn {
                    term: candidate.last_log_term,
                    lsn: candidate.flush_lsn,
                };
                let my_tl = TermLsn {
                    term: last_log_term,
                    lsn: flush_lsn,
                };
                if my_tl < candidate_tl {
                    // Yes, we are interested. Can we pull from it without
                    // (re)running elections? It is possible if 1) his term
                    // is equal to his last_log_term so we could act on
                    // behalf of leader of this term (we must be sure he was
                    // ever elected) and 2) our term is not higher, or we'll refuse data.
                    if candidate.term == candidate.last_log_term && candidate.term >= term {
                        Some(Donor::from(candidate))
                    } else {
                        None
                    }
                } else {
                    None
                }
            })
            .collect()
    };
    RecoveryNeededInfo {
        term,
        last_log_term,
        flush_lsn,
        peers,
        num_streaming_computes,
        donors,
    }
}
/// Result of Timeline::recovery_needed, contains donor(s) if recovery needed and
/// fields to explain the choice.
#[derive(Debug)]
pub struct RecoveryNeededInfo {
    /// my term
    pub term: Term,
    /// my last_log_term
    pub last_log_term: Term,
    /// my flush_lsn
    pub flush_lsn: Lsn,
    /// peers from which we can fetch WAL, for observability.
    pub peers: Vec<PeerInfo>,
    /// for observability
    pub num_streaming_computes: usize,
    /// Non-empty iff recovery is needed and possible; sorted candidates to pull from.
    pub donors: Vec<Donor>,
}
// Custom to omit not important fields from PeerInfo.
impl fmt::Display for RecoveryNeededInfo {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Single-line summary: own state, then a condensed view of each peer,
        // then the computed decision inputs.
        write!(
            f,
            "{{term: {}, last_log_term: {}, flush_lsn: {}, peers: {{",
            self.term, self.last_log_term, self.flush_lsn
        )?;
        for peer in &self.peers {
            write!(
                f,
                "PeerInfo {{ sk_id: {}, term: {}, last_log_term: {}, flush_lsn: {} }}, ",
                peer.sk_id, peer.term, peer.last_log_term, peer.flush_lsn
            )?;
        }
        write!(
            f,
            "}} num_streaming_computes: {}, donors: {:?}",
            self.num_streaming_computes, self.donors
        )
    }
}
/// A peer safekeeper selected as a WAL recovery source.
#[derive(Clone, Debug)]
pub struct Donor {
    /// Node id of the donor safekeeper.
    pub sk_id: NodeId,
    /// equals to last_log_term
    pub term: Term,
    /// Donor's flush_lsn at selection time.
    pub flush_lsn: Lsn,
    /// Postgres (WAL streaming) connection string of the donor.
    pub pg_connstr: String,
    /// HTTP management API address of the donor.
    pub http_connstr: String,
    /// HTTPS management API address, if the donor advertises one.
    pub https_connstr: Option<String>,
}
impl From<&PeerInfo> for Donor {
fn from(p: &PeerInfo) -> Self {
Donor {
sk_id: p.sk_id,
term: p.term,
flush_lsn: p.flush_lsn,
pg_connstr: p.pg_connstr.clone(),
http_connstr: p.http_connstr.clone(),
https_connstr: p.https_connstr.clone(),
}
}
}
const CHECK_INTERVAL_MS: u64 = 2000;

/// Check regularly whether we need to start recovery.
async fn recovery_main_loop(tli: WalResidentTimeline, conf: SafeKeeperConf) {
    let check_duration = Duration::from_millis(CHECK_INTERVAL_MS);
    loop {
        let needed = recovery_needed(&tli, conf.heartbeat_timeout).await;
        if let Some(donor) = needed.donors.first() {
            info!("starting recovery from donor {}: {}", donor.sk_id, needed);
            let guard = match tli.wal_residence_guard().await {
                Ok(g) => g,
                Err(e) => {
                    warn!("failed to obtain guard: {}", e);
                    // Retry immediately, skipping the sleep below.
                    continue;
                }
            };
            match recover(guard, donor, &conf).await {
                // Note: 'write_wal rewrites WAL written before' error is
                // expected here and might happen if compute and recovery
                // concurrently write the same data. Eventually compute
                // should win.
                Err(e) => warn!("recovery failed: {:#}", e),
                Ok(msg) => info!("recovery finished: {}", msg),
            }
        } else {
            trace!("recovery not needed or not possible: {}", needed);
        }
        sleep(check_duration).await;
    }
}
/// Recover from the specified donor. Returns message explaining normal finish
/// reason or error.
///
/// Sequence: fetch the donor's term history over its HTTP API, run a local
/// VoteRequest/ProposerElected handshake acting as a leader in the donor's
/// term to find the divergence point and truncate local WAL, then stream the
/// missing WAL via `recovery_stream`.
async fn recover(
    tli: WalResidentTimeline,
    donor: &Donor,
    conf: &SafeKeeperConf,
) -> anyhow::Result<String> {
    // Learn donor term switch history to figure out starting point.
    let mut client = reqwest::Client::builder();
    for cert in &conf.ssl_ca_certs {
        client = client.add_root_certificate(Certificate::from_der(cert.contents())?);
    }
    let client = client
        .build()
        .context("Failed to build http client for recover")?;
    // Pick https or http endpoint depending on configuration; a donor without
    // an https_connstr cannot be used when https is mandated.
    let url = if conf.use_https_safekeeper_api {
        if let Some(https_connstr) = donor.https_connstr.as_ref() {
            format!("https://{https_connstr}")
        } else {
            anyhow::bail!(
                "cannot recover from donor {}: \
                https is enabled, but https_connstr is not specified",
                donor.sk_id
            );
        }
    } else {
        format!("http://{}", donor.http_connstr)
    };
    let timeline_info: TimelineStatus = client
        .get(format!(
            "{}/v1/tenant/{}/timeline/{}",
            url, tli.ttid.tenant_id, tli.ttid.timeline_id
        ))
        .send()
        .await?
        .json()
        .await?;
    // The donor's term must still match what we observed when selecting it.
    if timeline_info.acceptor_state.term != donor.term {
        bail!(
            "donor term changed from {} to {}",
            donor.term,
            timeline_info.acceptor_state.term
        );
    }
    // convert from API TermSwitchApiEntry into TermLsn.
    let donor_th = TermHistory(
        timeline_info
            .acceptor_state
            .term_history
            .iter()
            .map(|tl| Into::<TermLsn>::into(*tl))
            .collect(),
    );
    // Now understand our term history.
    let vote_request = ProposerAcceptorMessage::VoteRequest(VoteRequest {
        generation: INVALID_GENERATION,
        term: donor.term,
    });
    let vote_response = match tli
        .process_msg(&vote_request)
        .await
        .context("VoteRequest handling")?
    {
        Some(AcceptorProposerMessage::VoteResponse(vr)) => vr,
        _ => {
            bail!("unexpected VoteRequest response"); // unreachable
        }
    };
    // If our own term moved past the donor's, this recovery attempt is stale.
    if vote_response.term != donor.term {
        bail!(
            "our term changed from {} to {}",
            donor.term,
            vote_response.term
        );
    }
    let last_common_point = match TermHistory::find_highest_common_point(
        &donor_th,
        &vote_response.term_history,
        vote_response.flush_lsn,
    ) {
        None => bail!(
            "couldn't find common point in histories, donor {:?}, sk {:?}",
            donor_th,
            vote_response.term_history,
        ),
        Some(lcp) => lcp,
    };
    info!("found last common point at {:?}", last_common_point);
    // truncate WAL locally
    let pe = ProposerAcceptorMessage::Elected(ProposerElected {
        generation: INVALID_GENERATION,
        term: donor.term,
        start_streaming_at: last_common_point.lsn,
        term_history: donor_th,
    });
    // Successful ProposerElected handling always returns None. If term changed,
    // we'll find out that during the streaming. Note: it is expected to get
    // 'refusing to overwrite correct WAL' here if walproposer reconnected
    // concurrently, restart helps here.
    tli.process_msg(&pe)
        .await
        .context("ProposerElected handling")?;
    recovery_stream(tli, donor, last_common_point.lsn, conf).await
}
// Pull WAL from donor, assuming handshake is already done.
//
// Opens a physical replication connection to the donor, spawns a WalAcceptor
// for local persistence, and runs networking (network_io) and reply reading
// (read_replies) concurrently until one of them terminates the stream.
async fn recovery_stream(
    tli: WalResidentTimeline,
    donor: &Donor,
    start_streaming_at: Lsn,
    conf: &SafeKeeperConf,
) -> anyhow::Result<String> {
    // TODO: pass auth token
    let connection_conf_args = ConnectionConfigArgs {
        protocol: PostgresClientProtocol::Vanilla,
        ttid: tli.ttid,
        shard_number: None,
        shard_count: None,
        shard_stripe_size: None,
        listen_pg_addr_str: &donor.pg_connstr,
        auth_token: None,
        availability_zone: None,
    };
    let cfg = wal_stream_connection_config(connection_conf_args)?;
    let mut cfg = cfg.to_tokio_postgres_config();
    // It will make safekeeper give out not committed WAL (up to flush_lsn).
    cfg.application_name(&format!("safekeeper_{}", conf.my_id));
    cfg.replication_mode(tokio_postgres::config::ReplicationMode::Physical);
    let connect_timeout = Duration::from_millis(10000);
    let (client, connection) = match time::timeout(
        connect_timeout,
        cfg.connect(tokio_postgres::NoTls),
    )
    .await
    {
        Ok(client_and_conn) => client_and_conn?,
        Err(_elapsed) => {
            bail!(
                "timed out while waiting {connect_timeout:?} for connection to peer safekeeper to open"
            );
        }
    };
    trace!("connected to {:?}", donor);
    // The connection object performs the actual communication with the
    // server, spawn it off to run on its own.
    let ttid = tli.ttid;
    tokio::spawn(async move {
        if let Err(e) = connection
            .instrument(info_span!("recovery task connection poll", ttid = %ttid))
            .await
        {
            // This logging isn't very useful as error is anyway forwarded to client.
            trace!(
                "tokio_postgres connection object finished with error: {}",
                e
            );
        }
    });
    // term is passed so the donor serves WAL of that exact term and refuses
    // to stream once its term changes.
    let query = format!(
        "START_REPLICATION PHYSICAL {} (term='{}')",
        start_streaming_at, donor.term
    );
    let copy_stream = client.copy_both_simple(&query).await?;
    let physical_stream = ReplicationStream::new(copy_stream);
    // As in normal walreceiver, do networking and writing to disk in parallel.
    let (msg_tx, msg_rx) = channel(MSG_QUEUE_SIZE);
    let (reply_tx, reply_rx) = channel(REPLY_QUEUE_SIZE);
    let wa = WalAcceptor::spawn(tli.wal_residence_guard().await?, msg_rx, reply_tx, None);
    let res = tokio::select! {
        r = network_io(physical_stream, msg_tx, donor.clone(), tli, conf.clone()) => r,
        r = read_replies(reply_rx, donor.term) => r.map(|()| None),
    };
    // Join the spawned WalAcceptor. At this point chans to/from it passed to
    // network routines are dropped, so it will exit as soon as it touches them.
    match wa.await {
        Ok(Ok(())) => {
            // WalAcceptor finished normally, termination reason is different
            match res {
                Ok(Some(success_desc)) => Ok(success_desc),
                Ok(None) => bail!("unexpected recovery end without error/success"), // can't happen
                Err(e) => Err(e), // network error or term change
            }
        }
        Ok(Err(e)) => Err(e), // error while processing message
        Err(e) => bail!("WalAcceptor panicked: {}", e),
    }
}
// Perform network part of streaming: read data and push it to msg_tx, send KA
// to make sender hear from us. If there is nothing coming for a while, check
// for termination.
// Returns
// - Ok(None) if channel to WalAcceptor closed -- its task should return error.
// - Ok(Some(String)) if recovery successfully completed.
// - Err if error happened while reading/writing to socket.
async fn network_io(
    physical_stream: ReplicationStream,
    msg_tx: Sender<ProposerAcceptorMessage>,
    donor: Donor,
    tli: WalResidentTimeline,
    conf: SafeKeeperConf,
) -> anyhow::Result<Option<String>> {
    let mut physical_stream = pin!(physical_stream);
    let mut last_received_lsn = Lsn::INVALID;
    // tear down connection if no data arrives within this period
    let no_data_timeout = Duration::from_millis(30000);
    loop {
        let msg = match timeout(no_data_timeout, physical_stream.next()).await {
            Ok(next) => match next {
                None => bail!("unexpected end of replication stream"),
                Some(msg) => msg.context("get replication message")?,
            },
            Err(_) => bail!("no message received within {:?}", no_data_timeout),
        };
        match msg {
            ReplicationMessage::XLogData(xlog_data) => {
                // Wrap received WAL into an AppendRequest for the WalAcceptor.
                let ar_hdr = AppendRequestHeader {
                    generation: INVALID_GENERATION,
                    term: donor.term,
                    begin_lsn: Lsn(xlog_data.wal_start()),
                    end_lsn: Lsn(xlog_data.wal_start()) + xlog_data.data().len() as u64,
                    commit_lsn: Lsn::INVALID, // do not attempt to advance, peer communication anyway does it
                    truncate_lsn: Lsn::INVALID, // do not attempt to advance
                };
                let ar = AppendRequest {
                    h: ar_hdr,
                    wal_data: xlog_data.into_data(),
                };
                trace!(
                    "processing AppendRequest {}-{}, len {}",
                    ar.h.begin_lsn,
                    ar.h.end_lsn,
                    ar.wal_data.len()
                );
                last_received_lsn = ar.h.end_lsn;
                if msg_tx
                    .send(ProposerAcceptorMessage::AppendRequest(ar))
                    .await
                    .is_err()
                {
                    return Ok(None); // chan closed, WalAcceptor terminated
                }
            }
            ReplicationMessage::PrimaryKeepAlive(_) => {
                // keepalive means nothing is being streamed for a while. Check whether we need to stop.
                let recovery_needed_info = recovery_needed(&tli, conf.heartbeat_timeout).await;
                // do current donors still contain one we currently connected to?
                if !recovery_needed_info
                    .donors
                    .iter()
                    .any(|d| d.sk_id == donor.sk_id)
                {
                    // Most likely it means we are caught up.
                    // note: just exiting makes tokio_postgres send CopyFail to the far end.
                    return Ok(Some(format!(
                        "terminating at {} as connected safekeeper {} with term {} is not a donor anymore: {}",
                        last_received_lsn, donor.sk_id, donor.term, recovery_needed_info
                    )));
                }
            }
            _ => {}
        }
        // Send reply to each message to keep connection alive. Ideally we
        // should do that once in a while instead, but this again requires
        // stream split or similar workaround, and recovery is anyway not that
        // performance critical.
        //
        // We do not know here real write/flush LSNs (need to take mutex again
        // or check replies which are read in different future), but neither
        // sender much cares about them, so just send last received.
        physical_stream
            .as_mut()
            .standby_status_update(
                PgLsn::from(last_received_lsn.0),
                PgLsn::from(last_received_lsn.0),
                PgLsn::from(last_received_lsn.0),
                SystemTime::now(),
                0,
            )
            .await?;
    }
}
// Read replies from WalAcceptor. We are not interested much in sending them to
// donor safekeeper, so don't route them anywhere. However, we should check if
// term changes and exit if it does.
// Returns Ok(()) if channel closed, Err in case of term change.
async fn read_replies(
    mut reply_rx: Receiver<AcceptorProposerMessage>,
    donor_term: Term,
) -> anyhow::Result<()> {
    // Drain replies until the channel closes; only AppendResponse carries a
    // term worth checking.
    while let Some(msg) = reply_rx.recv().await {
        if let AcceptorProposerMessage::AppendResponse(ar) = msg {
            if ar.term != donor_term {
                bail!("donor term changed from {} to {}", donor_term, ar.term);
            }
        }
    }
    // chan closed, WalAcceptor terminated
    Ok(())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/src/send_wal.rs | safekeeper/src/send_wal.rs | //! This module implements the streaming side of replication protocol, starting
//! with the "START_REPLICATION" message, and registry of walsenders.
use std::cmp::{max, min};
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use anyhow::{Context as AnyhowContext, bail};
use bytes::Bytes;
use futures::FutureExt;
use itertools::Itertools;
use parking_lot::Mutex;
use postgres_backend::{CopyStreamHandlerEnd, PostgresBackend, PostgresBackendReader, QueryError};
use postgres_ffi::{MAX_SEND_SIZE, PgMajorVersion, get_current_timestamp};
use postgres_ffi_types::TimestampTz;
use pq_proto::{BeMessage, WalSndKeepAlive, XLogDataBody};
use safekeeper_api::Term;
use safekeeper_api::models::{
HotStandbyFeedback, INVALID_FULL_TRANSACTION_ID, ReplicationFeedback, StandbyFeedback,
StandbyReply,
};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::sync::watch::Receiver;
use tokio::time::timeout;
use tracing::*;
use utils::bin_ser::BeSer;
use utils::failpoint_support;
use utils::lsn::Lsn;
use utils::pageserver_feedback::PageserverFeedback;
use utils::postgres_client::PostgresClientProtocol;
use crate::handler::SafekeeperPostgresHandler;
use crate::metrics::{RECEIVED_PS_FEEDBACKS, WAL_READERS};
use crate::receive_wal::WalReceivers;
use crate::safekeeper::TermLsn;
use crate::send_interpreted_wal::{
Batch, InterpretedWalReader, InterpretedWalReaderHandle, InterpretedWalSender,
};
use crate::timeline::WalResidentTimeline;
use crate::wal_reader_stream::StreamingWalReader;
use crate::wal_storage::WalReader;
// See: https://www.postgresql.org/docs/13/protocol-replication.html
// Tag byte of a hot standby feedback message sent by a standby.
const HOT_STANDBY_FEEDBACK_TAG_BYTE: u8 = b'h';
// Tag byte of a standby status update (write/flush/apply LSNs) message.
const STANDBY_STATUS_UPDATE_TAG_BYTE: u8 = b'r';
// neon extension of replication protocol
const NEON_STATUS_UPDATE_TAG_BYTE: u8 = b'z';
/// WalSenders registry. Timeline holds it (wrapped in Arc).
pub struct WalSenders {
    // Shared per-walsender slots plus aggregated feedback state.
    mutex: Mutex<WalSendersShared>,
    // Used to broadcast pageserver feedback to connected walproposers.
    walreceivers: Arc<WalReceivers>,
}
/// Snapshot of walsender-related values exported as per-timeline metrics;
/// produced by [`WalSenders::info_for_metrics`].
pub struct WalSendersTimelineMetricValues {
    // Total number of pageserver feedbacks received so far.
    pub ps_feedback_counter: u64,
    // True iff any received feedback reported corruption.
    pub ps_corruption_detected: bool,
    // The most recent pageserver feedback, empty if none yet.
    pub last_ps_feedback: PageserverFeedback,
    // Number of distinct interpreted WAL reader tasks currently shared by walsenders.
    pub interpreted_wal_reader_tasks: usize,
}
impl WalSenders {
    /// Create an empty registry.
    pub fn new(walreceivers: Arc<WalReceivers>) -> Arc<WalSenders> {
        Arc::new(WalSenders {
            mutex: Mutex::new(WalSendersShared::new()),
            walreceivers,
        })
    }
    /// Register new walsender. Returned guard provides access to the slot and
    /// automatically deregisters in Drop.
    fn register(self: &Arc<WalSenders>, walsender_state: WalSenderState) -> WalSenderGuard {
        let slots = &mut self.mutex.lock().slots;
        // find empty slot or create new one
        let pos = if let Some(pos) = slots.iter().position(|s| s.is_none()) {
            slots[pos] = Some(walsender_state);
            pos
        } else {
            let pos = slots.len();
            slots.push(Some(walsender_state));
            pos
        };
        WalSenderGuard {
            id: pos,
            walsenders: self.clone(),
        }
    }
    /// Attach the walsender in slot `id` to an existing interpreted WAL reader
    /// (via `update`, fanning it out) when one is active and close enough to
    /// `start_pos`, otherwise spawn a fresh one via `create`.
    /// `max_delta_for_fanout = None` disables the distance gate.
    /// Panics (unreachable!) if slot `id` is not an Interpreted walsender.
    fn create_or_update_interpreted_reader<
        FUp: FnOnce(&Arc<InterpretedWalReaderHandle>) -> anyhow::Result<()>,
        FNew: FnOnce() -> InterpretedWalReaderHandle,
    >(
        self: &Arc<WalSenders>,
        id: WalSenderId,
        start_pos: Lsn,
        max_delta_for_fanout: Option<u64>,
        update: FUp,
        create: FNew,
    ) -> anyhow::Result<()> {
        let state = &mut self.mutex.lock();
        let mut selected_interpreted_reader = None;
        for slot in state.slots.iter().flatten() {
            if let WalSenderState::Interpreted(slot_state) = slot {
                if let Some(ref interpreted_reader) = slot_state.interpreted_wal_reader {
                    let select = match (interpreted_reader.current_position(), max_delta_for_fanout)
                    {
                        (Some(pos), Some(max_delta)) => {
                            let delta = pos.0.abs_diff(start_pos.0);
                            delta <= max_delta
                        }
                        // Reader is not active
                        (None, _) => false,
                        // Gating fanout by max delta is disabled.
                        // Attach to any active reader.
                        (_, None) => true,
                    };
                    if select {
                        selected_interpreted_reader = Some(interpreted_reader.clone());
                        break;
                    }
                }
            }
        }
        let slot = state.get_slot_mut(id);
        let slot_state = match slot {
            WalSenderState::Interpreted(s) => s,
            WalSenderState::Vanilla(_) => unreachable!(),
        };
        let selected_or_new = match selected_interpreted_reader {
            Some(selected) => {
                update(&selected)?;
                selected
            }
            None => Arc::new(create()),
        };
        slot_state.interpreted_wal_reader = Some(selected_or_new);
        Ok(())
    }
    /// Get state of all walsenders.
    pub fn get_all_public(self: &Arc<WalSenders>) -> Vec<safekeeper_api::models::WalSenderState> {
        self.mutex
            .lock()
            .slots
            .iter()
            .flatten()
            .map(|state| match state {
                WalSenderState::Vanilla(s) => {
                    safekeeper_api::models::WalSenderState::Vanilla(s.clone())
                }
                WalSenderState::Interpreted(s) => {
                    safekeeper_api::models::WalSenderState::Interpreted(s.public_state.clone())
                }
            })
            .collect()
    }
    /// Get LSN of the most lagging pageserver receiver. Return None if there are no
    /// active walsenders.
    pub fn laggard_lsn(self: &Arc<WalSenders>) -> Option<Lsn> {
        self.mutex
            .lock()
            .slots
            .iter()
            .flatten()
            .filter_map(|s| match s.get_feedback() {
                ReplicationFeedback::Pageserver(feedback) => Some(feedback.last_received_lsn),
                ReplicationFeedback::Standby(_) => None,
            })
            .min()
    }
    /// Returns total counter of pageserver feedbacks received and last feedback.
    pub fn info_for_metrics(self: &Arc<WalSenders>) -> WalSendersTimelineMetricValues {
        let shared = self.mutex.lock();
        // Count distinct reader handles by pointer identity, since several
        // walsenders may share one fanned-out reader.
        let interpreted_wal_reader_tasks = shared
            .slots
            .iter()
            .filter_map(|ss| match ss {
                Some(WalSenderState::Interpreted(int)) => int.interpreted_wal_reader.as_ref(),
                Some(WalSenderState::Vanilla(_)) => None,
                None => None,
            })
            .unique_by(|reader| Arc::as_ptr(reader))
            .count();
        WalSendersTimelineMetricValues {
            ps_feedback_counter: shared.ps_feedback_counter,
            ps_corruption_detected: shared.ps_corruption_detected,
            last_ps_feedback: shared.last_ps_feedback,
            interpreted_wal_reader_tasks,
        }
    }
    /// Get aggregated hot standby feedback (we send it to compute).
    pub fn get_hotstandby(self: &Arc<WalSenders>) -> StandbyFeedback {
        self.mutex.lock().agg_standby_feedback
    }
    /// Record new pageserver feedback, update aggregated values.
    fn record_ps_feedback(self: &Arc<WalSenders>, id: WalSenderId, feedback: &PageserverFeedback) {
        let mut shared = self.mutex.lock();
        *shared.get_slot_mut(id).get_mut_feedback() = ReplicationFeedback::Pageserver(*feedback);
        shared.last_ps_feedback = *feedback;
        shared.ps_feedback_counter += 1;
        if feedback.corruption_detected {
            shared.ps_corruption_detected = true;
        }
        // Release the lock before broadcasting to walproposers.
        drop(shared);
        RECEIVED_PS_FEEDBACKS.inc();
        // send feedback to connected walproposers
        self.walreceivers.broadcast_pageserver_feedback(*feedback);
    }
    /// Record standby reply.
    ///
    /// NOTE(review): unlike `record_hs_feedback`, this does not call
    /// `update_reply_feedback()` after updating the slot — confirm whether the
    /// aggregate is intentionally refreshed only on hs_feedback/unregister.
    fn record_standby_reply(self: &Arc<WalSenders>, id: WalSenderId, reply: &StandbyReply) {
        let mut shared = self.mutex.lock();
        let slot = shared.get_slot_mut(id);
        debug!(
            "Record standby reply: ts={} apply_lsn={}",
            reply.reply_ts, reply.apply_lsn
        );
        match &mut slot.get_mut_feedback() {
            ReplicationFeedback::Standby(sf) => sf.reply = *reply,
            // First standby message on this slot: switch feedback kind.
            ReplicationFeedback::Pageserver(_) => {
                *slot.get_mut_feedback() = ReplicationFeedback::Standby(StandbyFeedback {
                    reply: *reply,
                    hs_feedback: HotStandbyFeedback::empty(),
                })
            }
        }
    }
    /// Record hot standby feedback, update aggregated value.
    fn record_hs_feedback(self: &Arc<WalSenders>, id: WalSenderId, feedback: &HotStandbyFeedback) {
        let mut shared = self.mutex.lock();
        let slot = shared.get_slot_mut(id);
        match &mut slot.get_mut_feedback() {
            ReplicationFeedback::Standby(sf) => sf.hs_feedback = *feedback,
            // First standby message on this slot: switch feedback kind.
            ReplicationFeedback::Pageserver(_) => {
                *slot.get_mut_feedback() = ReplicationFeedback::Standby(StandbyFeedback {
                    reply: StandbyReply::empty(),
                    hs_feedback: *feedback,
                })
            }
        }
        shared.update_reply_feedback();
    }
    /// Get remote_consistent_lsn reported by the pageserver. Returns None if
    /// client is not pageserver.
    pub fn get_ws_remote_consistent_lsn(self: &Arc<WalSenders>, id: WalSenderId) -> Option<Lsn> {
        let shared = self.mutex.lock();
        let slot = shared.get_slot(id);
        match slot.get_feedback() {
            ReplicationFeedback::Pageserver(feedback) => Some(feedback.remote_consistent_lsn),
            _ => None,
        }
    }
    /// Unregister walsender.
    fn unregister(self: &Arc<WalSenders>, id: WalSenderId) {
        let mut shared = self.mutex.lock();
        shared.slots[id] = None;
        shared.update_reply_feedback();
    }
}
/// Mutex-protected shared state of all walsenders of one timeline.
struct WalSendersShared {
    // aggregated over all walsenders value
    agg_standby_feedback: StandbyFeedback,
    // last feedback ever received from any pageserver, empty if none
    last_ps_feedback: PageserverFeedback,
    // total counter of pageserver feedbacks received
    ps_feedback_counter: u64,
    // Hadron: true iff we received a pageserver feedback that indicated
    // data corruption in the timeline
    ps_corruption_detected: bool,
    // One entry per walsender; freed slots are None and get reused.
    slots: Vec<Option<WalSenderState>>,
}
/// Safekeeper internal definitions of wal sender state
///
/// As opposed to [`safekeeper_api::models::WalSenderState`] these struct may
/// include state that we do not wish to expose to the public api.
#[derive(Debug, Clone)]
pub(crate) enum WalSenderState {
    // Physical replication protocol client (e.g. walproposer, peer recovery).
    Vanilla(VanillaWalSenderInternalState),
    // Interpreted-WAL protocol client (pageserver shards).
    Interpreted(InterpretedWalSenderInternalState),
}
// Vanilla walsenders carry no private state, so the public model is reused directly.
type VanillaWalSenderInternalState = safekeeper_api::models::VanillaWalSenderState;
/// Internal state of an interpreted walsender: the publicly exposed part plus
/// an optional handle to the (possibly shared) interpreted WAL reader task.
#[derive(Debug, Clone)]
pub(crate) struct InterpretedWalSenderInternalState {
    public_state: safekeeper_api::models::InterpretedWalSenderState,
    interpreted_wal_reader: Option<Arc<InterpretedWalReaderHandle>>,
}
impl WalSenderState {
fn get_addr(&self) -> &SocketAddr {
match self {
WalSenderState::Vanilla(state) => &state.addr,
WalSenderState::Interpreted(state) => &state.public_state.addr,
}
}
fn get_feedback(&self) -> &ReplicationFeedback {
match self {
WalSenderState::Vanilla(state) => &state.feedback,
WalSenderState::Interpreted(state) => &state.public_state.feedback,
}
}
fn get_mut_feedback(&mut self) -> &mut ReplicationFeedback {
match self {
WalSenderState::Vanilla(state) => &mut state.feedback,
WalSenderState::Interpreted(state) => &mut state.public_state.feedback,
}
}
}
impl WalSendersShared {
    /// Empty state: no slots, zeroed aggregates.
    fn new() -> Self {
        WalSendersShared {
            agg_standby_feedback: StandbyFeedback::empty(),
            last_ps_feedback: PageserverFeedback::empty(),
            ps_feedback_counter: 0,
            ps_corruption_detected: false,
            slots: Vec::new(),
        }
    }
    /// Get content of provided id slot, it must exist.
    fn get_slot(&self, id: WalSenderId) -> &WalSenderState {
        self.slots[id].as_ref().expect("walsender doesn't exist")
    }
    /// Get mut content of provided id slot, it must exist.
    fn get_slot_mut(&mut self, id: WalSenderId) -> &mut WalSenderState {
        self.slots[id].as_mut().expect("walsender doesn't exist")
    }
    /// Update aggregated hot standby and normal reply feedbacks. We just take min of valid xmins
    /// and ts.
    ///
    /// Recomputes the aggregate from scratch over all occupied slots; invalid
    /// sentinel values (INVALID_FULL_TRANSACTION_ID, Lsn::INVALID, ts 0) are
    /// skipped rather than participating in the min.
    fn update_reply_feedback(&mut self) {
        let mut agg = HotStandbyFeedback::empty();
        let mut reply_agg = StandbyReply::empty();
        for ws_state in self.slots.iter().flatten() {
            if let ReplicationFeedback::Standby(standby_feedback) = ws_state.get_feedback() {
                let hs_feedback = standby_feedback.hs_feedback;
                // doing Option math like op1.iter().chain(op2.iter()).min()
                // would be nicer, but we serialize/deserialize this struct
                // directly, so leave as is for now
                if hs_feedback.xmin != INVALID_FULL_TRANSACTION_ID {
                    if agg.xmin != INVALID_FULL_TRANSACTION_ID {
                        agg.xmin = min(agg.xmin, hs_feedback.xmin);
                    } else {
                        agg.xmin = hs_feedback.xmin;
                    }
                    agg.ts = max(agg.ts, hs_feedback.ts);
                }
                if hs_feedback.catalog_xmin != INVALID_FULL_TRANSACTION_ID {
                    if agg.catalog_xmin != INVALID_FULL_TRANSACTION_ID {
                        agg.catalog_xmin = min(agg.catalog_xmin, hs_feedback.catalog_xmin);
                    } else {
                        agg.catalog_xmin = hs_feedback.catalog_xmin;
                    }
                    agg.ts = max(agg.ts, hs_feedback.ts);
                }
                let reply = standby_feedback.reply;
                if reply.write_lsn != Lsn::INVALID {
                    if reply_agg.write_lsn != Lsn::INVALID {
                        reply_agg.write_lsn = Lsn::min(reply_agg.write_lsn, reply.write_lsn);
                    } else {
                        reply_agg.write_lsn = reply.write_lsn;
                    }
                }
                if reply.flush_lsn != Lsn::INVALID {
                    if reply_agg.flush_lsn != Lsn::INVALID {
                        reply_agg.flush_lsn = Lsn::min(reply_agg.flush_lsn, reply.flush_lsn);
                    } else {
                        reply_agg.flush_lsn = reply.flush_lsn;
                    }
                }
                if reply.apply_lsn != Lsn::INVALID {
                    if reply_agg.apply_lsn != Lsn::INVALID {
                        reply_agg.apply_lsn = Lsn::min(reply_agg.apply_lsn, reply.apply_lsn);
                    } else {
                        reply_agg.apply_lsn = reply.apply_lsn;
                    }
                }
                if reply.reply_ts != 0 {
                    if reply_agg.reply_ts != 0 {
                        reply_agg.reply_ts = TimestampTz::min(reply_agg.reply_ts, reply.reply_ts);
                    } else {
                        reply_agg.reply_ts = reply.reply_ts;
                    }
                }
            }
        }
        self.agg_standby_feedback = StandbyFeedback {
            reply: reply_agg,
            hs_feedback: agg,
        };
    }
}
// id of the occupied slot in WalSenders to access it (and save in the
// WalSenderGuard). We could give Arc directly to the slot, but there is not
// much sense in that as values aggregation which is performed on each feedback
// receipt iterates over all walsenders.
pub type WalSenderId = usize;
/// Scope guard to access slot in WalSenders registry and unregister from it in
/// Drop.
pub struct WalSenderGuard {
    // Slot index inside the registry's slots vector.
    id: WalSenderId,
    // Registry the slot belongs to; kept alive for unregistration.
    walsenders: Arc<WalSenders>,
}
impl WalSenderGuard {
    /// Slot id of this walsender in the registry.
    pub fn id(&self) -> WalSenderId {
        self.id
    }
    /// The registry this guard belongs to.
    pub fn walsenders(&self) -> &Arc<WalSenders> {
        &self.walsenders
    }
}
impl Drop for WalSenderGuard {
    // Free the slot and refresh aggregated feedback when the walsender ends.
    fn drop(&mut self) {
        self.walsenders.unregister(self.id);
    }
}
impl SafekeeperPostgresHandler {
    /// Wrapper around handle_start_replication_guts handling result. Error is
    /// handled here while we're still in walsender ttid span; with API
    /// extension, this can probably be moved into postgres_backend.
    pub async fn handle_start_replication<IO: AsyncRead + AsyncWrite + Unpin + Send>(
        &mut self,
        pgb: &mut PostgresBackend<IO>,
        start_pos: Lsn,
        term: Option<Term>,
    ) -> Result<(), QueryError> {
        let tli = self
            .global_timelines
            .get(self.ttid)
            .map_err(|e| QueryError::Other(e.into()))?;
        let residence_guard = tli.wal_residence_guard().await?;
        if let Err(end) = self
            .handle_start_replication_guts(pgb, start_pos, term, residence_guard)
            .await
        {
            let info = tli.get_safekeeper_info(&self.conf).await;
            // Log the result and probably send it to the client, closing the stream.
            // NOTE(review): the span labels `commit_lsn` with `info.flush_lsn`;
            // looks like a copy-paste slip — confirm whether it should use
            // `info.commit_lsn` instead.
            pgb.handle_copy_stream_end(end)
                .instrument(info_span!("", term=%info.term, last_log_term=%info.last_log_term, flush_lsn=%Lsn(info.flush_lsn), commit_lsn=%Lsn(info.flush_lsn)))
                .await;
        }
        Ok(())
    }
    /// Core of START_REPLICATION handling: registers a walsender slot, sets up
    /// the appropriate sender (vanilla WAL bytes or interpreted records), and
    /// runs sending and reply-reading concurrently until one side finishes or
    /// the timeline is cancelled.
    pub async fn handle_start_replication_guts<IO: AsyncRead + AsyncWrite + Unpin + Send>(
        &mut self,
        pgb: &mut PostgresBackend<IO>,
        start_pos: Lsn,
        term: Option<Term>,
        tli: WalResidentTimeline,
    ) -> Result<(), CopyStreamHandlerEnd> {
        let appname = self.appname.clone();
        // Use a guard object to remove our entry from the timeline when we are done.
        let ws_guard = match self.protocol() {
            PostgresClientProtocol::Vanilla => Arc::new(tli.get_walsenders().register(
                WalSenderState::Vanilla(VanillaWalSenderInternalState {
                    ttid: self.ttid,
                    addr: *pgb.get_peer_addr(),
                    conn_id: self.conn_id,
                    appname: self.appname.clone(),
                    feedback: ReplicationFeedback::Pageserver(PageserverFeedback::empty()),
                }),
            )),
            PostgresClientProtocol::Interpreted { .. } => Arc::new(tli.get_walsenders().register(
                WalSenderState::Interpreted(InterpretedWalSenderInternalState {
                    public_state: safekeeper_api::models::InterpretedWalSenderState {
                        ttid: self.ttid,
                        shard: self.shard.unwrap(),
                        addr: *pgb.get_peer_addr(),
                        conn_id: self.conn_id,
                        appname: self.appname.clone(),
                        feedback: ReplicationFeedback::Pageserver(PageserverFeedback::empty()),
                    },
                    interpreted_wal_reader: None,
                }),
            )),
        };
        // Walsender can operate in one of two modes which we select by
        // application_name: give only committed WAL (used by pageserver) or all
        // existing WAL (up to flush_lsn, used by walproposer or peer recovery).
        // The second case is always driven by a consensus leader which term
        // must be supplied.
        let end_watch = if term.is_some() {
            EndWatch::Flush(tli.get_term_flush_lsn_watch_rx())
        } else {
            EndWatch::Commit(tli.get_commit_lsn_watch_rx())
        };
        // we don't check term here; it will be checked on first waiting/WAL reading anyway.
        let end_pos = end_watch.get();
        if end_pos < start_pos {
            info!(
                "requested start_pos {} is ahead of available WAL end_pos {}",
                start_pos, end_pos
            );
        }
        info!(
            "starting streaming from {:?}, available WAL ends at {}, recovery={}, appname={:?}, protocol={:?}",
            start_pos,
            end_pos,
            matches!(end_watch, EndWatch::Flush(_)),
            appname,
            self.protocol(),
        );
        // switch to copy
        pgb.write_message(&BeMessage::CopyBothResponse).await?;
        let wal_reader = tli.get_walreader(start_pos).await?;
        // Split to concurrently receive and send data; replies are generally
        // not synchronized with sends, so this avoids deadlocks.
        let reader = pgb.split().context("START_REPLICATION split")?;
        let send_fut = match self.protocol() {
            PostgresClientProtocol::Vanilla => {
                let sender = WalSender {
                    pgb,
                    // should succeed since we're already holding another guard
                    tli: tli.wal_residence_guard().await?,
                    appname: appname.clone(),
                    start_pos,
                    end_pos,
                    term,
                    end_watch,
                    ws_guard: ws_guard.clone(),
                    wal_reader,
                    send_buf: vec![0u8; MAX_SEND_SIZE],
                };
                FutureExt::boxed(sender.run())
            }
            PostgresClientProtocol::Interpreted {
                format,
                compression,
            } => {
                let pg_version =
                    PgMajorVersion::try_from(tli.tli.get_state().await.1.server.pg_version)
                        .unwrap();
                let end_watch_view = end_watch.view();
                let wal_residence_guard = tli.wal_residence_guard().await?;
                let (tx, rx) = tokio::sync::mpsc::channel::<Batch>(2);
                let shard = self.shard.unwrap();
                // With fanout enabled, several shards may share one reader task;
                // otherwise each walsender drives its own reader inline.
                if self.conf.wal_reader_fanout && !shard.is_unsharded() {
                    let ws_id = ws_guard.id();
                    ws_guard.walsenders().create_or_update_interpreted_reader(
                        ws_id,
                        start_pos,
                        self.conf.max_delta_for_fanout,
                        {
                            let tx = tx.clone();
                            |reader| {
                                tracing::info!(
                                    "Fanning out interpreted wal reader at {}",
                                    start_pos
                                );
                                reader
                                    .fanout(shard, tx, start_pos)
                                    .with_context(|| "Failed to fan out reader")
                            }
                        },
                        || {
                            tracing::info!("Spawning interpreted wal reader at {}", start_pos);
                            let wal_stream = StreamingWalReader::new(
                                wal_residence_guard,
                                term,
                                start_pos,
                                end_pos,
                                end_watch,
                                MAX_SEND_SIZE,
                            );
                            InterpretedWalReader::spawn(
                                wal_stream, start_pos, tx, shard, pg_version, &appname,
                            )
                        },
                    )?;
                    let sender = InterpretedWalSender {
                        format,
                        compression,
                        appname,
                        tli: tli.wal_residence_guard().await?,
                        start_lsn: start_pos,
                        pgb,
                        end_watch_view,
                        wal_sender_guard: ws_guard.clone(),
                        rx,
                    };
                    FutureExt::boxed(sender.run())
                } else {
                    let wal_reader = StreamingWalReader::new(
                        wal_residence_guard,
                        term,
                        start_pos,
                        end_pos,
                        end_watch,
                        MAX_SEND_SIZE,
                    );
                    let reader = InterpretedWalReader::new(
                        wal_reader, start_pos, tx, shard, pg_version, None,
                    );
                    let sender = InterpretedWalSender {
                        format,
                        compression,
                        appname: appname.clone(),
                        tli: tli.wal_residence_guard().await?,
                        start_lsn: start_pos,
                        pgb,
                        end_watch_view,
                        wal_sender_guard: ws_guard.clone(),
                        rx,
                    };
                    FutureExt::boxed(async move {
                        // Sender returns an Err on all code paths.
                        // If the sender finishes first, we will drop the reader future.
                        // If the reader finishes first, the sender will finish too since
                        // the wal sender has dropped.
                        let res = tokio::try_join!(sender.run(), reader.run(start_pos, &appname));
                        match res.map(|_| ()) {
                            Ok(_) => unreachable!("sender finishes with Err by convention"),
                            err_res => err_res,
                        }
                    })
                }
            }
        };
        let tli_cancel = tli.cancel.clone();
        let mut reply_reader = ReplyReader {
            reader,
            ws_guard: ws_guard.clone(),
            tli,
        };
        let res = tokio::select! {
            // todo: add read|write .context to these errors
            r = send_fut => r,
            r = reply_reader.run() => r,
            _ = tli_cancel.cancelled() => {
                return Err(CopyStreamHandlerEnd::Cancelled);
            }
        };
        let ws_state = ws_guard
            .walsenders
            .mutex
            .lock()
            .get_slot(ws_guard.id)
            .clone();
        info!(
            "finished streaming to {}, feedback={:?}",
            ws_state.get_addr(),
            ws_state.get_feedback(),
        );
        // Join pg backend back.
        pgb.unsplit(reply_reader.reader)?;
        res
    }
}
/// TODO(vlad): maybe lift this instead
/// Walsender streams either up to commit_lsn (normally) or flush_lsn in the
/// given term (recovery by walproposer or peer safekeeper).
#[derive(Clone)]
pub(crate) enum EndWatch {
    // Watch over commit_lsn: stream only committed WAL.
    Commit(Receiver<Lsn>),
    // Watch over (term, flush_lsn): stream all written WAL, term-checked.
    Flush(Receiver<TermLsn>),
}
impl EndWatch {
    /// Read-only snapshot-capable view of this watch (cloned receiver).
    pub(crate) fn view(&self) -> EndWatchView {
        EndWatchView(self.clone())
    }
    /// Get current end of WAL.
    pub(crate) fn get(&self) -> Lsn {
        match self {
            EndWatch::Commit(r) => *r.borrow(),
            EndWatch::Flush(r) => r.borrow().lsn,
        }
    }
    /// Wait for the update.
    pub(crate) async fn changed(&mut self) -> anyhow::Result<()> {
        match self {
            EndWatch::Commit(r) => r.changed().await?,
            EndWatch::Flush(r) => r.changed().await?,
        }
        Ok(())
    }
    /// Wait until the end of WAL advances strictly past `lsn`, returning the
    /// new end position. For Flush watches, bails out if the term moves away
    /// from `client_term` while waiting.
    pub(crate) async fn wait_for_lsn(
        &mut self,
        lsn: Lsn,
        client_term: Option<Term>,
    ) -> anyhow::Result<Lsn> {
        loop {
            let end_pos = self.get();
            if end_pos > lsn {
                return Ok(end_pos);
            }
            if let EndWatch::Flush(rx) = &self {
                let curr_term = rx.borrow().term;
                if let Some(client_term) = client_term {
                    if curr_term != client_term {
                        bail!("term changed: requested {}, now {}", client_term, curr_term);
                    }
                }
            }
            self.changed().await?;
        }
    }
}
/// Read-only wrapper around [`EndWatch`]: exposes only the current end of WAL.
pub(crate) struct EndWatchView(EndWatch);
impl EndWatchView {
    /// Current end of WAL (commit_lsn or flush_lsn depending on watch kind).
    pub(crate) fn get(&self) -> Lsn {
        self.0.get()
    }
}
/// A half driving sending WAL.
struct WalSender<'a, IO> {
    // Sending half of the split postgres backend connection.
    pgb: &'a mut PostgresBackend<IO>,
    // Residence guard keeping WAL files on local disk while streaming.
    tli: WalResidentTimeline,
    // Client application_name, used for logging/metrics labels.
    appname: Option<String>,
    // Position since which we are sending next chunk.
    start_pos: Lsn,
    // WAL up to this position is known to be locally available.
    // Usually this is the same as the latest commit_lsn, but in case of
    // walproposer recovery, this is flush_lsn.
    //
    // We send this LSN to the receiver as wal_end, so that it knows how much
    // WAL this safekeeper has. This LSN should be as fresh as possible.
    end_pos: Lsn,
    /// When streaming uncommitted part, the term the client acts as the leader
    /// in. Streaming is stopped if local term changes to a different (higher)
    /// value.
    term: Option<Term>,
    /// Watch channel receiver to learn end of available WAL (and wait for its advancement).
    end_watch: EndWatch,
    // Registry slot guard; slot freed automatically on drop.
    ws_guard: Arc<WalSenderGuard>,
    // Reader over local WAL files.
    wal_reader: WalReader,
    // buffer for reading WAL into to send it
    send_buf: Vec<u8>,
}
// How long to wait for state changes before sending a keepalive-style poll.
const POLL_STATE_TIMEOUT: Duration = Duration::from_secs(1);
impl<IO: AsyncRead + AsyncWrite + Unpin> WalSender<'_, IO> {
/// Send WAL until
/// - an error occurs
/// - receiver is caughtup and there is no computes (if streaming up to commit_lsn)
/// - timeline's cancellation token fires
///
/// Err(CopyStreamHandlerEnd) is always returned; Result is used only for ?
/// convenience.
async fn run(mut self) -> Result<(), CopyStreamHandlerEnd> {
let metric = WAL_READERS
.get_metric_with_label_values(&[
"future",
self.appname.as_deref().unwrap_or("safekeeper"),
])
.unwrap();
metric.inc();
scopeguard::defer! {
metric.dec();
}
loop {
// Wait for the next portion if it is not there yet, or just
// update our end of WAL available for sending value, we
// communicate it to the receiver.
self.wait_wal().await?;
assert!(
self.end_pos > self.start_pos,
"nothing to send after waiting for WAL"
);
// try to send as much as available, capped by MAX_SEND_SIZE
let mut chunk_end_pos = self.start_pos + MAX_SEND_SIZE as u64;
// if we went behind available WAL, back off
if chunk_end_pos >= self.end_pos {
chunk_end_pos = self.end_pos;
} else {
// If sending not up to end pos, round down to page boundary to
// avoid breaking WAL record not at page boundary, as protocol
// demands. See walsender.c (XLogSendPhysical).
chunk_end_pos = chunk_end_pos
.checked_sub(chunk_end_pos.block_offset())
.unwrap();
}
let send_size = (chunk_end_pos.0 - self.start_pos.0) as usize;
let send_buf = &mut self.send_buf[..send_size];
let send_size: usize;
{
// If uncommitted part is being pulled, check that the term is
// still the expected one.
let _term_guard = if let Some(t) = self.term {
Some(self.tli.acquire_term(t).await?)
} else {
None
};
// Read WAL into buffer. send_size can be additionally capped to
// segment boundary here.
send_size = self.wal_reader.read(send_buf).await?
};
let send_buf = &send_buf[..send_size];
// and send it, while respecting Timeline::cancel
let msg = BeMessage::XLogData(XLogDataBody {
wal_start: self.start_pos.0,
wal_end: self.end_pos.0,
timestamp: get_current_timestamp(),
data: send_buf,
});
self.pgb.write_message(&msg).await?;
if let Some(appname) = &self.appname {
if appname == "replica" {
failpoint_support::sleep_millis_async!("sk-send-wal-replica-sleep");
}
}
trace!(
"sent {} bytes of WAL {}-{}",
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | true |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/src/control_file_upgrade.rs | safekeeper/src/control_file_upgrade.rs | //! Code to deal with safekeeper control file upgrades
use std::vec;
use anyhow::{Result, bail};
use postgres_versioninfo::PgVersionId;
use pq_proto::SystemId;
use safekeeper_api::membership::{Configuration, INVALID_GENERATION};
use safekeeper_api::{ServerInfo, Term};
use serde::{Deserialize, Serialize};
use tracing::*;
use utils::bin_ser::LeSer;
use utils::id::{NodeId, TenantId, TimelineId};
use utils::lsn::Lsn;
use crate::safekeeper::{AcceptorState, PgUuid, TermHistory, TermLsn};
use crate::state::{EvictionState, TimelinePersistentState};
use crate::wal_backup_partial;
/// Persistent consensus state of the acceptor.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
struct AcceptorStateV1 {
/// acceptor's last term it voted for (advanced in 1 phase)
term: Term,
/// acceptor's epoch (advanced, i.e. bumped to 'term' when VCL is reached).
epoch: Term,
}
/// On-disk safekeeper state, control file format version 1.
///
/// NOTE: `LeSer` encodes fields in declaration order (the roundtrip tests
/// below pin the exact byte layout), so fields must never be reordered.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
struct SafeKeeperStateV1 {
    /// persistent acceptor state
    acceptor_state: AcceptorStateV1,
    /// information about server
    server: ServerInfoV2,
    /// Unique id of the last *elected* proposer we dealt with. Not needed
    /// for correctness, exists for monitoring purposes.
    proposer_uuid: PgUuid,
    /// part of WAL acknowledged by quorum and available locally
    commit_lsn: Lsn,
    /// minimal LSN which may be needed for recovery of some safekeeper (end_lsn
    /// of last record streamed to everyone)
    truncate_lsn: Lsn,
    // Safekeeper starts receiving WAL from this LSN, zeros before it ought to
    // be skipped during decoding.
    wal_start_lsn: Lsn,
}
/// Server identification as stored in control file versions 1 and 2.
///
/// Unlike `ServerInfoV3`, the tenant/timeline ids here are serialized as raw
/// bytes (no `#[serde(with = "hex")]`).
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct ServerInfoV2 {
    /// Postgres server version
    pub pg_version: PgVersionId,
    pub system_id: SystemId,
    pub tenant_id: TenantId,
    pub timeline_id: TimelineId,
    pub wal_seg_size: u32,
}
/// On-disk safekeeper state, control file format version 2.
///
/// Differs from V1 by storing a full `AcceptorState` term history instead of
/// a single epoch. Field order is part of the on-disk `LeSer` layout.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct SafeKeeperStateV2 {
    /// persistent acceptor state
    pub acceptor_state: AcceptorState,
    /// information about server
    pub server: ServerInfoV2,
    /// Unique id of the last *elected* proposer we dealt with. Not needed
    /// for correctness, exists for monitoring purposes.
    pub proposer_uuid: PgUuid,
    /// part of WAL acknowledged by quorum and available locally
    pub commit_lsn: Lsn,
    /// minimal LSN which may be needed for recovery of some safekeeper (end_lsn
    /// of last record streamed to everyone)
    pub truncate_lsn: Lsn,
    // Safekeeper starts receiving WAL from this LSN, zeros before it ought to
    // be skipped during decoding.
    pub wal_start_lsn: Lsn,
}
/// Server identification for control file version 3.
///
/// Differs from `ServerInfoV2` in that tenant/timeline ids are hex-encoded
/// via `#[serde(with = "hex")]`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct ServerInfoV3 {
    /// Postgres server version
    pub pg_version: PgVersionId,
    pub system_id: SystemId,
    #[serde(with = "hex")]
    pub tenant_id: TenantId,
    #[serde(with = "hex")]
    pub timeline_id: TimelineId,
    pub wal_seg_size: u32,
}
/// On-disk safekeeper state, control file format version 3.
///
/// Same shape as V2 but ids (and `proposer_uuid`) are hex-encoded on disk.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct SafeKeeperStateV3 {
    /// persistent acceptor state
    pub acceptor_state: AcceptorState,
    /// information about server
    pub server: ServerInfoV3,
    /// Unique id of the last *elected* proposer we dealt with. Not needed
    /// for correctness, exists for monitoring purposes.
    #[serde(with = "hex")]
    pub proposer_uuid: PgUuid,
    /// part of WAL acknowledged by quorum and available locally
    pub commit_lsn: Lsn,
    /// minimal LSN which may be needed for recovery of some safekeeper (end_lsn
    /// of last record streamed to everyone)
    pub truncate_lsn: Lsn,
    // Safekeeper starts receiving WAL from this LSN, zeros before it ought to
    // be skipped during decoding.
    pub wal_start_lsn: Lsn,
}
/// On-disk safekeeper state, control file format version 4.
///
/// Moves tenant/timeline ids to the top level and replaces V3's
/// `truncate_lsn`/`wal_start_lsn` with `s3_wal_lsn`, `peer_horizon_lsn`,
/// `remote_consistent_lsn` and a `peers` list.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct SafeKeeperStateV4 {
    #[serde(with = "hex")]
    pub tenant_id: TenantId,
    #[serde(with = "hex")]
    pub timeline_id: TimelineId,
    /// persistent acceptor state
    pub acceptor_state: AcceptorState,
    /// information about server
    pub server: ServerInfo,
    /// Unique id of the last *elected* proposer we dealt with. Not needed
    /// for correctness, exists for monitoring purposes.
    #[serde(with = "hex")]
    pub proposer_uuid: PgUuid,
    /// Part of WAL acknowledged by quorum and available locally. Always points
    /// to record boundary.
    pub commit_lsn: Lsn,
    /// First LSN not yet offloaded to s3. Useful to persist to avoid finding
    /// out offloading progress on boot.
    pub s3_wal_lsn: Lsn,
    /// Minimal LSN which may be needed for recovery of some safekeeper (end_lsn
    /// of last record streamed to everyone). Persisting it helps skipping
    /// recovery in walproposer, generally we compute it from peers. In
    /// walproposer proto called 'truncate_lsn'.
    pub peer_horizon_lsn: Lsn,
    /// LSN of the oldest known checkpoint made by pageserver and successfully
    /// pushed to s3. We don't remove WAL beyond it. Persisted only for
    /// informational purposes, we receive it from pageserver (or broker).
    pub remote_consistent_lsn: Lsn,
    // Peers and their state as we remember it. Knowing peers themselves is
    // fundamental; but state is saved here only for informational purposes and
    // obviously can be stale. (Currently not saved at all, but let's provision
    // place to have less file version upgrades).
    pub peers: PersistedPeers,
}
/// On-disk safekeeper state, control file format version 7.
///
/// Adds `timeline_start_lsn`, `local_start_lsn` and `backup_lsn` relative to
/// V4 (and drops `s3_wal_lsn`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct SafeKeeperStateV7 {
    #[serde(with = "hex")]
    pub tenant_id: TenantId,
    #[serde(with = "hex")]
    pub timeline_id: TimelineId,
    /// persistent acceptor state
    pub acceptor_state: AcceptorState,
    /// information about server
    pub server: ServerInfo,
    /// Unique id of the last *elected* proposer we dealt with. Not needed
    /// for correctness, exists for monitoring purposes.
    #[serde(with = "hex")]
    pub proposer_uuid: PgUuid,
    /// Since which LSN this timeline generally starts. Safekeeper might have
    /// joined later.
    pub timeline_start_lsn: Lsn,
    /// Since which LSN safekeeper has (had) WAL for this timeline.
    /// All WAL segments next to one containing local_start_lsn are
    /// filled with data from the beginning.
    pub local_start_lsn: Lsn,
    /// Part of WAL acknowledged by quorum *and available locally*. Always points
    /// to record boundary.
    pub commit_lsn: Lsn,
    /// LSN that points to the end of the last backed up segment. Useful to
    /// persist to avoid finding out offloading progress on boot.
    pub backup_lsn: Lsn,
    /// Minimal LSN which may be needed for recovery of some safekeeper (end_lsn
    /// of last record streamed to everyone). Persisting it helps skipping
    /// recovery in walproposer, generally we compute it from peers. In
    /// walproposer proto called 'truncate_lsn'. Updates are currently driven
    /// only by walproposer.
    pub peer_horizon_lsn: Lsn,
    /// LSN of the oldest known checkpoint made by pageserver and successfully
    /// pushed to s3. We don't remove WAL beyond it. Persisted only for
    /// informational purposes, we receive it from pageserver (or broker).
    pub remote_consistent_lsn: Lsn,
    // Peers and their state as we remember it. Knowing peers themselves is
    // fundamental; but state is saved here only for informational purposes and
    // obviously can be stale. (Currently not saved at all, but let's provision
    // place to have less file version upgrades).
    pub peers: PersistedPeers,
}
/// Persistent information stored on safekeeper node about timeline.
/// On disk data is prefixed by magic and format version and followed by checksum.
///
/// Control file format version 8: identical to V7 plus the `partial_backup`
/// field at the end.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct SafeKeeperStateV8 {
    #[serde(with = "hex")]
    pub tenant_id: TenantId,
    #[serde(with = "hex")]
    pub timeline_id: TimelineId,
    /// persistent acceptor state
    pub acceptor_state: AcceptorState,
    /// information about server
    pub server: ServerInfo,
    /// Unique id of the last *elected* proposer we dealt with. Not needed
    /// for correctness, exists for monitoring purposes.
    #[serde(with = "hex")]
    pub proposer_uuid: PgUuid,
    /// Since which LSN this timeline generally starts. Safekeeper might have
    /// joined later.
    pub timeline_start_lsn: Lsn,
    /// Since which LSN safekeeper has (had) WAL for this timeline.
    /// All WAL segments next to one containing local_start_lsn are
    /// filled with data from the beginning.
    pub local_start_lsn: Lsn,
    /// Part of WAL acknowledged by quorum *and available locally*. Always points
    /// to record boundary.
    pub commit_lsn: Lsn,
    /// LSN that points to the end of the last backed up segment. Useful to
    /// persist to avoid finding out offloading progress on boot.
    pub backup_lsn: Lsn,
    /// Minimal LSN which may be needed for recovery of some safekeeper (end_lsn
    /// of last record streamed to everyone). Persisting it helps skipping
    /// recovery in walproposer, generally we compute it from peers. In
    /// walproposer proto called 'truncate_lsn'. Updates are currently driven
    /// only by walproposer.
    pub peer_horizon_lsn: Lsn,
    /// LSN of the oldest known checkpoint made by pageserver and successfully
    /// pushed to s3. We don't remove WAL beyond it. Persisted only for
    /// informational purposes, we receive it from pageserver (or broker).
    pub remote_consistent_lsn: Lsn,
    /// Peers and their state as we remember it. Knowing peers themselves is
    /// fundamental; but state is saved here only for informational purposes and
    /// obviously can be stale. (Currently not saved at all, but let's provision
    /// place to have less file version upgrades).
    pub peers: PersistedPeers,
    /// Holds names of partial segments uploaded to remote storage. Used to
    /// clean up old objects without leaving garbage in remote storage.
    pub partial_backup: wal_backup_partial::State,
}
/// List of peers and their per-peer persisted state, keyed by node id.
/// Stored in the control file for informational purposes only.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct PersistedPeers(pub Vec<(NodeId, PersistedPeerInfo)>);
/// Per-peer state as remembered in the control file (can be stale).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct PersistedPeerInfo {
    /// LSN up to which safekeeper offloaded WAL to s3.
    pub backup_lsn: Lsn,
    /// Term of the last entry.
    pub term: Term,
    /// LSN of the last record.
    pub flush_lsn: Lsn,
    /// Up to which LSN safekeeper regards its WAL as committed.
    pub commit_lsn: Lsn,
}
impl PersistedPeerInfo {
pub fn new() -> Self {
Self {
backup_lsn: Lsn::INVALID,
term: safekeeper_api::INITIAL_TERM,
flush_lsn: Lsn(0),
commit_lsn: Lsn(0),
}
}
}
// make clippy happy
impl Default for PersistedPeerInfo {
fn default() -> Self {
Self::new()
}
}
/// Note: SafekeeperStateVn is old name for TimelinePersistentStateVn.
///
/// Control file format version 9: identical to V8 plus the trailing
/// `eviction_state` field.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct TimelinePersistentStateV9 {
    #[serde(with = "hex")]
    pub tenant_id: TenantId,
    #[serde(with = "hex")]
    pub timeline_id: TimelineId,
    /// persistent acceptor state
    pub acceptor_state: AcceptorState,
    /// information about server
    pub server: ServerInfo,
    /// Unique id of the last *elected* proposer we dealt with. Not needed
    /// for correctness, exists for monitoring purposes.
    #[serde(with = "hex")]
    pub proposer_uuid: PgUuid,
    /// Since which LSN this timeline generally starts. Safekeeper might have
    /// joined later.
    pub timeline_start_lsn: Lsn,
    /// Since which LSN safekeeper has (had) WAL for this timeline.
    /// All WAL segments next to one containing local_start_lsn are
    /// filled with data from the beginning.
    pub local_start_lsn: Lsn,
    /// Part of WAL acknowledged by quorum *and available locally*. Always points
    /// to record boundary.
    pub commit_lsn: Lsn,
    /// LSN that points to the end of the last backed up segment. Useful to
    /// persist to avoid finding out offloading progress on boot.
    pub backup_lsn: Lsn,
    /// Minimal LSN which may be needed for recovery of some safekeeper (end_lsn
    /// of last record streamed to everyone). Persisting it helps skipping
    /// recovery in walproposer, generally we compute it from peers. In
    /// walproposer proto called 'truncate_lsn'. Updates are currently driven
    /// only by walproposer.
    pub peer_horizon_lsn: Lsn,
    /// LSN of the oldest known checkpoint made by pageserver and successfully
    /// pushed to s3. We don't remove WAL beyond it. Persisted only for
    /// informational purposes, we receive it from pageserver (or broker).
    pub remote_consistent_lsn: Lsn,
    /// Peers and their state as we remember it. Knowing peers themselves is
    /// fundamental; but state is saved here only for informational purposes and
    /// obviously can be stale. (Currently not saved at all, but let's provision
    /// place to have less file version upgrades).
    pub peers: PersistedPeers,
    /// Holds names of partial segments uploaded to remote storage. Used to
    /// clean up old objects without leaving garbage in remote storage.
    pub partial_backup: wal_backup_partial::State,
    /// Eviction state of the timeline. If it's Offloaded, we should download
    /// WAL files from remote storage to serve the timeline.
    pub eviction_state: EvictionState,
}
/// Deserializes an old-format control file (`buf`) written with the given
/// on-disk `version` and migrates it to the current `TimelinePersistentState`.
///
/// Fields unknown to the old format are filled with neutral defaults: zero
/// LSNs, empty membership configuration, default partial-backup state,
/// `Present` eviction state and `UNIX_EPOCH` creation timestamp.
///
/// # Errors
/// Fails if deserialization fails or `version` is unsupported.
pub fn upgrade_control_file(buf: &[u8], version: u32) -> Result<TimelinePersistentState> {
    // migrate to storing full term history
    if version == 1 {
        info!("reading safekeeper control file version {version}");
        let oldstate = SafeKeeperStateV1::des(buf)?;
        // The single V1 epoch becomes the sole entry of the new term history;
        // its start LSN is unknown, so record it as Lsn(0).
        let ac = AcceptorState {
            term: oldstate.acceptor_state.term,
            term_history: TermHistory(vec![TermLsn {
                term: oldstate.acceptor_state.epoch,
                lsn: Lsn(0),
            }]),
        };
        return Ok(TimelinePersistentState {
            tenant_id: oldstate.server.tenant_id,
            timeline_id: oldstate.server.timeline_id,
            mconf: Configuration::empty(),
            acceptor_state: ac,
            server: ServerInfo {
                pg_version: oldstate.server.pg_version,
                system_id: oldstate.server.system_id,
                wal_seg_size: oldstate.server.wal_seg_size,
            },
            proposer_uuid: oldstate.proposer_uuid,
            timeline_start_lsn: Lsn(0),
            local_start_lsn: Lsn(0),
            commit_lsn: oldstate.commit_lsn,
            backup_lsn: Lsn(0),
            peer_horizon_lsn: oldstate.truncate_lsn,
            remote_consistent_lsn: Lsn(0),
            partial_backup: wal_backup_partial::State::default(),
            eviction_state: EvictionState::Present,
            creation_ts: std::time::SystemTime::UNIX_EPOCH,
        });
    // migrate to hexing some ids
    } else if version == 2 {
        info!("reading safekeeper control file version {version}");
        let oldstate = SafeKeeperStateV2::des(buf)?;
        let server = ServerInfo {
            pg_version: oldstate.server.pg_version,
            system_id: oldstate.server.system_id,
            wal_seg_size: oldstate.server.wal_seg_size,
        };
        return Ok(TimelinePersistentState {
            tenant_id: oldstate.server.tenant_id,
            timeline_id: oldstate.server.timeline_id,
            mconf: Configuration::empty(),
            acceptor_state: oldstate.acceptor_state,
            server,
            proposer_uuid: oldstate.proposer_uuid,
            timeline_start_lsn: Lsn(0),
            local_start_lsn: Lsn(0),
            commit_lsn: oldstate.commit_lsn,
            backup_lsn: Lsn(0),
            peer_horizon_lsn: oldstate.truncate_lsn,
            remote_consistent_lsn: Lsn(0),
            partial_backup: wal_backup_partial::State::default(),
            eviction_state: EvictionState::Present,
            creation_ts: std::time::SystemTime::UNIX_EPOCH,
        });
    // migrate to moving tenant_id/timeline_id to the top and adding some lsns
    } else if version == 3 {
        info!("reading safekeeper control file version {version}");
        let oldstate = SafeKeeperStateV3::des(buf)?;
        let server = ServerInfo {
            pg_version: oldstate.server.pg_version,
            system_id: oldstate.server.system_id,
            wal_seg_size: oldstate.server.wal_seg_size,
        };
        return Ok(TimelinePersistentState {
            tenant_id: oldstate.server.tenant_id,
            timeline_id: oldstate.server.timeline_id,
            mconf: Configuration::empty(),
            acceptor_state: oldstate.acceptor_state,
            server,
            proposer_uuid: oldstate.proposer_uuid,
            timeline_start_lsn: Lsn(0),
            local_start_lsn: Lsn(0),
            commit_lsn: oldstate.commit_lsn,
            backup_lsn: Lsn(0),
            peer_horizon_lsn: oldstate.truncate_lsn,
            remote_consistent_lsn: Lsn(0),
            partial_backup: wal_backup_partial::State::default(),
            eviction_state: EvictionState::Present,
            creation_ts: std::time::SystemTime::UNIX_EPOCH,
        });
    // migrate to having timeline_start_lsn
    } else if version == 4 {
        info!("reading safekeeper control file version {version}");
        let oldstate = SafeKeeperStateV4::des(buf)?;
        let server = ServerInfo {
            pg_version: oldstate.server.pg_version,
            system_id: oldstate.server.system_id,
            wal_seg_size: oldstate.server.wal_seg_size,
        };
        return Ok(TimelinePersistentState {
            tenant_id: oldstate.tenant_id,
            timeline_id: oldstate.timeline_id,
            mconf: Configuration::empty(),
            acceptor_state: oldstate.acceptor_state,
            server,
            proposer_uuid: oldstate.proposer_uuid,
            timeline_start_lsn: Lsn(0),
            local_start_lsn: Lsn(0),
            commit_lsn: oldstate.commit_lsn,
            backup_lsn: Lsn::INVALID,
            peer_horizon_lsn: oldstate.peer_horizon_lsn,
            remote_consistent_lsn: Lsn(0),
            partial_backup: wal_backup_partial::State::default(),
            eviction_state: EvictionState::Present,
            creation_ts: std::time::SystemTime::UNIX_EPOCH,
        });
    } else if version == 5 {
        info!("reading safekeeper control file version {version}");
        let mut oldstate = TimelinePersistentState::des(buf)?;
        if oldstate.timeline_start_lsn != Lsn(0) {
            return Ok(oldstate);
        }
        // set special timeline_start_lsn because we don't know the real one
        info!("setting timeline_start_lsn and local_start_lsn to Lsn(1)");
        oldstate.timeline_start_lsn = Lsn(1);
        oldstate.local_start_lsn = Lsn(1);
        return Ok(oldstate);
    } else if version == 6 {
        info!("reading safekeeper control file version {version}");
        let mut oldstate = TimelinePersistentState::des(buf)?;
        if oldstate.server.pg_version != PgVersionId::UNKNOWN {
            return Ok(oldstate);
        }
        // set pg_version to the default v14
        info!("setting pg_version to 140005");
        oldstate.server.pg_version = PgVersionId::from_full_pg_version(140005);
        return Ok(oldstate);
    } else if version == 7 {
        info!("reading safekeeper control file version {version}");
        let oldstate = SafeKeeperStateV7::des(buf)?;
        return Ok(TimelinePersistentState {
            tenant_id: oldstate.tenant_id,
            timeline_id: oldstate.timeline_id,
            mconf: Configuration::empty(),
            acceptor_state: oldstate.acceptor_state,
            server: oldstate.server,
            proposer_uuid: oldstate.proposer_uuid,
            timeline_start_lsn: oldstate.timeline_start_lsn,
            local_start_lsn: oldstate.local_start_lsn,
            commit_lsn: oldstate.commit_lsn,
            backup_lsn: oldstate.backup_lsn,
            peer_horizon_lsn: oldstate.peer_horizon_lsn,
            remote_consistent_lsn: oldstate.remote_consistent_lsn,
            partial_backup: wal_backup_partial::State::default(),
            eviction_state: EvictionState::Present,
            creation_ts: std::time::SystemTime::UNIX_EPOCH,
        });
    } else if version == 8 {
        info!("reading safekeeper control file version {version}");
        let oldstate = SafeKeeperStateV8::des(buf)?;
        return Ok(TimelinePersistentState {
            tenant_id: oldstate.tenant_id,
            timeline_id: oldstate.timeline_id,
            mconf: Configuration::empty(),
            acceptor_state: oldstate.acceptor_state,
            server: oldstate.server,
            proposer_uuid: oldstate.proposer_uuid,
            timeline_start_lsn: oldstate.timeline_start_lsn,
            local_start_lsn: oldstate.local_start_lsn,
            commit_lsn: oldstate.commit_lsn,
            backup_lsn: oldstate.backup_lsn,
            peer_horizon_lsn: oldstate.peer_horizon_lsn,
            remote_consistent_lsn: oldstate.remote_consistent_lsn,
            partial_backup: oldstate.partial_backup,
            eviction_state: EvictionState::Present,
            creation_ts: std::time::SystemTime::UNIX_EPOCH,
        });
    } else if version == 9 {
        info!("reading safekeeper control file version {version}");
        let oldstate = TimelinePersistentStateV9::des(buf)?;
        return Ok(TimelinePersistentState {
            tenant_id: oldstate.tenant_id,
            timeline_id: oldstate.timeline_id,
            mconf: Configuration::empty(),
            acceptor_state: oldstate.acceptor_state,
            server: oldstate.server,
            proposer_uuid: oldstate.proposer_uuid,
            timeline_start_lsn: oldstate.timeline_start_lsn,
            local_start_lsn: oldstate.local_start_lsn,
            commit_lsn: oldstate.commit_lsn,
            backup_lsn: oldstate.backup_lsn,
            peer_horizon_lsn: oldstate.peer_horizon_lsn,
            remote_consistent_lsn: oldstate.remote_consistent_lsn,
            partial_backup: oldstate.partial_backup,
            eviction_state: oldstate.eviction_state,
            creation_ts: std::time::SystemTime::UNIX_EPOCH,
        });
    }
    // TODO: persist the file back to the disk after upgrade
    // TODO: think about backward compatibility and rollbacks
    bail!("unsupported safekeeper control file version {version}")
}
// Used as a temp hack to make forward compatibility test work. Should be
// removed after PR adding v10 is merged.
/// Converts the current persistent state back into the V9 on-disk layout.
///
/// Only valid while the membership configuration is still unset, since V9
/// has no `mconf` field to carry it.
///
/// # Panics
/// Panics if `state.mconf.generation != INVALID_GENERATION`.
pub fn downgrade_v10_to_v9(state: &TimelinePersistentState) -> TimelinePersistentStateV9 {
    assert!(state.mconf.generation == INVALID_GENERATION);
    TimelinePersistentStateV9 {
        tenant_id: state.tenant_id,
        timeline_id: state.timeline_id,
        acceptor_state: state.acceptor_state.clone(),
        server: state.server.clone(),
        proposer_uuid: state.proposer_uuid,
        timeline_start_lsn: state.timeline_start_lsn,
        local_start_lsn: state.local_start_lsn,
        commit_lsn: state.commit_lsn,
        backup_lsn: state.backup_lsn,
        peer_horizon_lsn: state.peer_horizon_lsn,
        remote_consistent_lsn: state.remote_consistent_lsn,
        // V9 still carries a peers list; it is persisted empty (informational only).
        peers: PersistedPeers(vec![]),
        partial_backup: state.partial_backup.clone(),
        eviction_state: state.eviction_state,
    }
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use postgres_versioninfo::PgMajorVersion;
use utils::Hex;
use utils::id::NodeId;
use super::*;
use crate::control_file_upgrade::PersistedPeerInfo;
#[test]
fn roundtrip_v1() {
    // Fixture ids; proposer_uuid below is derived by byte-reversing timeline_id.
    let tenant_id = TenantId::from_str("cf0480929707ee75372337efaa5ecf96").unwrap();
    let timeline_id = TimelineId::from_str("112ded66422aa5e953e5440fa5427ac4").unwrap();
    let state = SafeKeeperStateV1 {
        acceptor_state: AcceptorStateV1 {
            term: 42,
            epoch: 43,
        },
        server: ServerInfoV2 {
            pg_version: PgVersionId::from(PgMajorVersion::PG14),
            system_id: 0x1234567887654321,
            tenant_id,
            timeline_id,
            wal_seg_size: 0x12345678,
        },
        proposer_uuid: {
            let mut arr = timeline_id.as_arr();
            arr.reverse();
            arr
        },
        commit_lsn: Lsn(1234567800),
        truncate_lsn: Lsn(123456780),
        wal_start_lsn: Lsn(1234567800 - 8),
    };
    let ser = state.ser().unwrap();
    // Historical on-disk V1 layout: LeSer writes fields little-endian in
    // declaration order. These bytes must never change.
    #[rustfmt::skip]
    let expected = [
        // term
        0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // epoch
        0x2b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // pg_version = 140000
        0xE0, 0x22, 0x02, 0x00,
        // system_id
        0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12,
        // tenant_id
        0xcf, 0x04, 0x80, 0x92, 0x97, 0x07, 0xee, 0x75, 0x37, 0x23, 0x37, 0xef, 0xaa, 0x5e, 0xcf, 0x96,
        // timeline_id
        0x11, 0x2d, 0xed, 0x66, 0x42, 0x2a, 0xa5, 0xe9, 0x53, 0xe5, 0x44, 0x0f, 0xa5, 0x42, 0x7a, 0xc4,
        // wal_seg_size
        0x78, 0x56, 0x34, 0x12,
        // proposer_uuid
        0xc4, 0x7a, 0x42, 0xa5, 0x0f, 0x44, 0xe5, 0x53, 0xe9, 0xa5, 0x2a, 0x42, 0x66, 0xed, 0x2d, 0x11,
        // commit_lsn
        0x78, 0x02, 0x96, 0x49, 0x00, 0x00, 0x00, 0x00,
        // truncate_lsn
        0x0c, 0xcd, 0x5b, 0x07, 0x00, 0x00, 0x00, 0x00,
        // wal_start_lsn
        0x70, 0x02, 0x96, 0x49, 0x00, 0x00, 0x00, 0x00,
    ];
    assert_eq!(Hex(&ser), Hex(&expected));
    // And deserialization must be the exact inverse.
    let deser = SafeKeeperStateV1::des(&ser).unwrap();
    assert_eq!(state, deser);
}
#[test]
fn roundtrip_v2() {
    // Same fixtures as roundtrip_v1, but with a full term history (V2 format).
    let tenant_id = TenantId::from_str("cf0480929707ee75372337efaa5ecf96").unwrap();
    let timeline_id = TimelineId::from_str("112ded66422aa5e953e5440fa5427ac4").unwrap();
    let state = SafeKeeperStateV2 {
        acceptor_state: AcceptorState {
            term: 42,
            term_history: TermHistory(vec![TermLsn {
                lsn: Lsn(0x1),
                term: 41,
            }]),
        },
        server: ServerInfoV2 {
            pg_version: PgVersionId::from(PgMajorVersion::PG14),
            system_id: 0x1234567887654321,
            tenant_id,
            timeline_id,
            wal_seg_size: 0x12345678,
        },
        proposer_uuid: {
            let mut arr = timeline_id.as_arr();
            arr.reverse();
            arr
        },
        commit_lsn: Lsn(1234567800),
        truncate_lsn: Lsn(123456780),
        wal_start_lsn: Lsn(1234567800 - 8),
    };
    let ser = state.ser().unwrap();
    // Historical on-disk V2 byte layout; must never change.
    let expected = [
        0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0xE0, 0x22, 0x02, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56,
        0x34, 0x12, 0xcf, 0x04, 0x80, 0x92, 0x97, 0x07, 0xee, 0x75, 0x37, 0x23, 0x37, 0xef,
        0xaa, 0x5e, 0xcf, 0x96, 0x11, 0x2d, 0xed, 0x66, 0x42, 0x2a, 0xa5, 0xe9, 0x53, 0xe5,
        0x44, 0x0f, 0xa5, 0x42, 0x7a, 0xc4, 0x78, 0x56, 0x34, 0x12, 0xc4, 0x7a, 0x42, 0xa5,
        0x0f, 0x44, 0xe5, 0x53, 0xe9, 0xa5, 0x2a, 0x42, 0x66, 0xed, 0x2d, 0x11, 0x78, 0x02,
        0x96, 0x49, 0x00, 0x00, 0x00, 0x00, 0x0c, 0xcd, 0x5b, 0x07, 0x00, 0x00, 0x00, 0x00,
        0x70, 0x02, 0x96, 0x49, 0x00, 0x00, 0x00, 0x00,
    ];
    assert_eq!(Hex(&ser), Hex(&expected));
    // And deserialization must be the exact inverse.
    let deser = SafeKeeperStateV2::des(&ser).unwrap();
    assert_eq!(state, deser);
}
#[test]
fn roundtrip_v3() {
    // Same fixtures; V3 hex-encodes tenant/timeline ids and proposer_uuid,
    // which is visible below as length-prefixed ASCII hex strings.
    let tenant_id = TenantId::from_str("cf0480929707ee75372337efaa5ecf96").unwrap();
    let timeline_id = TimelineId::from_str("112ded66422aa5e953e5440fa5427ac4").unwrap();
    let state = SafeKeeperStateV3 {
        acceptor_state: AcceptorState {
            term: 42,
            term_history: TermHistory(vec![TermLsn {
                lsn: Lsn(0x1),
                term: 41,
            }]),
        },
        server: ServerInfoV3 {
            pg_version: PgVersionId::from(PgMajorVersion::PG14),
            system_id: 0x1234567887654321,
            tenant_id,
            timeline_id,
            wal_seg_size: 0x12345678,
        },
        proposer_uuid: {
            let mut arr = timeline_id.as_arr();
            arr.reverse();
            arr
        },
        commit_lsn: Lsn(1234567800),
        truncate_lsn: Lsn(123456780),
        wal_start_lsn: Lsn(1234567800 - 8),
    };
    let ser = state.ser().unwrap();
    // Historical on-disk V3 byte layout; must never change.
    let expected = [
        0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0xE0, 0x22, 0x02, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56,
        0x34, 0x12, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x66, 0x30, 0x34,
        0x38, 0x30, 0x39, 0x32, 0x39, 0x37, 0x30, 0x37, 0x65, 0x65, 0x37, 0x35, 0x33, 0x37,
        0x32, 0x33, 0x33, 0x37, 0x65, 0x66, 0x61, 0x61, 0x35, 0x65, 0x63, 0x66, 0x39, 0x36,
        0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x31, 0x32, 0x64, 0x65, 0x64,
        0x36, 0x36, 0x34, 0x32, 0x32, 0x61, 0x61, 0x35, 0x65, 0x39, 0x35, 0x33, 0x65, 0x35,
        0x34, 0x34, 0x30, 0x66, 0x61, 0x35, 0x34, 0x32, 0x37, 0x61, 0x63, 0x34, 0x78, 0x56,
        0x34, 0x12, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x34, 0x37, 0x61,
        0x34, 0x32, 0x61, 0x35, 0x30, 0x66, 0x34, 0x34, 0x65, 0x35, 0x35, 0x33, 0x65, 0x39,
        0x61, 0x35, 0x32, 0x61, 0x34, 0x32, 0x36, 0x36, 0x65, 0x64, 0x32, 0x64, 0x31, 0x31,
        0x78, 0x02, 0x96, 0x49, 0x00, 0x00, 0x00, 0x00, 0x0c, 0xcd, 0x5b, 0x07, 0x00, 0x00,
        0x00, 0x00, 0x70, 0x02, 0x96, 0x49, 0x00, 0x00, 0x00, 0x00,
    ];
    assert_eq!(Hex(&ser), Hex(&expected));
    // And deserialization must be the exact inverse.
    let deser = SafeKeeperStateV3::des(&ser).unwrap();
    assert_eq!(state, deser);
}
#[test]
fn roundtrip_v4() {
let tenant_id = TenantId::from_str("cf0480929707ee75372337efaa5ecf96").unwrap();
let timeline_id = TimelineId::from_str("112ded66422aa5e953e5440fa5427ac4").unwrap();
let state = SafeKeeperStateV4 {
tenant_id,
timeline_id,
acceptor_state: AcceptorState {
term: 42,
term_history: TermHistory(vec![TermLsn {
lsn: Lsn(0x1),
term: 41,
}]),
},
server: ServerInfo {
pg_version: PgVersionId::from(PgMajorVersion::PG14),
system_id: 0x1234567887654321,
wal_seg_size: 0x12345678,
},
proposer_uuid: {
let mut arr = timeline_id.as_arr();
arr.reverse();
arr
},
peers: PersistedPeers(vec![(
NodeId(1),
PersistedPeerInfo {
backup_lsn: Lsn(1234567000),
term: 42,
flush_lsn: Lsn(1234567800 - 8),
commit_lsn: Lsn(1234567600),
},
)]),
commit_lsn: Lsn(1234567800),
s3_wal_lsn: Lsn(1234567300),
peer_horizon_lsn: Lsn(9999999),
remote_consistent_lsn: Lsn(1234560000),
};
let ser = state.ser().unwrap();
let expected = [
0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x66, 0x30, 0x34, 0x38, 0x30,
0x39, 0x32, 0x39, 0x37, 0x30, 0x37, 0x65, 0x65, 0x37, 0x35, 0x33, 0x37, 0x32, 0x33,
0x33, 0x37, 0x65, 0x66, 0x61, 0x61, 0x35, 0x65, 0x63, 0x66, 0x39, 0x36, 0x20, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x31, 0x32, 0x64, 0x65, 0x64, 0x36, 0x36,
0x34, 0x32, 0x32, 0x61, 0x61, 0x35, 0x65, 0x39, 0x35, 0x33, 0x65, 0x35, 0x34, 0x34,
0x30, 0x66, 0x61, 0x35, 0x34, 0x32, 0x37, 0x61, 0x63, 0x34, 0x2a, 0x00, 0x00, 0x00,
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | true |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/src/wal_storage.rs | safekeeper/src/wal_storage.rs | //! This module has everything to deal with WAL -- reading and writing to disk.
//!
//! Safekeeper WAL is stored in the timeline directory, in format similar to pg_wal.
//! PG timeline is always 1, so WAL segments usually have names like this:
//! - 000000010000000000000001
//! - 000000010000000000000002.partial
//!
//! Note that last file has `.partial` suffix, that's different from postgres.
use std::cmp::{max, min};
use std::future::Future;
use std::io::{ErrorKind, SeekFrom};
use std::pin::Pin;
use anyhow::{Context, Result, bail};
use bytes::Bytes;
use camino::{Utf8Path, Utf8PathBuf};
use futures::future::BoxFuture;
use postgres_ffi::v14::xlog_utils::{IsPartialXLogFileName, IsXLogFileName, XLogFromFileName};
use postgres_ffi::waldecoder::WalStreamDecoder;
use postgres_ffi::{PG_TLI, XLogFileName, XLogSegNo, dispatch_pgversion};
use postgres_versioninfo::{PgMajorVersion, PgVersionId};
use pq_proto::SystemId;
use remote_storage::RemotePath;
use std::sync::Arc;
use tokio::fs::{self, File, OpenOptions, remove_file};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeekExt, AsyncWriteExt};
use tracing::*;
use utils::crashsafe::durable_rename;
use utils::id::TenantTimelineId;
use utils::lsn::Lsn;
use crate::metrics::{
REMOVED_WAL_SEGMENTS, WAL_DISK_IO_ERRORS, WAL_STORAGE_OPERATION_SECONDS, WalStorageMetrics,
time_io_closure,
};
use crate::state::TimelinePersistentState;
use crate::wal_backup::{WalBackup, read_object, remote_timeline_path};
/// Interface for durable WAL storage of a single timeline.
///
/// Writes (`write_wal`) and flushes (`flush_wal`) are separate operations so
/// callers can batch several writes before paying the fsync cost (see
/// `PhysicalStorage` below for the on-disk implementation).
pub trait Storage {
    /// Last written LSN (not necessarily flushed yet).
    fn write_lsn(&self) -> Lsn;
    /// LSN of last durably stored WAL record.
    fn flush_lsn(&self) -> Lsn;
    /// Initialize segment by creating proper long header at the beginning of
    /// the segment and short header at the page of given LSN. This is only used
    /// for timeline initialization because compute will stream data only since
    /// init_lsn. Other segment headers are included in compute stream.
    fn initialize_first_segment(
        &mut self,
        init_lsn: Lsn,
    ) -> impl Future<Output = Result<()>> + Send;
    /// Write piece of WAL from buf to disk, but not necessarily sync it.
    fn write_wal(&mut self, startpos: Lsn, buf: &[u8]) -> impl Future<Output = Result<()>> + Send;
    /// Truncate WAL at specified LSN, which must be the end of WAL record.
    fn truncate_wal(&mut self, end_pos: Lsn) -> impl Future<Output = Result<()>> + Send;
    /// Durably store WAL on disk, up to the last written WAL record.
    fn flush_wal(&mut self) -> impl Future<Output = Result<()>> + Send;
    /// Remove all segments <= given segno. Returns function doing that as we
    /// want to perform it without timeline lock.
    fn remove_up_to(&self, segno_up_to: XLogSegNo) -> BoxFuture<'static, anyhow::Result<()>>;
    /// Release resources associated with the storage -- technically, close FDs.
    /// Currently we don't remove timelines until restart (#3146), so need to
    /// spare descriptors. This would be useful for temporary tli detach as
    /// well.
    fn close(&mut self) {}
    /// Get metrics for this timeline.
    fn get_metrics(&self) -> WalStorageMetrics;
}
/// PhysicalStorage is a storage that stores WAL on disk. Writes are separated from flushes
/// for better performance. Storage is initialized in the constructor.
///
/// WAL is stored in segments, each segment is a file. Last segment has ".partial" suffix in
/// its filename and may be not fully flushed.
///
/// Relationship of LSNs:
/// `write_lsn` >= `write_record_lsn` >= `flush_record_lsn`
///
/// When storage is created first time, all LSNs are zeroes and there are no segments on disk.
pub struct PhysicalStorage {
    /// Per-timeline WAL I/O metrics (write/flush durations and byte counts).
    metrics: WalStorageMetrics,
    /// Directory containing this timeline's WAL segment files.
    timeline_dir: Utf8PathBuf,
    /// Disables fsync if true.
    no_sync: bool,
    /// Size of WAL segment in bytes.
    wal_seg_size: usize,
    /// Postgres version, used for segment header generation and WAL decoding.
    pg_version: PgVersionId,
    /// Postgres system identifier, stamped into generated segment headers.
    system_id: u64,
    /// Written to disk, but possibly still in the cache and not fully persisted.
    /// Also can be ahead of record_lsn, if happen to be in the middle of a WAL record.
    write_lsn: Lsn,
    /// The LSN of the last WAL record written to disk. Still can be not fully
    /// flushed.
    ///
    /// Note: Normally it (and flush_record_lsn) is <= write_lsn, but after xlog
    /// switch ingest the reverse is true because we don't bump write_lsn up to
    /// the next segment: WAL stream from the compute doesn't have the gap and
    /// for simplicity / as a sanity check we disallow any non-sequential
    /// writes, so write zeros as is.
    ///
    /// Similar effect is in theory possible due to LSN alignment: if record
    /// ends at *2, decoder will report end lsn as *8 even though we haven't
    /// written these zeros yet. In practice compute likely never sends
    /// non-aligned chunks of data.
    write_record_lsn: Lsn,
    /// The last LSN flushed to disk. May be in the middle of a record.
    ///
    /// NB: when the rest of the system refers to `flush_lsn`, it usually
    /// actually refers to `flush_record_lsn`. This ambiguity can be dangerous
    /// and should be resolved.
    flush_lsn: Lsn,
    /// The LSN of the last WAL record flushed to disk.
    flush_record_lsn: Lsn,
    /// Decoder is required for detecting boundaries of WAL records.
    decoder: WalStreamDecoder,
    /// Cached open file for the last segment.
    ///
    /// If Some(file) is open, then it always:
    /// - has ".partial" suffix
    /// - points to write_lsn, so no seek is needed for writing
    /// - doesn't point to the end of the segment
    file: Option<File>,
    /// When true, WAL truncation potentially has been interrupted and we need
    /// to finish it before allowing WAL writes; see truncate_wal for details.
    /// In this case [`write_lsn`] can be less than actually written WAL on
    /// disk. In particular, there can be a case with unexpected .partial file.
    ///
    /// Imagine the following:
    /// - 000000010000000000000001
    ///   - it was fully written, but the last record is split between 2
    ///     segments
    ///   - after restart, `find_end_of_wal()` returned 0/1FFFFF0, which is in
    ///     the end of this segment
    ///   - `write_lsn`, `write_record_lsn` and `flush_record_lsn` were
    ///     initialized to 0/1FFFFF0
    /// - 000000010000000000000002.partial
    ///   - it has only 1 byte written, which is not enough to make a full WAL
    ///     record
    ///
    /// Partial segment 002 has no WAL records, and it will be removed by the
    /// next truncate_wal(). This flag will be set to false after the first
    /// successful truncate_wal() call.
    ///
    /// [`write_lsn`]: Self::write_lsn
    pending_wal_truncation: bool,
}
impl PhysicalStorage {
    /// Create new storage. If commit_lsn is not zero, flush_lsn is tried to be restored from
    /// the disk. Otherwise, all LSNs are set to zero.
    ///
    /// Fails on an unsupported postgres version, or if on-disk WAL ends before
    /// the control file's commit_lsn (potential data loss).
    pub fn new(
        ttid: &TenantTimelineId,
        timeline_dir: &Utf8Path,
        state: &TimelinePersistentState,
        no_sync: bool,
    ) -> Result<PhysicalStorage> {
        let wal_seg_size = state.server.wal_seg_size as usize;

        // Find out where stored WAL ends, starting at commit_lsn which is a
        // known recent record boundary (unless we don't have WAL at all).
        //
        // NB: find_end_of_wal MUST be backwards compatible with the previously
        // written WAL. If find_end_of_wal fails to read any WAL written by an
        // older version of the code, we could lose data forever.
        let write_lsn = if state.commit_lsn == Lsn(0) {
            Lsn(0)
        } else {
            let version = PgMajorVersion::try_from(state.server.pg_version).unwrap();

            dispatch_pgversion!(
                version,
                pgv::xlog_utils::find_end_of_wal(
                    timeline_dir.as_std_path(),
                    wal_seg_size,
                    state.commit_lsn,
                )?,
                bail!("unsupported postgres version: {}", version)
            )
        };

        // note: this assumes we fsync'ed whole datadir on start.
        let flush_lsn = write_lsn;

        debug!(
            "initialized storage for timeline {}, flush_lsn={}, commit_lsn={}, peer_horizon_lsn={}",
            ttid.timeline_id, flush_lsn, state.commit_lsn, state.peer_horizon_lsn,
        );

        if flush_lsn < state.commit_lsn {
            // note: can never happen. find_end_of_wal returns provided start_lsn
            // (state.commit_lsn in our case) if it doesn't find anything.
            bail!(
                "timeline {} potential data loss: flush_lsn {} by find_end_of_wal is less than commit_lsn {} from control file",
                ttid.timeline_id,
                flush_lsn,
                state.commit_lsn
            );
        }
        if flush_lsn < state.peer_horizon_lsn {
            warn!(
                "timeline {}: flush_lsn {} is less than cfile peer_horizon_lsn {}",
                ttid.timeline_id, flush_lsn, state.peer_horizon_lsn
            );
        }

        Ok(PhysicalStorage {
            metrics: WalStorageMetrics::default(),
            timeline_dir: timeline_dir.to_path_buf(),
            no_sync,
            wal_seg_size,
            pg_version: state.server.pg_version,
            system_id: state.server.system_id,
            write_lsn,
            write_record_lsn: write_lsn,
            flush_lsn,
            flush_record_lsn: flush_lsn,
            decoder: WalStreamDecoder::new(
                write_lsn,
                PgMajorVersion::try_from(state.server.pg_version).unwrap(),
            ),
            file: None,
            // Set on boot: we don't know whether the previous process left a
            // clean state, so the first truncate_wal() must run to completion
            // before any writes are allowed.
            pending_wal_truncation: true,
        })
    }

    /// Get all known state of the storage.
    ///
    /// Returns (write_lsn, write_record_lsn, flush_record_lsn, is_file_open).
    pub fn internal_state(&self) -> (Lsn, Lsn, Lsn, bool) {
        (
            self.write_lsn,
            self.write_record_lsn,
            self.flush_record_lsn,
            self.file.is_some(),
        )
    }

    /// Call fsync if config requires so.
    async fn fsync_file(&mut self, file: &File) -> Result<()> {
        if !self.no_sync {
            self.metrics
                .observe_flush_seconds(time_io_closure(file.sync_all()).await?);
        }
        Ok(())
    }

    /// Call fdatasync if config requires so.
    async fn fdatasync_file(&mut self, file: &File) -> Result<()> {
        if !self.no_sync {
            self.metrics
                .observe_flush_seconds(time_io_closure(file.sync_data()).await?);
        }
        Ok(())
    }

    /// Open or create WAL segment file. Caller must call seek to the wanted position.
    /// Returns `file` and `is_partial`.
    async fn open_or_create(&mut self, segno: XLogSegNo) -> Result<(File, bool)> {
        let (wal_file_path, wal_file_partial_path) =
            wal_file_paths(&self.timeline_dir, segno, self.wal_seg_size);

        // Try to open already completed segment
        if let Ok(file) = OpenOptions::new().write(true).open(&wal_file_path).await {
            Ok((file, false))
        } else if let Ok(file) = OpenOptions::new()
            .write(true)
            .open(&wal_file_partial_path)
            .await
        {
            // Try to open existing partial file
            Ok((file, true))
        } else {
            let _timer = WAL_STORAGE_OPERATION_SECONDS
                .with_label_values(&["initialize_segment"])
                .start_timer();
            // Create and fill new partial file
            //
            // We're using fdatasync during WAL writing, so file size must not
            // change; to this end it is filled with zeros here. To avoid using
            // half initialized segment, first bake it under tmp filename and
            // then rename.
            let tmp_path = self.timeline_dir.join("waltmp");
            let file: File = File::create(&tmp_path).await.with_context(|| {
                /* BEGIN_HADRON */
                WAL_DISK_IO_ERRORS.inc();
                /* END_HADRON */
                format!("Failed to open tmp wal file {:?}", &tmp_path)
            })?;

            fail::fail_point!("sk-zero-segment", |_| {
                info!("sk-zero-segment failpoint hit");
                Err(anyhow::anyhow!("failpoint: sk-zero-segment"))
            });

            file.set_len(self.wal_seg_size as u64).await?;

            if let Err(e) = durable_rename(&tmp_path, &wal_file_partial_path, !self.no_sync).await {
                // Probably rename succeeded, but fsync of it failed. Remove
                // the file then to avoid using it.
                remove_file(wal_file_partial_path)
                    .await
                    .or_else(utils::fs_ext::ignore_not_found)?;
                return Err(e.into());
            }
            Ok((file, true))
        }
    }

    /// Write WAL bytes, which are known to be located in a single WAL segment. Returns true if the
    /// segment was completed, closed, and flushed to disk.
    async fn write_in_segment(&mut self, segno: u64, xlogoff: usize, buf: &[u8]) -> Result<bool> {
        let mut file = if let Some(file) = self.file.take() {
            // Cached file already points at write_lsn, no seek needed.
            file
        } else {
            let (mut file, is_partial) = self.open_or_create(segno).await?;
            assert!(is_partial, "unexpected write into non-partial segment file");
            file.seek(SeekFrom::Start(xlogoff as u64)).await?;
            file
        };

        file.write_all(buf).await?;
        // Note: flush just ensures write above reaches the OS (this is not
        // needed in case of sync IO as Write::write there calls directly write
        // syscall, but needed in case of async). It does *not* fsyncs the file.
        file.flush().await?;

        if xlogoff + buf.len() == self.wal_seg_size {
            // If we reached the end of a WAL segment, flush and close it.
            self.fdatasync_file(&file).await?;

            // Rename partial file to completed file
            let (wal_file_path, wal_file_partial_path) =
                wal_file_paths(&self.timeline_dir, segno, self.wal_seg_size);
            fs::rename(wal_file_partial_path, wal_file_path).await?;
            Ok(true)
        } else {
            // otherwise, file can be reused later
            self.file = Some(file);
            Ok(false)
        }
    }

    /// Writes WAL to the segment files, until everything is written. If some segments
    /// are fully written, they are flushed to disk. The last (partial) segment can
    /// be flushed separately later.
    ///
    /// Updates `write_lsn` and `flush_lsn`.
    async fn write_exact(&mut self, pos: Lsn, mut buf: &[u8]) -> Result<()> {
        // TODO: this shouldn't be possible, except possibly with write_lsn == 0.
        // Rename this method to `append_exact`, and make it append-only, removing
        // the `pos` parameter and this check. For this reason, we don't update
        // `flush_lsn` here.
        if self.write_lsn != pos {
            // need to flush the file before discarding it
            if let Some(file) = self.file.take() {
                self.fdatasync_file(&file).await?;
            }
            self.write_lsn = pos;
        }

        while !buf.is_empty() {
            // Extract WAL location for this block
            let xlogoff = self.write_lsn.segment_offset(self.wal_seg_size);
            let segno = self.write_lsn.segment_number(self.wal_seg_size);

            // If crossing a WAL boundary, only write up until we reach wal segment size.
            let bytes_write = if xlogoff + buf.len() > self.wal_seg_size {
                self.wal_seg_size - xlogoff
            } else {
                buf.len()
            };

            let flushed = self
                .write_in_segment(segno, xlogoff, &buf[..bytes_write])
                .await
                /* BEGIN_HADRON */
                .inspect_err(|_| WAL_DISK_IO_ERRORS.inc())?;
            /* END_HADRON */
            self.write_lsn += bytes_write as u64;
            if flushed {
                // Segment was closed and fdatasync'ed: all bytes up to here are durable.
                self.flush_lsn = self.write_lsn;
            }
            buf = &buf[bytes_write..];
        }

        Ok(())
    }
}
impl Storage for PhysicalStorage {
    /// Last written LSN.
    fn write_lsn(&self) -> Lsn {
        self.write_lsn
    }

    /// flush_lsn returns LSN of last durably stored WAL record.
    ///
    /// TODO: flush_lsn() returns flush_record_lsn, but write_lsn() returns write_lsn: confusing.
    #[allow(clippy::misnamed_getters)]
    fn flush_lsn(&self) -> Lsn {
        self.flush_record_lsn
    }

    /// Write the long segment header and a short header at init_lsn's page,
    /// producing a valid-looking first segment for a fresh timeline.
    async fn initialize_first_segment(&mut self, init_lsn: Lsn) -> Result<()> {
        let _timer = WAL_STORAGE_OPERATION_SECONDS
            .with_label_values(&["initialize_first_segment"])
            .start_timer();

        let segno = init_lsn.segment_number(self.wal_seg_size);
        let (mut file, _) = self.open_or_create(segno).await?;
        let major_pg_version = PgMajorVersion::try_from(self.pg_version).unwrap();
        let wal_seg =
            postgres_ffi::generate_wal_segment(segno, self.system_id, major_pg_version, init_lsn)?;
        file.seek(SeekFrom::Start(0)).await?;
        file.write_all(&wal_seg).await?;
        file.flush().await?;
        info!("initialized segno {} at lsn {}", segno, init_lsn);
        // note: file is *not* fsynced
        Ok(())
    }

    /// Write WAL to disk.
    async fn write_wal(&mut self, startpos: Lsn, buf: &[u8]) -> Result<()> {
        // Disallow any non-sequential writes, which can result in gaps or overwrites.
        // If we need to move the pointer, use truncate_wal() instead.
        if self.write_lsn > startpos {
            bail!(
                "write_wal rewrites WAL written before, write_lsn={}, startpos={}",
                self.write_lsn,
                startpos
            );
        }
        if self.write_lsn < startpos && self.write_lsn != Lsn(0) {
            bail!(
                "write_wal creates gap in written WAL, write_lsn={}, startpos={}",
                self.write_lsn,
                startpos
            );
        }
        if self.pending_wal_truncation {
            bail!(
                "write_wal called with pending WAL truncation, write_lsn={}, startpos={}",
                self.write_lsn,
                startpos
            );
        }

        let write_seconds = time_io_closure(self.write_exact(startpos, buf)).await?;
        // WAL is written, updating write metrics
        self.metrics.observe_write_seconds(write_seconds);
        self.metrics.observe_write_bytes(buf.len());

        // Figure out the last record's end LSN and update `write_record_lsn`
        // (if we got a whole record). The write may also have closed and
        // flushed a segment, so update `flush_record_lsn` as well.
        if self.decoder.available() != startpos {
            info!(
                "restart decoder from {} to {}",
                self.decoder.available(),
                startpos,
            );
            let pg_version = self.decoder.pg_version;
            self.decoder = WalStreamDecoder::new(startpos, pg_version);
        }
        self.decoder.feed_bytes(buf);

        if self.write_record_lsn <= self.flush_lsn {
            // We may have flushed a previously written record.
            self.flush_record_lsn = self.write_record_lsn;
        }
        while let Some((lsn, _rec)) = self.decoder.poll_decode()? {
            self.write_record_lsn = lsn;
            if lsn <= self.flush_lsn {
                self.flush_record_lsn = lsn;
            }
        }

        Ok(())
    }

    /// fdatasync the cached open segment so all written records become durable.
    async fn flush_wal(&mut self) -> Result<()> {
        if self.flush_record_lsn == self.write_record_lsn {
            // no need to do extra flush
            return Ok(());
        }

        if let Some(unflushed_file) = self.file.take() {
            self.fdatasync_file(&unflushed_file)
                .await
                /* BEGIN_HADRON */
                .inspect_err(|_| WAL_DISK_IO_ERRORS.inc())?;
            /* END_HADRON */
            self.file = Some(unflushed_file);
        } else {
            // We have unflushed data (write_lsn != flush_lsn), but no file. This
            // shouldn't happen, since the segment is flushed on close.
            bail!(
                "unexpected unflushed data with no open file, write_lsn={}, flush_lsn={}",
                self.write_lsn,
                self.flush_record_lsn
            );
        }

        // everything is flushed now, let's update flush_lsn
        self.flush_lsn = self.write_lsn;
        self.flush_record_lsn = self.write_record_lsn;
        Ok(())
    }

    /// Truncate written WAL by removing all WAL segments after the given LSN.
    /// end_pos must point to the end of the WAL record.
    async fn truncate_wal(&mut self, end_pos: Lsn) -> Result<()> {
        let _timer = WAL_STORAGE_OPERATION_SECONDS
            .with_label_values(&["truncate_wal"])
            .start_timer();

        // Streaming must not create a hole, so truncate cannot be called on
        // non-written lsn.
        if self.write_record_lsn != Lsn(0) && end_pos > self.write_record_lsn {
            bail!(
                "truncate_wal called on non-written WAL, write_record_lsn={}, end_pos={}",
                self.write_record_lsn,
                end_pos
            );
        }

        // Quick exit if nothing to do and we know that the state is clean to
        // avoid writing up to 16 MiB of zeros on disk (this happens on each
        // connect).
        if !self.pending_wal_truncation
            && end_pos == self.write_lsn
            && end_pos == self.flush_record_lsn
        {
            return Ok(());
        }

        // Atomicity: we start with LSNs reset because once on disk deletion is
        // started it can't be reversed. However, we might crash/error in the
        // middle, leaving garbage above the truncation point. In theory,
        // concatenated with previous records it might form bogus WAL (though
        // very unlikely in practice because CRC would guard from that). To
        // protect, set pending_wal_truncation flag before beginning: it means
        // truncation must be retried and WAL writes are prohibited until it
        // succeeds. Flag is also set on boot because we don't know if the last
        // state was clean.
        //
        // Protocol (HandleElected before first AppendRequest) ensures we'll
        // always try to ensure clean truncation before any writes.
        self.pending_wal_truncation = true;

        self.write_lsn = end_pos;
        self.flush_lsn = end_pos;
        self.write_record_lsn = end_pos;
        self.flush_record_lsn = end_pos;

        // Close previously opened file, if any
        if let Some(unflushed_file) = self.file.take() {
            self.fdatasync_file(&unflushed_file).await?;
        }

        let xlogoff = end_pos.segment_offset(self.wal_seg_size);
        let segno = end_pos.segment_number(self.wal_seg_size);

        // Remove all segments after the given LSN.
        remove_segments_from_disk(&self.timeline_dir, self.wal_seg_size, |x| x > segno).await?;

        let (file, is_partial) = self.open_or_create(segno).await?;

        // Fill end with zeroes: shrink to the truncation point, then grow back
        // to full segment size (fdatasync requires a stable file size).
        file.set_len(xlogoff as u64).await?;
        file.set_len(self.wal_seg_size as u64).await?;
        self.fsync_file(&file).await?;

        if !is_partial {
            // Make segment partial once again
            let (wal_file_path, wal_file_partial_path) =
                wal_file_paths(&self.timeline_dir, segno, self.wal_seg_size);
            fs::rename(wal_file_path, wal_file_partial_path).await?;
        }

        self.pending_wal_truncation = false;
        info!("truncated WAL to {}", end_pos);
        Ok(())
    }

    /// Build a future removing all segments <= segno_up_to; run without
    /// holding the timeline lock.
    fn remove_up_to(&self, segno_up_to: XLogSegNo) -> BoxFuture<'static, anyhow::Result<()>> {
        let timeline_dir = self.timeline_dir.clone();
        let wal_seg_size = self.wal_seg_size;
        Box::pin(async move {
            remove_segments_from_disk(&timeline_dir, wal_seg_size, |x| x <= segno_up_to).await
        })
    }

    fn close(&mut self) {
        // close happens in destructor
        let _open_file = self.file.take();
    }

    fn get_metrics(&self) -> WalStorageMetrics {
        self.metrics.clone()
    }
}
/// Delete every WAL segment file in `timeline_dir` whose segment number
/// satisfies `remove_predicate`; files that are not (possibly partial) XLOG
/// segments are left untouched.
async fn remove_segments_from_disk(
    timeline_dir: &Utf8Path,
    wal_seg_size: usize,
    remove_predicate: impl Fn(XLogSegNo) -> bool,
) -> Result<()> {
    let _timer = WAL_STORAGE_OPERATION_SECONDS
        .with_label_values(&["remove_segments_from_disk"])
        .start_timer();

    // Track the count and segno range of deleted segments for the summary log.
    let mut removed_count = 0;
    let mut lowest_segno = u64::MAX;
    let mut highest_segno = u64::MIN;

    let mut dir_iter = fs::read_dir(timeline_dir).await?;
    while let Some(dir_entry) = dir_iter.next_entry().await? {
        let path = dir_entry.path();
        let file_name = path.file_name().unwrap();

        // Skip anything that is not an XLOG segment.
        if !IsXLogFileName(file_name) && !IsPartialXLogFileName(file_name) {
            continue;
        }
        let (segno, _) = XLogFromFileName(file_name, wal_seg_size)?;
        if !remove_predicate(segno) {
            continue;
        }

        remove_file(path).await?;
        removed_count += 1;
        lowest_segno = lowest_segno.min(segno);
        highest_segno = highest_segno.max(segno);
        REMOVED_WAL_SEGMENTS.inc();
    }

    if removed_count > 0 {
        info!(
            "removed {} WAL segments [{}; {}]",
            removed_count, lowest_segno, highest_segno
        );
    }
    Ok(())
}
/// Sequential WAL reader for one timeline: serves bytes from local segment
/// files when available, falls back to remote storage otherwise, and
/// synthesizes a zero-padded first segment for timelines starting mid-segment.
pub struct WalReader {
    /// Remote prefix of this timeline, used to build remote segment paths.
    remote_path: RemotePath,
    /// Local directory with this timeline's segment files.
    timeline_dir: Utf8PathBuf,
    /// Size of WAL segment in bytes.
    wal_seg_size: usize,
    /// Current read position; advanced by `read()`.
    pos: Lsn,
    /// Cached reader for the current segment, positioned at `pos`; dropped at
    /// segment boundaries so the next read opens the following segment.
    wal_segment: Option<Pin<Box<dyn AsyncRead + Send + Sync>>>,
    // S3 will be used to read WAL if LSN is not available locally
    wal_backup: Arc<WalBackup>,
    // We don't have WAL locally if LSN is less than local_start_lsn
    local_start_lsn: Lsn,
    // We will respond with zero-ed bytes before this Lsn as long as
    // pos is in the same segment as timeline_start_lsn.
    timeline_start_lsn: Lsn,
    // integer version number of PostgreSQL, e.g. 14; 15; 16
    pg_version: PgMajorVersion,
    /// Postgres system identifier, used when generating the synthetic segment.
    system_id: SystemId,
    /// Lazily generated synthetic first segment, cached between `read()` calls.
    timeline_start_segment: Option<Bytes>,
}
impl WalReader {
    /// Create a reader positioned at `start_pos`.
    ///
    /// Fails if the persistent state is uninitialized, or if `start_pos` lies
    /// before the first segment of the timeline.
    pub fn new(
        ttid: &TenantTimelineId,
        timeline_dir: Utf8PathBuf,
        state: &TimelinePersistentState,
        start_pos: Lsn,
        wal_backup: Arc<WalBackup>,
    ) -> Result<Self> {
        if state.server.wal_seg_size == 0 || state.local_start_lsn == Lsn(0) {
            bail!("state uninitialized, no data to read");
        }

        // TODO: Upgrade to bail!() once we know this couldn't possibly happen
        if state.timeline_start_lsn == Lsn(0) {
            warn!("timeline_start_lsn uninitialized before initializing wal reader");
        }

        if start_pos
            < state
                .timeline_start_lsn
                .segment_lsn(state.server.wal_seg_size as usize)
        {
            bail!(
                "Requested streaming from {}, which is before the start of the timeline {}, and also doesn't start at the first segment of that timeline",
                start_pos,
                state.timeline_start_lsn
            );
        }

        Ok(Self {
            remote_path: remote_timeline_path(ttid)?,
            timeline_dir,
            wal_seg_size: state.server.wal_seg_size as usize,
            pos: start_pos,
            wal_segment: None,
            wal_backup,
            local_start_lsn: state.local_start_lsn,
            timeline_start_lsn: state.timeline_start_lsn,
            pg_version: PgMajorVersion::try_from(state.server.pg_version).unwrap(),
            system_id: state.server.system_id,
            timeline_start_segment: None,
        })
    }

    /// Read WAL at current position into provided buf, returns number of bytes
    /// read. It can be smaller than buf size only if segment boundary is
    /// reached.
    pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
        // If this timeline is new, we may not have a full segment yet, so
        // we pad the first bytes of the timeline's first WAL segment with 0s
        if self.pos < self.timeline_start_lsn {
            debug_assert_eq!(
                self.pos.segment_number(self.wal_seg_size),
                self.timeline_start_lsn.segment_number(self.wal_seg_size)
            );

            // All bytes after timeline_start_lsn are in WAL, but those before
            // are not, so we manually construct an empty segment for the bytes
            // not available in this timeline.
            if self.timeline_start_segment.is_none() {
                let it = postgres_ffi::generate_wal_segment(
                    self.timeline_start_lsn.segment_number(self.wal_seg_size),
                    self.system_id,
                    self.pg_version,
                    self.timeline_start_lsn,
                )?;
                self.timeline_start_segment = Some(it);
            }

            assert!(self.timeline_start_segment.is_some());
            let segment = self.timeline_start_segment.take().unwrap();
            let seg_bytes = &segment[..];

            // How much of the current segment have we already consumed?
            let pos_seg_offset = self.pos.segment_offset(self.wal_seg_size);

            // How many bytes may we consume in total?
            let tl_start_seg_offset = self.timeline_start_lsn.segment_offset(self.wal_seg_size);

            debug_assert!(seg_bytes.len() > pos_seg_offset);
            debug_assert!(seg_bytes.len() > tl_start_seg_offset);

            // Copy as many bytes as possible into the buffer
            let len = (tl_start_seg_offset - pos_seg_offset).min(buf.len());
            buf[0..len].copy_from_slice(&seg_bytes[pos_seg_offset..pos_seg_offset + len]);

            self.pos += len as u64;

            // If we're done with the segment, we can release its memory.
            // However, if we're not yet done, store it so that we don't have to
            // construct the segment the next time this function is called.
            if self.pos < self.timeline_start_lsn {
                self.timeline_start_segment = Some(segment);
            }

            return Ok(len);
        }

        let mut wal_segment = match self.wal_segment.take() {
            Some(reader) => reader,
            None => self.open_segment().await?,
        };

        // How much to read and send in message? We cannot cross the WAL file
        // boundary, and we don't want send more than provided buffer.
        let xlogoff = self.pos.segment_offset(self.wal_seg_size);
        let send_size = min(buf.len(), self.wal_seg_size - xlogoff);

        // Read some data from the file.
        let buf = &mut buf[0..send_size];
        let send_size = wal_segment.read_exact(buf).await?;
        self.pos += send_size as u64;

        // Decide whether to reuse this file. If we don't set wal_segment here
        // a new reader will be opened next time.
        if self.pos.segment_offset(self.wal_seg_size) != 0 {
            self.wal_segment = Some(wal_segment);
        }

        Ok(send_size)
    }

    /// Open WAL segment at the current position of the reader.
    async fn open_segment(&self) -> Result<Pin<Box<dyn AsyncRead + Send + Sync>>> {
        let xlogoff = self.pos.segment_offset(self.wal_seg_size);
        let segno = self.pos.segment_number(self.wal_seg_size);
        let wal_file_name = XLogFileName(PG_TLI, segno, self.wal_seg_size);

        // Try to open local file, if we may have WAL locally
        if self.pos >= self.local_start_lsn {
            let res = open_wal_file(&self.timeline_dir, segno, self.wal_seg_size).await?;
            if let Some((mut file, _)) = res {
                file.seek(SeekFrom::Start(xlogoff as u64)).await?;
                return Ok(Box::pin(file));
            } else {
                // NotFound is expected, fall through to remote read
            }
        }

        // Try to open remote file, if remote reads are enabled
        if let Some(storage) = self.wal_backup.get_storage() {
            let remote_wal_file_path = self.remote_path.join(&wal_file_name);
            return read_object(&storage, &remote_wal_file_path, xlogoff as u64).await;
        }

        bail!("WAL segment is not found")
    }
}
/// Helper function for opening WAL segment `segno` in `dir`. Returns file and
/// whether it is .partial.
pub(crate) async fn open_wal_file(
timeline_dir: &Utf8Path,
segno: XLogSegNo,
wal_seg_size: usize,
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | true |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/src/patch_control_file.rs | safekeeper/src/patch_control_file.rs | use std::sync::Arc;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tracing::info;
use crate::state::TimelinePersistentState;
use crate::timeline::Timeline;
/// Body of a control-file patch request.
#[derive(Deserialize, Debug, Clone)]
pub struct Request {
    /// JSON object with fields to update
    pub updates: serde_json::Value,
    /// List of fields to apply; each must exist both in `updates` and in the
    /// current control file.
    pub apply_fields: Vec<String>,
}
/// Result of a control-file patch: the persistent state before and after.
#[derive(Serialize)]
pub struct Response {
    /// Control file contents prior to the patch.
    pub old_control_file: TimelinePersistentState,
    /// Control file contents after the patch was applied and persisted.
    pub new_control_file: TimelinePersistentState,
}
/// Patch the timeline's control file with the given request: compute the
/// patched persistent state, log the transition, store the new state and
/// return both the previous and the resulting control file contents.
pub async fn handle_request(tli: Arc<Timeline>, request: Request) -> anyhow::Result<Response> {
    let response = tli
        .map_control_file(|state| {
            // Remember the pre-patch state so it can be reported back.
            let before = state.clone();
            let after = state_apply_diff(&before, &request)?;
            info!(
                "patching control file, old: {:?}, new: {:?}, patch: {:?}",
                before, after, request
            );
            *state = after.clone();
            Ok(Response {
                old_control_file: before,
                new_control_file: after,
            })
        })
        .await?;
    Ok(response)
}
/// Produce a new persistent state by overlaying the requested field updates
/// on top of `state`. Only the fields listed in `request.apply_fields` are
/// replaced; everything else is carried over unchanged.
fn state_apply_diff(
    state: &TimelinePersistentState,
    request: &Request,
) -> anyhow::Result<TimelinePersistentState> {
    // Round-trip through serde_json so fields can be swapped by name.
    let mut json_value = serde_json::to_value(state)?;
    match (&mut json_value, &request.updates) {
        (Value::Object(current), Value::Object(patch)) => {
            json_apply_diff(current, patch, &request.apply_fields)?;
        }
        (Value::Object(_), _) => anyhow::bail!("request.updates is not a json object"),
        _ => anyhow::bail!("TimelinePersistentState is not a json object"),
    }
    Ok(serde_json::from_value(json_value)?)
}
fn json_apply_diff(
object: &mut serde_json::Map<String, Value>,
updates: &serde_json::Map<String, Value>,
apply_keys: &Vec<String>,
) -> anyhow::Result<()> {
for key in apply_keys {
if let Some(new_value) = updates.get(key) {
if let Some(existing_value) = object.get_mut(key) {
*existing_value = new_value.clone();
} else {
anyhow::bail!("key not found in original object: {}", key);
}
} else {
anyhow::bail!("key not found in request.updates: {}", key);
}
}
Ok(())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/src/wal_backup_partial.rs | safekeeper/src/wal_backup_partial.rs | //! Safekeeper timeline has a background task which is subscribed to `commit_lsn`
//! and `flush_lsn` updates.
//!
//! After the partial segment was updated (`flush_lsn` was changed), the segment
//! will be uploaded to S3 within the configured `partial_backup_timeout`.
//!
//! The filename format for partial segments is
//! `Segment_Term_Flush_Commit_skNN.partial`, where:
//! - `Segment` – the segment name, like `000000010000000000000001`
//! - `Term` – current term
//! - `Flush` – flush_lsn in hex format `{:016X}`, e.g. `00000000346BC568`
//! - `Commit` – commit_lsn in the same hex format
//! - `NN` – safekeeper_id, like `1`
//!
//! The full object name example:
//! `000000010000000000000002_2_0000000002534868_0000000002534410_sk1.partial`
//!
//! Each safekeeper will keep info about remote partial segments in its control
//! file. Code updates state in the control file before doing any S3 operations.
//! This way control file stores information about all potentially existing
//! remote partial segments and can clean them up after uploading a newer version.
use std::sync::Arc;
use camino::Utf8PathBuf;
use postgres_ffi::{PG_TLI, XLogFileName, XLogSegNo};
use remote_storage::{GenericRemoteStorage, RemotePath};
use safekeeper_api::Term;
use serde::{Deserialize, Serialize};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, instrument, warn};
use utils::id::NodeId;
use utils::lsn::Lsn;
use crate::SafeKeeperConf;
use crate::metrics::{
MISC_OPERATION_SECONDS, PARTIAL_BACKUP_UPLOADED_BYTES, PARTIAL_BACKUP_UPLOADS,
};
use crate::rate_limit::{RateLimiter, rand_duration};
use crate::timeline::WalResidentTimeline;
use crate::timeline_manager::StateSnapshot;
use crate::wal_backup::{self};
/// Lifecycle state of a partial segment in remote storage, as recorded in the
/// control file.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum UploadStatus {
    /// Upload is in progress. This status should be used only for garbage collection,
    /// don't read data from the remote storage with this status.
    InProgress,
    /// Upload is finished. There is always at most one segment with this status.
    /// It means that the segment is actual and can be used.
    Uploaded,
    /// Deletion is in progress. This status should be used only for garbage collection,
    /// don't read data from the remote storage with this status.
    Deleting,
}
/// Description of one partial segment object in remote storage, tracked in
/// the control file.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct PartialRemoteSegment {
    /// Lifecycle state; only `Uploaded` entries should be read.
    pub status: UploadStatus,
    /// Remote object name in the `Segment_Term_Flush_Commit_skNN.partial`
    /// format described in the module docs.
    pub name: String,
    /// commit_lsn at the time the upload was prepared.
    pub commit_lsn: Lsn,
    /// flush_lsn at the time the upload was prepared.
    pub flush_lsn: Lsn,
    // We should use last_log_term here, otherwise it's possible to have inconsistent data in the
    // remote storage.
    //
    // More info here: https://github.com/neondatabase/neon/pull/8022#discussion_r1654738405
    pub term: Term,
}
impl PartialRemoteSegment {
    /// Compare all fields except `status`: two records describe the same
    /// remote object iff name, both LSNs and term all agree.
    fn eq_without_status(&self, other: &Self) -> bool {
        let same_name = self.name == other.name;
        let same_lsns = self.commit_lsn == other.commit_lsn && self.flush_lsn == other.flush_lsn;
        same_name && same_lsns && self.term == other.term
    }

    /// Full remote path of this partial segment under the timeline prefix.
    pub(crate) fn remote_path(&self, remote_timeline_path: &RemotePath) -> RemotePath {
        remote_timeline_path.join(&self.name)
    }
}
// NB: these structures are a part of a control_file, you can't change them without
// changing the control file format version.
/// Partial-backup state persisted in the control file: every partial segment
/// that may currently exist in remote storage for this timeline.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct State {
    /// Known (potentially existing) remote partial segments.
    pub segments: Vec<PartialRemoteSegment>,
}
/// Result of renaming the uploaded partial segment from one safekeeper's name
/// to another's; see `State::replace_uploaded_segment`.
#[derive(Debug)]
pub(crate) struct ReplaceUploadedSegment {
    // Segment record before the rename.
    pub(crate) previous: PartialRemoteSegment,
    // Segment record after the rename.
    pub(crate) current: PartialRemoteSegment,
}
impl State {
    /// Find an Uploaded segment. There should be only one Uploaded segment at a time.
    pub(crate) fn uploaded_segment(&self) -> Option<PartialRemoteSegment> {
        for seg in &self.segments {
            if seg.status == UploadStatus::Uploaded {
                return Some(seg.clone());
            }
        }
        None
    }

    /// Replace the name of the Uploaded segment (if one exists) in order to match
    /// it with `destination` safekeeper. Returns a description of the change or None
    /// wrapped in anyhow::Result.
    pub(crate) fn replace_uploaded_segment(
        &mut self,
        source: NodeId,
        destination: NodeId,
    ) -> anyhow::Result<Option<ReplaceUploadedSegment>> {
        // Locate the (at most one) Uploaded segment; nothing to do if absent.
        let Some(current) = self
            .segments
            .iter_mut()
            .find(|seg| seg.status == UploadStatus::Uploaded)
        else {
            return anyhow::Ok(None);
        };

        // Sanity check that the partial segment being replaced was uploaded
        // by the `source` safekeeper.
        let source_suffix = format!("sk{}.partial", source.0);
        if !current.name.ends_with(source_suffix.as_str()) {
            anyhow::bail!(
                "Partial segment name ({}) doesn't match self node id ({})",
                current.name,
                source
            );
        }

        let previous = current.clone();
        current.name = current.name.replace(
            format!("_sk{}", source.0).as_str(),
            format!("_sk{}", destination.0).as_str(),
        );

        anyhow::Ok(Some(ReplaceUploadedSegment {
            previous,
            current: current.clone(),
        }))
    }
}
/// Per-timeline partial-backup worker state.
pub struct PartialBackup {
    /// Size of WAL segment in bytes.
    wal_seg_size: usize,
    // Timeline handle (WAL-resident variant) used to read state and WAL.
    tli: WalResidentTimeline,
    /// Safekeeper configuration; `my_id` is embedded in uploaded object names.
    conf: SafeKeeperConf,
    /// Local timeline directory containing the .partial segment files.
    local_prefix: Utf8PathBuf,
    /// Remote prefix under which partial segments are uploaded.
    remote_timeline_path: RemotePath,
    storage: Arc<GenericRemoteStorage>,
    /// In-memory copy of the partial-backup state from the control file.
    state: State,
}
impl PartialBackup {
    /// Build a PartialBackup for the timeline, resuming from the
    /// partial-backup state recorded in the control file.
    pub async fn new(
        tli: WalResidentTimeline,
        conf: SafeKeeperConf,
        storage: Arc<GenericRemoteStorage>,
    ) -> PartialBackup {
        let (_, persistent_state) = tli.get_state().await;
        let wal_seg_size = tli.get_wal_seg_size().await;
        let local_prefix = tli.get_timeline_dir();
        let remote_timeline_path = tli.remote_path.clone();

        PartialBackup {
            wal_seg_size,
            tli,
            state: persistent_state.partial_backup,
            conf,
            local_prefix,
            remote_timeline_path,
            storage,
        }
    }

    // Read-only methods for getting segment names

    /// Segment number containing `lsn`.
    fn segno(&self, lsn: Lsn) -> XLogSegNo {
        lsn.segment_number(self.wal_seg_size)
    }

    /// Canonical WAL segment file name, e.g. `000000010000000000000001`.
    fn segment_name(&self, segno: u64) -> String {
        XLogFileName(PG_TLI, segno, self.wal_seg_size)
    }

    /// Remote object name: `Segment_Term_Flush_Commit_skNN.partial` (see the
    /// module docs for the format description).
    fn remote_segment_name(
        &self,
        segno: u64,
        term: u64,
        commit_lsn: Lsn,
        flush_lsn: Lsn,
    ) -> String {
        format!(
            "{}_{}_{:016X}_{:016X}_sk{}.partial",
            self.segment_name(segno),
            term,
            flush_lsn.0,
            commit_lsn.0,
            self.conf.my_id.0,
        )
    }

    /// Local partial segment file name: `<segment>.partial`.
    fn local_segment_name(&self, segno: u64) -> String {
        format!("{}.partial", self.segment_name(segno))
    }
}
impl PartialBackup {
    /// Takes a lock to read actual safekeeper state and returns a segment that should be uploaded.
    async fn prepare_upload(&self) -> PartialRemoteSegment {
        // this operation takes a lock to get the actual state
        let sk_info = self.tli.get_safekeeper_info(&self.conf).await;
        let flush_lsn = Lsn(sk_info.flush_lsn);
        let commit_lsn = Lsn(sk_info.commit_lsn);
        let last_log_term = sk_info.last_log_term;
        let segno = self.segno(flush_lsn);

        let name = self.remote_segment_name(segno, last_log_term, commit_lsn, flush_lsn);

        PartialRemoteSegment {
            status: UploadStatus::InProgress,
            name,
            commit_lsn,
            flush_lsn,
            term: last_log_term,
        }
    }

    /// Reads segment from disk and uploads it to the remote storage.
    async fn upload_segment(&mut self, prepared: PartialRemoteSegment) -> anyhow::Result<()> {
        let flush_lsn = prepared.flush_lsn;
        let segno = self.segno(flush_lsn);

        // We're going to backup bytes from the start of the segment up to flush_lsn.
        let backup_bytes = flush_lsn.segment_offset(self.wal_seg_size);

        let local_path = self.local_prefix.join(self.local_segment_name(segno));
        let remote_path = prepared.remote_path(&self.remote_timeline_path);

        // Upload first `backup_bytes` bytes of the segment to the remote storage.
        wal_backup::backup_partial_segment(&self.storage, &local_path, &remote_path, backup_bytes)
            .await?;
        PARTIAL_BACKUP_UPLOADED_BYTES.inc_by(backup_bytes as u64);

        // We uploaded the segment, now let's verify that the data is still actual.
        // If the term changed, we cannot guarantee the validity of the uploaded data.
        // If the term is the same, we know the data is not corrupted.
        let sk_info = self.tli.get_safekeeper_info(&self.conf).await;
        if sk_info.last_log_term != prepared.term {
            anyhow::bail!("term changed during upload");
        }
        // With an unchanged term these LSNs can only have advanced.
        assert!(prepared.commit_lsn <= Lsn(sk_info.commit_lsn));
        assert!(prepared.flush_lsn <= Lsn(sk_info.flush_lsn));

        Ok(())
    }

    /// Write new state to disk. If in-memory and on-disk states diverged, returns an error.
    async fn commit_state(&mut self, new_state: State) -> anyhow::Result<()> {
        self.tli
            .map_control_file(|cf| {
                if cf.partial_backup != self.state {
                    let memory = self.state.clone();
                    // Resync the in-memory copy from disk before failing, so a
                    // later retry starts from the authoritative on-disk state.
                    self.state = cf.partial_backup.clone();
                    anyhow::bail!(
                        "partial backup state diverged, memory={:?}, disk={:?}",
                        memory,
                        cf.partial_backup
                    );
                }

                cf.partial_backup = new_state.clone();
                Ok(())
            })
            .await?;
        // update in-memory state
        self.state = new_state;
        Ok(())
    }

    /// Upload the latest version of the partial segment and garbage collect older versions.
    #[instrument(name = "upload", skip_all, fields(name = %prepared.name))]
    async fn do_upload(&mut self, prepared: &PartialRemoteSegment) -> anyhow::Result<()> {
        let _timer = MISC_OPERATION_SECONDS
            .with_label_values(&["partial_do_upload"])
            .start_timer();
        info!("starting upload {:?}", prepared);

        let state_0 = self.state.clone();
        let state_1 = {
            let mut state = state_0.clone();
            state.segments.push(prepared.clone());
            state
        };

        // we're going to upload a new segment, let's write it to disk to make GC later
        self.commit_state(state_1).await?;

        self.upload_segment(prepared.clone()).await?;

        let state_2 = {
            let mut state = state_0.clone();
            // All previously-known segments are now superseded and scheduled
            // for deletion; only the freshly uploaded one stays Uploaded.
            for seg in state.segments.iter_mut() {
                seg.status = UploadStatus::Deleting;
            }
            let mut actual_remote_segment = prepared.clone();
            actual_remote_segment.status = UploadStatus::Uploaded;
            state.segments.push(actual_remote_segment);
            state
        };

        // we've uploaded new segment, it's actual, all other segments should be GCed
        self.commit_state(state_2).await?;
        self.gc().await?;

        Ok(())
    }

    // Prepend to the given segments remote prefix and delete them from the
    // remote storage.
    // Takes `&[String]` (rather than `&Vec<String>`) so any slice of names can
    // be passed; `&Vec<String>` at the call sites coerces to it.
    async fn delete_segments(&self, segments_to_delete: &[String]) -> anyhow::Result<()> {
        info!("deleting objects: {:?}", segments_to_delete);
        let mut objects_to_delete = vec![];
        for seg in segments_to_delete.iter() {
            let remote_path = self.remote_timeline_path.join(seg);
            objects_to_delete.push(remote_path);
        }
        wal_backup::delete_objects(&self.storage, &objects_to_delete).await
    }

    /// Delete all non-Uploaded segments from the remote storage. There should be only one
    /// Uploaded segment at a time.
    #[instrument(name = "gc", skip_all)]
    async fn gc(&mut self) -> anyhow::Result<()> {
        let mut segments_to_delete = vec![];

        // Partition current state: keep Uploaded segments, collect the names
        // of everything else for deletion.
        let new_segments: Vec<PartialRemoteSegment> = self
            .state
            .segments
            .iter()
            .filter_map(|seg| {
                if seg.status == UploadStatus::Uploaded {
                    Some(seg.clone())
                } else {
                    segments_to_delete.push(seg.name.clone());
                    None
                }
            })
            .collect();

        if new_segments.len() == 1 {
            // we have an uploaded segment, it must not be deleted from remote storage
            segments_to_delete.retain(|name| name != &new_segments[0].name);
        } else {
            // there should always be zero or one uploaded segment
            assert!(
                new_segments.is_empty(),
                "too many uploaded segments: {new_segments:?}"
            );
        }

        // execute the deletion
        self.delete_segments(&segments_to_delete).await?;

        // now we can update the state on disk
        let new_state = {
            let mut state = self.state.clone();
            state.segments = new_segments;
            state
        };
        self.commit_state(new_state).await?;
        Ok(())
    }

    /// Remove uploaded segment(s) from the state and remote storage. Aimed for
    /// manual intervention, not normally needed.
    /// Returns list of segments which potentially existed in the remote storage.
    pub async fn reset(&mut self) -> anyhow::Result<Vec<String>> {
        let segments_to_delete: Vec<String> = self
            .state
            .segments
            .iter()
            .map(|seg| seg.name.clone())
            .collect();

        // First reset cfile state, and only then objects themselves. If the
        // later fails we might leave some garbage behind; that's ok for this
        // single time usage.
        let new_state = State { segments: vec![] };
        self.commit_state(new_state).await?;

        self.delete_segments(&segments_to_delete).await?;
        Ok(segments_to_delete)
    }
}
/// Check if everything is uploaded and partial backup task doesn't need to run.
pub(crate) fn needs_uploading(
    state: &StateSnapshot,
    uploaded: &Option<PartialRemoteSegment>,
) -> bool {
    // Without any prior upload the task must certainly run.
    let Some(seg) = uploaded else {
        return true;
    };
    // The upload is current iff it finished and still matches the live
    // term / flush / commit positions; otherwise re-uploading is needed.
    let up_to_date = seg.status == UploadStatus::Uploaded
        && seg.flush_lsn == state.flush_lsn
        && seg.commit_lsn == state.commit_lsn
        && seg.term == state.last_log_term;
    !up_to_date
}
/// Main task for partial backup. It waits for the flush_lsn to change and then uploads the
/// partial segment to the remote storage. It also does garbage collection of old segments.
///
/// When there is nothing more to do and the last segment was successfully uploaded, the task
/// returns PartialRemoteSegment, to signal readiness for offloading the timeline.
#[instrument(name = "partial_backup", skip_all, fields(ttid = %tli.ttid))]
pub async fn main_task(
    tli: WalResidentTimeline,
    conf: SafeKeeperConf,
    limiter: RateLimiter,
    cancel: CancellationToken,
    storage: Arc<GenericRemoteStorage>,
) -> Option<PartialRemoteSegment> {
    debug!("started");
    let await_duration = conf.partial_backup_timeout;
    let mut first_iteration = true;

    // Watch channels for commit/flush positions drive the wait loops below.
    let mut commit_lsn_rx = tli.get_commit_lsn_watch_rx();
    let mut flush_lsn_rx = tli.get_term_flush_lsn_watch_rx();

    let mut backup = PartialBackup::new(tli, conf, storage).await;
    debug!("state: {:?}", backup.state);

    // The general idea is that each safekeeper keeps only one partial segment
    // both in remote storage and in local state. If this is not true, something
    // went wrong.
    const MAX_SIMULTANEOUS_SEGMENTS: usize = 10;

    'outer: loop {
        if backup.state.segments.len() > MAX_SIMULTANEOUS_SEGMENTS {
            warn!(
                "too many segments in control_file state, running gc: {}",
                backup.state.segments.len()
            );
            // Best-effort: a failed gc is logged, not fatal for the task.
            backup.gc().await.unwrap_or_else(|e| {
                error!("failed to run gc: {:#}", e);
            });
        }

        // wait until we have something to upload
        let uploaded_segment = backup.state.uploaded_segment();
        if let Some(seg) = &uploaded_segment {
            // check if uploaded segment matches the current state
            if flush_lsn_rx.borrow().lsn == seg.flush_lsn
                && *commit_lsn_rx.borrow() == seg.commit_lsn
                && flush_lsn_rx.borrow().term == seg.term
            {
                // we have nothing to do, the last segment is already uploaded
                debug!(
                    "exiting, uploaded up to term={} flush_lsn={} commit_lsn={}",
                    seg.term, seg.flush_lsn, seg.commit_lsn
                );
                return Some(seg.clone());
            }
        }

        // if we don't have any data and zero LSNs, wait for something
        while flush_lsn_rx.borrow().lsn == Lsn(0) {
            tokio::select! {
                _ = backup.tli.cancel.cancelled() => {
                    info!("timeline canceled");
                    return None;
                }
                _ = cancel.cancelled() => {
                    info!("task canceled");
                    return None;
                }
                _ = flush_lsn_rx.changed() => {}
            }
        }

        // smoothing the load after restart, by sleeping for a random time.
        // if this is not the first iteration, we will wait for the full await_duration
        let await_duration = if first_iteration {
            first_iteration = false;
            rand_duration(&await_duration)
        } else {
            await_duration
        };

        // fixing the segno and waiting some time to prevent reuploading the same segment too often
        let pending_segno = backup.segno(flush_lsn_rx.borrow().lsn);
        let timeout = tokio::time::sleep(await_duration);
        tokio::pin!(timeout);
        let mut timeout_expired = false;

        // waiting until timeout expires OR segno changes
        'inner: loop {
            tokio::select! {
                _ = backup.tli.cancel.cancelled() => {
                    info!("timeline canceled");
                    return None;
                }
                _ = cancel.cancelled() => {
                    info!("task canceled");
                    return None;
                }
                _ = commit_lsn_rx.changed() => {}
                _ = flush_lsn_rx.changed() => {
                    let segno = backup.segno(flush_lsn_rx.borrow().lsn);
                    if segno != pending_segno {
                        // previous segment is no longer partial, aborting the wait
                        break 'inner;
                    }
                }
                _ = &mut timeout => {
                    // timeout expired, now we are ready for upload
                    timeout_expired = true;
                    break 'inner;
                }
            }
        }

        if !timeout_expired {
            // likely segno has changed, let's try again in the next iteration
            continue 'outer;
        }

        // limit concurrent uploads
        let _upload_permit = tokio::select! {
            acq = limiter.acquire_partial_backup() => acq,
            _ = backup.tli.cancel.cancelled() => {
                info!("timeline canceled");
                return None;
            }
            _ = cancel.cancelled() => {
                info!("task canceled");
                return None;
            }
        };

        let prepared = backup.prepare_upload().await;
        if let Some(seg) = &uploaded_segment {
            if seg.eq_without_status(&prepared) {
                // we already uploaded this segment, nothing to do
                continue 'outer;
            }
        }

        match backup.do_upload(&prepared).await {
            Ok(()) => {
                debug!(
                    "uploaded {} up to flush_lsn {}",
                    prepared.name, prepared.flush_lsn
                );
                PARTIAL_BACKUP_UPLOADS.with_label_values(&["ok"]).inc();
            }
            Err(e) => {
                // Upload failures are retried on the next loop iteration.
                info!("failed to upload {}: {:#}", prepared.name, e);
                PARTIAL_BACKUP_UPLOADS.with_label_values(&["error"]).inc();
            }
        }
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/src/bin/safekeeper.rs | safekeeper/src/bin/safekeeper.rs | //
// Main entry point for the safekeeper executable
//
use std::fs::{self, File};
use std::io::{ErrorKind, Write};
use std::str::FromStr;
use std::sync::Arc;
use std::time::{Duration, Instant};
use anyhow::{Context, Result, bail};
use camino::{Utf8Path, Utf8PathBuf};
use clap::{ArgAction, Parser};
use futures::future::BoxFuture;
use futures::stream::FuturesUnordered;
use futures::{FutureExt, StreamExt};
use http_utils::tls_certs::ReloadingCertificateResolver;
use metrics::set_build_info_metric;
use remote_storage::RemoteStorageConfig;
use safekeeper::defaults::{
DEFAULT_CONTROL_FILE_SAVE_INTERVAL, DEFAULT_EVICTION_MIN_RESIDENT,
DEFAULT_GLOBAL_DISK_CHECK_INTERVAL, DEFAULT_HEARTBEAT_TIMEOUT, DEFAULT_HTTP_LISTEN_ADDR,
DEFAULT_MAX_GLOBAL_DISK_USAGE_RATIO, DEFAULT_MAX_OFFLOADER_LAG_BYTES,
DEFAULT_MAX_REELECT_OFFLOADER_LAG_BYTES, DEFAULT_MAX_TIMELINE_DISK_USAGE_BYTES,
DEFAULT_PARTIAL_BACKUP_CONCURRENCY, DEFAULT_PARTIAL_BACKUP_TIMEOUT, DEFAULT_PG_LISTEN_ADDR,
DEFAULT_SSL_CERT_FILE, DEFAULT_SSL_CERT_RELOAD_PERIOD, DEFAULT_SSL_KEY_FILE,
};
use safekeeper::hadron;
use safekeeper::wal_backup::WalBackup;
use safekeeper::{
BACKGROUND_RUNTIME, BROKER_RUNTIME, GlobalTimelines, HTTP_RUNTIME, SafeKeeperConf,
WAL_SERVICE_RUNTIME, broker, control_file, http, wal_service,
};
use sd_notify::NotifyState;
use storage_broker::{DEFAULT_ENDPOINT, Uri};
use tokio::runtime::Handle;
use tokio::signal::unix::{SignalKind, signal};
use tokio::task::JoinError;
use tracing::*;
use utils::auth::{JwtAuth, Scope, SwappableJwtAuth};
use utils::id::NodeId;
use utils::logging::{self, LogFormat, SecretString};
use utils::metrics_collector::{METRICS_COLLECTION_INTERVAL, METRICS_COLLECTOR};
use utils::sentry_init::init_sentry;
use utils::{pid_file, project_build_tag, project_git_version, tcp_listener};
use safekeeper::hadron::{
GLOBAL_DISK_LIMIT_EXCEEDED, get_filesystem_capacity, get_filesystem_usage,
};
use safekeeper::metrics::GLOBAL_DISK_UTIL_CHECK_SECONDS;
use std::sync::atomic::Ordering;
// Use jemalloc as the process-wide allocator.
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

/// Configure jemalloc to profile heap allocations by sampling stack traces every 2 MB (1 << 21).
/// This adds roughly 3% overhead for allocations on average, which is acceptable considering
/// performance-sensitive code will avoid allocations as far as possible anyway.
// Exported under the C symbol name `malloc_conf`, which jemalloc reads at startup.
#[allow(non_upper_case_globals)]
#[unsafe(export_name = "malloc_conf")]
pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:21\0";
// Lock file preventing two safekeepers from running over the same datadir.
const PID_FILE_NAME: &str = "safekeeper.pid";
// File persisting this safekeeper's node id across restarts.
const ID_FILE_NAME: &str = "safekeeper.id";
project_git_version!(GIT_VERSION);
project_build_tag!(BUILD_TAG);
// Compile-time feature flags, reported by `version()`.
const FEATURES: &[&str] = &[
    #[cfg(feature = "testing")]
    "testing",
];
/// Human-readable version string: git revision plus failpoint availability and
/// the set of enabled compile-time features.
fn version() -> String {
    let failpoints_enabled = fail::has_failpoints();
    format!(
        "{GIT_VERSION} failpoints: {}, features: {:?}",
        failpoints_enabled, FEATURES,
    )
}
// Long-form `about` text shown in the CLI `--help` output.
const ABOUT: &str = r#"
A fleet of safekeepers is responsible for reliably storing WAL received from
compute, passing it through consensus (mitigating potential computes brain
split), and serving the hardened part further downstream to pageserver(s).
"#;
// NOTE: clap derives the CLI from this struct. The `///` doc comments below are
// user-visible `--help` text, so edit them with that in mind; use `//` comments
// for maintainer-only notes.
#[derive(Parser)]
#[command(name = "Neon safekeeper", version = GIT_VERSION, about = ABOUT, long_about = None)]
struct Args {
    /// Path to the safekeeper data directory.
    #[arg(short = 'D', long, default_value = "./")]
    datadir: Utf8PathBuf,
    /// Safekeeper node id.
    // Optional: when absent, the id is read from the on-disk id file (see set_id).
    #[arg(long)]
    id: Option<u64>,
    /// Initialize safekeeper with given id and exit.
    #[arg(long)]
    init: bool,
    /// Listen endpoint for receiving/sending WAL in the form host:port.
    #[arg(short, long, default_value = DEFAULT_PG_LISTEN_ADDR)]
    listen_pg: String,
    /// Listen endpoint for receiving/sending WAL in the form host:port allowing
    /// only tenant scoped auth tokens. Pointless if auth is disabled.
    #[arg(long, default_value = None, verbatim_doc_comment)]
    listen_pg_tenant_only: Option<String>,
    /// Listen http endpoint for management and metrics in the form host:port.
    #[arg(long, default_value = DEFAULT_HTTP_LISTEN_ADDR)]
    listen_http: String,
    /// Listen https endpoint for management and metrics in the form host:port.
    #[arg(long, default_value = None)]
    listen_https: Option<String>,
    /// Advertised endpoint for receiving/sending WAL in the form host:port. If not
    /// specified, listen_pg is used to advertise instead.
    #[arg(long, default_value = None)]
    advertise_pg: Option<String>,
    /// Availability zone of the safekeeper.
    #[arg(long)]
    availability_zone: Option<String>,
    /// Do not wait for changes to be written safely to disk. Unsafe.
    #[arg(short, long)]
    no_sync: bool,
    /// Dump control file at path specified by this argument and exit.
    #[arg(long)]
    dump_control_file: Option<Utf8PathBuf>,
    /// Broker endpoint for storage nodes coordination in the form
    /// http[s]://host:port. In case of https schema TLS is connection is
    /// established; plaintext otherwise.
    #[arg(long, default_value = DEFAULT_ENDPOINT, verbatim_doc_comment)]
    broker_endpoint: Uri,
    /// Broker keepalive interval.
    #[arg(long, value_parser= humantime::parse_duration, default_value = storage_broker::DEFAULT_KEEPALIVE_INTERVAL)]
    broker_keepalive_interval: Duration,
    /// Peer safekeeper is considered dead after not receiving heartbeats from
    /// it during this period passed as a human readable duration.
    #[arg(long, value_parser= humantime::parse_duration, default_value = DEFAULT_HEARTBEAT_TIMEOUT, verbatim_doc_comment)]
    heartbeat_timeout: Duration,
    /// Enable/disable peer recovery.
    #[arg(long, default_value = "false", action=ArgAction::Set)]
    peer_recovery: bool,
    /// Remote storage configuration for WAL backup (offloading to s3) as TOML
    /// inline table, e.g.
    /// {max_concurrent_syncs = 17, max_sync_errors = 13, bucket_name = "<BUCKETNAME>", bucket_region = "<REGION>", concurrency_limit = 119}
    /// Safekeeper offloads WAL to
    /// [prefix_in_bucket/]<tenant_id>/<timeline_id>/<segment_file>, mirroring
    /// structure on the file system.
    #[arg(long, value_parser = parse_remote_storage, verbatim_doc_comment)]
    remote_storage: Option<RemoteStorageConfig>,
    /// Safekeeper won't be elected for WAL offloading if it is lagging for more than this value in bytes
    #[arg(long, default_value_t = DEFAULT_MAX_OFFLOADER_LAG_BYTES)]
    max_offloader_lag: u64,
    /* BEGIN_HADRON */
    /// Safekeeper will re-elect a new offloader if the current backup lagging for more than this value in bytes
    #[arg(long, default_value_t = DEFAULT_MAX_REELECT_OFFLOADER_LAG_BYTES)]
    max_reelect_offloader_lag_bytes: u64,
    /// Safekeeper will stop accepting new WALs if the timeline disk usage exceeds this value in bytes.
    /// Setting this value to 0 disables the limit.
    #[arg(long, default_value_t = DEFAULT_MAX_TIMELINE_DISK_USAGE_BYTES)]
    max_timeline_disk_usage_bytes: u64,
    /* END_HADRON */
    /// Number of max parallel WAL segments to be offloaded to remote storage.
    #[arg(long, default_value = "5")]
    wal_backup_parallel_jobs: usize,
    /// Disable WAL backup to s3. When disabled, safekeeper removes WAL ignoring
    /// WAL backup horizon.
    #[arg(long)]
    disable_wal_backup: bool,
    /// If given, enables auth on incoming connections to WAL service endpoint
    /// (--listen-pg). Value specifies path to a .pem public key used for
    /// validations of JWT tokens. Empty string is allowed and means disabling
    /// auth.
    // Empty-string handling happens post-parse in main() (turned into None).
    #[arg(long, verbatim_doc_comment, value_parser = opt_pathbuf_parser)]
    pg_auth_public_key_path: Option<Utf8PathBuf>,
    /// If given, enables auth on incoming connections to tenant only WAL
    /// service endpoint (--listen-pg-tenant-only). Value specifies path to a
    /// .pem public key used for validations of JWT tokens. Empty string is
    /// allowed and means disabling auth.
    #[arg(long, verbatim_doc_comment, value_parser = opt_pathbuf_parser)]
    pg_tenant_only_auth_public_key_path: Option<Utf8PathBuf>,
    /// If given, enables auth on incoming connections to http management
    /// service endpoint (--listen-http). Value specifies path to a .pem public
    /// key used for validations of JWT tokens. Empty string is allowed and
    /// means disabling auth.
    #[arg(long, verbatim_doc_comment, value_parser = opt_pathbuf_parser)]
    http_auth_public_key_path: Option<Utf8PathBuf>,
    /// Format for logging, either 'plain' or 'json'.
    #[arg(long, default_value = "plain")]
    log_format: String,
    /// Run everything in single threaded current thread runtime, might be
    /// useful for debugging.
    #[arg(long)]
    current_thread_runtime: bool,
    /// Keep horizon for walsenders, i.e. don't remove WAL segments that are
    /// still needed for existing replication connection.
    #[arg(long)]
    walsenders_keep_horizon: bool,
    /// Controls how long backup will wait until uploading the partial segment.
    #[arg(long, value_parser = humantime::parse_duration, default_value = DEFAULT_PARTIAL_BACKUP_TIMEOUT, verbatim_doc_comment)]
    partial_backup_timeout: Duration,
    /// Disable task to push messages to broker every second. Supposed to
    /// be used in tests.
    #[arg(long)]
    disable_periodic_broker_push: bool,
    /// Enable automatic switching to offloaded state.
    #[arg(long)]
    enable_offload: bool,
    /// Delete local WAL files after offloading. When disabled, they will be left on disk.
    #[arg(long)]
    delete_offloaded_wal: bool,
    /// Pending updates to control file will be automatically saved after this interval.
    #[arg(long, value_parser = humantime::parse_duration, default_value = DEFAULT_CONTROL_FILE_SAVE_INTERVAL)]
    control_file_save_interval: Duration,
    /// Number of allowed concurrent uploads of partial segments to remote storage.
    #[arg(long, default_value = DEFAULT_PARTIAL_BACKUP_CONCURRENCY)]
    partial_backup_concurrency: usize,
    /// How long a timeline must be resident before it is eligible for eviction.
    /// Usually, timeline eviction has to wait for `partial_backup_timeout` before being eligible for eviction,
    /// but if a timeline is un-evicted and then _not_ written to, it would immediately flap to evicting again,
    /// if it weren't for `eviction_min_resident` preventing that.
    ///
    /// Also defines interval for eviction retries.
    #[arg(long, value_parser = humantime::parse_duration, default_value = DEFAULT_EVICTION_MIN_RESIDENT)]
    eviction_min_resident: Duration,
    /// Enable fanning out WAL to different shards from the same reader
    #[arg(long)]
    wal_reader_fanout: bool,
    /// Only fan out the WAL reader if the absoulte delta between the new requested position
    /// and the current position of the reader is smaller than this value.
    #[arg(long)]
    max_delta_for_fanout: Option<u64>,
    /// Path to a file with certificate's private key for https API.
    #[arg(long, default_value = DEFAULT_SSL_KEY_FILE)]
    ssl_key_file: Utf8PathBuf,
    /// Path to a file with a X509 certificate for https API.
    #[arg(long, default_value = DEFAULT_SSL_CERT_FILE)]
    ssl_cert_file: Utf8PathBuf,
    /// Period to reload certificate and private key from files.
    #[arg(long, value_parser = humantime::parse_duration, default_value = DEFAULT_SSL_CERT_RELOAD_PERIOD)]
    ssl_cert_reload_period: Duration,
    /// Trusted root CA certificates to use in https APIs.
    #[arg(long)]
    ssl_ca_file: Option<Utf8PathBuf>,
    /// Flag to use https for requests to peer's safekeeper API.
    #[arg(long)]
    use_https_safekeeper_api: bool,
    /// Path to the JWT auth token used to authenticate with other safekeepers.
    #[arg(long)]
    auth_token_path: Option<Utf8PathBuf>,
    /// Enable TLS in WAL service API.
    /// Does not force TLS: the client negotiates TLS usage during the handshake.
    /// Uses key and certificate from ssl_key_file/ssl_cert_file.
    #[arg(long)]
    enable_tls_wal_service_api: bool,
    /// Controls whether to collect all metrics on each scrape or to return potentially stale
    /// results.
    #[arg(long, default_value_t = true)]
    force_metric_collection_on_scrape: bool,
    /// Run in development mode (disables security checks)
    #[arg(long, help = "Run in development mode (disables security checks)")]
    dev: bool,
    /* BEGIN_HADRON */
    #[arg(long)]
    enable_pull_timeline_on_startup: bool,
    /// How often to scan entire data-dir for total disk usage
    #[arg(long, value_parser=humantime::parse_duration, default_value = DEFAULT_GLOBAL_DISK_CHECK_INTERVAL)]
    global_disk_check_interval: Duration,
    /// The portion of the filesystem capacity that can be used by all timelines.
    /// A circuit breaker will trip and reject all WAL writes if the total usage
    /// exceeds this ratio.
    /// Set to 0 to disable the global disk usage limit.
    #[arg(long, default_value_t = DEFAULT_MAX_GLOBAL_DISK_USAGE_RATIO)]
    max_global_disk_usage_ratio: f64,
    /* END_HADRON */
}
// Like PathBufValueParser, but allows empty string.
//
// `Utf8PathBuf`'s string conversion is infallible, so use the `From<&str>`
// impl directly instead of the `from_str(..).unwrap()` round-trip.
fn opt_pathbuf_parser(s: &str) -> Result<Utf8PathBuf, String> {
    Ok(Utf8PathBuf::from(s))
}
/// Binary entry point: parse CLI args, set up logging/auth/TLS material and
/// hand off to `start_safekeeper`.
#[tokio::main(flavor = "current_thread")]
async fn main() -> anyhow::Result<()> {
    // We want to allow multiple occurrences of the same arg (taking the last) so
    // that neon_local could generate command with defaults + overrides without
    // getting 'argument cannot be used multiple times' error. This seems to be
    // impossible with pure Derive API, so convert struct to Command, modify it,
    // parse arguments, and then fill the struct back.
    let cmd = <Args as clap::CommandFactory>::command()
        .args_override_self(true)
        .version(version());
    let mut matches = cmd.get_matches();
    let mut args = <Args as clap::FromArgMatches>::from_arg_matches_mut(&mut matches)?;

    // I failed to modify opt_pathbuf_parser to return Option<PathBuf> in
    // reasonable time, so turn empty string into option post factum.
    if let Some(pb) = &args.pg_auth_public_key_path {
        if pb.as_os_str().is_empty() {
            args.pg_auth_public_key_path = None;
        }
    }
    if let Some(pb) = &args.pg_tenant_only_auth_public_key_path {
        if pb.as_os_str().is_empty() {
            args.pg_tenant_only_auth_public_key_path = None;
        }
    }
    if let Some(pb) = &args.http_auth_public_key_path {
        if pb.as_os_str().is_empty() {
            args.http_auth_public_key_path = None;
        }
    }

    // Utility mode: dump a control file as JSON and exit.
    if let Some(path) = args.dump_control_file {
        let state = control_file::FileStorage::load_control_file(path)?;
        let json = serde_json::to_string(&state)?;
        print!("{json}");
        return Ok(());
    }

    // important to keep the order of:
    // 1. init logging
    // 2. tracing panic hook
    // 3. sentry
    logging::init(
        LogFormat::from_config(&args.log_format)?,
        logging::TracingErrorLayerEnablement::Disabled,
        logging::Output::Stdout,
    )?;
    logging::replace_panic_hook_with_tracing_panic_hook().forget();
    info!("version: {GIT_VERSION}");
    info!("build_tag: {BUILD_TAG}");

    let args_workdir = &args.datadir;
    let workdir = args_workdir.canonicalize_utf8().with_context(|| {
        format!("Failed to get the absolute path for input workdir {args_workdir:?}")
    })?;

    // Change into the data directory.
    std::env::set_current_dir(&workdir)?;

    // Prevent running multiple safekeepers on the same directory
    let lock_file_path = workdir.join(PID_FILE_NAME);
    let lock_file =
        pid_file::claim_for_current_process(&lock_file_path).context("claim pid file")?;
    info!("claimed pid file at {lock_file_path:?}");
    // ensure that the lock file is held even if the main thread of the process is panics
    // we need to release the lock file only when the current process is gone
    std::mem::forget(lock_file);

    // Set or read our ID.
    let id = set_id(&workdir, args.id.map(NodeId))?;
    if args.init {
        return Ok(());
    }

    let pg_auth = match args.pg_auth_public_key_path.as_ref() {
        None => {
            info!("pg auth is disabled");
            None
        }
        Some(path) => {
            info!("loading pg auth JWT key from {path}");
            Some(Arc::new(
                JwtAuth::from_key_path(path).context("failed to load the auth key")?,
            ))
        }
    };
    let pg_tenant_only_auth = match args.pg_tenant_only_auth_public_key_path.as_ref() {
        None => {
            info!("pg tenant only auth is disabled");
            None
        }
        Some(path) => {
            info!("loading pg tenant only auth JWT key from {path}");
            Some(Arc::new(
                JwtAuth::from_key_path(path).context("failed to load the auth key")?,
            ))
        }
    };
    let http_auth = match args.http_auth_public_key_path.as_ref() {
        None => {
            info!("http auth is disabled");
            None
        }
        Some(path) => {
            info!("loading http auth JWT key(s) from {path}");
            let jwt_auth = JwtAuth::from_key_path(path).context("failed to load the auth key")?;
            Some(Arc::new(SwappableJwtAuth::new(jwt_auth)))
        }
    };

    // Load JWT auth token to connect to other safekeepers for pull_timeline.
    let sk_auth_token = if let Some(auth_token_path) = args.auth_token_path.as_ref() {
        info!("loading JWT token for authentication with safekeepers from {auth_token_path}");
        let auth_token = tokio::fs::read_to_string(auth_token_path).await?;
        Some(SecretString::from(auth_token.trim().to_owned()))
    } else {
        info!("no JWT token for authentication with safekeepers detected");
        None
    };

    let ssl_ca_certs = match args.ssl_ca_file.as_ref() {
        Some(ssl_ca_file) => {
            tracing::info!("Using ssl root CA file: {ssl_ca_file:?}");
            let buf = tokio::fs::read(ssl_ca_file).await?;
            pem::parse_many(&buf)?
                .into_iter()
                .filter(|pem| pem.tag() == "CERTIFICATE")
                .collect()
        }
        None => Vec::new(),
    };

    let conf = Arc::new(SafeKeeperConf {
        workdir,
        my_id: id,
        listen_pg_addr: args.listen_pg,
        listen_pg_addr_tenant_only: args.listen_pg_tenant_only,
        listen_http_addr: args.listen_http,
        listen_https_addr: args.listen_https,
        advertise_pg_addr: args.advertise_pg,
        availability_zone: args.availability_zone,
        no_sync: args.no_sync,
        broker_endpoint: args.broker_endpoint,
        broker_keepalive_interval: args.broker_keepalive_interval,
        heartbeat_timeout: args.heartbeat_timeout,
        peer_recovery_enabled: args.peer_recovery,
        remote_storage: args.remote_storage,
        max_offloader_lag_bytes: args.max_offloader_lag,
        /* BEGIN_HADRON */
        max_reelect_offloader_lag_bytes: args.max_reelect_offloader_lag_bytes,
        max_timeline_disk_usage_bytes: args.max_timeline_disk_usage_bytes,
        /* END_HADRON */
        wal_backup_enabled: !args.disable_wal_backup,
        backup_parallel_jobs: args.wal_backup_parallel_jobs,
        pg_auth,
        pg_tenant_only_auth,
        http_auth,
        sk_auth_token,
        current_thread_runtime: args.current_thread_runtime,
        walsenders_keep_horizon: args.walsenders_keep_horizon,
        partial_backup_timeout: args.partial_backup_timeout,
        disable_periodic_broker_push: args.disable_periodic_broker_push,
        enable_offload: args.enable_offload,
        delete_offloaded_wal: args.delete_offloaded_wal,
        control_file_save_interval: args.control_file_save_interval,
        partial_backup_concurrency: args.partial_backup_concurrency,
        eviction_min_resident: args.eviction_min_resident,
        wal_reader_fanout: args.wal_reader_fanout,
        max_delta_for_fanout: args.max_delta_for_fanout,
        ssl_key_file: args.ssl_key_file,
        ssl_cert_file: args.ssl_cert_file,
        ssl_cert_reload_period: args.ssl_cert_reload_period,
        ssl_ca_certs,
        use_https_safekeeper_api: args.use_https_safekeeper_api,
        enable_tls_wal_service_api: args.enable_tls_wal_service_api,
        force_metric_collection_on_scrape: args.force_metric_collection_on_scrape,
        /* BEGIN_HADRON */
        advertise_pg_addr_tenant_only: None,
        enable_pull_timeline_on_startup: args.enable_pull_timeline_on_startup,
        hcc_base_url: None,
        global_disk_check_interval: args.global_disk_check_interval,
        max_global_disk_usage_ratio: args.max_global_disk_usage_ratio,
        /* END_HADRON */
    });

    // initialize sentry if SENTRY_DSN is provided
    let _sentry_guard = init_sentry(
        Some(GIT_VERSION.into()),
        &[("node_id", &conf.my_id.to_string())],
    );
    start_safekeeper(conf).await
}
/// Result of joining any of the main tasks: the outer error means the task
/// failed to complete (e.g. panicked); the inner one is the error produced by
/// the task itself.
type JoinTaskRes = Result<anyhow::Result<()>, JoinError>;
async fn start_safekeeper(conf: Arc<SafeKeeperConf>) -> Result<()> {
// fsync the datadir to make sure we have a consistent state on disk.
if !conf.no_sync {
let dfd = File::open(&conf.workdir).context("open datadir for syncfs")?;
let started = Instant::now();
utils::crashsafe::syncfs(dfd)?;
let elapsed = started.elapsed();
info!(
elapsed_ms = elapsed.as_millis(),
"syncfs data directory done"
);
}
info!("starting safekeeper WAL service on {}", conf.listen_pg_addr);
let pg_listener = tcp_listener::bind(conf.listen_pg_addr.clone()).map_err(|e| {
error!("failed to bind to address {}: {}", conf.listen_pg_addr, e);
e
})?;
let pg_listener_tenant_only =
if let Some(listen_pg_addr_tenant_only) = &conf.listen_pg_addr_tenant_only {
info!(
"starting safekeeper tenant scoped WAL service on {}",
listen_pg_addr_tenant_only
);
let listener = tcp_listener::bind(listen_pg_addr_tenant_only.clone()).map_err(|e| {
error!(
"failed to bind to address {}: {}",
listen_pg_addr_tenant_only, e
);
e
})?;
Some(listener)
} else {
None
};
info!(
"starting safekeeper HTTP service on {}",
conf.listen_http_addr
);
let http_listener = tcp_listener::bind(conf.listen_http_addr.clone()).map_err(|e| {
error!("failed to bind to address {}: {}", conf.listen_http_addr, e);
e
})?;
let https_listener = match conf.listen_https_addr.as_ref() {
Some(listen_https_addr) => {
info!("starting safekeeper HTTPS service on {}", listen_https_addr);
Some(tcp_listener::bind(listen_https_addr).map_err(|e| {
error!("failed to bind to address {}: {}", listen_https_addr, e);
e
})?)
}
None => None,
};
let wal_backup = Arc::new(WalBackup::new(&conf).await?);
let global_timelines = Arc::new(GlobalTimelines::new(conf.clone(), wal_backup.clone()));
// Register metrics collector for active timelines. It's important to do this
// after daemonizing, otherwise process collector will be upset.
let timeline_collector = safekeeper::metrics::TimelineCollector::new(global_timelines.clone());
metrics::register_internal(Box::new(timeline_collector))?;
// Keep handles to main tasks to die if any of them disappears.
let mut tasks_handles: FuturesUnordered<BoxFuture<(String, JoinTaskRes)>> =
FuturesUnordered::new();
// Start wal backup launcher before loading timelines as we'll notify it
// through the channel about timelines which need offloading, not draining
// the channel would cause deadlock.
let current_thread_rt = conf
.current_thread_runtime
.then(|| Handle::try_current().expect("no runtime in main"));
// Load all timelines from disk to memory.
global_timelines.init().await?;
/* BEGIN_HADRON */
if conf.enable_pull_timeline_on_startup && global_timelines.timelines_count() == 0 {
match hadron::hcc_pull_timelines(&conf, global_timelines.clone()).await {
Ok(_) => {
info!("Successfully pulled all timelines from peer safekeepers");
}
Err(e) => {
error!("Failed to pull timelines from peer safekeepers: {:?}", e);
return Err(e);
}
}
}
/* END_HADRON */
// Run everything in current thread rt, if asked.
if conf.current_thread_runtime {
info!("running in current thread runtime");
}
let tls_server_config = if conf.listen_https_addr.is_some() || conf.enable_tls_wal_service_api {
let ssl_key_file = conf.ssl_key_file.clone();
let ssl_cert_file = conf.ssl_cert_file.clone();
let ssl_cert_reload_period = conf.ssl_cert_reload_period;
// Create resolver in BACKGROUND_RUNTIME, so the background certificate reloading
// task is run in this runtime.
let cert_resolver = current_thread_rt
.as_ref()
.unwrap_or_else(|| BACKGROUND_RUNTIME.handle())
.spawn(async move {
ReloadingCertificateResolver::new(
"main",
&ssl_key_file,
&ssl_cert_file,
ssl_cert_reload_period,
)
.await
})
.await??;
let config = rustls::ServerConfig::builder()
.with_no_client_auth()
.with_cert_resolver(cert_resolver);
Some(Arc::new(config))
} else {
None
};
let wal_service_handle = current_thread_rt
.as_ref()
.unwrap_or_else(|| WAL_SERVICE_RUNTIME.handle())
.spawn(wal_service::task_main(
conf.clone(),
pg_listener,
Scope::SafekeeperData,
conf.enable_tls_wal_service_api
.then(|| tls_server_config.clone())
.flatten(),
global_timelines.clone(),
))
// wrap with task name for error reporting
.map(|res| ("WAL service main".to_owned(), res));
tasks_handles.push(Box::pin(wal_service_handle));
let global_timelines_ = global_timelines.clone();
let timeline_housekeeping_handle = current_thread_rt
.as_ref()
.unwrap_or_else(|| WAL_SERVICE_RUNTIME.handle())
.spawn(async move {
const TOMBSTONE_TTL: Duration = Duration::from_secs(3600 * 24);
loop {
tokio::time::sleep(TOMBSTONE_TTL).await;
global_timelines_.housekeeping(&TOMBSTONE_TTL);
}
})
.map(|res| ("Timeline map housekeeping".to_owned(), res));
tasks_handles.push(Box::pin(timeline_housekeeping_handle));
/* BEGIN_HADRON */
// Spawn global disk usage watcher task, if a global disk usage limit is specified.
let interval = conf.global_disk_check_interval;
let data_dir = conf.workdir.clone();
// Use the safekeeper data directory to compute filesystem capacity. This only runs once on startup, so
// there is little point to continue if we can't have the proper protections in place.
let fs_capacity_bytes = get_filesystem_capacity(data_dir.as_std_path())
.expect("Failed to get filesystem capacity for data directory");
let limit: u64 = (conf.max_global_disk_usage_ratio * fs_capacity_bytes as f64) as u64;
if limit > 0 {
let disk_usage_watch_handle = BACKGROUND_RUNTIME
.handle()
.spawn(async move {
// Use Tokio interval to preserve fixed cadence between filesystem utilization checks
let mut ticker = tokio::time::interval(interval);
ticker.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
loop {
ticker.tick().await;
let data_dir_clone = data_dir.clone();
let check_start = Instant::now();
let usage = tokio::task::spawn_blocking(move || {
get_filesystem_usage(data_dir_clone.as_std_path())
})
.await
.unwrap_or(0);
let elapsed = check_start.elapsed().as_secs_f64();
GLOBAL_DISK_UTIL_CHECK_SECONDS.observe(elapsed);
if usage > limit {
warn!(
"Global disk usage exceeded limit. Usage: {} bytes, limit: {} bytes",
usage, limit
);
}
GLOBAL_DISK_LIMIT_EXCEEDED.store(usage > limit, Ordering::Relaxed);
}
})
.map(|res| ("Global disk usage watcher".to_string(), res));
tasks_handles.push(Box::pin(disk_usage_watch_handle));
}
/* END_HADRON */
if let Some(pg_listener_tenant_only) = pg_listener_tenant_only {
let wal_service_handle = current_thread_rt
.as_ref()
.unwrap_or_else(|| WAL_SERVICE_RUNTIME.handle())
.spawn(wal_service::task_main(
conf.clone(),
pg_listener_tenant_only,
Scope::Tenant,
conf.enable_tls_wal_service_api
.then(|| tls_server_config.clone())
.flatten(),
global_timelines.clone(),
))
// wrap with task name for error reporting
.map(|res| ("WAL service tenant only main".to_owned(), res));
tasks_handles.push(Box::pin(wal_service_handle));
}
let http_handle = current_thread_rt
.as_ref()
.unwrap_or_else(|| HTTP_RUNTIME.handle())
.spawn(http::task_main_http(
conf.clone(),
http_listener,
global_timelines.clone(),
))
.map(|res| ("HTTP service main".to_owned(), res));
tasks_handles.push(Box::pin(http_handle));
if let Some(https_listener) = https_listener {
let https_handle = current_thread_rt
.as_ref()
.unwrap_or_else(|| HTTP_RUNTIME.handle())
.spawn(http::task_main_https(
conf.clone(),
https_listener,
tls_server_config.expect("tls_server_config is set earlier if https is enabled"),
global_timelines.clone(),
))
.map(|res| ("HTTPS service main".to_owned(), res));
tasks_handles.push(Box::pin(https_handle));
}
let broker_task_handle = current_thread_rt
.as_ref()
.unwrap_or_else(|| BROKER_RUNTIME.handle())
.spawn(
broker::task_main(conf.clone(), global_timelines.clone())
.instrument(info_span!("broker")),
)
.map(|res| ("broker main".to_owned(), res));
tasks_handles.push(Box::pin(broker_task_handle));
/* BEGIN_HADRON */
if conf.force_metric_collection_on_scrape {
let metrics_handle = current_thread_rt
.as_ref()
.unwrap_or_else(|| BACKGROUND_RUNTIME.handle())
.spawn(async move {
let mut interval: tokio::time::Interval =
tokio::time::interval(METRICS_COLLECTION_INTERVAL);
loop {
interval.tick().await;
tokio::task::spawn_blocking(|| {
METRICS_COLLECTOR.run_once(true);
});
}
})
.map(|res| ("broker main".to_owned(), res));
tasks_handles.push(Box::pin(metrics_handle));
}
/* END_HADRON */
set_build_info_metric(GIT_VERSION, BUILD_TAG);
// TODO: update tokio-stream, convert to real async Stream with
// SignalStream, map it to obtain missing signal name, combine streams into
// single stream we can easily sit on.
let mut sigquit_stream = signal(SignalKind::quit())?;
let mut sigint_stream = signal(SignalKind::interrupt())?;
let mut sigterm_stream = signal(SignalKind::terminate())?;
// Notify systemd that we are ready. This is important as currently loading
// timelines takes significant time (~30s in busy regions).
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | true |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/src/http/mod.rs | safekeeper/src/http/mod.rs | pub mod routes;
use std::sync::Arc;
pub use routes::make_router;
pub use safekeeper_api::models;
use tokio_util::sync::CancellationToken;
use crate::{GlobalTimelines, SafeKeeperConf};
/// Serve the safekeeper HTTP API on `http_listener` until process shutdown.
///
/// Builds the router, wraps it into a request service and runs the server
/// loop; the final `Ok(())` is never reached in practice.
pub async fn task_main_http(
    conf: Arc<SafeKeeperConf>,
    http_listener: std::net::TcpListener,
    global_timelines: Arc<GlobalTimelines>,
) -> anyhow::Result<()> {
    let built_router = make_router(conf, global_timelines)
        .build()
        .map_err(|err| anyhow::anyhow!(err))?;
    let request_service = http_utils::RequestServiceBuilder::new(built_router)
        .map_err(|err| anyhow::anyhow!(err))?;
    http_utils::server::Server::new(Arc::new(request_service), http_listener, None)?
        .serve(CancellationToken::new())
        .await?;
    Ok(()) // unreachable: serve() runs until the process is shut down
}
/// Serve the safekeeper HTTPS API on `https_listener` until process shutdown.
///
/// Same flow as `task_main_http`, but accepted connections go through a
/// rustls TLS acceptor built from `tls_config`.
pub async fn task_main_https(
    conf: Arc<SafeKeeperConf>,
    https_listener: std::net::TcpListener,
    tls_config: Arc<rustls::ServerConfig>,
    global_timelines: Arc<GlobalTimelines>,
) -> anyhow::Result<()> {
    let acceptor = tokio_rustls::TlsAcceptor::from(tls_config);
    let built_router = make_router(conf, global_timelines)
        .build()
        .map_err(|err| anyhow::anyhow!(err))?;
    let request_service = http_utils::RequestServiceBuilder::new(built_router)
        .map_err(|err| anyhow::anyhow!(err))?;
    http_utils::server::Server::new(Arc::new(request_service), https_listener, Some(acceptor))?
        .serve(CancellationToken::new())
        .await?;
    Ok(()) // unreachable: serve() runs until the process is shut down
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/src/http/routes.rs | safekeeper/src/http/routes.rs | use std::collections::HashMap;
use std::fmt;
use std::io::Write as _;
use std::str::FromStr;
use std::sync::Arc;
use http_utils::endpoint::{
self, ChannelWriter, auth_middleware, check_permission_with, profile_cpu_handler,
profile_heap_handler, prometheus_metrics_handler, request_span,
};
use http_utils::error::ApiError;
use http_utils::failpoints::failpoints_handler;
use http_utils::json::{json_request, json_response};
use http_utils::request::{ensure_no_body, parse_query_param, parse_request_param};
use http_utils::{RequestExt, RouterBuilder};
use hyper::{Body, Request, Response, StatusCode};
use pem::Pem;
use postgres_ffi::WAL_SEGMENT_SIZE;
use safekeeper_api::models::{
AcceptorStateStatus, PullTimelineRequest, SafekeeperStatus, SkTimelineInfo, TenantDeleteResult,
TermSwitchApiEntry, TimelineCopyRequest, TimelineCreateRequest, TimelineDeleteResult,
TimelineStatus, TimelineTermBumpRequest,
};
use safekeeper_api::{ServerInfo, membership, models};
use storage_broker::proto::{SafekeeperTimelineInfo, TenantTimelineId as ProtoTenantTimelineId};
use tokio::sync::mpsc;
use tokio::task;
use tokio_stream::wrappers::ReceiverStream;
use tokio_util::sync::CancellationToken;
use tracing::{Instrument, info_span};
use utils::auth::SwappableJwtAuth;
use utils::id::{TenantId, TenantTimelineId, TimelineId};
use utils::lsn::Lsn;
use crate::debug_dump::TimelineDigestRequest;
use crate::hadron::{get_filesystem_capacity, get_filesystem_usage};
use crate::safekeeper::TermLsn;
use crate::timelines_global_map::DeleteOrExclude;
use crate::{
GlobalTimelines, SafeKeeperConf, copy_timeline, debug_dump, patch_control_file, pull_timeline,
};
use serde_json::json;
/// Healthcheck handler: reports this safekeeper's node id.
async fn status_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
    check_permission(&request, None)?;
    let status = SafekeeperStatus {
        id: get_conf(&request).my_id,
    };
    json_response(StatusCode::OK, status)
}
/// Borrow the `SafeKeeperConf` attached to the router as shared data.
fn get_conf(request: &Request<Body>) -> &SafeKeeperConf {
    let conf: &Arc<SafeKeeperConf> = request.data().expect("unknown state type");
    conf.as_ref()
}
/// Get a clone of the shared `GlobalTimelines` map attached to the router.
fn get_global_timelines(request: &Request<Body>) -> Arc<GlobalTimelines> {
    let timelines: &Arc<GlobalTimelines> = request.data().expect("unknown state type");
    Arc::clone(timelines)
}
/// Check that the request's auth claims grant access to `tenant_id`
/// (or to node-wide endpoints when `tenant_id` is `None`).
fn check_permission(request: &Request<Body>, tenant_id: Option<TenantId>) -> Result<(), ApiError> {
    check_permission_with(request, |claims| {
        crate::auth::check_permission(claims, tenant_id)
    })
}
/// Deactivates all timelines for the tenant and removes its data directory.
/// See `timeline_delete_handler`.
async fn tenant_delete_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
let tenant_id = parse_request_param(&request, "tenant_id")?;
let only_local = parse_query_param(&request, "only_local")?.unwrap_or(false);
check_permission(&request, Some(tenant_id))?;
ensure_no_body(&mut request).await?;
let global_timelines = get_global_timelines(&request);
let action = if only_local {
DeleteOrExclude::DeleteLocal
} else {
DeleteOrExclude::Delete
};
let delete_info = global_timelines
.delete_all_for_tenant(&tenant_id, action)
.await
.map_err(ApiError::InternalServerError)?;
let response_body: TenantDeleteResult = delete_info
.iter()
.map(|(ttid, resp)| (format!("{}", ttid.timeline_id), *resp))
.collect::<HashMap<String, TimelineDeleteResult>>();
json_response(StatusCode::OK, response_body)
}
async fn timeline_create_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
let request_data: TimelineCreateRequest = json_request(&mut request).await?;
let ttid = TenantTimelineId {
tenant_id: request_data.tenant_id,
timeline_id: request_data.timeline_id,
};
check_permission(&request, Some(ttid.tenant_id))?;
let server_info = ServerInfo {
pg_version: request_data.pg_version,
system_id: request_data.system_id.unwrap_or(0),
wal_seg_size: request_data.wal_seg_size.unwrap_or(WAL_SEGMENT_SIZE as u32),
};
let global_timelines = get_global_timelines(&request);
global_timelines
.create(
ttid,
request_data.mconf,
server_info,
request_data.start_lsn,
request_data.commit_lsn.unwrap_or(request_data.start_lsn),
)
.await
.map_err(ApiError::InternalServerError)?;
json_response(StatusCode::OK, ())
}
/// Report timeline counts for this safekeeper.
async fn utilization_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
    check_permission(&request, None)?;
    let counts = get_global_timelines(&request).get_timeline_counts();
    json_response(StatusCode::OK, counts)
}
/// Returns filesystem capacity and current utilization for the safekeeper data directory.
async fn filesystem_usage_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
    check_permission(&request, None)?;
    let path = get_conf(&request).workdir.as_std_path();
    let capacity = get_filesystem_capacity(path).map_err(ApiError::InternalServerError)?;
    let usage = get_filesystem_usage(path);
    json_response(
        StatusCode::OK,
        json!({
            "data_dir": path,
            "capacity_bytes": capacity,
            "usage_bytes": usage,
        }),
    )
}
/// List ids of all (not deleted) timelines.
/// Note: the same information is available via debug_dump.
async fn timeline_list_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
    check_permission(&request, None)?;
    let ids: Vec<TenantTimelineId> = get_global_timelines(&request)
        .get_all()
        .into_iter()
        .map(|tli| tli.ttid)
        .collect();
    json_response(StatusCode::OK, ids)
}
impl From<TermSwitchApiEntry> for TermLsn {
fn from(api_val: TermSwitchApiEntry) -> Self {
TermLsn {
term: api_val.term,
lsn: api_val.lsn,
}
}
}
/// Report info about timeline.
///
/// Collects persistent and in-memory state plus peer, walsender and
/// walreceiver info into a `TimelineStatus` response.
async fn timeline_status_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
    let ttid = TenantTimelineId::new(
        parse_request_param(&request, "tenant_id")?,
        parse_request_param(&request, "timeline_id")?,
    );
    check_permission(&request, Some(ttid.tenant_id))?;
    let global_timelines = get_global_timelines(&request);
    let tli = global_timelines.get(ttid).map_err(ApiError::from)?;
    let (inmem, state) = tli.get_state().await;
    let flush_lsn = tli.get_flush_lsn().await;
    let last_log_term = state.acceptor_state.get_last_log_term(flush_lsn);
    // Convert internal term history entries into their API representation.
    let term_history = state
        .acceptor_state
        .term_history
        .0
        .into_iter()
        .map(|ts| TermSwitchApiEntry {
            term: ts.term,
            lsn: ts.lsn,
        })
        .collect();
    // The API's `epoch` field is populated with the last log term.
    let acc_state = AcceptorStateStatus {
        term: state.acceptor_state.term,
        epoch: last_log_term,
        term_history,
    };
    let conf = get_conf(&request);
    // Note: we report in memory values which can be lost.
    let status = TimelineStatus {
        tenant_id: ttid.tenant_id,
        timeline_id: ttid.timeline_id,
        mconf: state.mconf,
        acceptor_state: acc_state,
        pg_info: state.server,
        flush_lsn,
        timeline_start_lsn: state.timeline_start_lsn,
        local_start_lsn: state.local_start_lsn,
        commit_lsn: inmem.commit_lsn,
        backup_lsn: inmem.backup_lsn,
        peer_horizon_lsn: inmem.peer_horizon_lsn,
        remote_consistent_lsn: inmem.remote_consistent_lsn,
        peers: tli.get_peers(conf).await,
        walsenders: tli.get_walsenders().get_all_public(),
        walreceivers: tli.get_walreceivers().get_all(),
    };
    json_response(StatusCode::OK, status)
}
/// Deactivates the timeline and removes its data directory.
async fn timeline_delete_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
    let tenant_id = parse_request_param(&request, "tenant_id")?;
    let timeline_id = parse_request_param(&request, "timeline_id")?;
    let ttid = TenantTimelineId::new(tenant_id, timeline_id);
    let only_local = parse_query_param(&request, "only_local")?.unwrap_or(false);
    check_permission(&request, Some(ttid.tenant_id))?;
    ensure_no_body(&mut request).await?;
    // Map the query flag onto the deletion mode.
    let action = match only_local {
        true => DeleteOrExclude::DeleteLocal,
        false => DeleteOrExclude::Delete,
    };
    // `?` converts `DeleteOrExcludeError` into `ApiError` via `From`.
    let resp = get_global_timelines(&request)
        .delete_or_exclude(&ttid, action)
        .await?;
    json_response(StatusCode::OK, resp)
}
/// Pull timeline from peer safekeeper instances.
async fn timeline_pull_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
    check_permission(&request, None)?;
    let data: PullTimelineRequest = json_request(&mut request).await?;
    let conf = get_conf(&request);
    let global_timelines = get_global_timelines(&request);
    // Turn the configured PEM CA certificates into reqwest certificates
    // (from their DER contents) for connecting to peers.
    let mut ca_certs = Vec::with_capacity(conf.ssl_ca_certs.len());
    for pem in &conf.ssl_ca_certs {
        let cert = reqwest::Certificate::from_der(pem.contents()).map_err(|e| {
            ApiError::InternalServerError(anyhow::anyhow!("failed to parse CA certs: {e}"))
        })?;
        ca_certs.push(cert);
    }
    let resp = pull_timeline::handle_request(
        data,
        conf.sk_auth_token.clone(),
        ca_certs,
        global_timelines,
        false,
    )
    .await?;
    json_response(StatusCode::OK, resp)
}
/// Stream tar archive with all timeline data.
///
/// `destination_id` — presumably the node id of the safekeeper pulling the
/// snapshot (forwarded to the streamer as-is; confirm against
/// `pull_timeline::stream_snapshot`).
async fn timeline_snapshot_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
    let destination = parse_request_param(&request, "destination_id")?;
    let ttid = TenantTimelineId::new(
        parse_request_param(&request, "tenant_id")?,
        parse_request_param(&request, "timeline_id")?,
    );
    check_permission(&request, Some(ttid.tenant_id))?;
    let global_timelines = get_global_timelines(&request);
    let tli = global_timelines.get(ttid).map_err(ApiError::from)?;
    let storage = global_timelines.get_wal_backup().get_storage();
    // To stream the body use wrap_stream which wants Stream of Result<Bytes>,
    // so create the chan and write to it in another task.
    let (tx, rx) = mpsc::channel(1);
    let conf = get_conf(&request);
    // NOTE(review): errors from the spawned streaming task surface only
    // through the channel/stream, not through this handler's return value.
    task::spawn(pull_timeline::stream_snapshot(
        tli,
        conf.my_id,
        destination,
        tx,
        storage,
    ));
    let rx_stream = ReceiverStream::new(rx);
    let body = Body::wrap_stream(rx_stream);
    let response = Response::builder()
        .status(200)
        .header(hyper::header::CONTENT_TYPE, "application/octet-stream")
        .body(body)
        .unwrap();
    Ok(response)
}
/// Error type for delete_or_exclude: either generation conflict or something
/// internal.
#[derive(thiserror::Error, Debug)]
pub enum DeleteOrExcludeError {
    /// The operation was refused because the requested membership
    /// configuration conflicts with the timeline's current one.
    #[error("refused to switch into excluding mconf {requested}, current: {current}")]
    Conflict {
        requested: membership::Configuration,
        current: membership::Configuration,
    },
    /// Any other, internal, failure.
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}
/// Convert DeleteOrExcludeError to ApiError.
///
/// `Conflict` maps to HTTP 409 (its Display text carries both
/// configurations); everything else becomes an internal server error.
impl From<DeleteOrExcludeError> for ApiError {
    fn from(de: DeleteOrExcludeError) -> ApiError {
        match de {
            // `..` instead of spelling out ignored fields keeps this arm
            // correct if the variant's payload ever changes.
            DeleteOrExcludeError::Conflict { .. } => ApiError::Conflict(de.to_string()),
            DeleteOrExcludeError::Other(e) => ApiError::InternalServerError(e),
        }
    }
}
/// Remove timeline locally after this node has been excluded from the
/// membership configuration. The body is the same as in the membership endpoint
/// -- conf where node is excluded -- and in principle single ep could be used
/// for both actions, but since this is a data deletion op let's keep them
/// separate.
async fn timeline_exclude_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
    let tenant_id = parse_request_param(&request, "tenant_id")?;
    let timeline_id = parse_request_param(&request, "timeline_id")?;
    let ttid = TenantTimelineId::new(tenant_id, timeline_id);
    check_permission(&request, Some(ttid.tenant_id))?;
    let global_timelines = get_global_timelines(&request);
    let data: models::TimelineMembershipSwitchRequest = json_request(&mut request).await?;
    let my_id = get_conf(&request).my_id;
    // A configuration that still contains this node is not an exclusion;
    // such requests belong on the membership switch endpoint.
    if data.mconf.contains(my_id) {
        return Err(ApiError::Forbidden(format!(
            "refused to exclude timeline with {}, node {} is member of it",
            data.mconf, my_id
        )));
    }
    let resp = global_timelines
        .delete_or_exclude(&ttid, DeleteOrExclude::Exclude(data.mconf))
        .await
        .map_err(ApiError::from)?;
    json_response(StatusCode::OK, resp)
}
/// Consider switching timeline membership configuration to the provided one.
async fn timeline_membership_handler(
    mut request: Request<Body>,
) -> Result<Response<Body>, ApiError> {
    let tenant_id = parse_request_param(&request, "tenant_id")?;
    let timeline_id = parse_request_param(&request, "timeline_id")?;
    let ttid = TenantTimelineId::new(tenant_id, timeline_id);
    check_permission(&request, Some(ttid.tenant_id))?;
    let tli = get_global_timelines(&request)
        .get(ttid)
        .map_err(ApiError::from)?;
    let data: models::TimelineMembershipSwitchRequest = json_request(&mut request).await?;
    let my_id = get_conf(&request).my_id;
    // A configuration without this node is an exclusion; the dedicated
    // exclude endpoint handles those.
    if !data.mconf.contains(my_id) {
        return Err(ApiError::Forbidden(format!(
            "refused to switch into {}, node {} is not a member of it",
            data.mconf, my_id
        )));
    }
    let req_gen = data.mconf.generation;
    let response = tli
        .membership_switch(data.mconf)
        .await
        .map_err(ApiError::InternalServerError)?;
    // Matching generations mean the switch took effect; otherwise the
    // request was ignored and we report 409 Conflict.
    if req_gen != response.current_conf.generation {
        return Err(ApiError::Conflict(format!(
            "request to switch into {} ignored, current generation {}",
            req_gen, response.current_conf.generation
        )));
    }
    json_response(StatusCode::OK, response)
}
/// Copy the source timeline (up to `until_lsn`) into a new timeline id under
/// the same tenant. Requires remote storage to be configured.
async fn timeline_copy_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
    check_permission(&request, None)?;
    let request_data: TimelineCopyRequest = json_request(&mut request).await?;
    let source_ttid = TenantTimelineId::new(
        parse_request_param(&request, "tenant_id")?,
        parse_request_param(&request, "source_timeline_id")?,
    );
    let global_timelines = get_global_timelines(&request);
    let wal_backup = global_timelines.get_wal_backup();
    // The copy goes through remote storage, so its absence is a client error.
    // `ok_or_else` avoids constructing the error on the happy path.
    let storage = wal_backup.get_storage().ok_or_else(|| {
        ApiError::BadRequest(anyhow::anyhow!("Remote Storage is not configured"))
    })?;
    let destination_ttid =
        TenantTimelineId::new(source_ttid.tenant_id, request_data.target_timeline_id);
    copy_timeline::handle_request(
        copy_timeline::Request {
            source_ttid,
            until_lsn: request_data.until_lsn,
            destination_ttid,
        },
        global_timelines,
        storage,
    )
    .instrument(info_span!(
        "copy_timeline",
        from = %source_ttid,
        to = %request_data.target_timeline_id,
        until_lsn = %request_data.until_lsn
    ))
    .await
    .map_err(ApiError::InternalServerError)?;
    json_response(StatusCode::OK, ())
}
/// Apply a JSON patch to the timeline's control file.
async fn patch_control_file_handler(
    mut request: Request<Body>,
) -> Result<Response<Body>, ApiError> {
    check_permission(&request, None)?;
    let tenant_id = parse_request_param(&request, "tenant_id")?;
    let timeline_id = parse_request_param(&request, "timeline_id")?;
    let tli = get_global_timelines(&request)
        .get(TenantTimelineId::new(tenant_id, timeline_id))
        .map_err(ApiError::from)?;
    let patch_request: patch_control_file::Request = json_request(&mut request).await?;
    let response = patch_control_file::handle_request(tli, patch_request)
        .await
        .map_err(ApiError::InternalServerError)?;
    json_response(StatusCode::OK, response)
}
/// Force persist control file.
async fn timeline_checkpoint_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
    check_permission(&request, None)?;
    let ttid = TenantTimelineId::new(
        parse_request_param(&request, "tenant_id")?,
        parse_request_param(&request, "timeline_id")?,
    );
    let tli = get_global_timelines(&request).get(ttid)?;
    // Scope the shared-state guard so it is released before responding.
    {
        let mut shared = tli.write_shared_state().await;
        shared
            .sk
            .state_mut()
            .flush()
            .await
            .map_err(ApiError::InternalServerError)?;
    }
    json_response(StatusCode::OK, ())
}
/// Compute a digest of the timeline's WAL between the mandatory `from_lsn`
/// and `until_lsn` query parameters.
async fn timeline_digest_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
    let ttid = TenantTimelineId::new(
        parse_request_param(&request, "tenant_id")?,
        parse_request_param(&request, "timeline_id")?,
    );
    check_permission(&request, Some(ttid.tenant_id))?;
    let global_timelines = get_global_timelines(&request);
    let from_lsn: Option<Lsn> = parse_query_param(&request, "from_lsn")?;
    let until_lsn: Option<Lsn> = parse_query_param(&request, "until_lsn")?;
    // Both bounds are required. `ok_or_else` builds the error lazily, so the
    // happy path does not allocate (clippy: or_fun_call).
    let request_params = TimelineDigestRequest {
        from_lsn: from_lsn
            .ok_or_else(|| ApiError::BadRequest(anyhow::anyhow!("from_lsn is required")))?,
        until_lsn: until_lsn
            .ok_or_else(|| ApiError::BadRequest(anyhow::anyhow!("until_lsn is required")))?,
    };
    let tli = global_timelines.get(ttid).map_err(ApiError::from)?;
    // Pin WAL residency for the duration of the digest computation.
    let tli = tli
        .wal_residence_guard()
        .await
        .map_err(ApiError::InternalServerError)?;
    let response = debug_dump::calculate_digest(&tli, request_params)
        .await
        .map_err(ApiError::InternalServerError)?;
    json_response(StatusCode::OK, response)
}
/// Unevict timeline and remove uploaded partial segment(s) from the remote storage.
/// A successful response returns the list of segments that existed before the deletion.
/// Aimed at one-off usage; not normally needed.
async fn timeline_backup_partial_reset(request: Request<Body>) -> Result<Response<Body>, ApiError> {
    let ttid = TenantTimelineId::new(
        parse_request_param(&request, "tenant_id")?,
        parse_request_param(&request, "timeline_id")?,
    );
    check_permission(&request, Some(ttid.tenant_id))?;
    let global_timelines = get_global_timelines(&request);
    let tli = global_timelines.get(ttid).map_err(ApiError::from)?;
    let response = tli
        .backup_partial_reset()
        .await
        .map_err(ApiError::InternalServerError)?;
    json_response(StatusCode::OK, response)
}
/// Make term at least as high as one in request. If one in request is None,
/// increment current one.
async fn timeline_term_bump_handler(
    mut request: Request<Body>,
) -> Result<Response<Body>, ApiError> {
    let tenant_id = parse_request_param(&request, "tenant_id")?;
    let timeline_id = parse_request_param(&request, "timeline_id")?;
    let ttid = TenantTimelineId::new(tenant_id, timeline_id);
    check_permission(&request, Some(ttid.tenant_id))?;
    let body: TimelineTermBumpRequest = json_request(&mut request).await?;
    let tli = get_global_timelines(&request)
        .get(ttid)
        .map_err(ApiError::from)?;
    let response = tli
        .term_bump(body.term)
        .await
        .map_err(ApiError::InternalServerError)?;
    json_response(StatusCode::OK, response)
}
/// Used only in tests to hand craft required data.
///
/// Accepts an `SkTimelineInfo` body, translates it into the broker protobuf
/// representation and records it on the timeline.
async fn record_safekeeper_info(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
    let ttid = TenantTimelineId::new(
        parse_request_param(&request, "tenant_id")?,
        parse_request_param(&request, "timeline_id")?,
    );
    check_permission(&request, Some(ttid.tenant_id))?;
    let sk_info: SkTimelineInfo = json_request(&mut request).await?;
    // Missing optional fields default to 0 / empty string.
    let proto_sk_info = SafekeeperTimelineInfo {
        safekeeper_id: 0,
        tenant_timeline_id: Some(ProtoTenantTimelineId {
            tenant_id: ttid.tenant_id.as_ref().to_owned(),
            timeline_id: ttid.timeline_id.as_ref().to_owned(),
        }),
        term: sk_info.term.unwrap_or(0),
        last_log_term: sk_info.last_log_term.unwrap_or(0),
        flush_lsn: sk_info.flush_lsn.0,
        commit_lsn: sk_info.commit_lsn.0,
        remote_consistent_lsn: sk_info.remote_consistent_lsn.0,
        peer_horizon_lsn: sk_info.peer_horizon_lsn.0,
        // `unwrap_or_default` is the idiomatic way to fall back to an empty
        // string (replaces `unwrap_or_else(|| "".to_owned())`).
        safekeeper_connstr: sk_info.safekeeper_connstr.unwrap_or_default(),
        http_connstr: sk_info.http_connstr.unwrap_or_default(),
        https_connstr: sk_info.https_connstr,
        backup_lsn: sk_info.backup_lsn.0,
        local_start_lsn: sk_info.local_start_lsn.0,
        availability_zone: None,
        standby_horizon: sk_info.standby_horizon.0,
    };
    let global_timelines = get_global_timelines(&request);
    let tli = global_timelines.get(ttid).map_err(ApiError::from)?;
    tli.record_safekeeper_info(proto_sk_info)
        .await
        .map_err(ApiError::InternalServerError)?;
    json_response(StatusCode::OK, ())
}
/// Parse query-string value `v` for key `k`, turning a parse failure into a
/// 400 Bad Request that names the offending key.
fn parse_kv_str<E: fmt::Display, T: FromStr<Err = E>>(k: &str, v: &str) -> Result<T, ApiError> {
    match v.parse() {
        Ok(parsed) => Ok(parsed),
        Err(e) => Err(ApiError::BadRequest(anyhow::anyhow!("cannot parse {k}: {e}"))),
    }
}
/// Dump debug info about all available safekeeper state.
///
/// Query parameters select which sections to include (`dump_all` flips the
/// default for most of them) and can narrow the dump to one tenant and/or
/// timeline. The JSON body is serialized in a blocking task and streamed to
/// the client through a channel, so a large dump does not stall the executor.
async fn dump_debug_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
    check_permission(&request, None)?;
    ensure_no_body(&mut request).await?;
    // Flags stay `None` while unspecified so the defaults below can be
    // derived from `dump_all`.
    let mut dump_all: Option<bool> = None;
    let mut dump_control_file: Option<bool> = None;
    let mut dump_memory: Option<bool> = None;
    let mut dump_disk_content: Option<bool> = None;
    let mut dump_term_history: Option<bool> = None;
    let mut dump_wal_last_modified: Option<bool> = None;
    let mut tenant_id: Option<TenantId> = None;
    let mut timeline_id: Option<TimelineId> = None;
    let query = request.uri().query().unwrap_or("");
    let mut values = url::form_urlencoded::parse(query.as_bytes());
    // Unknown query keys are rejected with 400 rather than being ignored.
    for (k, v) in &mut values {
        match k.as_ref() {
            "dump_all" => dump_all = Some(parse_kv_str(&k, &v)?),
            "dump_control_file" => dump_control_file = Some(parse_kv_str(&k, &v)?),
            "dump_memory" => dump_memory = Some(parse_kv_str(&k, &v)?),
            "dump_disk_content" => dump_disk_content = Some(parse_kv_str(&k, &v)?),
            "dump_term_history" => dump_term_history = Some(parse_kv_str(&k, &v)?),
            "dump_wal_last_modified" => dump_wal_last_modified = Some(parse_kv_str(&k, &v)?),
            "tenant_id" => tenant_id = Some(parse_kv_str(&k, &v)?),
            "timeline_id" => timeline_id = Some(parse_kv_str(&k, &v)?),
            _ => Err(ApiError::BadRequest(anyhow::anyhow!(
                "Unknown query parameter: {}",
                k
            )))?,
        }
    }
    // Unspecified flags follow `dump_all`, except term history which is
    // included by default.
    let dump_all = dump_all.unwrap_or(false);
    let dump_control_file = dump_control_file.unwrap_or(dump_all);
    let dump_memory = dump_memory.unwrap_or(dump_all);
    let dump_disk_content = dump_disk_content.unwrap_or(dump_all);
    let dump_term_history = dump_term_history.unwrap_or(true);
    let dump_wal_last_modified = dump_wal_last_modified.unwrap_or(dump_all);
    let global_timelines = get_global_timelines(&request);
    let args = debug_dump::Args {
        dump_all,
        dump_control_file,
        dump_memory,
        dump_disk_content,
        dump_term_history,
        dump_wal_last_modified,
        tenant_id,
        timeline_id,
    };
    let resp = debug_dump::build(args, global_timelines)
        .await
        .map_err(ApiError::InternalServerError)?;
    let started_at = std::time::Instant::now();
    // Stream the serialized dump: the blocking task below writes into `tx`,
    // chunks received on `rx` become the response body.
    let (tx, rx) = mpsc::channel(1);
    let body = Body::wrap_stream(ReceiverStream::new(rx));
    let mut writer = ChannelWriter::new(128 * 1024, tx);
    let response = Response::builder()
        .status(200)
        .header(hyper::header::CONTENT_TYPE, "application/octet-stream")
        .body(body)
        .unwrap();
    let span = info_span!("blocking");
    // serde_json serialization is CPU-bound, so run it off the async executor.
    tokio::task::spawn_blocking(move || {
        let _span = span.entered();
        let res = serde_json::to_writer(&mut writer, &resp)
            .map_err(std::io::Error::from)
            .and_then(|_| writer.flush());
        match res {
            Ok(()) => {
                tracing::info!(
                    bytes = writer.flushed_bytes(),
                    elapsed_ms = started_at.elapsed().as_millis(),
                    "responded /v1/debug_dump"
                );
            }
            Err(e) => {
                tracing::warn!("failed to write out /v1/debug_dump response: {e:#}");
                // semantics of this error are quite... unclear. we want to error the stream out to
                // abort the response to somehow notify the client that we failed.
                //
                // though, most likely the reason for failure is that the receiver is already gone.
                drop(
                    writer
                        .tx
                        .blocking_send(Err(std::io::ErrorKind::BrokenPipe.into())),
                );
            }
        }
    });
    Ok(response)
}
/// Safekeeper http router.
///
/// Wires every management API endpoint to its handler. When JWT auth is
/// configured (`conf.http_auth`), all routes except a small allowlist go
/// through the auth middleware.
pub fn make_router(
    conf: Arc<SafeKeeperConf>,
    global_timelines: Arc<GlobalTimelines>,
) -> RouterBuilder<hyper::Body, ApiError> {
    let mut router = endpoint::make_router();
    if conf.http_auth.is_some() {
        router = router.middleware(auth_middleware(|request| {
            // Health/metrics/profiling endpoints stay reachable without a token.
            const ALLOWLIST_ROUTES: &[&str] =
                &["/v1/status", "/metrics", "/profile/cpu", "/profile/heap"];
            if ALLOWLIST_ROUTES.contains(&request.uri().path()) {
                None
            } else {
                // Option<Arc<SwappableJwtAuth>> is always provided as data below, hence unwrap().
                request
                    .data::<Option<Arc<SwappableJwtAuth>>>()
                    .unwrap()
                    .as_deref()
            }
        }))
    }
    // Copied out so the closure below is 'static and captures by value.
    let force_metric_collection_on_scrape = conf.force_metric_collection_on_scrape;
    let prometheus_metrics_handler_wrapper =
        move |req| prometheus_metrics_handler(req, force_metric_collection_on_scrape);
    // NB: on any changes do not forget to update the OpenAPI spec
    // located nearby (/safekeeper/src/http/openapi_spec.yaml).
    let auth = conf.http_auth.clone();
    router
        .data(conf)
        .data(global_timelines)
        .data(auth)
        .get("/metrics", move |r| {
            request_span(r, prometheus_metrics_handler_wrapper)
        })
        .get("/profile/cpu", |r| request_span(r, profile_cpu_handler))
        .get("/profile/heap", |r| request_span(r, profile_heap_handler))
        .get("/v1/status", |r| request_span(r, status_handler))
        .put("/v1/failpoints", |r| {
            request_span(r, move |r| async {
                // Failpoints mutate process-wide behavior: require full permission.
                check_permission(&r, None)?;
                let cancel = CancellationToken::new();
                failpoints_handler(r, cancel).await
            })
        })
        .get("/v1/utilization", |r| request_span(r, utilization_handler))
        /* BEGIN_HADRON */
        .get("/v1/debug/filesystem_usage", |r| {
            request_span(r, filesystem_usage_handler)
        })
        /* END_HADRON */
        .delete("/v1/tenant/:tenant_id", |r| {
            request_span(r, tenant_delete_handler)
        })
        // Will be used in the future instead of implicit timeline creation
        .post("/v1/tenant/timeline", |r| {
            request_span(r, timeline_create_handler)
        })
        .get("/v1/tenant/timeline", |r| {
            request_span(r, timeline_list_handler)
        })
        .get("/v1/tenant/:tenant_id/timeline/:timeline_id", |r| {
            request_span(r, timeline_status_handler)
        })
        .delete("/v1/tenant/:tenant_id/timeline/:timeline_id", |r| {
            request_span(r, timeline_delete_handler)
        })
        .post("/v1/pull_timeline", |r| {
            request_span(r, timeline_pull_handler)
        })
        .put("/v1/tenant/:tenant_id/timeline/:timeline_id/exclude", |r| {
            request_span(r, timeline_exclude_handler)
        })
        .get(
            "/v1/tenant/:tenant_id/timeline/:timeline_id/snapshot/:destination_id",
            |r| request_span(r, timeline_snapshot_handler),
        )
        .put(
            "/v1/tenant/:tenant_id/timeline/:timeline_id/membership",
            |r| request_span(r, timeline_membership_handler),
        )
        .post(
            "/v1/tenant/:tenant_id/timeline/:source_timeline_id/copy",
            |r| request_span(r, timeline_copy_handler),
        )
        .patch(
            "/v1/tenant/:tenant_id/timeline/:timeline_id/control_file",
            |r| request_span(r, patch_control_file_handler),
        )
        .post(
            "/v1/tenant/:tenant_id/timeline/:timeline_id/checkpoint",
            |r| request_span(r, timeline_checkpoint_handler),
        )
        .get("/v1/tenant/:tenant_id/timeline/:timeline_id/digest", |r| {
            request_span(r, timeline_digest_handler)
        })
        .post(
            "/v1/tenant/:tenant_id/timeline/:timeline_id/backup_partial_reset",
            |r| request_span(r, timeline_backup_partial_reset),
        )
        .post(
            "/v1/tenant/:tenant_id/timeline/:timeline_id/term_bump",
            |r| request_span(r, timeline_term_bump_handler),
        )
        .post("/v1/record_safekeeper_info/:tenant_id/:timeline_id", |r| {
            request_span(r, record_safekeeper_info)
        })
        .get("/v1/debug_dump", |r| request_span(r, dump_debug_handler))
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Round-trips an `AcceptorStateStatus` through serde and pins the exact
    /// JSON wire format, including the "X/Y" textual rendering of `Lsn`.
    #[test]
    fn test_term_switch_entry_api_serialize() {
        let status = AcceptorStateStatus {
            term: 1,
            epoch: 1,
            term_history: vec![TermSwitchApiEntry {
                term: 1,
                lsn: Lsn(0x16FFDDDD),
            }],
        };
        let serialized = serde_json::to_string(&status).unwrap();
        let expected = r#"{"term":1,"epoch":1,"term_history":[{"term":1,"lsn":"0/16FFDDDD"}]}"#;
        assert_eq!(serialized, expected);
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/tests/random_test.rs | safekeeper/tests/random_test.rs | use rand::Rng;
use tracing::{info, warn};
use crate::walproposer_sim::log::{init_logger, init_tracing_logger};
use crate::walproposer_sim::simulation::{TestConfig, generate_network_opts, generate_schedule};
use crate::walproposer_sim::simulation_logs::validate_events;
pub mod walproposer_sim;
// Generates 500 random seeds and runs a schedule for each of them.
// If you see this test fail, please report the last seed to the
// @safekeeper team.
#[test]
fn test_random_schedules() -> anyhow::Result<()> {
    // One shared logger/clock for the whole run; every iteration draws a fresh
    // seed and derives the network shape and event schedule from it.
    let sim_clock = init_logger();
    let mut cfg = TestConfig::new(Some(sim_clock));
    for _iteration in 0..500 {
        let seed = rand::rng().random::<u64>();
        cfg.network = generate_network_opts(seed);
        let test = cfg.start(seed);
        warn!("Running test with seed {}", seed);
        let schedule = generate_schedule(seed);
        test.run_schedule(&schedule).unwrap();
        validate_events(test.world.take_events());
        test.world.deallocate();
    }
    Ok(())
}
// After you found a seed that fails, you can insert this seed here
// and run the test to see the full debug output.
#[test]
fn test_one_schedule() -> anyhow::Result<()> {
    // Debug-level logging on: this test exists to replay one failing seed
    // (found by test_random_schedules) with full output.
    let sim_clock = init_tracing_logger(true);
    let mut cfg = TestConfig::new(Some(sim_clock));
    let seed: u64 = 11047466935058776390;
    cfg.network = generate_network_opts(seed);
    info!("network: {:?}", cfg.network);
    let test = cfg.start(seed);
    warn!("Running test with seed {}", seed);
    let schedule = generate_schedule(seed);
    info!("schedule: {:?}", schedule);
    test.run_schedule(&schedule).unwrap();
    validate_events(test.world.take_events());
    test.world.deallocate();
    Ok(())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/tests/simple_test.rs | safekeeper/tests/simple_test.rs | use tracing::info;
use utils::lsn::Lsn;
use crate::walproposer_sim::log::init_logger;
use crate::walproposer_sim::simulation::TestConfig;
pub mod walproposer_sim;
// Check that first start of sync_safekeepers() returns 0/0 on empty safekeepers.
#[test]
fn sync_empty_safekeepers() {
    let clock = init_logger();
    let config = TestConfig::new(Some(clock));
    let test = config.start(1337);
    // Syncing a fresh set of safekeepers must yield LSN 0/0, and a second
    // sync must return the same result (idempotence).
    let lsn = test.sync_safekeepers().unwrap();
    assert_eq!(lsn, Lsn(0));
    // Fixed typo in log message: "Sucessfully" -> "Successfully".
    info!("Successfully synced empty safekeepers at 0/0");
    let lsn = test.sync_safekeepers().unwrap();
    assert_eq!(lsn, Lsn(0));
    info!("Successfully synced (again) empty safekeepers at 0/0");
}
// Check that there are no panics when we are writing and streaming WAL to safekeepers.
#[test]
fn run_walproposer_generate_wal() {
    let clock = init_logger();
    let config = TestConfig::new(Some(clock));
    let test = config.start(1337);
    let lsn = test.sync_safekeepers().unwrap();
    assert_eq!(lsn, Lsn(0));
    // Fixed typo in log message: "Sucessfully" -> "Successfully".
    info!("Successfully synced empty safekeepers at 0/0");
    let mut wp = test.launch_walproposer(lsn);
    // wait for walproposer to start
    test.poll_for_duration(30);
    // just write some WAL
    for _ in 0..100 {
        wp.write_tx(1);
        test.poll_for_duration(5);
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/tests/misc_test.rs | safekeeper/tests/misc_test.rs | use std::sync::Arc;
use tracing::{info, warn};
use utils::lsn::Lsn;
use crate::walproposer_sim::log::{init_logger, init_tracing_logger};
use crate::walproposer_sim::simulation::{
Schedule, TestAction, TestConfig, generate_network_opts, generate_schedule,
};
pub mod walproposer_sim;
// Test that simulation supports restarting (crashing) safekeepers.
#[test]
fn crash_safekeeper() {
    let clock = init_logger();
    let config = TestConfig::new(Some(clock));
    let test = config.start(1337);
    let lsn = test.sync_safekeepers().unwrap();
    assert_eq!(lsn, Lsn(0));
    // Fixed typo in log message: "Sucessfully" -> "Successfully".
    info!("Successfully synced empty safekeepers at 0/0");
    let mut wp = test.launch_walproposer(lsn);
    // Write some WAL and crash safekeeper 0 without waiting for replication.
    test.poll_for_duration(30);
    wp.write_tx(3);
    test.servers[0].restart();
    // Wait some time, so that walproposer can reconnect.
    test.poll_for_duration(2000);
}
// Test that walproposer can be crashed (stopped).
#[test]
fn test_simple_restart() {
    let clock = init_logger();
    let config = TestConfig::new(Some(clock));
    let test = config.start(1337);
    let lsn = test.sync_safekeepers().unwrap();
    assert_eq!(lsn, Lsn(0));
    // Fixed typos in log messages: "Sucessfully" -> "Successfully".
    info!("Successfully synced empty safekeepers at 0/0");
    let mut wp = test.launch_walproposer(lsn);
    test.poll_for_duration(30);
    wp.write_tx(3);
    test.poll_for_duration(100);
    // Stop the walproposer, then verify the safekeepers can still be synced.
    wp.stop();
    drop(wp);
    let lsn = test.sync_safekeepers().unwrap();
    info!("Successfully synced safekeepers at {}", lsn);
}
// Test running a simple schedule, restarting everything several times.
#[test]
fn test_simple_schedule() -> anyhow::Result<()> {
    let sim_clock = init_logger();
    let mut cfg = TestConfig::new(Some(sim_clock));
    // Shorter keepalive than the default so reconnects happen within the run.
    cfg.network.keepalive_timeout = Some(100);
    let test = cfg.start(1337);
    // Restart each component at least once while transactions keep flowing.
    let schedule: Schedule = vec![
        (0, TestAction::RestartWalProposer),
        (50, TestAction::WriteTx(5)),
        (100, TestAction::RestartSafekeeper(0)),
        (100, TestAction::WriteTx(5)),
        (110, TestAction::RestartSafekeeper(1)),
        (110, TestAction::WriteTx(5)),
        (120, TestAction::RestartSafekeeper(2)),
        (120, TestAction::WriteTx(5)),
        (201, TestAction::RestartWalProposer),
        (251, TestAction::RestartSafekeeper(0)),
        (251, TestAction::RestartSafekeeper(1)),
        (251, TestAction::RestartSafekeeper(2)),
        (251, TestAction::WriteTx(5)),
        (255, TestAction::WriteTx(5)),
        (1000, TestAction::WriteTx(5)),
    ];
    test.run_schedule(&schedule)?;
    info!("Test finished, stopping all threads");
    test.world.deallocate();
    Ok(())
}
// Test that simulation can process 10^4 transactions.
#[test]
fn test_many_tx() -> anyhow::Result<()> {
    let clock = init_logger();
    let config = TestConfig::new(Some(clock));
    let test = config.start(1337);
    // 100 schedule points, 100 transactions each => 10^4 transactions total.
    let schedule: Schedule = (0..100)
        .map(|i| (i * 10, TestAction::WriteTx(100)))
        .collect();
    test.run_schedule(&schedule)?;
    info!("Test finished, stopping all threads");
    test.world.stop_all();
    let events = test.world.take_events();
    info!("Events: {:?}", events);
    // Find the last "commit_lsn;<lsn>" event emitted during the run.
    let last_commit_lsn = events
        .iter()
        .rev()
        .find_map(|event| {
            if !event.data.starts_with("commit_lsn;") {
                return None;
            }
            let lsn: u64 = event.data.split(';').nth(1).unwrap().parse().unwrap();
            Some(lsn)
        })
        .unwrap();
    let initdb_lsn = 21623024;
    let diff = last_commit_lsn - initdb_lsn;
    info!("Last commit lsn: {}, diff: {}", last_commit_lsn, diff);
    // Each tx is at least 8 bytes and 100 txs were written at each of the 100
    // schedule points, so the WAL must have advanced by more than 100*100*8 bytes.
    assert!(diff > 100 * 100 * 8);
    Ok(())
}
// Checks that we don't have nasty circular dependencies, preventing Arc from deallocating.
// This test doesn't really assert anything, you need to run it manually to check if there
// is any issue.
#[test]
fn test_res_dealloc() -> anyhow::Result<()> {
    let sim_clock = init_tracing_logger(true);
    let mut cfg = TestConfig::new(Some(sim_clock));
    let seed = 123456;
    cfg.network = generate_network_opts(seed);
    let test = cfg.start(seed);
    warn!("Running test with seed {}", seed);
    let schedule = generate_schedule(seed);
    info!("schedule: {:?}", schedule);
    test.run_schedule(&schedule).unwrap();
    test.world.stop_all();
    // Keep one extra Arc to the world, drop everything else, and print the
    // strong count before and after deallocate() to spot leaked references.
    let world = test.world.clone();
    drop(test);
    info!("world strong count: {}", Arc::strong_count(&world));
    world.deallocate();
    info!("world strong count: {}", Arc::strong_count(&world));
    Ok(())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/tests/walproposer_sim/simulation.rs | safekeeper/tests/walproposer_sim/simulation.rs | use std::cell::Cell;
use std::str::FromStr;
use std::sync::Arc;
use desim::executor::{self, ExternalHandle};
use desim::node_os::NodeOs;
use desim::options::{Delay, NetworkOptions};
use desim::proto::{AnyMessage, NodeEvent};
use desim::world::{Node, World};
use rand::{Rng, SeedableRng};
use tracing::{debug, info_span, warn};
use utils::id::TenantTimelineId;
use utils::lsn::Lsn;
use walproposer::walproposer::{Config, Wrapper};
use super::log::SimClock;
use super::safekeeper_disk::SafekeeperDisk;
use super::walproposer_api;
use super::walproposer_disk::DiskWalProposer;
use crate::walproposer_sim::safekeeper::run_server;
use crate::walproposer_sim::walproposer_api::SimulationApi;
/// Simulated safekeeper node.
pub struct SafekeeperNode {
    /// Simulated host this safekeeper runs on.
    pub node: Arc<Node>,
    /// Node id, copied from `node.id`.
    pub id: u32,
    /// Persistent state; survives restarts of the server thread.
    pub disk: Arc<SafekeeperDisk>,
    /// Handle of the currently running server thread; swapped on restart.
    pub thread: Cell<ExternalHandle>,
}
impl SafekeeperNode {
/// Create and start a safekeeper at the specified Node.
pub fn new(node: Arc<Node>) -> Self {
let disk = Arc::new(SafekeeperDisk::new());
let thread = Cell::new(SafekeeperNode::launch(disk.clone(), node.clone()));
Self {
id: node.id,
node,
disk,
thread,
}
}
fn launch(disk: Arc<SafekeeperDisk>, node: Arc<Node>) -> ExternalHandle {
// start the server thread
node.launch(move |os| {
run_server(os, disk).expect("server should finish without errors");
})
}
/// Restart the safekeeper.
pub fn restart(&self) {
let new_thread = SafekeeperNode::launch(self.disk.clone(), self.node.clone());
let old_thread = self.thread.replace(new_thread);
old_thread.crash_stop();
}
}
/// Simulated walproposer node.
pub struct WalProposer {
    /// Handle of the walproposer thread.
    thread: ExternalHandle,
    /// Simulated host the walproposer runs on.
    node: Arc<Node>,
    /// In-memory WAL written by this walproposer.
    disk: Arc<DiskWalProposer>,
    /// True when this instance was launched in sync_safekeepers mode.
    sync_safekeepers: bool,
}
impl WalProposer {
    /// Generic start function for both modes.
    ///
    /// Runs on the simulated node's thread: builds the walproposer `Config`
    /// and blocks inside `Wrapper::start`. `lsn == None` selects
    /// sync_safekeepers mode.
    fn start(
        os: NodeOs,
        disk: Arc<DiskWalProposer>,
        ttid: TenantTimelineId,
        addrs: Vec<String>,
        lsn: Option<Lsn>,
    ) {
        let sync_safekeepers = lsn.is_none();
        // Tag every log line from this thread with the mode and start time.
        let _enter = if sync_safekeepers {
            info_span!("sync", started = executor::now()).entered()
        } else {
            info_span!("walproposer", started = executor::now()).entered()
        };
        os.log_event(format!("started;walproposer;{}", sync_safekeepers as i32));
        let config = Config {
            ttid,
            safekeepers_list: addrs,
            safekeeper_conninfo_options: String::new(),
            safekeeper_reconnect_timeout: 1000,
            safekeeper_connection_timeout: 5000,
            sync_safekeepers,
        };
        let args = walproposer_api::Args {
            os,
            config: config.clone(),
            disk,
            redo_start_lsn: lsn,
        };
        let api = SimulationApi::new(args);
        let wp = Wrapper::new(Box::new(api), config);
        // Blocks until the walproposer exits.
        wp.start();
    }
    /// Start walproposer in a sync_safekeepers mode.
    pub fn launch_sync(ttid: TenantTimelineId, addrs: Vec<String>, node: Arc<Node>) -> Self {
        debug!("sync_safekeepers started at node {}", node.id);
        let disk = DiskWalProposer::new();
        let disk_wp = disk.clone();
        // start the client thread
        let handle = node.launch(move |os| {
            WalProposer::start(os, disk_wp, ttid, addrs, None);
        });
        Self {
            thread: handle,
            node,
            disk,
            sync_safekeepers: true,
        }
    }
    /// Start walproposer in a normal mode.
    pub fn launch_walproposer(
        ttid: TenantTimelineId,
        addrs: Vec<String>,
        node: Arc<Node>,
        lsn: Lsn,
    ) -> Self {
        debug!("walproposer started at node {}", node.id);
        let disk = DiskWalProposer::new();
        // Position the in-memory WAL at the caller-provided start LSN.
        disk.lock().reset_to(lsn);
        let disk_wp = disk.clone();
        // start the client thread
        let handle = node.launch(move |os| {
            WalProposer::start(os, disk_wp, ttid, addrs, Some(lsn));
        });
        Self {
            thread: handle,
            node,
            disk,
            sync_safekeepers: false,
        }
    }
    /// Append `cnt` logical-message records to the local WAL and wake the
    /// walproposer so it starts streaming them.
    pub fn write_tx(&mut self, cnt: usize) {
        let start_lsn = self.disk.lock().flush_rec_ptr();
        for _ in 0..cnt {
            self.disk
                .lock()
                .insert_logical_message(c"prefix", b"message");
        }
        let end_lsn = self.disk.lock().flush_rec_ptr();
        // log event
        self.node
            .log_event(format!("write_wal;{};{};{}", start_lsn.0, end_lsn.0, cnt));
        // now we need to set "Latch" in walproposer
        self.node
            .node_events()
            .send(NodeEvent::Internal(AnyMessage::Just32(0)));
    }
    /// Crash-stop the walproposer thread.
    pub fn stop(&self) {
        self.thread.crash_stop();
    }
}
/// Holds basic simulation settings, such as network options.
pub struct TestConfig {
    /// Delay ranges and failure probabilities of the simulated network.
    pub network: NetworkOptions,
    /// Global time limit for a run, in simulated time units.
    pub timeout: u64,
    /// Clock to attach to the logger when a world is started, if any.
    pub clock: Option<SimClock>,
}
impl TestConfig {
    /// Create a new TestConfig with default settings: a reliable network
    /// (zero failure probability, 1..5 tick delays, 2s keepalive) and a
    /// 10-second global timeout.
    pub fn new(clock: Option<SimClock>) -> Self {
        let default_delay = Delay {
            min: 1,
            max: 5,
            fail_prob: 0.0,
        };
        Self {
            network: NetworkOptions {
                keepalive_timeout: Some(2000),
                connect_delay: default_delay.clone(),
                send_delay: default_delay,
            },
            timeout: 10_000,
            clock,
        }
    }
    /// Start a new simulation with the specified seed: a world, three
    /// safekeepers, and a fresh tenant/timeline id.
    pub fn start(&self, seed: u64) -> Test {
        let world = Arc::new(World::new(seed, Arc::new(self.network.clone())));
        if let Some(clock) = &self.clock {
            clock.set_clock(world.clock());
        }
        let servers = [
            SafekeeperNode::new(world.new_node()),
            SafekeeperNode::new(world.new_node()),
            SafekeeperNode::new(world.new_node()),
        ];
        // Connection strings of the three safekeepers, in node order.
        let sk_list = servers.iter().map(|s| format!("node:{}", s.id)).collect();
        Test {
            world,
            servers,
            sk_list,
            ttid: TenantTimelineId::generate(),
            timeout: self.timeout,
        }
    }
}
/// Holds simulation state.
pub struct Test {
    /// The simulated world (scheduler, network, clock).
    pub world: Arc<World>,
    /// The three simulated safekeepers.
    pub servers: [SafekeeperNode; 3],
    /// Safekeeper connection strings of the form "node:<id>".
    pub sk_list: Vec<String>,
    /// Tenant/timeline id used by all walproposers in this test.
    pub ttid: TenantTimelineId,
    /// Global time limit, copied from `TestConfig::timeout`.
    pub timeout: u64,
}
impl Test {
    /// Start a sync_safekeepers thread and wait for it to finish.
    ///
    /// Returns the LSN printed by sync_safekeepers on success, or an error on
    /// timeout / non-zero exit code.
    pub fn sync_safekeepers(&self) -> anyhow::Result<Lsn> {
        let wp = self.launch_sync_safekeepers();
        // poll until exit or timeout
        let time_limit = self.timeout;
        while self.world.step() && self.world.now() < time_limit && !wp.thread.is_finished() {}
        if !wp.thread.is_finished() {
            anyhow::bail!("timeout or idle stuck");
        }
        // result() is (exit_code, output); the output carries the LSN as text.
        let res = wp.thread.result();
        if res.0 != 0 {
            anyhow::bail!("non-zero exitcode: {:?}", res);
        }
        let lsn = Lsn::from_str(&res.1)?;
        Ok(lsn)
    }
    /// Spawn a new sync_safekeepers thread.
    pub fn launch_sync_safekeepers(&self) -> WalProposer {
        WalProposer::launch_sync(self.ttid, self.sk_list.clone(), self.world.new_node())
    }
    /// Spawn a new walproposer thread.
    pub fn launch_walproposer(&self, lsn: Lsn) -> WalProposer {
        // A zero LSN (fresh cluster) is replaced by a fixed post-basebackup LSN.
        let lsn = if lsn.0 == 0 {
            // usual LSN after basebackup
            Lsn(21623024)
        } else {
            lsn
        };
        WalProposer::launch_walproposer(self.ttid, self.sk_list.clone(), self.world.new_node(), lsn)
    }
    /// Execute the simulation for the specified duration.
    pub fn poll_for_duration(&self, duration: u64) {
        let time_limit = std::cmp::min(self.world.now() + duration, self.timeout);
        while self.world.step() && self.world.now() < time_limit {}
    }
    /// Execute the simulation together with events defined in some schedule.
    ///
    /// Keeps a walproposer alive throughout: whenever sync_safekeepers exits
    /// successfully, a normal walproposer is started from the synced LSN;
    /// failed syncs are retried.
    pub fn run_schedule(&self, schedule: &Schedule) -> anyhow::Result<()> {
        // scheduling empty events so that world will stop in those points
        {
            let clock = self.world.clock();
            let now = self.world.now();
            for (time, _) in schedule {
                if *time < now {
                    continue;
                }
                clock.schedule_fake(*time - now);
            }
        }
        let mut wp = self.launch_sync_safekeepers();
        // Counters for transactions that were / were not delivered to a live
        // walproposer; only reported via debug logs at the end.
        let mut skipped_tx = 0;
        let mut started_tx = 0;
        let mut schedule_ptr = 0;
        loop {
            // Promote a finished sync_safekeepers into a real walproposer
            // (or retry the sync if it failed).
            if wp.sync_safekeepers && wp.thread.is_finished() {
                let res = wp.thread.result();
                if res.0 != 0 {
                    warn!("sync non-zero exitcode: {:?}", res);
                    debug!("restarting sync_safekeepers");
                    // restart the sync_safekeepers
                    wp = self.launch_sync_safekeepers();
                    continue;
                }
                let lsn = Lsn::from_str(&res.1)?;
                debug!("sync_safekeepers finished at LSN {}", lsn);
                wp = self.launch_walproposer(lsn);
                debug!("walproposer started at thread {}", wp.thread.id());
            }
            // Apply every schedule entry that is due at (or before) now.
            let now = self.world.now();
            while schedule_ptr < schedule.len() && schedule[schedule_ptr].0 <= now {
                if now != schedule[schedule_ptr].0 {
                    warn!("skipped event {:?} at {}", schedule[schedule_ptr], now);
                }
                let action = &schedule[schedule_ptr].1;
                match action {
                    TestAction::WriteTx(size) => {
                        // Writes only reach WAL when a normal (non-sync)
                        // walproposer is currently alive.
                        if !wp.sync_safekeepers && !wp.thread.is_finished() {
                            started_tx += *size;
                            wp.write_tx(*size);
                            debug!("written {} transactions", size);
                        } else {
                            skipped_tx += size;
                            debug!("skipped {} transactions", size);
                        }
                    }
                    TestAction::RestartSafekeeper(id) => {
                        debug!("restarting safekeeper {}", id);
                        self.servers[*id].restart();
                    }
                    TestAction::RestartWalProposer => {
                        debug!("restarting sync_safekeepers");
                        wp.stop();
                        wp = self.launch_sync_safekeepers();
                    }
                }
                schedule_ptr += 1;
            }
            if schedule_ptr == schedule.len() {
                break;
            }
            let next_event_time = schedule[schedule_ptr].0;
            // poll until the next event
            if wp.thread.is_finished() {
                while self.world.step() && self.world.now() < next_event_time {}
            } else {
                while self.world.step()
                    && self.world.now() < next_event_time
                    && !wp.thread.is_finished()
                {}
            }
        }
        debug!(
            "finished schedule, total steps: {}",
            self.world.get_thread_step_count()
        );
        debug!("skipped_tx: {}", skipped_tx);
        debug!("started_tx: {}", started_tx);
        Ok(())
    }
}
/// A single simulation event: what to do at a given point in time.
#[derive(Debug, Clone)]
pub enum TestAction {
    /// Write the given number of transactions through the walproposer.
    WriteTx(usize),
    /// Crash-restart the safekeeper with the given index (0..3).
    RestartSafekeeper(usize),
    /// Crash the walproposer; it is relaunched via sync_safekeepers.
    RestartWalProposer,
}
/// Ordered list of `(time, action)` pairs driving a simulation run.
pub type Schedule = Vec<(u64, TestAction)>;
pub fn generate_schedule(seed: u64) -> Schedule {
let mut rng = rand::rngs::StdRng::seed_from_u64(seed);
let mut schedule = Vec::new();
let mut time = 0;
let cnt = rng.random_range(1..100);
for _ in 0..cnt {
time += rng.random_range(0..500);
let action = match rng.random_range(0..3) {
0 => TestAction::WriteTx(rng.random_range(1..10)),
1 => TestAction::RestartSafekeeper(rng.random_range(0..3)),
2 => TestAction::RestartWalProposer,
_ => unreachable!(),
};
schedule.push((time, action));
}
schedule
}
/// Deterministically derive network options from `seed`. Failure
/// probabilities form a decreasing chain: cap >= connect >= send.
pub fn generate_network_opts(seed: u64) -> NetworkOptions {
    let mut rng = rand::rngs::StdRng::seed_from_u64(seed);
    let keepalive = rng.random_range(100..2000);
    let delay_max = rng.random_range(1..2 * keepalive);
    let delay_min = rng.random_range(1..=delay_max);
    let fail_cap = rng.random_range(0.0..0.9);
    let connect_fail = rng.random_range(0.0..fail_cap);
    let send_fail = rng.random_range(0.0..connect_fail);
    // Connect and send share the same delay range, differing only in
    // failure probability.
    let connect_delay = Delay {
        min: delay_min,
        max: delay_max,
        fail_prob: connect_fail,
    };
    let send_delay = Delay {
        fail_prob: send_fail,
        ..connect_delay.clone()
    };
    NetworkOptions {
        keepalive_timeout: Some(keepalive),
        connect_delay,
        send_delay,
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/tests/walproposer_sim/walproposer_disk.rs | safekeeper/tests/walproposer_sim/walproposer_disk.rs | use std::ffi::CStr;
use std::sync::Arc;
use parking_lot::{Mutex, MutexGuard};
use postgres_ffi::v16::wal_generator::{LogicalMessageGenerator, WalGenerator};
use utils::lsn::Lsn;
use super::block_storage::BlockStorage;
/// Simulation implementation of walproposer WAL storage.
pub struct DiskWalProposer {
    // All mutable state lives behind a single mutex.
    state: Mutex<State>,
}
impl DiskWalProposer {
    /// Construct an empty walproposer disk (everything at LSN 0), wrapped in
    /// an `Arc` for sharing between the test driver and the walproposer thread.
    pub fn new() -> Arc<DiskWalProposer> {
        let initial = State {
            internal_available_lsn: Lsn(0),
            prev_lsn: Lsn(0),
            disk: BlockStorage::new(),
            wal_generator: WalGenerator::new(LogicalMessageGenerator::new(c"", &[]), Lsn(0)),
        };
        Arc::new(DiskWalProposer {
            state: Mutex::new(initial),
        })
    }
    /// Acquire the state lock.
    pub fn lock(&self) -> MutexGuard<State> {
        self.state.lock()
    }
}
/// Mutable walproposer disk state: WAL bytes plus the generator cursor.
pub struct State {
    // flush_lsn: end of the WAL generated/written so far
    internal_available_lsn: Lsn,
    // needed for WAL generation (start LSN of the previous record)
    prev_lsn: Lsn,
    // actual WAL storage
    disk: BlockStorage,
    // WAL record generator
    wal_generator: WalGenerator<LogicalMessageGenerator>,
}
impl State {
    /// Read `buf.len()` bytes of WAL starting at byte offset `pos`.
    pub fn read(&self, pos: u64, buf: &mut [u8]) {
        self.disk.read(pos, buf);
        // TODO: fail on reading uninitialized data
    }
    /// Write `buf` to the WAL at byte offset `pos`.
    pub fn write(&mut self, pos: u64, buf: &[u8]) {
        self.disk.write(pos, buf);
    }
    /// Update the internal available LSN to the given value.
    pub fn reset_to(&mut self, lsn: Lsn) {
        self.internal_available_lsn = lsn;
        self.prev_lsn = Lsn(0); // Safekeeper doesn't care if this is omitted
        // Keep the generator cursor in sync with the new position.
        self.wal_generator.lsn = self.internal_available_lsn;
        self.wal_generator.prev_lsn = self.prev_lsn;
    }
    /// Get current LSN.
    pub fn flush_rec_ptr(&self) -> Lsn {
        self.internal_available_lsn
    }
    /// Inserts a logical record in the WAL at the current LSN.
    pub fn insert_logical_message(&mut self, prefix: &CStr, msg: &[u8]) {
        let (_, record) = self.wal_generator.append_logical_message(prefix, msg);
        // Write the encoded record at the current end of WAL, then advance
        // prev_lsn / available_lsn past it.
        self.disk.write(self.internal_available_lsn.into(), &record);
        self.prev_lsn = self.internal_available_lsn;
        self.internal_available_lsn += record.len() as u64;
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/tests/walproposer_sim/safekeeper_disk.rs | safekeeper/tests/walproposer_sim/safekeeper_disk.rs | use std::collections::HashMap;
use std::ops::Deref;
use std::sync::Arc;
use std::time::Instant;
use anyhow::Result;
use bytes::{Buf, BytesMut};
use futures::future::BoxFuture;
use parking_lot::Mutex;
use postgres_ffi::waldecoder::WalStreamDecoder;
use postgres_ffi::{PgMajorVersion, XLogSegNo};
use safekeeper::metrics::WalStorageMetrics;
use safekeeper::state::TimelinePersistentState;
use safekeeper::{control_file, wal_storage};
use tracing::{debug, info};
use utils::id::TenantTimelineId;
use utils::lsn::Lsn;
use super::block_storage::BlockStorage;
/// All safekeeper state that is usually saved to disk.
pub struct SafekeeperDisk {
    /// Per-timeline state; entries are created lazily by `put_state`.
    pub timelines: Mutex<HashMap<TenantTimelineId, Arc<TimelineDisk>>>,
}
impl Default for SafekeeperDisk {
fn default() -> Self {
Self::new()
}
}
impl SafekeeperDisk {
    /// Create an empty disk with no timelines.
    pub fn new() -> Self {
        Self {
            timelines: Mutex::new(HashMap::new()),
        }
    }
    /// Store `state` for the given timeline, creating the timeline disk on
    /// first use, and return the shared timeline disk.
    pub fn put_state(
        &self,
        ttid: &TenantTimelineId,
        state: TimelinePersistentState,
    ) -> Arc<TimelineDisk> {
        let mut timelines = self.timelines.lock();
        if let Some(existing) = timelines.get(ttid) {
            // Timeline already known: overwrite its control-file state in place.
            *existing.state.lock() = state;
            existing.clone()
        } else {
            let disk = Arc::new(TimelineDisk {
                state: Mutex::new(state),
                wal: Mutex::new(BlockStorage::new()),
            });
            timelines.insert(*ttid, disk.clone());
            disk
        }
    }
}
/// Control file state and WAL storage.
pub struct TimelineDisk {
    /// Persistent control-file state of the timeline.
    pub state: Mutex<TimelinePersistentState>,
    /// In-memory block storage holding the timeline's WAL bytes.
    pub wal: Mutex<BlockStorage>,
}
/// Implementation of `control_file::Storage` trait.
pub struct DiskStateStorage {
    /// Last state passed to `persist` (or the snapshot taken at construction).
    persisted_state: TimelinePersistentState,
    /// Backing timeline disk, shared with the rest of the simulation.
    disk: Arc<TimelineDisk>,
    /// Creation time; note that `persist` does not update it.
    last_persist_at: Instant,
}
impl DiskStateStorage {
    /// Snapshot the timeline's current persistent state as the baseline for
    /// this storage handle.
    pub fn new(disk: Arc<TimelineDisk>) -> Self {
        let persisted_state = disk.state.lock().clone();
        DiskStateStorage {
            persisted_state,
            disk,
            last_persist_at: Instant::now(),
        }
    }
}
impl control_file::Storage for DiskStateStorage {
    /// Persist safekeeper state on disk and update internal state.
    async fn persist(&mut self, s: &TimelinePersistentState) -> Result<()> {
        let snapshot = s.clone();
        *self.disk.state.lock() = snapshot.clone();
        self.persisted_state = snapshot;
        Ok(())
    }
    /// Timestamp of last persist.
    fn last_persist_at(&self) -> Instant {
        // TODO: don't rely on it in tests
        self.last_persist_at
    }
}
impl Deref for DiskStateStorage {
    type Target = TimelinePersistentState;
    // Reads of the state go through the in-memory copy of the last persisted
    // snapshot, not through the disk mutex.
    fn deref(&self) -> &Self::Target {
        &self.persisted_state
    }
}
/// Implementation of `wal_storage::Storage` trait.
///
/// WAL bytes are buffered in `unflushed_bytes` until `flush_wal` copies them
/// into the shared `TimelineDisk`.
pub struct DiskWALStorage {
    /// Written to disk, but possibly still in the cache and not fully persisted.
    /// Also can be ahead of record_lsn, if happen to be in the middle of a WAL record.
    write_lsn: Lsn,
    /// The LSN of the last WAL record written to disk. Still can be not fully flushed.
    write_record_lsn: Lsn,
    /// The LSN of the last WAL record flushed to disk.
    flush_record_lsn: Lsn,
    /// Decoder is required for detecting boundaries of WAL records.
    decoder: WalStreamDecoder,
    /// Bytes of WAL records that are not yet written to disk.
    unflushed_bytes: BytesMut,
    /// Contains BlockStorage for WAL.
    disk: Arc<TimelineDisk>,
}
impl DiskWALStorage {
    /// Create storage over `disk`, locating the end of existing WAL by
    /// decoding forward from `state.commit_lsn` (or 0 for an empty timeline).
    pub fn new(disk: Arc<TimelineDisk>, state: &TimelinePersistentState) -> Result<Self> {
        let write_lsn = if state.commit_lsn == Lsn(0) {
            Lsn(0)
        } else {
            Self::find_end_of_wal(disk.clone(), state.commit_lsn)?
        };
        // Freshly opened storage starts fully flushed: nothing is buffered yet.
        let flush_lsn = write_lsn;
        Ok(DiskWALStorage {
            write_lsn,
            write_record_lsn: flush_lsn,
            flush_record_lsn: flush_lsn,
            decoder: WalStreamDecoder::new(flush_lsn, PgMajorVersion::PG16),
            unflushed_bytes: BytesMut::new(),
            disk,
        })
    }
    /// Scan WAL from `start_lsn` in 8 KiB chunks until the decoder errors,
    /// returning the LSN reported for the last cleanly decoded record
    /// (or `start_lsn` if nothing decodes).
    fn find_end_of_wal(disk: Arc<TimelineDisk>, start_lsn: Lsn) -> Result<Lsn> {
        let mut buf = [0; 8192];
        let mut pos = start_lsn.0;
        let mut decoder = WalStreamDecoder::new(start_lsn, PgMajorVersion::PG16);
        let mut result = start_lsn;
        loop {
            disk.wal.lock().read(pos, &mut buf);
            pos += buf.len() as u64;
            decoder.feed_bytes(&buf);
            // Drain every record available in the freshly fed chunk.
            loop {
                match decoder.poll_decode() {
                    Ok(Some(record)) => result = record.0,
                    Err(e) => {
                        // Decode failure marks the end of valid WAL.
                        debug!(
                            "find_end_of_wal reached end at {:?}, decode error: {:?}",
                            result, e
                        );
                        return Ok(result);
                    }
                    Ok(None) => break, // need more data
                }
            }
        }
    }
}
impl wal_storage::Storage for DiskWALStorage {
    // Last written LSN.
    fn write_lsn(&self) -> Lsn {
        self.write_lsn
    }
    /// LSN of last durably stored WAL record.
    fn flush_lsn(&self) -> Lsn {
        self.flush_record_lsn
    }
    // No segment preallocation is needed for the in-memory disk.
    async fn initialize_first_segment(&mut self, _init_lsn: Lsn) -> Result<()> {
        Ok(())
    }
    /// Write piece of WAL from buf to disk, but not necessarily sync it.
    ///
    /// Buffers the bytes and feeds them to the decoder so that
    /// `write_record_lsn` tracks the last complete record boundary.
    async fn write_wal(&mut self, startpos: Lsn, buf: &[u8]) -> Result<()> {
        // Writes must be strictly sequential.
        if self.write_lsn != startpos {
            panic!("write_wal called with wrong startpos");
        }
        self.unflushed_bytes.extend_from_slice(buf);
        self.write_lsn += buf.len() as u64;
        // Resync the decoder if it is not positioned at this write.
        if self.decoder.available() != startpos {
            info!(
                "restart decoder from {} to {}",
                self.decoder.available(),
                startpos,
            );
            self.decoder = WalStreamDecoder::new(startpos, PgMajorVersion::PG16);
        }
        self.decoder.feed_bytes(buf);
        loop {
            match self.decoder.poll_decode()? {
                None => break, // no full record yet
                Some((lsn, _rec)) => {
                    self.write_record_lsn = lsn;
                }
            }
        }
        Ok(())
    }
    /// Truncate WAL at specified LSN, which must be the end of WAL record.
    async fn truncate_wal(&mut self, end_pos: Lsn) -> Result<()> {
        if self.write_lsn != Lsn(0) && end_pos > self.write_lsn {
            panic!(
                "truncate_wal called on non-written WAL, write_lsn={}, end_pos={}",
                self.write_lsn, end_pos
            );
        }
        // Materialize buffered bytes first so the zeroing below sees them.
        self.flush_wal().await?;
        // write zeroes to disk from end_pos until self.write_lsn
        let buf = [0; 8192];
        let mut pos = end_pos.0;
        while pos < self.write_lsn.0 {
            self.disk.wal.lock().write(pos, &buf);
            pos += buf.len() as u64;
        }
        // All three LSN cursors collapse to the truncation point.
        self.write_lsn = end_pos;
        self.write_record_lsn = end_pos;
        self.flush_record_lsn = end_pos;
        self.unflushed_bytes.clear();
        self.decoder = WalStreamDecoder::new(end_pos, PgMajorVersion::PG16);
        Ok(())
    }
    /// Durably store WAL on disk, up to the last written WAL record.
    async fn flush_wal(&mut self) -> Result<()> {
        if self.flush_record_lsn == self.write_record_lsn {
            // no need to do extra flush
            return Ok(());
        }
        // Copy only up to the last complete record; a partial trailing record
        // stays in the buffer.
        let num_bytes = self.write_record_lsn.0 - self.flush_record_lsn.0;
        self.disk.wal.lock().write(
            self.flush_record_lsn.0,
            &self.unflushed_bytes[..num_bytes as usize],
        );
        self.unflushed_bytes.advance(num_bytes as usize);
        self.flush_record_lsn = self.write_record_lsn;
        Ok(())
    }
    /// Remove all segments <= given segno. Returns function doing that as we
    /// want to perform it without timeline lock.
    fn remove_up_to(&self, _segno_up_to: XLogSegNo) -> BoxFuture<'static, anyhow::Result<()>> {
        // No-op for the in-memory disk.
        Box::pin(async move { Ok(()) })
    }
    /// Release resources associated with the storage -- technically, close FDs.
    /// Currently we don't remove timelines until restart (#3146), so need to
    /// spare descriptors. This would be useful for temporary tli detach as
    /// well.
    fn close(&mut self) {}
    /// Get metrics for this timeline.
    fn get_metrics(&self) -> WalStorageMetrics {
        WalStorageMetrics::default()
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/tests/walproposer_sim/log.rs | safekeeper/tests/walproposer_sim/log.rs | use std::fmt;
use std::sync::Arc;
use desim::time::Timing;
use once_cell::sync::OnceCell;
use parking_lot::Mutex;
use tracing_subscriber::fmt::format::Writer;
use tracing_subscriber::fmt::time::FormatTime;
/// SimClock can be plugged into tracing logger to print simulation time.
#[derive(Clone)]
pub struct SimClock {
    // Shared slot; empty until a world's Timing is attached via set_clock.
    clock_ptr: Arc<Mutex<Option<Arc<Timing>>>>,
}
impl Default for SimClock {
    /// A clock with no simulation attached yet; `format_time` prints "[?]"
    /// until `set_clock` is called.
    fn default() -> Self {
        SimClock {
            clock_ptr: Default::default(),
        }
    }
}
impl SimClock {
    /// Attach the world's clock so log timestamps show simulated time.
    pub fn set_clock(&self, clock: Arc<Timing>) {
        *self.clock_ptr.lock() = Some(clock);
    }
}
impl FormatTime for SimClock {
    /// Print the current simulated time as "[<now>]", or "[?]" when no
    /// simulation clock has been attached yet.
    fn format_time(&self, w: &mut Writer<'_>) -> fmt::Result {
        match self.clock_ptr.lock().as_ref() {
            Some(clock) => {
                let now = clock.now();
                write!(w, "[{now}]")
            }
            None => write!(w, "[?]"),
        }
    }
}
// Set once by init_tracing_logger: the global tracing subscriber can only be
// installed a single time per process.
static LOGGING_DONE: OnceCell<SimClock> = OnceCell::new();
/// Returns ptr to clocks attached to tracing logger to update them when the
/// world is (re)created.
///
/// The subscriber is installed only on the first call; later calls return the
/// same `SimClock` and ignore their `debug_enabled` argument.
pub fn init_tracing_logger(debug_enabled: bool) -> SimClock {
    LOGGING_DONE
        .get_or_init(|| {
            let clock = SimClock::default();
            let base_logger = tracing_subscriber::fmt()
                .with_target(false)
                // prefix log lines with simulated time timestamp
                .with_timer(clock.clone())
                // .with_ansi(true) TODO
                .with_max_level(match debug_enabled {
                    true => tracing::Level::DEBUG,
                    false => tracing::Level::WARN,
                })
                .with_writer(std::io::stdout);
            base_logger.init();
            // logging::replace_panic_hook_with_tracing_panic_hook().forget();
            if !debug_enabled {
                // Suppress default panic printing in quiet (non-debug) runs.
                std::panic::set_hook(Box::new(|_| {}));
            }
            clock
        })
        .clone()
}
/// Initialize the simulation logger, choosing verbosity from the
/// environment, and return the attached simulation clock.
pub fn init_logger() -> SimClock {
    // RUST_TRACEBACK envvar controls whether we print all logs or only
    // warnings; only its presence matters, not its value.
    let verbose = std::env::var("RUST_TRACEBACK").is_ok();
    init_tracing_logger(verbose)
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/tests/walproposer_sim/safekeeper.rs | safekeeper/tests/walproposer_sim/safekeeper.rs | //! Safekeeper communication endpoint to WAL proposer (compute node).
//! Gets messages from the network, passes them down to consensus module and
//! sends replies back.
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use anyhow::{Result, bail};
use bytes::{Bytes, BytesMut};
use camino::Utf8PathBuf;
use desim::executor::{self, PollSome};
use desim::network::TCP;
use desim::node_os::NodeOs;
use desim::proto::{AnyMessage, NetEvent, NodeEvent};
use http::Uri;
use safekeeper::SafeKeeperConf;
use safekeeper::safekeeper::{
ProposerAcceptorMessage, SK_PROTO_VERSION_3, SafeKeeper, UNKNOWN_SERVER_VERSION,
};
use safekeeper::state::{TimelinePersistentState, TimelineState};
use safekeeper::timeline::TimelineError;
use safekeeper::wal_storage::Storage;
use safekeeper_api::ServerInfo;
use safekeeper_api::membership::Configuration;
use tracing::{debug, info_span, warn};
use utils::id::{NodeId, TenantId, TenantTimelineId, TimelineId};
use utils::lsn::Lsn;
use super::safekeeper_disk::{DiskStateStorage, DiskWALStorage, SafekeeperDisk, TimelineDisk};
// Per-timeline state: the safekeeper state machine plus its backing
// simulated disk.
struct SharedState {
    // Safekeeper consensus state machine, backed by simulated disk storage.
    sk: SafeKeeper<DiskStateStorage, DiskWALStorage>,
    // Simulated on-disk state (control file + WAL) for this timeline.
    disk: Arc<TimelineDisk>,
}
// All timelines known to this simulated safekeeper node.
struct GlobalMap {
    // Loaded timelines, keyed by tenant+timeline id.
    timelines: HashMap<TenantTimelineId, SharedState>,
    // Node configuration; `my_id` is used when constructing SafeKeeper
    // instances.
    conf: SafeKeeperConf,
    // Backing simulated disk, used when creating new timelines.
    disk: Arc<SafekeeperDisk>,
}
impl GlobalMap {
    /// Restores global state from disk.
    ///
    /// Each persisted timeline is validated before loading: the WAL segment
    /// size and PG version must be initialized, and `commit_lsn` must not be
    /// behind `local_start_lsn`. Fails on the first invalid timeline.
    fn new(disk: Arc<SafekeeperDisk>, conf: SafeKeeperConf) -> Result<Self> {
        let mut timelines = HashMap::new();
        // NB: the loop variable `disk` shadows the outer `disk` argument —
        // inside the loop it is the per-timeline disk.
        for (&ttid, disk) in disk.timelines.lock().iter() {
            debug!("loading timeline {}", ttid);
            let state = disk.state.lock().clone();
            if state.server.wal_seg_size == 0 {
                bail!(TimelineError::UninitializedWalSegSize(ttid));
            }
            if state.server.pg_version == UNKNOWN_SERVER_VERSION {
                bail!(TimelineError::UninitialinzedPgVersion(ttid));
            }
            if state.commit_lsn < state.local_start_lsn {
                bail!(
                    "commit_lsn {} is smaller than local_start_lsn {}",
                    state.commit_lsn,
                    state.local_start_lsn
                );
            }
            // WAL storage is initialized from the control file state.
            let control_store = DiskStateStorage::new(disk.clone());
            let wal_store = DiskWALStorage::new(disk.clone(), &control_store)?;
            let sk = SafeKeeper::new(TimelineState::new(control_store), wal_store, conf.my_id)?;
            timelines.insert(
                ttid,
                SharedState {
                    sk,
                    disk: disk.clone(),
                },
            );
        }
        Ok(Self {
            timelines,
            conf,
            disk,
        })
    }

    /// Creates a brand-new timeline with an empty membership configuration
    /// and invalid (zero) start LSNs. Fails if the timeline already exists.
    fn create(&mut self, ttid: TenantTimelineId, server_info: ServerInfo) -> Result<()> {
        if self.timelines.contains_key(&ttid) {
            bail!("timeline {} already exists", ttid);
        }
        debug!("creating new timeline {}", ttid);
        let commit_lsn = Lsn::INVALID;
        let local_start_lsn = Lsn::INVALID;
        let state = TimelinePersistentState::new(
            &ttid,
            Configuration::empty(),
            server_info,
            commit_lsn,
            local_start_lsn,
        )?;
        // Persist the initial state, then build the in-memory machinery on
        // top of it, mirroring what `new()` does for loaded timelines.
        let disk_timeline = self.disk.put_state(&ttid, state);
        let control_store = DiskStateStorage::new(disk_timeline.clone());
        let wal_store = DiskWALStorage::new(disk_timeline.clone(), &control_store)?;
        let sk = SafeKeeper::new(
            TimelineState::new(control_store),
            wal_store,
            self.conf.my_id,
        )?;
        self.timelines.insert(
            ttid,
            SharedState {
                sk,
                disk: disk_timeline,
            },
        );
        Ok(())
    }

    /// Returns the loaded timeline; panics if it does not exist.
    fn get(&mut self, ttid: &TenantTimelineId) -> &mut SharedState {
        self.timelines.get_mut(ttid).expect("timeline must exist")
    }

    /// True if the timeline is loaded in this map.
    fn has_tli(&self, ttid: &TenantTimelineId) -> bool {
        self.timelines.contains_key(ttid)
    }
}
/// State of a single connection to walproposer.
struct ConnState {
    // Simulated TCP connection to the walproposer.
    tcp: TCP,
    // True once the initial greeting message has been processed.
    greeting: bool,
    // Timeline this connection serves; set from the greeting.
    ttid: TenantTimelineId,
    // True when an AppendRequest was processed without flushing; the
    // FlushWAL is issued later by `flush()`.
    flush_pending: bool,
    // Current-thread runtime used to block on the async SafeKeeper API.
    runtime: tokio::runtime::Runtime,
}
pub fn run_server(os: NodeOs, disk: Arc<SafekeeperDisk>) -> Result<()> {
let _enter = info_span!("safekeeper", id = os.id()).entered();
debug!("started server");
os.log_event("started;safekeeper".to_owned());
let conf = SafeKeeperConf {
workdir: Utf8PathBuf::from("."),
my_id: NodeId(os.id() as u64),
listen_pg_addr: String::new(),
listen_http_addr: String::new(),
listen_https_addr: None,
no_sync: false,
broker_endpoint: "/".parse::<Uri>().unwrap(),
broker_keepalive_interval: Duration::from_secs(0),
heartbeat_timeout: Duration::from_secs(0),
remote_storage: None,
max_offloader_lag_bytes: 0,
/* BEGIN_HADRON */
max_reelect_offloader_lag_bytes: 0,
max_timeline_disk_usage_bytes: 0,
/* END_HADRON */
wal_backup_enabled: false,
listen_pg_addr_tenant_only: None,
advertise_pg_addr: None,
availability_zone: None,
peer_recovery_enabled: false,
backup_parallel_jobs: 0,
pg_auth: None,
pg_tenant_only_auth: None,
http_auth: None,
sk_auth_token: None,
current_thread_runtime: false,
walsenders_keep_horizon: false,
partial_backup_timeout: Duration::from_secs(0),
disable_periodic_broker_push: false,
enable_offload: false,
delete_offloaded_wal: false,
control_file_save_interval: Duration::from_secs(1),
partial_backup_concurrency: 1,
eviction_min_resident: Duration::ZERO,
wal_reader_fanout: false,
max_delta_for_fanout: None,
ssl_key_file: Utf8PathBuf::from(""),
ssl_cert_file: Utf8PathBuf::from(""),
ssl_cert_reload_period: Duration::ZERO,
ssl_ca_certs: Vec::new(),
use_https_safekeeper_api: false,
enable_tls_wal_service_api: false,
force_metric_collection_on_scrape: true,
/* BEGIN_HADRON */
enable_pull_timeline_on_startup: false,
advertise_pg_addr_tenant_only: None,
hcc_base_url: None,
global_disk_check_interval: Duration::from_secs(10),
max_global_disk_usage_ratio: 0.0,
/* END_HADRON */
};
let mut global = GlobalMap::new(disk, conf.clone())?;
let mut conns: HashMap<usize, ConnState> = HashMap::new();
for (&_ttid, shared_state) in global.timelines.iter_mut() {
let flush_lsn = shared_state.sk.wal_store.flush_lsn();
let commit_lsn = shared_state.sk.state.commit_lsn;
os.log_event(format!("tli_loaded;{};{}", flush_lsn.0, commit_lsn.0));
}
let node_events = os.node_events();
let mut epoll_vec: Vec<Box<dyn PollSome>> = vec![];
let mut epoll_idx: Vec<usize> = vec![];
// TODO: batch events processing (multiple events per tick)
loop {
epoll_vec.clear();
epoll_idx.clear();
// node events channel
epoll_vec.push(Box::new(node_events.clone()));
epoll_idx.push(0);
// tcp connections
for conn in conns.values() {
epoll_vec.push(Box::new(conn.tcp.recv_chan()));
epoll_idx.push(conn.tcp.connection_id());
}
// waiting for the next message
let index = executor::epoll_chans(&epoll_vec, -1).unwrap();
if index == 0 {
// got a new connection
match node_events.must_recv() {
NodeEvent::Accept(tcp) => {
conns.insert(
tcp.connection_id(),
ConnState {
tcp,
greeting: false,
ttid: TenantTimelineId::empty(),
flush_pending: false,
runtime: tokio::runtime::Builder::new_current_thread().build()?,
},
);
}
NodeEvent::Internal(_) => unreachable!(),
}
continue;
}
let connection_id = epoll_idx[index];
let conn = conns.get_mut(&connection_id).unwrap();
let mut next_event = Some(conn.tcp.recv_chan().must_recv());
loop {
let event = match next_event {
Some(event) => event,
None => break,
};
match event {
NetEvent::Message(msg) => {
let res = conn.process_any(msg, &mut global);
if res.is_err() {
let e = res.unwrap_err();
let estr = e.to_string();
if !estr.contains("finished processing START_REPLICATION") {
warn!("conn {:?} error: {:?}", connection_id, e);
panic!("unexpected error at safekeeper: {e:#}");
}
conns.remove(&connection_id);
break;
}
}
NetEvent::Closed => {
// TODO: remove from conns?
}
}
next_event = conn.tcp.recv_chan().try_recv();
}
conns.retain(|_, conn| {
let res = conn.flush(&mut global);
if res.is_err() {
debug!("conn {:?} error: {:?}", conn.tcp, res);
}
res.is_ok()
});
}
}
impl ConnState {
/// Process a message from the network. It can be START_REPLICATION request or a valid ProposerAcceptorMessage message.
fn process_any(&mut self, any: AnyMessage, global: &mut GlobalMap) -> Result<()> {
if let AnyMessage::Bytes(copy_data) = any {
let repl_prefix = b"START_REPLICATION ";
if !self.greeting && copy_data.starts_with(repl_prefix) {
self.process_start_replication(copy_data.slice(repl_prefix.len()..), global)?;
bail!("finished processing START_REPLICATION")
}
let msg = ProposerAcceptorMessage::parse(copy_data, SK_PROTO_VERSION_3)?;
debug!("got msg: {:?}", msg);
self.process(msg, global)
} else {
bail!("unexpected message, expected AnyMessage::Bytes");
}
}
/// Process START_REPLICATION request.
fn process_start_replication(
&mut self,
copy_data: Bytes,
global: &mut GlobalMap,
) -> Result<()> {
// format is "<tenant_id> <timeline_id> <start_lsn> <end_lsn>"
let str = String::from_utf8(copy_data.to_vec())?;
let mut parts = str.split(' ');
let tenant_id = parts.next().unwrap().parse::<TenantId>()?;
let timeline_id = parts.next().unwrap().parse::<TimelineId>()?;
let start_lsn = parts.next().unwrap().parse::<u64>()?;
let end_lsn = parts.next().unwrap().parse::<u64>()?;
let ttid = TenantTimelineId::new(tenant_id, timeline_id);
let shared_state = global.get(&ttid);
// read bytes from start_lsn to end_lsn
let mut buf = vec![0; (end_lsn - start_lsn) as usize];
shared_state.disk.wal.lock().read(start_lsn, &mut buf);
// send bytes to the client
self.tcp.send(AnyMessage::Bytes(Bytes::from(buf)));
Ok(())
}
/// Get or create a timeline.
fn init_timeline(
&mut self,
ttid: TenantTimelineId,
server_info: ServerInfo,
global: &mut GlobalMap,
) -> Result<()> {
self.ttid = ttid;
if global.has_tli(&ttid) {
return Ok(());
}
global.create(ttid, server_info)
}
/// Process a ProposerAcceptorMessage.
fn process(&mut self, msg: ProposerAcceptorMessage, global: &mut GlobalMap) -> Result<()> {
if !self.greeting {
self.greeting = true;
match msg {
ProposerAcceptorMessage::Greeting(ref greeting) => {
tracing::info!(
"start handshake with walproposer {:?} {:?}",
self.tcp,
greeting
);
let server_info = ServerInfo {
pg_version: greeting.pg_version,
system_id: greeting.system_id,
wal_seg_size: greeting.wal_seg_size,
};
let ttid = TenantTimelineId::new(greeting.tenant_id, greeting.timeline_id);
self.init_timeline(ttid, server_info, global)?
}
_ => {
bail!("unexpected message {msg:?} instead of greeting");
}
}
}
let tli = global.get(&self.ttid);
match msg {
ProposerAcceptorMessage::AppendRequest(append_request) => {
self.flush_pending = true;
self.process_sk_msg(
tli,
&ProposerAcceptorMessage::NoFlushAppendRequest(append_request),
)?;
}
other => {
self.process_sk_msg(tli, &other)?;
}
}
Ok(())
}
/// Process FlushWAL if needed.
fn flush(&mut self, global: &mut GlobalMap) -> Result<()> {
// TODO: try to add extra flushes in simulation, to verify that extra flushes don't break anything
if !self.flush_pending {
return Ok(());
}
self.flush_pending = false;
let shared_state = global.get(&self.ttid);
self.process_sk_msg(shared_state, &ProposerAcceptorMessage::FlushWAL)
}
/// Make safekeeper process a message and send a reply to the TCP
fn process_sk_msg(
&mut self,
shared_state: &mut SharedState,
msg: &ProposerAcceptorMessage,
) -> Result<()> {
let mut reply = self.runtime.block_on(shared_state.sk.process_msg(msg))?;
if let Some(reply) = &mut reply {
// TODO: if this is AppendResponse, fill in proper hot standby feedback and disk consistent lsn
let mut buf = BytesMut::with_capacity(128);
reply.serialize(&mut buf, SK_PROTO_VERSION_3)?;
self.tcp.send(AnyMessage::Bytes(buf.into()));
}
Ok(())
}
}
impl Drop for ConnState {
    /// Close the simulated TCP connection on drop — unless the thread is
    /// already panicking, in which case closing is skipped.
    fn drop(&mut self) {
        debug!("dropping conn: {:?}", self.tcp);
        let panicking = std::thread::panicking();
        if !panicking {
            self.tcp.close();
        }
        // TODO: clean up non-fsynced WAL
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/tests/walproposer_sim/walproposer_api.rs | safekeeper/tests/walproposer_sim/walproposer_api.rs | use std::cell::{RefCell, RefMut, UnsafeCell};
use std::ffi::CStr;
use std::sync::Arc;
use bytes::Bytes;
use desim::executor::{self, PollSome};
use desim::network::TCP;
use desim::node_os::NodeOs;
use desim::proto::{AnyMessage, NetEvent, NodeEvent};
use desim::world::NodeId;
use tracing::debug;
use utils::lsn::Lsn;
use walproposer::api_bindings::Level;
use walproposer::bindings::{
NeonWALReadResult, SafekeeperStateDesiredEvents, WL_SOCKET_READABLE, WL_SOCKET_WRITEABLE,
};
use walproposer::walproposer::{ApiImpl, Config};
use super::walproposer_disk::DiskWalProposer;
/// Special state for each wp->sk connection.
struct SafekeeperConn {
    host: String,
    port: String,
    // By simulation convention the numeric port doubles as the node id.
    node_id: NodeId,
    // socket is Some(..) equals to connection is established
    socket: Option<TCP>,
    // connection is in progress
    is_connecting: bool,
    // START_WAL_PUSH is in progress
    is_start_wal_push: bool,
    // pointer to Safekeeper in walproposer for callbacks
    raw_ptr: *mut walproposer::bindings::Safekeeper,
}
impl SafekeeperConn {
pub fn new(host: String, port: String) -> Self {
// port number is the same as NodeId
let port_num = port.parse::<u32>().unwrap();
Self {
host,
port,
node_id: port_num,
socket: None,
is_connecting: false,
is_start_wal_push: false,
raw_ptr: std::ptr::null_mut(),
}
}
}
/// Simulation version of a postgres WaitEventSet. At pos 0 there is always
/// a special NodeEvents channel, which is used as a latch.
///
/// `chans`, `sk_ptrs` and `masks` are parallel vectors: entry `i` of each
/// describes the same registered channel.
struct EventSet {
    os: NodeOs,
    // all pollable channels, 0 is always NodeEvent channel
    chans: Vec<Box<dyn PollSome>>,
    // 0 is always nullptr
    sk_ptrs: Vec<*mut walproposer::bindings::Safekeeper>,
    // event mask for each channel
    masks: Vec<u32>,
}
impl EventSet {
pub fn new(os: NodeOs) -> Self {
let node_events = os.node_events();
Self {
os,
chans: vec![Box::new(node_events)],
sk_ptrs: vec![std::ptr::null_mut()],
masks: vec![WL_SOCKET_READABLE],
}
}
/// Leaves all readable channels at the beginning of the array.
fn sort_readable(&mut self) -> usize {
let mut cnt = 1;
for i in 1..self.chans.len() {
if self.masks[i] & WL_SOCKET_READABLE != 0 {
self.chans.swap(i, cnt);
self.sk_ptrs.swap(i, cnt);
self.masks.swap(i, cnt);
cnt += 1;
}
}
cnt
}
fn update_event_set(&mut self, conn: &SafekeeperConn, event_mask: u32) {
let index = self
.sk_ptrs
.iter()
.position(|&ptr| ptr == conn.raw_ptr)
.expect("safekeeper should exist in event set");
self.masks[index] = event_mask;
}
fn add_safekeeper(&mut self, sk: &SafekeeperConn, event_mask: u32) {
for ptr in self.sk_ptrs.iter() {
assert!(*ptr != sk.raw_ptr);
}
self.chans.push(Box::new(
sk.socket
.as_ref()
.expect("socket should not be closed")
.recv_chan(),
));
self.sk_ptrs.push(sk.raw_ptr);
self.masks.push(event_mask);
}
fn remove_safekeeper(&mut self, sk: &SafekeeperConn) {
let index = self.sk_ptrs.iter().position(|&ptr| ptr == sk.raw_ptr);
if index.is_none() {
debug!("remove_safekeeper: sk={:?} not found", sk.raw_ptr);
return;
}
let index = index.unwrap();
self.chans.remove(index);
self.sk_ptrs.remove(index);
self.masks.remove(index);
// to simulate the actual behaviour
self.refresh_event_set();
}
/// Updates all masks to match the result of a SafekeeperStateDesiredEvents.
fn refresh_event_set(&mut self) {
for (i, mask) in self.masks.iter_mut().enumerate() {
if i == 0 {
continue;
}
let mut mask_sk: u32 = 0;
let mut mask_nwr: u32 = 0;
unsafe { SafekeeperStateDesiredEvents(self.sk_ptrs[i], &mut mask_sk, &mut mask_nwr) };
if mask_sk != *mask {
debug!(
"refresh_event_set: sk={:?}, old_mask={:#b}, new_mask={:#b}",
self.sk_ptrs[i], *mask, mask_sk
);
*mask = mask_sk;
}
}
}
/// Wait for events on all channels.
fn wait(&mut self, timeout_millis: i64) -> walproposer::walproposer::WaitResult {
// all channels are always writeable
for (i, mask) in self.masks.iter().enumerate() {
if *mask & WL_SOCKET_WRITEABLE != 0 {
return walproposer::walproposer::WaitResult::Network(
self.sk_ptrs[i],
WL_SOCKET_WRITEABLE,
);
}
}
let cnt = self.sort_readable();
let slice = &self.chans[0..cnt];
match executor::epoll_chans(slice, timeout_millis) {
None => walproposer::walproposer::WaitResult::Timeout,
Some(0) => {
let msg = self.os.node_events().must_recv();
match msg {
NodeEvent::Internal(AnyMessage::Just32(0)) => {
// got a notification about new WAL available
}
NodeEvent::Internal(_) => unreachable!(),
NodeEvent::Accept(_) => unreachable!(),
}
walproposer::walproposer::WaitResult::Latch
}
Some(index) => walproposer::walproposer::WaitResult::Network(
self.sk_ptrs[index],
WL_SOCKET_READABLE,
),
}
}
}
/// This struct handles all calls from walproposer into walproposer_api.
pub struct SimulationApi {
    os: NodeOs,
    // Per-safekeeper connection state, looked up by port (see `get_conn`).
    safekeepers: RefCell<Vec<SafekeeperConn>>,
    // Simulated WAL disk of this walproposer.
    disk: Arc<DiskWalProposer>,
    // LSN returned by `get_redo_start_lsn`; must be set before that call.
    redo_start_lsn: Option<Lsn>,
    // Highest commit LSN already written to the event log, to avoid
    // logging duplicates in `process_safekeeper_feedback`.
    last_logged_commit_lsn: u64,
    // Shared-memory state exposed to the C walproposer code.
    shmem: UnsafeCell<walproposer::bindings::WalproposerShmemState>,
    config: Config,
    // Created lazily by `init_event_set`; None until then.
    event_set: RefCell<Option<EventSet>>,
}
/// Constructor arguments for [`SimulationApi`].
pub struct Args {
    pub os: NodeOs,
    pub config: Config,
    pub disk: Arc<DiskWalProposer>,
    // Optional redo start LSN; required only if walproposer asks for it.
    pub redo_start_lsn: Option<Lsn>,
}
impl SimulationApi {
    /// Build the API implementation, creating one `SafekeeperConn` per
    /// "host:port" entry in `config.safekeepers_list`.
    pub fn new(args: Args) -> Self {
        // initialize connection state for each safekeeper
        let sk_conns = args
            .config
            .safekeepers_list
            .iter()
            .map(|s| {
                SafekeeperConn::new(
                    s.split(':').next().unwrap().to_string(),
                    s.split(':').nth(1).unwrap().to_string(),
                )
            })
            .collect::<Vec<_>>();
        Self {
            os: args.os,
            safekeepers: RefCell::new(sk_conns),
            disk: args.disk,
            redo_start_lsn: args.redo_start_lsn,
            last_logged_commit_lsn: 0,
            shmem: UnsafeCell::new(walproposer::api_bindings::empty_shmem()),
            config: args.config,
            event_set: RefCell::new(None),
        }
    }
    /// Get SafekeeperConn for the given Safekeeper.
    ///
    /// Matches by the port string stored in the C-side `Safekeeper` struct;
    /// panics if no entry matches.
    fn get_conn(&self, sk: &mut walproposer::bindings::Safekeeper) -> RefMut<'_, SafekeeperConn> {
        // SAFETY assumption: sk.port points to a valid NUL-terminated UTF-8
        // C string owned by walproposer — TODO confirm against bindings.
        let sk_port = unsafe { CStr::from_ptr(sk.port).to_str().unwrap() };
        let state = self.safekeepers.borrow_mut();
        RefMut::map(state, |v| {
            v.iter_mut()
                .find(|conn| conn.port == sk_port)
                .expect("safekeeper conn not found by port")
        })
    }
}
impl ApiImpl for SimulationApi {
fn get_current_timestamp(&self) -> i64 {
debug!("get_current_timestamp");
// PG TimestampTZ is microseconds, but simulation unit is assumed to be
// milliseconds, so add 10^3
self.os.now() as i64 * 1000
}
fn update_donor(&self, donor: &mut walproposer::bindings::Safekeeper, donor_lsn: u64) {
let mut shmem = unsafe { *self.get_shmem_state() };
shmem.propEpochStartLsn.value = donor_lsn;
shmem.donor_conninfo = donor.conninfo;
}
fn conn_status(
&self,
_: &mut walproposer::bindings::Safekeeper,
) -> walproposer::bindings::WalProposerConnStatusType {
debug!("conn_status");
// break the connection with a 10% chance
if self.os.random(100) < 10 {
walproposer::bindings::WalProposerConnStatusType_WP_CONNECTION_BAD
} else {
walproposer::bindings::WalProposerConnStatusType_WP_CONNECTION_OK
}
}
fn conn_connect_start(&self, sk: &mut walproposer::bindings::Safekeeper) {
debug!("conn_connect_start");
let mut conn = self.get_conn(sk);
assert!(conn.socket.is_none());
let socket = self.os.open_tcp(conn.node_id);
conn.socket = Some(socket);
conn.raw_ptr = sk;
conn.is_connecting = true;
}
fn conn_connect_poll(
&self,
_: &mut walproposer::bindings::Safekeeper,
) -> walproposer::bindings::WalProposerConnectPollStatusType {
debug!("conn_connect_poll");
// TODO: break the connection here
walproposer::bindings::WalProposerConnectPollStatusType_WP_CONN_POLLING_OK
}
fn conn_send_query(&self, sk: &mut walproposer::bindings::Safekeeper, query: &str) -> bool {
debug!("conn_send_query: {}", query);
self.get_conn(sk).is_start_wal_push = true;
true
}
fn conn_get_query_result(
&self,
_: &mut walproposer::bindings::Safekeeper,
) -> walproposer::bindings::WalProposerExecStatusType {
debug!("conn_get_query_result");
// TODO: break the connection here
walproposer::bindings::WalProposerExecStatusType_WP_EXEC_SUCCESS_COPYBOTH
}
fn conn_async_read(
&self,
sk: &mut walproposer::bindings::Safekeeper,
vec: &mut Vec<u8>,
) -> walproposer::bindings::PGAsyncReadResult {
debug!("conn_async_read");
let mut conn = self.get_conn(sk);
let socket = if let Some(socket) = conn.socket.as_mut() {
socket
} else {
// socket is already closed
return walproposer::bindings::PGAsyncReadResult_PG_ASYNC_READ_FAIL;
};
let msg = socket.recv_chan().try_recv();
match msg {
None => {
// no message is ready
walproposer::bindings::PGAsyncReadResult_PG_ASYNC_READ_TRY_AGAIN
}
Some(NetEvent::Closed) => {
// connection is closed
debug!("conn_async_read: connection is closed");
conn.socket = None;
walproposer::bindings::PGAsyncReadResult_PG_ASYNC_READ_FAIL
}
Some(NetEvent::Message(msg)) => {
// got a message
let b = match msg {
desim::proto::AnyMessage::Bytes(b) => b,
_ => unreachable!(),
};
vec.extend_from_slice(&b);
walproposer::bindings::PGAsyncReadResult_PG_ASYNC_READ_SUCCESS
}
}
}
fn conn_blocking_write(&self, sk: &mut walproposer::bindings::Safekeeper, buf: &[u8]) -> bool {
let mut conn = self.get_conn(sk);
debug!("conn_blocking_write to {}: {:?}", conn.node_id, buf);
let socket = conn.socket.as_mut().unwrap();
socket.send(desim::proto::AnyMessage::Bytes(Bytes::copy_from_slice(buf)));
true
}
fn conn_async_write(
&self,
sk: &mut walproposer::bindings::Safekeeper,
buf: &[u8],
) -> walproposer::bindings::PGAsyncWriteResult {
let mut conn = self.get_conn(sk);
debug!("conn_async_write to {}: {:?}", conn.node_id, buf);
if let Some(socket) = conn.socket.as_mut() {
socket.send(desim::proto::AnyMessage::Bytes(Bytes::copy_from_slice(buf)));
} else {
// connection is already closed
debug!("conn_async_write: writing to a closed socket!");
// TODO: maybe we should return error here?
}
walproposer::bindings::PGAsyncWriteResult_PG_ASYNC_WRITE_SUCCESS
}
fn wal_reader_allocate(&self, _: &mut walproposer::bindings::Safekeeper) -> NeonWALReadResult {
debug!("wal_reader_allocate");
walproposer::bindings::NeonWALReadResult_NEON_WALREAD_SUCCESS
}
fn wal_read(
&self,
_sk: &mut walproposer::bindings::Safekeeper,
buf: &mut [u8],
startpos: u64,
) -> NeonWALReadResult {
self.disk.lock().read(startpos, buf);
walproposer::bindings::NeonWALReadResult_NEON_WALREAD_SUCCESS
}
fn init_event_set(&self, _: &mut walproposer::bindings::WalProposer) {
debug!("init_event_set");
let new_event_set = EventSet::new(self.os.clone());
let old_event_set = self.event_set.replace(Some(new_event_set));
assert!(old_event_set.is_none());
}
fn update_event_set(&self, sk: &mut walproposer::bindings::Safekeeper, event_mask: u32) {
debug!(
"update_event_set, sk={:?}, events_mask={:#b}",
sk as *mut walproposer::bindings::Safekeeper, event_mask
);
let conn = self.get_conn(sk);
self.event_set
.borrow_mut()
.as_mut()
.unwrap()
.update_event_set(&conn, event_mask);
}
fn add_safekeeper_event_set(
&self,
sk: &mut walproposer::bindings::Safekeeper,
event_mask: u32,
) {
debug!(
"add_safekeeper_event_set, sk={:?}, events_mask={:#b}",
sk as *mut walproposer::bindings::Safekeeper, event_mask
);
self.event_set
.borrow_mut()
.as_mut()
.unwrap()
.add_safekeeper(&self.get_conn(sk), event_mask);
}
fn rm_safekeeper_event_set(&self, sk: &mut walproposer::bindings::Safekeeper) {
debug!(
"rm_safekeeper_event_set, sk={:?}",
sk as *mut walproposer::bindings::Safekeeper,
);
self.event_set
.borrow_mut()
.as_mut()
.unwrap()
.remove_safekeeper(&self.get_conn(sk));
}
fn active_state_update_event_set(&self, sk: &mut walproposer::bindings::Safekeeper) {
debug!("active_state_update_event_set");
assert!(sk.state == walproposer::bindings::SafekeeperState_SS_ACTIVE);
self.event_set
.borrow_mut()
.as_mut()
.unwrap()
.refresh_event_set();
}
fn wal_reader_events(&self, _sk: &mut walproposer::bindings::Safekeeper) -> u32 {
0
}
fn wait_event_set(
&self,
_: &mut walproposer::bindings::WalProposer,
timeout_millis: i64,
) -> walproposer::walproposer::WaitResult {
// TODO: handle multiple stages as part of the simulation (e.g. connect, start_wal_push, etc)
let mut conns = self.safekeepers.borrow_mut();
for conn in conns.iter_mut() {
if conn.socket.is_some() && conn.is_connecting {
conn.is_connecting = false;
debug!("wait_event_set, connecting to {}:{}", conn.host, conn.port);
return walproposer::walproposer::WaitResult::Network(
conn.raw_ptr,
WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE,
);
}
if conn.socket.is_some() && conn.is_start_wal_push {
conn.is_start_wal_push = false;
debug!(
"wait_event_set, start wal push to {}:{}",
conn.host, conn.port
);
return walproposer::walproposer::WaitResult::Network(
conn.raw_ptr,
WL_SOCKET_READABLE,
);
}
}
drop(conns);
let res = self
.event_set
.borrow_mut()
.as_mut()
.unwrap()
.wait(timeout_millis);
debug!(
"wait_event_set, timeout_millis={}, res={:?}",
timeout_millis, res,
);
res
}
fn strong_random(&self, buf: &mut [u8]) -> bool {
debug!("strong_random");
buf.fill(0);
true
}
fn finish_sync_safekeepers(&self, lsn: u64) -> ! {
debug!("finish_sync_safekeepers, lsn={}", lsn);
executor::exit(0, Lsn(lsn).to_string());
}
fn log_internal(&self, _wp: &mut walproposer::bindings::WalProposer, level: Level, msg: &str) {
debug!("wp_log[{}] {}", level, msg);
if level == Level::Fatal || level == Level::Panic {
if msg.contains("rejects our connection request with term") {
// collected quorum with lower term, then got rejected by next connected safekeeper
executor::exit(1, msg.to_owned());
}
if msg.contains("collected propTermStartLsn") && msg.contains(", but basebackup LSN ") {
// sync-safekeepers collected wrong quorum, walproposer collected another quorum
executor::exit(1, msg.to_owned());
}
if msg.contains("failed to download WAL for logical replicaiton") {
// Recovery connection broken and recovery was failed
executor::exit(1, msg.to_owned());
}
if msg.contains("missing majority of votes, collected") {
// Voting bug when safekeeper disconnects after voting
executor::exit(1, msg.to_owned());
}
panic!("unknown FATAL error from walproposer: {msg}");
}
}
fn after_election(&self, wp: &mut walproposer::bindings::WalProposer) {
let prop_lsn = wp.propTermStartLsn;
let prop_term = wp.propTerm;
let mut prev_lsn: u64 = 0;
let mut prev_term: u64 = 0;
unsafe {
let history = wp.propTermHistory.entries;
let len = wp.propTermHistory.n_entries as usize;
if len > 1 {
let entry = *history.wrapping_add(len - 2);
prev_lsn = entry.lsn;
prev_term = entry.term;
}
}
let msg = format!("prop_elected;{prop_lsn};{prop_term};{prev_lsn};{prev_term}");
debug!(msg);
self.os.log_event(msg);
}
fn get_redo_start_lsn(&self) -> u64 {
debug!("get_redo_start_lsn -> {:?}", self.redo_start_lsn);
self.redo_start_lsn.expect("redo_start_lsn is not set").0
}
fn get_shmem_state(&self) -> *mut walproposer::bindings::WalproposerShmemState {
self.shmem.get()
}
fn start_streaming(
&self,
startpos: u64,
callback: &walproposer::walproposer::StreamingCallback,
) {
let disk = &self.disk;
let disk_lsn = disk.lock().flush_rec_ptr().0;
debug!("start_streaming at {} (disk_lsn={})", startpos, disk_lsn);
if startpos < disk_lsn {
debug!(
"startpos < disk_lsn, it means we wrote some transaction even before streaming started"
);
}
assert!(startpos <= disk_lsn);
let mut broadcasted = Lsn(startpos);
loop {
let available = disk.lock().flush_rec_ptr();
assert!(available >= broadcasted);
callback.broadcast(broadcasted, available);
broadcasted = available;
callback.poll();
}
}
fn process_safekeeper_feedback(
&mut self,
wp: &mut walproposer::bindings::WalProposer,
_sk: &mut walproposer::bindings::Safekeeper,
) {
debug!("process_safekeeper_feedback, commit_lsn={}", wp.commitLsn);
if wp.commitLsn > self.last_logged_commit_lsn {
self.os.log_event(format!("commit_lsn;{}", wp.commitLsn));
self.last_logged_commit_lsn = wp.commitLsn;
}
}
fn get_flush_rec_ptr(&self) -> u64 {
let lsn = self.disk.lock().flush_rec_ptr();
debug!("get_flush_rec_ptr: {}", lsn);
lsn.0
}
fn recovery_download(
&self,
wp: &mut walproposer::bindings::WalProposer,
sk: &mut walproposer::bindings::Safekeeper,
) -> bool {
let mut startpos = wp.truncateLsn;
let endpos = wp.propTermStartLsn;
if startpos == endpos {
debug!("recovery_download: nothing to download");
return true;
}
debug!("recovery_download from {} to {}", startpos, endpos,);
let replication_prompt = format!(
"START_REPLICATION {} {} {} {}",
self.config.ttid.tenant_id, self.config.ttid.timeline_id, startpos, endpos,
);
let async_conn = self.get_conn(sk);
let conn = self.os.open_tcp(async_conn.node_id);
conn.send(desim::proto::AnyMessage::Bytes(replication_prompt.into()));
let chan = conn.recv_chan();
while startpos < endpos {
let event = chan.recv();
match event {
NetEvent::Closed => {
debug!("connection closed in recovery");
break;
}
NetEvent::Message(AnyMessage::Bytes(b)) => {
debug!("got recovery bytes from safekeeper");
self.disk.lock().write(startpos, &b);
startpos += b.len() as u64;
}
NetEvent::Message(_) => unreachable!(),
}
}
debug!("recovery finished at {}", startpos);
startpos == endpos
}
fn conn_finish(&self, sk: &mut walproposer::bindings::Safekeeper) {
let mut conn = self.get_conn(sk);
debug!("conn_finish to {}", conn.node_id);
if let Some(socket) = conn.socket.as_mut() {
socket.close();
} else {
// connection is already closed
}
conn.socket = None;
}
fn conn_error_message(&self, _sk: &mut walproposer::bindings::Safekeeper) -> String {
"connection is closed, probably".into()
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/tests/walproposer_sim/mod.rs | safekeeper/tests/walproposer_sim/mod.rs | pub mod block_storage;
pub mod log;
pub mod safekeeper;
pub mod safekeeper_disk;
pub mod simulation;
pub mod simulation_logs;
pub mod walproposer_api;
pub mod walproposer_disk;
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/tests/walproposer_sim/block_storage.rs | safekeeper/tests/walproposer_sim/block_storage.rs | use std::collections::HashMap;
const BLOCK_SIZE: usize = 8192;

/// A simple in-memory implementation of a block storage. Can be used to
/// implement external storage in tests.
///
/// Data lives in fixed-size blocks inside a hash map; any region that was
/// never written reads back as zeroes.
pub struct BlockStorage {
    // Sparse map: block index -> block contents.
    blocks: HashMap<u64, [u8; BLOCK_SIZE]>,
}

impl Default for BlockStorage {
    fn default() -> Self {
        Self::new()
    }
}

impl BlockStorage {
    /// Create an empty storage (every position reads as zero).
    pub fn new() -> Self {
        BlockStorage {
            blocks: HashMap::new(),
        }
    }

    /// Fill `buf` with the bytes stored at `pos..pos + buf.len()`.
    /// Blocks that were never written read back as zeroes.
    pub fn read(&self, pos: u64, buf: &mut [u8]) {
        const ZEROES: [u8; BLOCK_SIZE] = [0; BLOCK_SIZE];
        let mut filled = 0;
        let mut cursor = pos;
        while filled < buf.len() {
            let block_id = cursor / BLOCK_SIZE as u64;
            let in_block = (cursor % BLOCK_SIZE as u64) as usize;
            // Copy until the end of the current block or of `buf`,
            // whichever comes first.
            let step = (BLOCK_SIZE - in_block).min(buf.len() - filled);
            let block = self.blocks.get(&block_id).unwrap_or(&ZEROES);
            buf[filled..filled + step].copy_from_slice(&block[in_block..in_block + step]);
            filled += step;
            cursor += step as u64;
        }
    }

    /// Store `buf` at `pos..pos + buf.len()`, allocating zeroed blocks on
    /// demand for any block touched for the first time.
    pub fn write(&mut self, pos: u64, buf: &[u8]) {
        let mut consumed = 0;
        let mut cursor = pos;
        while consumed < buf.len() {
            let block_id = cursor / BLOCK_SIZE as u64;
            let in_block = (cursor % BLOCK_SIZE as u64) as usize;
            let step = (BLOCK_SIZE - in_block).min(buf.len() - consumed);
            let block = self.blocks.entry(block_id).or_insert([0; BLOCK_SIZE]);
            block[in_block..in_block + step].copy_from_slice(&buf[consumed..consumed + step]);
            consumed += step;
            cursor += step as u64;
        }
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/tests/walproposer_sim/simulation_logs.rs | safekeeper/tests/walproposer_sim/simulation_logs.rs | use desim::proto::SimEvent;
use tracing::debug;
/// Role a simulated node plays, as inferred from its log events.
///
/// Uses `#[derive(Default)]` with a `#[default]` variant (Rust 1.62+)
/// instead of a hand-written `impl Default`.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
enum NodeKind {
    /// Not yet identified — no "started;…" event seen for this node.
    #[default]
    Unknown,
    Safekeeper,
    WalProposer,
}
/// Simulation state of walproposer/safekeeper, derived from the simulation logs.
#[derive(Clone, Debug, Default)]
struct NodeInfo {
kind: NodeKind,
// walproposer
is_sync: bool,
term: u64,
epoch_lsn: u64,
// safekeeper
commit_lsn: u64,
flush_lsn: u64,
}
impl NodeInfo {
fn init_kind(&mut self, kind: NodeKind) {
if self.kind == NodeKind::Unknown {
self.kind = kind;
} else {
assert!(self.kind == kind);
}
}
fn started(&mut self, data: &str) {
let mut parts = data.split(';');
assert!(parts.next().unwrap() == "started");
match parts.next().unwrap() {
"safekeeper" => {
self.init_kind(NodeKind::Safekeeper);
}
"walproposer" => {
self.init_kind(NodeKind::WalProposer);
let is_sync: u8 = parts.next().unwrap().parse().unwrap();
self.is_sync = is_sync != 0;
}
_ => unreachable!(),
}
}
}
/// Global state of the simulation, derived from the simulation logs.
#[derive(Debug, Default)]
struct GlobalState {
nodes: Vec<NodeInfo>,
commit_lsn: u64,
write_lsn: u64,
max_write_lsn: u64,
written_wal: u64,
written_records: u64,
}
impl GlobalState {
fn new() -> Self {
Default::default()
}
fn get(&mut self, id: u32) -> &mut NodeInfo {
let id = id as usize;
if id >= self.nodes.len() {
self.nodes.resize(id + 1, NodeInfo::default());
}
&mut self.nodes[id]
}
}
/// Try to find inconsistencies in the simulation log.
pub fn validate_events(events: Vec<SimEvent>) {
const INITDB_LSN: u64 = 21623024;
let hook = std::panic::take_hook();
scopeguard::defer_on_success! {
std::panic::set_hook(hook);
};
let mut state = GlobalState::new();
state.max_write_lsn = INITDB_LSN;
for event in events {
debug!("{:?}", event);
let node = state.get(event.node);
if event.data.starts_with("started;") {
node.started(&event.data);
continue;
}
assert!(node.kind != NodeKind::Unknown);
// drop reference to unlock state
let mut node = node.clone();
let mut parts = event.data.split(';');
match node.kind {
NodeKind::Safekeeper => match parts.next().unwrap() {
"tli_loaded" => {
let flush_lsn: u64 = parts.next().unwrap().parse().unwrap();
let commit_lsn: u64 = parts.next().unwrap().parse().unwrap();
node.flush_lsn = flush_lsn;
node.commit_lsn = commit_lsn;
}
_ => unreachable!(),
},
NodeKind::WalProposer => {
match parts.next().unwrap() {
"prop_elected" => {
let prop_lsn: u64 = parts.next().unwrap().parse().unwrap();
let prop_term: u64 = parts.next().unwrap().parse().unwrap();
let prev_lsn: u64 = parts.next().unwrap().parse().unwrap();
let prev_term: u64 = parts.next().unwrap().parse().unwrap();
assert!(prop_lsn >= prev_lsn);
assert!(prop_term >= prev_term);
assert!(prop_lsn >= state.commit_lsn);
if prop_lsn > state.write_lsn {
assert!(prop_lsn <= state.max_write_lsn);
debug!(
"moving write_lsn up from {} to {}",
state.write_lsn, prop_lsn
);
state.write_lsn = prop_lsn;
}
if prop_lsn < state.write_lsn {
debug!(
"moving write_lsn down from {} to {}",
state.write_lsn, prop_lsn
);
state.write_lsn = prop_lsn;
}
node.epoch_lsn = prop_lsn;
node.term = prop_term;
}
"write_wal" => {
assert!(!node.is_sync);
let start_lsn: u64 = parts.next().unwrap().parse().unwrap();
let end_lsn: u64 = parts.next().unwrap().parse().unwrap();
let cnt: u64 = parts.next().unwrap().parse().unwrap();
let size = end_lsn - start_lsn;
state.written_wal += size;
state.written_records += cnt;
// TODO: If we allow writing WAL before winning the election
assert!(start_lsn >= state.commit_lsn);
assert!(end_lsn >= start_lsn);
// assert!(start_lsn == state.write_lsn);
state.write_lsn = end_lsn;
if end_lsn > state.max_write_lsn {
state.max_write_lsn = end_lsn;
}
}
"commit_lsn" => {
let lsn: u64 = parts.next().unwrap().parse().unwrap();
assert!(lsn >= state.commit_lsn);
state.commit_lsn = lsn;
}
_ => unreachable!(),
}
}
_ => unreachable!(),
}
// update the node in the state struct
*state.get(event.node) = node;
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/benches/receive_wal.rs | safekeeper/benches/receive_wal.rs | //! WAL ingestion benchmarks.
use std::io::Write as _;
use bytes::BytesMut;
use camino_tempfile::tempfile;
use criterion::{BatchSize, Bencher, Criterion, criterion_group, criterion_main};
use itertools::Itertools as _;
use postgres_ffi::v17::wal_generator::{LogicalMessageGenerator, WalGenerator};
use pprof::criterion::{Output, PProfProfiler};
use safekeeper::receive_wal::{self, WalAcceptor};
use safekeeper::safekeeper::{
AcceptorProposerMessage, AppendRequest, AppendRequestHeader, ProposerAcceptorMessage,
};
use safekeeper::test_utils::Env;
use safekeeper_api::membership::SafekeeperGeneration as Generation;
use tokio::io::AsyncWriteExt as _;
use utils::id::{NodeId, TenantTimelineId};
use utils::lsn::Lsn;
const KB: usize = 1024;
const MB: usize = 1024 * KB;
const GB: usize = 1024 * MB;
/// Use jemalloc and enable profiling, to mirror bin/safekeeper.rs.
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
#[allow(non_upper_case_globals)]
#[unsafe(export_name = "malloc_conf")]
pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:21\0";
// Register benchmarks with Criterion.
criterion_group!(
name = benches;
config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
targets = bench_process_msg,
bench_wal_acceptor,
bench_wal_acceptor_throughput,
bench_file_write,
bench_bytes_reserve,
);
criterion_main!(benches);
/// Benchmarks SafeKeeper::process_msg() as time per message and throughput. Each message is an
/// AppendRequest with a single WAL record containing an XlLogicalMessage of varying size. When
/// measuring throughput, only the logical message payload is considered, excluding
/// segment/page/record headers.
fn bench_process_msg(c: &mut Criterion) {
let mut g = c.benchmark_group("process_msg");
for fsync in [false, true] {
for commit in [false, true] {
for size in [8, KB, 8 * KB, 128 * KB, MB] {
// Kind of weird to change the group throughput per benchmark, but it's the only way
// to vary it per benchmark. It works.
g.throughput(criterion::Throughput::Bytes(size as u64));
g.bench_function(format!("fsync={fsync}/commit={commit}/size={size}"), |b| {
run_bench(b, size, fsync, commit).unwrap()
});
}
}
}
// The actual benchmark. If commit is true, advance the commit LSN on every message.
fn run_bench(b: &mut Bencher, size: usize, fsync: bool, commit: bool) -> anyhow::Result<()> {
let runtime = tokio::runtime::Builder::new_current_thread() // single is fine, sync IO only
.enable_all()
.build()?;
// Construct the payload. The prefix counts towards the payload (including NUL terminator).
let prefix = c"p";
let prefixlen = prefix.to_bytes_with_nul().len();
assert!(size >= prefixlen);
let message = vec![0; size - prefixlen];
let walgen = &mut WalGenerator::new(LogicalMessageGenerator::new(prefix, &message), Lsn(0));
// Set up the Safekeeper.
let env = Env::new(fsync)?;
let mut safekeeper = runtime.block_on(env.make_safekeeper(
NodeId(1),
TenantTimelineId::generate(),
Lsn(0),
))?;
b.iter_batched_ref(
// Pre-construct WAL records and requests. Criterion will batch them.
|| {
let (lsn, record) = walgen.next().expect("endless WAL");
ProposerAcceptorMessage::AppendRequest(AppendRequest {
h: AppendRequestHeader {
generation: Generation::new(0),
term: 1,
begin_lsn: lsn,
end_lsn: lsn + record.len() as u64,
commit_lsn: if commit { lsn } else { Lsn(0) }, // commit previous record
truncate_lsn: Lsn(0),
},
wal_data: record,
})
},
// Benchmark message processing (time per message).
|msg| {
runtime
.block_on(safekeeper.process_msg(msg))
.expect("message failed")
},
BatchSize::SmallInput, // automatically determine a batch size
);
Ok(())
}
}
/// Benchmarks WalAcceptor message processing time by sending it a batch of WAL records and waiting
/// for it to confirm that the last LSN has been flushed to storage. We pipeline a bunch of messages
/// instead of measuring each individual message to amortize costs (e.g. fsync), which is more
/// realistic. Records are XlLogicalMessage with a tiny payload (~64 bytes per record including
/// headers). Records are pre-constructed to avoid skewing the benchmark.
///
/// TODO: add benchmarks with in-memory storage, see comment on `Env::make_safekeeper()`:
fn bench_wal_acceptor(c: &mut Criterion) {
let mut g = c.benchmark_group("wal_acceptor");
for fsync in [false, true] {
for n in [1, 100, 10000] {
g.bench_function(format!("fsync={fsync}/n={n}"), |b| {
run_bench(b, n, fsync).unwrap()
});
}
}
/// The actual benchmark. n is the number of WAL records to send in a pipelined batch.
fn run_bench(b: &mut Bencher, n: usize, fsync: bool) -> anyhow::Result<()> {
let runtime = tokio::runtime::Runtime::new()?; // needs multithreaded
let env = Env::new(fsync)?;
let walgen =
&mut WalGenerator::new(LogicalMessageGenerator::new(c"prefix", b"message"), Lsn(0));
// Create buffered channels that can fit all requests, to avoid blocking on channels.
let (msg_tx, msg_rx) = tokio::sync::mpsc::channel(n);
let (reply_tx, mut reply_rx) = tokio::sync::mpsc::channel(n);
// Spawn the WalAcceptor task.
runtime.block_on(async {
// TODO: WalAcceptor doesn't actually need a full timeline, only
// Safekeeper::process_msg(). Consider decoupling them to simplify the setup.
let tli = env
.make_timeline(NodeId(1), TenantTimelineId::generate(), Lsn(0))
.await?
.wal_residence_guard()
.await?;
WalAcceptor::spawn(tli, msg_rx, reply_tx, Some(0));
anyhow::Ok(())
})?;
b.iter_batched(
// Pre-construct a batch of WAL records and requests.
|| {
walgen
.take(n)
.map(|(lsn, record)| AppendRequest {
h: AppendRequestHeader {
generation: Generation::new(0),
term: 1,
begin_lsn: lsn,
end_lsn: lsn + record.len() as u64,
commit_lsn: Lsn(0),
truncate_lsn: Lsn(0),
},
wal_data: record,
})
.collect_vec()
},
// Benchmark batch ingestion (time per batch).
|reqs| {
runtime.block_on(async {
let final_lsn = reqs.last().unwrap().h.end_lsn;
// Stuff all the messages into the buffered channel to pipeline them.
for req in reqs {
let msg = ProposerAcceptorMessage::AppendRequest(req);
msg_tx.send(msg).await.expect("send failed");
}
// Wait for the last message to get flushed.
while let Some(reply) = reply_rx.recv().await {
if let AcceptorProposerMessage::AppendResponse(resp) = reply {
if resp.flush_lsn >= final_lsn {
return;
}
}
}
panic!("disconnected")
})
},
BatchSize::PerIteration, // only run one request batch at a time
);
Ok(())
}
}
/// Benchmarks WalAcceptor throughput by sending 1 GB of data with varying message sizes and waiting
/// for the last LSN to be flushed to storage. Only the actual message payload counts towards
/// throughput, headers are excluded and considered overhead. Records are XlLogicalMessage.
///
/// To avoid running out of memory, messages are constructed during the benchmark.
fn bench_wal_acceptor_throughput(c: &mut Criterion) {
const VOLUME: usize = GB; // NB: excludes message/page/segment headers and padding
let mut g = c.benchmark_group("wal_acceptor_throughput");
g.sample_size(10);
g.throughput(criterion::Throughput::Bytes(VOLUME as u64));
for fsync in [false, true] {
for commit in [false, true] {
for size in [KB, 8 * KB, 128 * KB, MB] {
assert_eq!(VOLUME % size, 0, "volume must be divisible by size");
let count = VOLUME / size;
g.bench_function(format!("fsync={fsync}/commit={commit}/size={size}"), |b| {
run_bench(b, count, size, fsync, commit).unwrap()
});
}
}
}
/// The actual benchmark. size is the payload size per message, count is the number of messages.
/// If commit is true, advance the commit LSN on each message.
fn run_bench(
b: &mut Bencher,
count: usize,
size: usize,
fsync: bool,
commit: bool,
) -> anyhow::Result<()> {
let runtime = tokio::runtime::Runtime::new()?; // needs multithreaded
// Construct the payload. The prefix counts towards the payload (including NUL terminator).
let prefix = c"p";
let prefixlen = prefix.to_bytes_with_nul().len();
assert!(size >= prefixlen);
let message = vec![0; size - prefixlen];
let walgen = &mut WalGenerator::new(LogicalMessageGenerator::new(prefix, &message), Lsn(0));
// Construct and spawn the WalAcceptor task.
let env = Env::new(fsync)?;
let (msg_tx, msg_rx) = tokio::sync::mpsc::channel(receive_wal::MSG_QUEUE_SIZE);
let (reply_tx, mut reply_rx) = tokio::sync::mpsc::channel(receive_wal::REPLY_QUEUE_SIZE);
runtime.block_on(async {
let tli = env
.make_timeline(NodeId(1), TenantTimelineId::generate(), Lsn(0))
.await?
.wal_residence_guard()
.await?;
WalAcceptor::spawn(tli, msg_rx, reply_tx, Some(0));
anyhow::Ok(())
})?;
// Ingest the WAL.
b.iter(|| {
runtime.block_on(async {
let reqgen = walgen.take(count).map(|(lsn, record)| AppendRequest {
h: AppendRequestHeader {
generation: Generation::new(0),
term: 1,
begin_lsn: lsn,
end_lsn: lsn + record.len() as u64,
commit_lsn: if commit { lsn } else { Lsn(0) }, // commit previous record
truncate_lsn: Lsn(0),
},
wal_data: record,
});
// Send requests.
for req in reqgen {
_ = reply_rx.try_recv(); // discard any replies, to avoid blocking
let msg = ProposerAcceptorMessage::AppendRequest(req);
msg_tx.send(msg).await.expect("send failed");
}
// Wait for last message to get flushed.
while let Some(reply) = reply_rx.recv().await {
if let AcceptorProposerMessage::AppendResponse(resp) = reply {
if resp.flush_lsn >= walgen.lsn {
return;
}
}
}
panic!("disconnected")
})
});
Ok(())
}
}
/// Benchmarks OS write throughput by appending blocks of a given size to a file. This is intended
/// to compare Tokio and stdlib writes, and give a baseline for optimal WAL throughput.
fn bench_file_write(c: &mut Criterion) {
let mut g = c.benchmark_group("file_write");
for kind in ["stdlib", "tokio"] {
for fsync in [false, true] {
for size in [8, KB, 8 * KB, 128 * KB, MB] {
// Kind of weird to change the group throughput per benchmark, but it's the only way to
// vary it per benchmark. It works.
g.throughput(criterion::Throughput::Bytes(size as u64));
g.bench_function(
format!("{kind}/fsync={fsync}/size={size}"),
|b| match kind {
"stdlib" => run_bench_stdlib(b, size, fsync).unwrap(),
"tokio" => run_bench_tokio(b, size, fsync).unwrap(),
name => panic!("unknown kind {name}"),
},
);
}
}
}
fn run_bench_stdlib(b: &mut Bencher, size: usize, fsync: bool) -> anyhow::Result<()> {
let mut file = tempfile()?;
let buf = vec![0u8; size];
b.iter(|| {
file.write_all(&buf).unwrap();
file.flush().unwrap();
if fsync {
file.sync_data().unwrap();
}
});
Ok(())
}
fn run_bench_tokio(b: &mut Bencher, size: usize, fsync: bool) -> anyhow::Result<()> {
let runtime = tokio::runtime::Runtime::new()?; // needs multithreaded
let mut file = tokio::fs::File::from_std(tempfile()?);
let buf = vec![0u8; size];
b.iter(|| {
runtime.block_on(async {
file.write_all(&buf).await.unwrap();
file.flush().await.unwrap();
if fsync {
file.sync_data().await.unwrap();
}
})
});
Ok(())
}
}
/// Benchmarks the cost of memory allocations when receiving WAL messages. This emulates the logic
/// in FeMessage::parse, which extends the read buffer. It is primarily intended to test jemalloc.
fn bench_bytes_reserve(c: &mut Criterion) {
let mut g = c.benchmark_group("bytes_reserve");
for size in [1, 64, KB, 8 * KB, 128 * KB] {
g.throughput(criterion::Throughput::Bytes(size as u64));
g.bench_function(format!("size={size}"), |b| run_bench(b, size).unwrap());
}
fn run_bench(b: &mut Bencher, size: usize) -> anyhow::Result<()> {
let mut bytes = BytesMut::new();
let data = vec![0; size];
b.iter(|| {
bytes.reserve(size);
bytes.extend_from_slice(&data);
bytes.split_to(size).freeze();
});
Ok(())
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/client/src/mgmt_api.rs | safekeeper/client/src/mgmt_api.rs | //! Safekeeper http client.
//!
//! Partially copied from pageserver client; some parts might be better to be
//! united.
use std::error::Error as _;
use http_utils::error::HttpErrorBody;
use reqwest::{IntoUrl, Method, Response, StatusCode};
use safekeeper_api::models::{
self, PullTimelineRequest, PullTimelineResponse, SafekeeperStatus, SafekeeperUtilization,
TimelineCreateRequest,
};
use utils::id::{NodeId, TenantId, TimelineId};
use utils::logging::SecretString;
#[derive(Debug, Clone)]
pub struct Client {
mgmt_api_endpoint: String,
authorization_header: Option<SecretString>,
client: reqwest::Client,
}
#[derive(thiserror::Error, Debug)]
pub enum Error {
/// Failed to receive body (reqwest error).
#[error("receive body: {0}{}", .0.source().map(|e| format!(": {e}")).unwrap_or_default())]
ReceiveBody(reqwest::Error),
/// Status is not ok, but failed to parse body as `HttpErrorBody`.
#[error("receive error body: {0}")]
ReceiveErrorBody(String),
/// Status is not ok; parsed error in body as `HttpErrorBody`.
#[error("safekeeper API: {1}")]
ApiError(StatusCode, String),
#[error("Cancelled")]
Cancelled,
#[error("request timed out: {0}")]
Timeout(String),
}
pub type Result<T> = std::result::Result<T, Error>;
pub trait ResponseErrorMessageExt: Sized {
fn error_from_body(self) -> impl std::future::Future<Output = Result<Self>> + Send;
}
/// If status is not ok, try to extract error message from the body.
impl ResponseErrorMessageExt for reqwest::Response {
async fn error_from_body(self) -> Result<Self> {
let status = self.status();
if status.is_success() {
return Ok(self);
}
let url = self.url().to_owned();
Err(match self.json::<HttpErrorBody>().await {
Ok(HttpErrorBody { msg }) => Error::ApiError(status, msg),
Err(_) => {
Error::ReceiveErrorBody(format!("http error ({}) at {}.", status.as_u16(), url))
}
})
}
}
impl Client {
pub fn new(
client: reqwest::Client,
mgmt_api_endpoint: String,
jwt: Option<SecretString>,
) -> Self {
Self {
mgmt_api_endpoint,
authorization_header: jwt
.map(|jwt| SecretString::from(format!("Bearer {}", jwt.get_contents()))),
client,
}
}
pub async fn create_timeline(&self, req: &TimelineCreateRequest) -> Result<reqwest::Response> {
let uri = format!("{}/v1/tenant/timeline", self.mgmt_api_endpoint);
let resp = self.post(&uri, req).await?;
Ok(resp)
}
pub async fn pull_timeline(&self, req: &PullTimelineRequest) -> Result<PullTimelineResponse> {
let uri = format!("{}/v1/pull_timeline", self.mgmt_api_endpoint);
let resp = self.post(&uri, req).await?;
resp.json().await.map_err(Error::ReceiveBody)
}
pub async fn exclude_timeline(
&self,
tenant_id: TenantId,
timeline_id: TimelineId,
req: &models::TimelineMembershipSwitchRequest,
) -> Result<models::TimelineDeleteResult> {
let uri = format!(
"{}/v1/tenant/{}/timeline/{}/exclude",
self.mgmt_api_endpoint, tenant_id, timeline_id
);
let resp = self.put(&uri, req).await?;
resp.json().await.map_err(Error::ReceiveBody)
}
pub async fn delete_timeline(
&self,
tenant_id: TenantId,
timeline_id: TimelineId,
) -> Result<models::TimelineDeleteResult> {
let uri = format!(
"{}/v1/tenant/{}/timeline/{}",
self.mgmt_api_endpoint, tenant_id, timeline_id
);
let resp = self
.request_maybe_body(Method::DELETE, &uri, None::<()>)
.await?;
resp.json().await.map_err(Error::ReceiveBody)
}
pub async fn switch_timeline_membership(
&self,
tenant_id: TenantId,
timeline_id: TimelineId,
req: &models::TimelineMembershipSwitchRequest,
) -> Result<models::TimelineMembershipSwitchResponse> {
let uri = format!(
"{}/v1/tenant/{}/timeline/{}/membership",
self.mgmt_api_endpoint, tenant_id, timeline_id
);
let resp = self.put(&uri, req).await?;
resp.json().await.map_err(Error::ReceiveBody)
}
pub async fn delete_tenant(&self, tenant_id: TenantId) -> Result<models::TenantDeleteResult> {
let uri = format!("{}/v1/tenant/{}", self.mgmt_api_endpoint, tenant_id);
let resp = self
.request_maybe_body(Method::DELETE, &uri, None::<()>)
.await?;
resp.json().await.map_err(Error::ReceiveBody)
}
pub async fn bump_timeline_term(
&self,
tenant_id: TenantId,
timeline_id: TimelineId,
req: &models::TimelineTermBumpRequest,
) -> Result<models::TimelineTermBumpResponse> {
let uri = format!(
"{}/v1/tenant/{}/timeline/{}/term_bump",
self.mgmt_api_endpoint, tenant_id, timeline_id
);
let resp = self.post(&uri, req).await?;
resp.json().await.map_err(Error::ReceiveBody)
}
pub async fn timeline_status(
&self,
tenant_id: TenantId,
timeline_id: TimelineId,
) -> Result<Response> {
let uri = format!(
"{}/v1/tenant/{}/timeline/{}",
self.mgmt_api_endpoint, tenant_id, timeline_id
);
self.get(&uri).await
}
pub async fn snapshot(
&self,
tenant_id: TenantId,
timeline_id: TimelineId,
stream_to: NodeId,
) -> Result<reqwest::Response> {
let uri = format!(
"{}/v1/tenant/{}/timeline/{}/snapshot/{}",
self.mgmt_api_endpoint, tenant_id, timeline_id, stream_to.0
);
self.get(&uri).await
}
pub async fn status(&self) -> Result<SafekeeperStatus> {
let uri = format!("{}/v1/status", self.mgmt_api_endpoint);
let resp = self.get(&uri).await?;
resp.json().await.map_err(Error::ReceiveBody)
}
pub async fn utilization(&self) -> Result<SafekeeperUtilization> {
let uri = format!("{}/v1/utilization", self.mgmt_api_endpoint);
let resp = self.get(&uri).await?;
resp.json().await.map_err(Error::ReceiveBody)
}
async fn post<B: serde::Serialize, U: IntoUrl>(
&self,
uri: U,
body: B,
) -> Result<reqwest::Response> {
self.request(Method::POST, uri, body).await
}
async fn put<B: serde::Serialize, U: IntoUrl>(
&self,
uri: U,
body: B,
) -> Result<reqwest::Response> {
self.request(Method::PUT, uri, body).await
}
async fn get<U: IntoUrl>(&self, uri: U) -> Result<reqwest::Response> {
self.request(Method::GET, uri, ()).await
}
/// Send the request and check that the status code is good.
async fn request<B: serde::Serialize, U: reqwest::IntoUrl>(
&self,
method: Method,
uri: U,
body: B,
) -> Result<reqwest::Response> {
self.request_maybe_body(method, uri, Some(body)).await
}
/// Send the request and check that the status code is good, with an optional body.
async fn request_maybe_body<B: serde::Serialize, U: reqwest::IntoUrl>(
&self,
method: Method,
uri: U,
body: Option<B>,
) -> Result<reqwest::Response> {
let res = self.request_noerror(method, uri, body).await?;
let response = res.error_from_body().await?;
Ok(response)
}
/// Just send the request.
async fn request_noerror<B: serde::Serialize, U: reqwest::IntoUrl>(
&self,
method: Method,
uri: U,
body: Option<B>,
) -> Result<reqwest::Response> {
let mut req = self.client.request(method, uri);
if let Some(value) = &self.authorization_header {
req = req.header(reqwest::header::AUTHORIZATION, value.get_contents())
}
if let Some(body) = body {
req = req.json(&body);
}
req.send().await.map_err(Error::ReceiveBody)
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/safekeeper/client/src/lib.rs | safekeeper/client/src/lib.rs | pub mod mgmt_api;
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/test_runner/pg_clients/rust/tokio-postgres/src/main.rs | test_runner/pg_clients/rust/tokio-postgres/src/main.rs | use std::env::VarError;
use tokio_postgres;
fn get_env(key: &str) -> String {
match std::env::var(key) {
Ok(val) => val,
Err(VarError::NotPresent) => panic!("{key} env variable not set"),
Err(VarError::NotUnicode(_)) => panic!("{key} is not valid unicode"),
}
}
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), tokio_postgres::Error> {
let host = get_env("NEON_HOST");
let database = get_env("NEON_DATABASE");
let user = get_env("NEON_USER");
let password = get_env("NEON_PASSWORD");
let url = format!("postgresql://{user}:{password}@{host}/{database}");
// Use the native TLS implementation (Neon requires TLS)
let tls_connector =
postgres_native_tls::MakeTlsConnector::new(native_tls::TlsConnector::new().unwrap());
// Connect to the database.
let (client, connection) = tokio_postgres::connect(&url, tls_connector).await?;
// The connection object performs the actual communication with the database,
// so spawn it off to run on its own.
tokio::spawn(async move {
if let Err(e) = connection.await {
eprintln!("connection error: {}", e);
}
});
let result = client.query("SELECT 1", &[]).await?;
let value: i32 = result[0].get(0);
assert_eq!(value, 1);
println!("{value}");
Ok(())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/workspace_hack/build.rs | workspace_hack/build.rs | // A build script is required for cargo to consider build dependencies.
fn main() {}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/workspace_hack/src/lib.rs | workspace_hack/src/lib.rs | // This is a stub lib.rs.
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/storage_broker/build.rs | storage_broker/build.rs | fn main() -> Result<(), Box<dyn std::error::Error>> {
// Generate rust code from .proto protobuf.
//
// Note: we previously tried to use deterministic location at proto/ for
// easy location, but apparently interference with cachepot sometimes fails
// the build then. Anyway, per cargo docs build script shouldn't output to
// anywhere but $OUT_DIR.
tonic_build::compile_protos("proto/broker.proto")
.unwrap_or_else(|e| panic!("failed to compile protos {e:?}"));
Ok(())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/storage_broker/src/lib.rs | storage_broker/src/lib.rs | use std::time::Duration;
use proto::TenantTimelineId as ProtoTenantTimelineId;
use proto::broker_service_client::BrokerServiceClient;
use tonic::Status;
use tonic::codegen::StdError;
use tonic::transport::{Channel, Endpoint};
use utils::id::{TenantId, TenantTimelineId, TimelineId};
// Code generated by protobuf.
pub mod proto {
// Tonic does derives as `#[derive(Clone, PartialEq, ::prost::Message)]`
// we don't use these types for anything but broker data transmission,
// so it's ok to ignore this one.
#![allow(clippy::derive_partial_eq_without_eq)]
tonic::include_proto!("storage_broker");
}
pub mod metrics;
// Re-exports to avoid direct tonic dependency in user crates.
pub use hyper::Uri;
pub use tonic::transport::{Certificate, ClientTlsConfig};
pub use tonic::{Code, Request, Streaming};
pub const DEFAULT_LISTEN_ADDR: &str = "127.0.0.1:50051";
pub const DEFAULT_ENDPOINT: &str = const_format::formatcp!("http://{DEFAULT_LISTEN_ADDR}");
pub const DEFAULT_KEEPALIVE_INTERVAL: &str = "5000 ms";
pub const DEFAULT_CONNECT_TIMEOUT: Duration = Duration::from_millis(5000);
// BrokerServiceClient charged with tonic provided Channel transport; helps to
// avoid depending on tonic directly in user crates.
pub type BrokerClientChannel = BrokerServiceClient<Channel>;
// Create connection object configured to run TLS if schema starts with https://
// and plain text otherwise. Connection is lazy, only endpoint sanity is
// validated here.
//
// NB: this function is not async, but still must be run on a tokio runtime thread
// because that's a requirement of tonic_endpoint.connect_lazy()'s Channel::new call.
pub fn connect<U>(
endpoint: U,
keepalive_interval: Duration,
tls_config: ClientTlsConfig,
) -> anyhow::Result<BrokerClientChannel>
where
U: std::convert::TryInto<Uri>,
U::Error: std::error::Error + Send + Sync + 'static,
{
let uri: Uri = endpoint.try_into()?;
let mut tonic_endpoint: Endpoint = uri.into();
// If schema starts with https, start encrypted connection; do plain text
// otherwise.
if let Some("https") = tonic_endpoint.uri().scheme_str() {
// if there's no default provider and both ring+aws-lc-rs are enabled
// this the tls settings on tonic will not work.
// erroring is ok.
rustls::crypto::ring::default_provider()
.install_default()
.ok();
tonic_endpoint = tonic_endpoint.tls_config(tls_config)?;
}
tonic_endpoint = tonic_endpoint
.http2_keep_alive_interval(keepalive_interval)
.keep_alive_while_idle(true)
.connect_timeout(DEFAULT_CONNECT_TIMEOUT);
// keep_alive_timeout is 20s by default on both client and server side
let channel = tonic_endpoint.connect_lazy();
Ok(BrokerClientChannel::new(channel))
}
impl BrokerClientChannel {
/// Create a new client to the given endpoint, but don't actually connect until the first request.
pub async fn connect_lazy<D>(dst: D) -> Result<Self, tonic::transport::Error>
where
D: std::convert::TryInto<tonic::transport::Endpoint>,
D::Error: Into<StdError>,
{
let conn = tonic::transport::Endpoint::new(dst)?.connect_lazy();
Ok(Self::new(conn))
}
}
// parse variable length bytes from protobuf
#[allow(clippy::result_large_err, reason = "TODO")]
pub fn parse_proto_ttid(proto_ttid: &ProtoTenantTimelineId) -> Result<TenantTimelineId, Status> {
let tenant_id = TenantId::from_slice(&proto_ttid.tenant_id)
.map_err(|e| Status::new(Code::InvalidArgument, format!("malformed tenant_id: {e}")))?;
let timeline_id = TimelineId::from_slice(&proto_ttid.timeline_id)
.map_err(|e| Status::new(Code::InvalidArgument, format!("malformed timeline_id: {e}")))?;
Ok(TenantTimelineId {
tenant_id,
timeline_id,
})
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/storage_broker/src/metrics.rs | storage_broker/src/metrics.rs | //! Broker metrics.
use metrics::{IntCounter, IntGauge, register_int_counter, register_int_gauge};
use once_cell::sync::Lazy;
pub static NUM_PUBS: Lazy<IntGauge> = Lazy::new(|| {
register_int_gauge!("storage_broker_active_publishers", "Number of publications")
.expect("Failed to register metric")
});
pub static NUM_SUBS_TIMELINE: Lazy<IntGauge> = Lazy::new(|| {
register_int_gauge!(
"storage_broker_per_timeline_active_subscribers",
"Number of subsciptions to particular tenant timeline id"
)
.expect("Failed to register metric")
});
pub static NUM_SUBS_ALL: Lazy<IntGauge> = Lazy::new(|| {
register_int_gauge!(
"storage_broker_all_keys_active_subscribers",
"Number of subsciptions to all keys"
)
.expect("Failed to register metric")
});
pub static PROCESSED_MESSAGES_TOTAL: Lazy<IntCounter> = Lazy::new(|| {
register_int_counter!(
"storage_broker_processed_messages_total",
"Number of messages received by storage broker, before routing and broadcasting"
)
.expect("Failed to register metric")
});
pub static BROADCASTED_MESSAGES_TOTAL: Lazy<IntCounter> = Lazy::new(|| {
register_int_counter!(
"storage_broker_broadcasted_messages_total",
"Number of messages broadcasted (sent over network) to subscribers"
)
.expect("Failed to register metric")
});
pub static BROADCAST_DROPPED_MESSAGES_TOTAL: Lazy<IntCounter> = Lazy::new(|| {
register_int_counter!(
"storage_broker_broadcast_dropped_messages_total",
"Number of messages dropped due to channel capacity overflow"
)
.expect("Failed to register metric")
});
pub static PUBLISHED_ONEOFF_MESSAGES_TOTAL: Lazy<IntCounter> = Lazy::new(|| {
register_int_counter!(
"storage_broker_published_oneoff_messages_total",
"Number of one-off messages sent via PublishOne method"
)
.expect("Failed to register metric")
});
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/storage_broker/src/bin/storage_broker.rs | storage_broker/src/bin/storage_broker.rs | //! Simple pub-sub based on grpc (tonic) and Tokio broadcast channel for storage
//! nodes messaging.
//!
//! Subscriptions to 1) single timeline 2) all timelines are possible. We could
//! add subscription to the set of timelines to save grpc streams, but testing
//! shows many individual streams is also ok.
//!
//! Message is dropped if subscriber can't consume it, not affecting other
//! subscribers.
//!
//! Only safekeeper message is supported, but it is not hard to add something
//! else with generics.
use std::collections::HashMap;
use std::convert::Infallible;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;
use bytes::Bytes;
use camino::Utf8PathBuf;
use clap::{Parser, command};
use futures::future::OptionFuture;
use futures_core::Stream;
use futures_util::StreamExt;
use http_body_util::combinators::BoxBody;
use http_body_util::{Empty, Full};
use http_utils::tls_certs::ReloadingCertificateResolver;
use hyper::body::Incoming;
use hyper::header::CONTENT_TYPE;
use hyper::service::service_fn;
use hyper::{Method, StatusCode};
use hyper_util::rt::{TokioExecutor, TokioIo, TokioTimer};
use metrics::{Encoder, TextEncoder};
use parking_lot::RwLock;
use storage_broker::metrics::{
BROADCAST_DROPPED_MESSAGES_TOTAL, BROADCASTED_MESSAGES_TOTAL, NUM_PUBS, NUM_SUBS_ALL,
NUM_SUBS_TIMELINE, PROCESSED_MESSAGES_TOTAL, PUBLISHED_ONEOFF_MESSAGES_TOTAL,
};
use storage_broker::proto::broker_service_server::{BrokerService, BrokerServiceServer};
use storage_broker::proto::subscribe_safekeeper_info_request::SubscriptionKey as ProtoSubscriptionKey;
use storage_broker::proto::{
FilterTenantTimelineId, MessageType, SafekeeperDiscoveryRequest, SafekeeperDiscoveryResponse,
SafekeeperTimelineInfo, SubscribeByFilterRequest, SubscribeSafekeeperInfoRequest, TypedMessage,
};
use storage_broker::{DEFAULT_KEEPALIVE_INTERVAL, parse_proto_ttid};
use tokio::net::TcpListener;
use tokio::sync::broadcast;
use tokio::sync::broadcast::error::RecvError;
use tokio::time;
use tonic::codegen::Service;
use tonic::{Code, Request, Response, Status};
use tracing::*;
use utils::id::TenantTimelineId;
use utils::logging::{self, LogFormat};
use utils::sentry_init::init_sentry;
use utils::signals::ShutdownSignals;
use utils::{project_build_tag, project_git_version};
project_git_version!(GIT_VERSION);
project_build_tag!(BUILD_TAG);

// Default capacity of the broadcast channel feeding one per-timeline subscriber.
const DEFAULT_CHAN_SIZE: usize = 32;
// Default capacity of the channel feeding an "all keys" subscriber; larger,
// since that channel carries messages for every timeline.
const DEFAULT_ALL_KEYS_CHAN_SIZE: usize = 16384;

// Default locations of the TLS key/cert used by the HTTPS listener, and how
// often they are re-read from disk.
const DEFAULT_SSL_KEY_FILE: &str = "server.key";
const DEFAULT_SSL_CERT_FILE: &str = "server.crt";
const DEFAULT_SSL_CERT_RELOAD_PERIOD: &str = "60s";
// Command-line arguments. At least one of --listen-addr / --listen-https-addr
// must be given (enforced by the clap ArgGroup below). NOTE: field-level `///`
// comments double as clap help text, so only `//` comments are added here.
#[derive(Parser, Debug)]
#[command(version = GIT_VERSION, about = "Broker for neon storage nodes communication", long_about = None)]
#[clap(group(
    clap::ArgGroup::new("listen-addresses")
        .required(true)
        .multiple(true)
        .args(&["listen_addr", "listen_https_addr"]),
))]
struct Args {
    /// Endpoint to listen HTTP on.
    #[arg(short, long)]
    listen_addr: Option<SocketAddr>,
    /// Endpoint to listen HTTPS on.
    #[arg(long)]
    listen_https_addr: Option<SocketAddr>,
    /// Size of the queue to the per timeline subscriber.
    #[arg(long, default_value_t = DEFAULT_CHAN_SIZE)]
    timeline_chan_size: usize,
    /// Size of the queue to the all keys subscriber.
    #[arg(long, default_value_t = DEFAULT_ALL_KEYS_CHAN_SIZE)]
    all_keys_chan_size: usize,
    /// HTTP/2 keepalive interval.
    #[arg(long, value_parser = humantime::parse_duration, default_value = DEFAULT_KEEPALIVE_INTERVAL)]
    http2_keepalive_interval: Duration,
    /// Format for logging, either 'plain' or 'json'.
    #[arg(long, default_value = "plain")]
    log_format: String,
    /// Path to a file with certificate's private key for https API.
    #[arg(long, default_value = DEFAULT_SSL_KEY_FILE)]
    ssl_key_file: Utf8PathBuf,
    /// Path to a file with a X509 certificate for https API.
    #[arg(long, default_value = DEFAULT_SSL_CERT_FILE)]
    ssl_cert_file: Utf8PathBuf,
    /// Period to reload certificate and private key from files.
    #[arg(long, value_parser = humantime::parse_duration, default_value = DEFAULT_SSL_CERT_RELOAD_PERIOD)]
    ssl_cert_reload_period: Duration,
}
/// Id of publisher for registering in maps
type PubId = u64;

/// Id of subscriber for registering in maps
type SubId = u64;

/// Single enum type for all messages.
///
/// Wraps each supported protobuf payload so routing and broadcast code can
/// handle them uniformly; conversions live in `Message::from` and
/// `Message::as_typed_message`.
#[derive(Clone, Debug, PartialEq)]
#[allow(clippy::enum_variant_names)]
enum Message {
    SafekeeperTimelineInfo(SafekeeperTimelineInfo),
    SafekeeperDiscoveryRequest(SafekeeperDiscoveryRequest),
    SafekeeperDiscoveryResponse(SafekeeperDiscoveryResponse),
}
impl Message {
    /// Convert a protobuf `TypedMessage` into the internal message enum.
    ///
    /// The payload field matching the declared type must be present,
    /// otherwise an `InvalidArgument` status is returned.
    #[allow(clippy::result_large_err, reason = "TODO")]
    pub fn from(proto_msg: TypedMessage) -> Result<Self, Status> {
        // Shared error constructor for the three "payload absent" cases.
        fn missing(field: &str) -> Status {
            Status::new(Code::InvalidArgument, format!("missing {field}"))
        }
        match proto_msg.r#type() {
            MessageType::SafekeeperTimelineInfo => proto_msg
                .safekeeper_timeline_info
                .map(Message::SafekeeperTimelineInfo)
                .ok_or_else(|| missing("safekeeper_timeline_info")),
            MessageType::SafekeeperDiscoveryRequest => proto_msg
                .safekeeper_discovery_request
                .map(Message::SafekeeperDiscoveryRequest)
                .ok_or_else(|| missing("safekeeper_discovery_request")),
            MessageType::SafekeeperDiscoveryResponse => proto_msg
                .safekeeper_discovery_response
                .map(Message::SafekeeperDiscoveryResponse)
                .ok_or_else(|| missing("safekeeper_discovery_response")),
            MessageType::Unknown => Err(Status::new(
                Code::InvalidArgument,
                format!("invalid message type: {:?}", proto_msg.r#type),
            )),
        }
    }

    /// Get the tenant_timeline_id carried by the message, if any.
    ///
    /// Returns `InvalidArgument` when the embedded id fails to parse.
    #[allow(clippy::result_large_err, reason = "TODO")]
    pub fn tenant_timeline_id(&self) -> Result<Option<TenantTimelineId>, Status> {
        // All three variants store the id in the same-named field; pick the
        // right one, then parse once.
        let proto_ttid = match self {
            Message::SafekeeperTimelineInfo(msg) => msg.tenant_timeline_id.as_ref(),
            Message::SafekeeperDiscoveryRequest(msg) => msg.tenant_timeline_id.as_ref(),
            Message::SafekeeperDiscoveryResponse(msg) => msg.tenant_timeline_id.as_ref(),
        };
        proto_ttid.map(parse_proto_ttid).transpose()
    }

    /// Convert the internal message back into the protobuf struct, cloning
    /// the payload into the matching optional field.
    pub fn as_typed_message(&self) -> TypedMessage {
        let mut typed = TypedMessage {
            r#type: self.message_type() as i32,
            ..Default::default()
        };
        match self {
            Message::SafekeeperTimelineInfo(payload) => {
                typed.safekeeper_timeline_info = Some(payload.clone())
            }
            Message::SafekeeperDiscoveryRequest(payload) => {
                typed.safekeeper_discovery_request = Some(payload.clone())
            }
            Message::SafekeeperDiscoveryResponse(payload) => {
                typed.safekeeper_discovery_response = Some(payload.clone())
            }
        }
        typed
    }

    /// Protobuf message type tag for this variant.
    pub fn message_type(&self) -> MessageType {
        match self {
            Message::SafekeeperTimelineInfo(_) => MessageType::SafekeeperTimelineInfo,
            Message::SafekeeperDiscoveryRequest(_) => MessageType::SafekeeperDiscoveryRequest,
            Message::SafekeeperDiscoveryResponse(_) => MessageType::SafekeeperDiscoveryResponse,
        }
    }
}
/// What a subscriber wants to receive: every message, or only messages for
/// one tenant timeline.
#[derive(Copy, Clone, Debug)]
enum SubscriptionKey {
    All,
    Timeline(TenantTimelineId),
}
impl SubscriptionKey {
    /// Parse protobuf subkey (protobuf doesn't have fixed size bytes, we get vectors).
    #[allow(clippy::result_large_err, reason = "TODO")]
    pub fn from_proto_subscription_key(key: ProtoSubscriptionKey) -> Result<Self, Status> {
        match key {
            ProtoSubscriptionKey::All(_) => Ok(SubscriptionKey::All),
            ProtoSubscriptionKey::TenantTimelineId(proto_ttid) => {
                Ok(SubscriptionKey::Timeline(parse_proto_ttid(&proto_ttid)?))
            }
        }
    }

    /// Parse from `FilterTenantTimelineId`.
    ///
    /// A missing or disabled filter means "subscribe to all keys". An enabled
    /// filter must carry a tenant timeline id, otherwise `InvalidArgument`.
    #[allow(clippy::result_large_err, reason = "TODO")]
    pub fn from_proto_filter_tenant_timeline_id(
        opt: Option<&FilterTenantTimelineId>,
    ) -> Result<Self, Status> {
        // A guarded match replaces the original is_none()/unwrap() sequence;
        // both "no filter" and "filter disabled" fall through to `All`.
        match opt {
            Some(f) if f.enabled => {
                let proto_ttid = f.tenant_timeline_id.as_ref().ok_or_else(|| {
                    Status::new(Code::InvalidArgument, "missing tenant_timeline_id")
                })?;
                Ok(SubscriptionKey::Timeline(parse_proto_ttid(proto_ttid)?))
            }
            _ => Ok(SubscriptionKey::All),
        }
    }
}
/// Channel to timeline subscribers.
struct ChanToTimelineSub {
    // Sending half of the broadcast channel for this timeline's messages.
    chan: broadcast::Sender<Message>,
    /// Tracked separately to know when delete the shmem entry. receiver_count()
    /// is unhandy for that as unregistering and dropping the receiver side
    /// happens at different moments.
    num_subscribers: u64,
}

/// Broker state shared by all connections: id allocation, pub/sub counters
/// (mirrored to Prometheus gauges), and the broadcast channels.
struct SharedState {
    // Next ids to hand out; monotonically increasing.
    next_pub_id: PubId,
    num_pubs: i64,
    next_sub_id: SubId,
    num_subs_to_timelines: i64,
    // One broadcast channel per timeline that has at least one subscriber.
    chans_to_timeline_subs: HashMap<TenantTimelineId, ChanToTimelineSub>,
    num_subs_to_all: i64,
    // Single channel carrying every message, for "all keys" subscribers.
    chan_to_all_subs: broadcast::Sender<Message>,
}
impl SharedState {
    /// Fresh state; the "all keys" broadcast channel is created up front with
    /// the requested capacity, per-timeline channels are created on demand.
    pub fn new(all_keys_chan_size: usize) -> Self {
        SharedState {
            next_pub_id: 0,
            num_pubs: 0,
            next_sub_id: 0,
            num_subs_to_timelines: 0,
            chans_to_timeline_subs: HashMap::new(),
            num_subs_to_all: 0,
            chan_to_all_subs: broadcast::channel(all_keys_chan_size).0,
        }
    }

    /// Register new publisher: allocate an id and update the gauge.
    pub fn register_publisher(&mut self) -> PubId {
        let pub_id = self.next_pub_id;
        self.next_pub_id += 1;
        self.num_pubs += 1;
        NUM_PUBS.set(self.num_pubs);
        pub_id
    }

    /// Unregister publisher.
    pub fn unregister_publisher(&mut self) {
        self.num_pubs -= 1;
        NUM_PUBS.set(self.num_pubs);
    }

    /// Register new subscriber; returns its id and the broadcast receiver it
    /// should consume messages from.
    pub fn register_subscriber(
        &mut self,
        sub_key: SubscriptionKey,
        timeline_chan_size: usize,
    ) -> (SubId, broadcast::Receiver<Message>) {
        let sub_id = self.next_sub_id;
        self.next_sub_id += 1;
        let sub_rx = match sub_key {
            SubscriptionKey::All => {
                self.num_subs_to_all += 1;
                NUM_SUBS_ALL.set(self.num_subs_to_all);
                self.chan_to_all_subs.subscribe()
            }
            SubscriptionKey::Timeline(ttid) => {
                self.num_subs_to_timelines += 1;
                NUM_SUBS_TIMELINE.set(self.num_subs_to_timelines);
                // Create new broadcast channel for this key, or subscriber to
                // the existing one.
                let chan_to_timeline_sub =
                    self.chans_to_timeline_subs
                        .entry(ttid)
                        .or_insert(ChanToTimelineSub {
                            chan: broadcast::channel(timeline_chan_size).0,
                            num_subscribers: 0,
                        });
                chan_to_timeline_sub.num_subscribers += 1;
                chan_to_timeline_sub.chan.subscribe()
            }
        };
        (sub_id, sub_rx)
    }

    /// Unregister the subscriber, dropping the per-timeline channel when the
    /// last subscriber for that timeline leaves.
    pub fn unregister_subscriber(&mut self, sub_key: SubscriptionKey) {
        match sub_key {
            SubscriptionKey::All => {
                self.num_subs_to_all -= 1;
                NUM_SUBS_ALL.set(self.num_subs_to_all);
            }
            SubscriptionKey::Timeline(ttid) => {
                self.num_subs_to_timelines -= 1;
                NUM_SUBS_TIMELINE.set(self.num_subs_to_timelines);
                // Remove from the map, destroying the channel, if we are the
                // last subscriber to this timeline.
                // Missing entry is a bug; we must have registered.
                let chan_to_timeline_sub = self
                    .chans_to_timeline_subs
                    .get_mut(&ttid)
                    .expect("failed to find sub entry in shmem during unregister");
                chan_to_timeline_sub.num_subscribers -= 1;
                if chan_to_timeline_sub.num_subscribers == 0 {
                    self.chans_to_timeline_subs.remove(&ttid);
                }
            }
        }
    }
}
// SharedState wrapper.
/// Clonable handle over the broker state; every clone shares the same
/// `SharedState` behind a `RwLock`.
#[derive(Clone)]
struct Registry {
    shared_state: Arc<RwLock<SharedState>>,
    // Capacity used when a new per-timeline broadcast channel is created.
    timeline_chan_size: usize,
}

impl Registry {
    // Register new publisher in shared state.
    pub fn register_publisher(&self, remote_addr: SocketAddr) -> Publisher {
        let pub_id = self.shared_state.write().register_publisher();
        info!("publication started id={} addr={:?}", pub_id, remote_addr);
        Publisher {
            id: pub_id,
            registry: self.clone(),
            remote_addr,
        }
    }

    // Unregister publisher (called from Publisher::drop).
    pub fn unregister_publisher(&self, publisher: &Publisher) {
        self.shared_state.write().unregister_publisher();
        info!(
            "publication ended id={} addr={:?}",
            publisher.id, publisher.remote_addr
        );
    }

    // Register new subscriber in shared state.
    pub fn register_subscriber(
        &self,
        sub_key: SubscriptionKey,
        remote_addr: SocketAddr,
    ) -> Subscriber {
        let (sub_id, sub_rx) = self
            .shared_state
            .write()
            .register_subscriber(sub_key, self.timeline_chan_size);
        info!(
            "subscription started id={}, key={:?}, addr={:?}",
            sub_id, sub_key, remote_addr
        );
        Subscriber {
            id: sub_id,
            key: sub_key,
            sub_rx,
            registry: self.clone(),
            remote_addr,
        }
    }

    // Unregister the subscriber (called from Subscriber::drop).
    pub fn unregister_subscriber(&self, subscriber: &Subscriber) {
        self.shared_state
            .write()
            .unregister_subscriber(subscriber.key);
        info!(
            "subscription ended id={}, key={:?}, addr={:?}",
            subscriber.id, subscriber.key, subscriber.remote_addr
        );
    }

    /// Send msg to relevant subscribers: always to the "all keys" channel,
    /// and additionally to the per-timeline channel when the message carries
    /// a tenant timeline id. Holds only the read lock.
    #[allow(clippy::result_large_err, reason = "TODO")]
    pub fn send_msg(&self, msg: &Message) -> Result<(), Status> {
        PROCESSED_MESSAGES_TOTAL.inc();

        // send message to subscribers for everything
        let shared_state = self.shared_state.read();
        // Err means there is no subscribers, it is fine.
        shared_state.chan_to_all_subs.send(msg.clone()).ok();

        // send message to per timeline subscribers, if there is ttid
        let ttid = msg.tenant_timeline_id()?;
        if let Some(ttid) = ttid {
            if let Some(subs) = shared_state.chans_to_timeline_subs.get(&ttid) {
                // Err can't happen here, as tx is destroyed only after removing
                // from the map the last subscriber along with tx.
                subs.chan
                    .send(msg.clone())
                    .expect("rx is still in the map with zero subscribers");
            }
        }
        Ok(())
    }
}
// Private subscriber state. Unregisters itself from the shared state on Drop.
struct Subscriber {
    id: SubId,
    key: SubscriptionKey,
    // Subscriber receives messages from publishers here.
    sub_rx: broadcast::Receiver<Message>,
    // to unregister itself from shared state in Drop
    registry: Registry,
    // for logging
    remote_addr: SocketAddr,
}

impl Drop for Subscriber {
    fn drop(&mut self) {
        self.registry.unregister_subscriber(self);
    }
}

// Private publisher state. Unregisters itself from the shared state on Drop.
struct Publisher {
    id: PubId,
    registry: Registry,
    // for logging
    remote_addr: SocketAddr,
}

impl Publisher {
    /// Send msg to relevant subscribers (delegates to Registry::send_msg).
    #[allow(clippy::result_large_err, reason = "TODO")]
    pub fn send_msg(&mut self, msg: &Message) -> Result<(), Status> {
        self.registry.send_msg(msg)
    }
}

impl Drop for Publisher {
    fn drop(&mut self) {
        self.registry.unregister_publisher(self);
    }
}

/// gRPC service implementation; all RPC handlers go through this registry.
struct Broker {
    registry: Registry,
}
#[tonic::async_trait]
impl BrokerService for Broker {
    /// Consume a stream of safekeeper timeline info from one publisher and
    /// fan each message out to the relevant subscribers.
    async fn publish_safekeeper_info(
        &self,
        request: Request<tonic::Streaming<SafekeeperTimelineInfo>>,
    ) -> Result<Response<()>, Status> {
        let &RemoteAddr(remote_addr) = request
            .extensions()
            .get()
            .expect("RemoteAddr inserted by handler");
        let mut publisher = self.registry.register_publisher(remote_addr);

        let mut stream = request.into_inner();
        loop {
            match stream.next().await {
                Some(Ok(msg)) => publisher.send_msg(&Message::SafekeeperTimelineInfo(msg))?,
                Some(Err(e)) => return Err(e), // grpc error from the stream
                None => break,                 // closed stream
            }
        }
        Ok(Response::new(()))
    }

    type SubscribeSafekeeperInfoStream =
        Pin<Box<dyn Stream<Item = Result<SafekeeperTimelineInfo, Status>> + Send + 'static>>;

    /// Stream safekeeper info for one timeline (or all) back to the client
    /// until it disconnects; lagged messages are dropped and counted.
    async fn subscribe_safekeeper_info(
        &self,
        request: Request<SubscribeSafekeeperInfoRequest>,
    ) -> Result<Response<Self::SubscribeSafekeeperInfoStream>, Status> {
        let &RemoteAddr(remote_addr) = request
            .extensions()
            .get()
            .expect("RemoteAddr inserted by handler");
        let proto_key = request
            .into_inner()
            .subscription_key
            .ok_or_else(|| Status::new(Code::InvalidArgument, "missing subscription key"))?;
        let sub_key = SubscriptionKey::from_proto_subscription_key(proto_key)?;
        let mut subscriber = self.registry.register_subscriber(sub_key, remote_addr);

        // transform rx into stream with item = Result, as method result demands
        let output = async_stream::try_stream! {
            // Rate-limit the "dropped messages" warning to roughly once a second.
            let mut warn_interval = time::interval(Duration::from_millis(1000));
            let mut missed_msgs: u64 = 0;
            loop {
                match subscriber.sub_rx.recv().await {
                    Ok(info) => {
                        match info {
                            Message::SafekeeperTimelineInfo(info) => yield info,
                            _ => {},
                        }
                        BROADCASTED_MESSAGES_TOTAL.inc();
                    },
                    Err(RecvError::Lagged(skipped_msg)) => {
                        BROADCAST_DROPPED_MESSAGES_TOTAL.inc_by(skipped_msg);
                        missed_msgs += skipped_msg;
                        if (futures::poll!(Box::pin(warn_interval.tick()))).is_ready() {
                            warn!("subscription id={}, key={:?} addr={:?} dropped {} messages, channel is full",
                                subscriber.id, subscriber.key, subscriber.remote_addr, missed_msgs);
                            missed_msgs = 0;
                        }
                    }
                    Err(RecvError::Closed) => {
                        // can't happen, we never drop the channel while there is a subscriber
                        Err(Status::new(Code::Internal, "channel unexpectantly closed"))?;
                    }
                }
            }
        };
        Ok(Response::new(
            Box::pin(output) as Self::SubscribeSafekeeperInfoStream
        ))
    }

    type SubscribeByFilterStream =
        Pin<Box<dyn Stream<Item = Result<TypedMessage, Status>> + Send + 'static>>;

    /// Subscribe to all messages, limited by a filter.
    async fn subscribe_by_filter(
        &self,
        request: Request<SubscribeByFilterRequest>,
    ) -> std::result::Result<Response<Self::SubscribeByFilterStream>, Status> {
        let &RemoteAddr(remote_addr) = request
            .extensions()
            .get()
            .expect("RemoteAddr inserted by handler");
        let proto_filter = request.into_inner();
        let ttid_filter = proto_filter.tenant_timeline_id.as_ref();

        let sub_key = SubscriptionKey::from_proto_filter_tenant_timeline_id(ttid_filter)?;
        // Message types the client asked for; everything else is filtered out below.
        let types_set = proto_filter
            .types
            .iter()
            .map(|t| t.r#type)
            .collect::<std::collections::HashSet<_>>();

        let mut subscriber = self.registry.register_subscriber(sub_key, remote_addr);

        // transform rx into stream with item = Result, as method result demands
        let output = async_stream::try_stream! {
            let mut warn_interval = time::interval(Duration::from_millis(1000));
            let mut missed_msgs: u64 = 0;
            loop {
                match subscriber.sub_rx.recv().await {
                    Ok(msg) => {
                        let msg_type = msg.message_type() as i32;
                        if types_set.contains(&msg_type) {
                            yield msg.as_typed_message();
                            BROADCASTED_MESSAGES_TOTAL.inc();
                        }
                    },
                    Err(RecvError::Lagged(skipped_msg)) => {
                        BROADCAST_DROPPED_MESSAGES_TOTAL.inc_by(skipped_msg);
                        missed_msgs += skipped_msg;
                        if (futures::poll!(Box::pin(warn_interval.tick()))).is_ready() {
                            warn!("subscription id={}, key={:?} addr={:?} dropped {} messages, channel is full",
                                subscriber.id, subscriber.key, subscriber.remote_addr, missed_msgs);
                            missed_msgs = 0;
                        }
                    }
                    Err(RecvError::Closed) => {
                        // can't happen, we never drop the channel while there is a subscriber
                        Err(Status::new(Code::Internal, "channel unexpectantly closed"))?;
                    }
                }
            }
        };
        Ok(Response::new(
            Box::pin(output) as Self::SubscribeByFilterStream
        ))
    }

    /// Publish one message.
    async fn publish_one(
        &self,
        request: Request<TypedMessage>,
    ) -> std::result::Result<Response<()>, Status> {
        let msg = Message::from(request.into_inner())?;
        PUBLISHED_ONEOFF_MESSAGES_TOTAL.inc();
        self.registry.send_msg(&msg)?;
        Ok(Response::new(()))
    }
}
// We serve only metrics and healthcheck through http1.
async fn http1_handler(
req: hyper::Request<Incoming>,
) -> Result<hyper::Response<BoxBody<Bytes, Infallible>>, Infallible> {
let resp = match (req.method(), req.uri().path()) {
(&Method::GET, "/metrics") => {
let mut buffer = vec![];
let metrics = metrics::gather();
let encoder = TextEncoder::new();
encoder.encode(&metrics, &mut buffer).unwrap();
hyper::Response::builder()
.status(StatusCode::OK)
.header(CONTENT_TYPE, encoder.format_type())
.body(BoxBody::new(Full::new(Bytes::from(buffer))))
.unwrap()
}
(&Method::GET, "/status") => hyper::Response::builder()
.status(StatusCode::OK)
.body(BoxBody::new(Empty::new()))
.unwrap(),
_ => hyper::Response::builder()
.status(StatusCode::NOT_FOUND)
.body(BoxBody::new(Empty::new()))
.unwrap(),
};
Ok(resp)
}
/// Peer address of an accepted connection, stashed in request extensions so
/// the RPC handlers can retrieve it for registration and logging.
#[derive(Clone, Copy)]
struct RemoteAddr(SocketAddr);
/// Broker entry point: set up logging/sentry, bind the configured HTTP and/or
/// HTTPS listeners, then accept connections forever, serving gRPC and the
/// http1 metrics/status endpoints on the same port.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let args = Args::parse();

    // important to keep the order of:
    // 1. init logging
    // 2. tracing panic hook
    // 3. sentry
    logging::init(
        LogFormat::from_config(&args.log_format)?,
        logging::TracingErrorLayerEnablement::Disabled,
        logging::Output::Stdout,
    )?;
    logging::replace_panic_hook_with_tracing_panic_hook().forget();
    // initialize sentry if SENTRY_DSN is provided
    let _sentry_guard = init_sentry(Some(GIT_VERSION.into()), &[]);
    info!("version: {GIT_VERSION} build_tag: {BUILD_TAG}");
    metrics::set_build_info_metric(GIT_VERSION, BUILD_TAG);

    // On any shutdown signal, log receival and exit.
    std::thread::spawn(move || {
        ShutdownSignals::handle(|signal| {
            info!("received {}, terminating", signal.name());
            std::process::exit(0);
        })
    });

    let registry = Registry {
        shared_state: Arc::new(RwLock::new(SharedState::new(args.all_keys_chan_size))),
        timeline_chan_size: args.timeline_chan_size,
    };
    let storage_broker_impl = Broker {
        registry: registry.clone(),
    };
    let storage_broker_server = BrokerServiceServer::new(storage_broker_impl);

    // Plain HTTP listener, if configured.
    let http_listener = match &args.listen_addr {
        Some(addr) => {
            info!("listening HTTP on {}", addr);
            Some(TcpListener::bind(addr).await?)
        }
        None => None,
    };

    // HTTPS listener with a certificate resolver that reloads key/cert from
    // disk periodically, if configured.
    let (https_listener, tls_acceptor) = match &args.listen_https_addr {
        Some(addr) => {
            let listener = TcpListener::bind(addr).await?;

            let cert_resolver = ReloadingCertificateResolver::new(
                "main",
                &args.ssl_key_file,
                &args.ssl_cert_file,
                args.ssl_cert_reload_period,
            )
            .await?;

            let mut tls_config = rustls::ServerConfig::builder()
                .with_no_client_auth()
                .with_cert_resolver(cert_resolver);

            // Tonic is HTTP/2 only and it negotiates it with ALPN.
            tls_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];

            let acceptor = tokio_rustls::TlsAcceptor::from(Arc::new(tls_config));

            info!("listening HTTPS on {}", addr);
            (Some(listener), Some(acceptor))
        }
        None => (None, None),
    };

    // grpc is served along with http1 for metrics on a single port, hence we
    // don't use tonic's Server.
    loop {
        // Accept from whichever listener is ready; an unconfigured listener
        // yields None and its select! branch is disabled by the pattern.
        let (conn, is_https) = tokio::select! {
            Some(conn) = OptionFuture::from(http_listener.as_ref().map(|l| l.accept())) => (conn, false),
            Some(conn) = OptionFuture::from(https_listener.as_ref().map(|l| l.accept())) => (conn, true),
        };

        let (tcp_stream, addr) = match conn {
            Ok(v) => v,
            Err(e) => {
                info!("couldn't accept connection: {e}");
                continue;
            }
        };

        let mut builder = hyper_util::server::conn::auto::Builder::new(TokioExecutor::new());
        builder.http1().timer(TokioTimer::new());
        builder
            .http2()
            .timer(TokioTimer::new())
            .keep_alive_interval(Some(args.http2_keepalive_interval))
            // This matches the tonic server default. It allows us to support production-like workloads.
            .max_concurrent_streams(None);

        let storage_broker_server_cloned = storage_broker_server.clone();
        let remote_addr = RemoteAddr(addr);
        // The async block exists only to move `remote_addr` and the server
        // clone into the service; it is awaited immediately.
        let service_fn_ = async move {
            service_fn(move |mut req| {
                // That's what tonic's MakeSvc.call does to pass conninfo to
                // the request handler (and where its request.remote_addr()
                // expects it to find).
                req.extensions_mut().insert(remote_addr);

                // Technically this second clone is not needed, but consume
                // by async block is apparently unavoidable. BTW, error
                // message is enigmatic, see
                // https://github.com/rust-lang/rust/issues/68119
                //
                // We could get away without async block at all, but then we
                // need to resort to futures::Either to merge the result,
                // which doesn't caress an eye as well.
                let mut storage_broker_server_svc = storage_broker_server_cloned.clone();
                async move {
                    if req.headers().get("content-type").map(|x| x.as_bytes())
                        == Some(b"application/grpc")
                    {
                        let res_resp = storage_broker_server_svc.call(req).await;
                        // Grpc and http1 handlers have slightly different
                        // Response types: it is UnsyncBoxBody for the
                        // former one (not sure why) and plain hyper::Body
                        // for the latter. Both implement HttpBody though,
                        // and `Either` is used to merge them.
                        res_resp.map(|resp| resp.map(http_body_util::Either::Left))
                    } else {
                        let res_resp = http1_handler(req).await;
                        res_resp.map(|resp| resp.map(http_body_util::Either::Right))
                    }
                }
            })
        }
        .await;

        let tls_acceptor = tls_acceptor.clone();

        // One task per connection; TLS handshake (if any) happens inside the
        // task so a slow handshake can't block the accept loop.
        tokio::task::spawn(async move {
            let res = if is_https {
                let tls_acceptor =
                    tls_acceptor.expect("tls_acceptor is set together with https_listener");

                let tls_stream = match tls_acceptor.accept(tcp_stream).await {
                    Ok(tls_stream) => tls_stream,
                    Err(e) => {
                        info!("error accepting TLS connection from {addr}: {e}");
                        return;
                    }
                };

                builder
                    .serve_connection(TokioIo::new(tls_stream), service_fn_)
                    .await
            } else {
                builder
                    .serve_connection(TokioIo::new(tcp_stream), service_fn_)
                    .await
            };

            if let Err(e) = res {
                info!(%is_https, "error serving connection from {addr}: {e}");
            }
        });
    }
}
#[cfg(test)]
mod tests {
use storage_broker::proto::TenantTimelineId as ProtoTenantTimelineId;
use tokio::sync::broadcast::error::TryRecvError;
use utils::id::{TenantId, TimelineId};
use super::*;
fn msg(timeline_id: Vec<u8>) -> Message {
Message::SafekeeperTimelineInfo(SafekeeperTimelineInfo {
safekeeper_id: 1,
tenant_timeline_id: Some(ProtoTenantTimelineId {
tenant_id: vec![0x00; 16],
timeline_id,
}),
term: 0,
last_log_term: 0,
flush_lsn: 1,
commit_lsn: 2,
backup_lsn: 3,
remote_consistent_lsn: 4,
peer_horizon_lsn: 5,
safekeeper_connstr: "neon-1-sk-1.local:7676".to_owned(),
http_connstr: "neon-1-sk-1.local:7677".to_owned(),
https_connstr: Some("neon-1-sk-1.local:7678".to_owned()),
local_start_lsn: 0,
availability_zone: None,
standby_horizon: 0,
})
}
fn tli_from_u64(i: u64) -> Vec<u8> {
let mut timeline_id = vec![0xFF; 8];
timeline_id.extend_from_slice(&i.to_be_bytes());
timeline_id
}
fn mock_addr() -> SocketAddr {
"127.0.0.1:8080".parse().unwrap()
}
#[tokio::test]
async fn test_registry() {
let registry = Registry {
shared_state: Arc::new(RwLock::new(SharedState::new(16))),
timeline_chan_size: 16,
};
// subscribe to timeline 2
let ttid_2 = TenantTimelineId {
tenant_id: TenantId::from_slice(&[0x00; 16]).unwrap(),
timeline_id: TimelineId::from_slice(&tli_from_u64(2)).unwrap(),
};
let sub_key_2 = SubscriptionKey::Timeline(ttid_2);
let mut subscriber_2 = registry.register_subscriber(sub_key_2, mock_addr());
let mut subscriber_all = registry.register_subscriber(SubscriptionKey::All, mock_addr());
// send two messages with different keys
let msg_1 = msg(tli_from_u64(1));
let msg_2 = msg(tli_from_u64(2));
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | true |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/storage_broker/benches/rps.rs | storage_broker/benches/rps.rs | use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, Instant};
use clap::Parser;
use storage_broker::proto::{
FilterTenantTimelineId, MessageType, SafekeeperTimelineInfo, SubscribeByFilterRequest,
TenantTimelineId as ProtoTenantTimelineId, TypeSubscription, TypedMessage,
};
use storage_broker::{BrokerClientChannel, DEFAULT_ENDPOINT};
use tokio::time;
use tonic::Request;
// Help text shown by clap for this benchmark binary.
// Typo fixes: "continiously" -> "continuously", "recevied" -> "received".
const ABOUT: &str = r#"
A simple benchmarking tool for storage_broker. Creates specified number of per
timeline publishers and subscribers; each publisher continuously sends
messages, subscribers read them. Each second the tool outputs number of
messages summed across all subscribers and min number of messages
received by single subscriber.
For example,
cargo build -r -p storage_broker && target/release/storage_broker
cargo bench --bench rps -- -s 1 -p 1
"#;
// Command-line arguments for the rps benchmark. NOTE: field-level `///`
// comments are clap help text, so only `//` comments are added here.
#[derive(Parser, Debug)]
#[clap(author, version, about = ABOUT)]
struct Args {
    /// Number of publishers
    #[clap(short = 'p', long, value_parser, default_value_t = 1)]
    num_pubs: u64,
    /// Number of subscribers
    #[clap(short = 's', long, value_parser, default_value_t = 1)]
    num_subs: u64,
    // Fake value to satisfy `cargo bench` passing it.
    #[clap(long)]
    bench: bool,
}
/// Once a second, print: per-second message rate summed over all counters,
/// per-second rate of the slowest subscriber, running totals, and the average
/// rate since the first message was observed. Never returns.
async fn progress_reporter(counters: Vec<Arc<AtomicU64>>) {
    let mut interval = time::interval(Duration::from_millis(1000));
    // Values seen at the previous tick, used to compute per-second deltas.
    let mut c_old = counters.iter().map(|c| c.load(Ordering::Relaxed)).sum();
    let mut c_min_old = counters
        .iter()
        .map(|c| c.load(Ordering::Relaxed))
        .min()
        .unwrap_or(0);
    // Set when the first message arrives; `skipped` excludes messages counted
    // before that point from the running average.
    let mut started_at = None;
    let mut skipped: u64 = 0;
    loop {
        interval.tick().await;
        let c_new = counters.iter().map(|c| c.load(Ordering::Relaxed)).sum();
        let c_min_new = counters
            .iter()
            .map(|c| c.load(Ordering::Relaxed))
            .min()
            .unwrap_or(0);
        if c_new > 0 && started_at.is_none() {
            started_at = Some(Instant::now());
            skipped = c_new;
        }
        // Average rate since `started_at`, or None before any message arrived.
        let avg_rps = started_at.map(|s| {
            let dur = s.elapsed();
            let dur_secs = dur.as_secs() as f64 + (dur.subsec_millis() as f64) / 1000.0;
            let avg_rps = (c_new - skipped) as f64 / dur_secs;
            (dur, avg_rps)
        });
        println!(
            "sum rps {}, min rps {} total {}, total min {}, duration, avg sum rps {:?}",
            c_new - c_old,
            c_min_new - c_min_old,
            c_new,
            c_min_new,
            avg_rps
        );
        c_old = c_new;
        c_min_old = c_min_new;
    }
}
/// Build a 16-byte timeline id: 8 bytes of 0xFF filler followed by `i`
/// encoded big-endian, so distinct `i` values map to distinct ids.
fn tli_from_u64(i: u64) -> Vec<u8> {
    // Exact final size is known (8 + 8), so allocate once.
    let mut timeline_id = Vec::with_capacity(16);
    timeline_id.extend_from_slice(&[0xFF; 8]);
    timeline_id.extend_from_slice(&i.to_be_bytes());
    timeline_id
}
/// Subscribe to timeline `i` and bump `counter` for every message received;
/// runs until the stream ends (or panics on a transport error).
///
/// Uses the provided `client` when given, otherwise opens its own connection
/// to the default endpoint.
async fn subscribe(client: Option<BrokerClientChannel>, counter: Arc<AtomicU64>, i: u64) {
    let mut client = match client {
        Some(c) => c,
        None => storage_broker::connect(
            DEFAULT_ENDPOINT,
            Duration::from_secs(5),
            storage_broker::ClientTlsConfig::new(),
        )
        .unwrap(),
    };

    let ttid = ProtoTenantTimelineId {
        tenant_id: vec![0xFF; 16],
        timeline_id: tli_from_u64(i),
    };
    // Ask only for SafekeeperTimelineInfo messages on this single timeline.
    let request = SubscribeByFilterRequest {
        types: vec![TypeSubscription {
            r#type: MessageType::SafekeeperTimelineInfo.into(),
        }],
        tenant_timeline_id: Some(FilterTenantTimelineId {
            enabled: true,
            tenant_timeline_id: Some(ttid),
        }),
    };

    let mut stream: tonic::Streaming<TypedMessage> = client
        .subscribe_by_filter(request)
        .await
        .unwrap()
        .into_inner();

    while let Some(_feature) = stream.message().await.unwrap() {
        counter.fetch_add(1, Ordering::Relaxed);
    }
}
/// Publish an endless stream of timeline info messages, cycling the timeline
/// id through `n_keys` distinct values (one message per counter increment).
///
/// Uses the provided `client` when given, otherwise opens its own connection
/// to the default endpoint.
async fn publish(client: Option<BrokerClientChannel>, n_keys: u64) {
    let mut client = match client {
        Some(c) => c,
        None => storage_broker::connect(
            DEFAULT_ENDPOINT,
            Duration::from_secs(5),
            storage_broker::ClientTlsConfig::new(),
        )
        .unwrap(),
    };
    let mut counter: u64 = 0;

    // create stream producing new values
    let outbound = async_stream::stream! {
        loop {
            // Only flush_lsn and the timeline id vary; everything else is a
            // fixed dummy payload.
            let info = SafekeeperTimelineInfo {
                safekeeper_id: 1,
                tenant_timeline_id: Some(ProtoTenantTimelineId {
                    tenant_id: vec![0xFF; 16],
                    timeline_id: tli_from_u64(counter % n_keys),
                }),
                term: 0,
                last_log_term: 0,
                flush_lsn: counter,
                commit_lsn: 2,
                backup_lsn: 3,
                remote_consistent_lsn: 4,
                peer_horizon_lsn: 5,
                safekeeper_connstr: "zenith-1-sk-1.local:7676".to_owned(),
                http_connstr: "zenith-1-sk-1.local:7677".to_owned(),
                https_connstr: Some("zenith-1-sk-1.local:7678".to_owned()),
                local_start_lsn: 0,
                availability_zone: None,
                standby_horizon: 0,
            };
            counter += 1;
            yield info;
        }
    };
    let response = client.publish_safekeeper_info(Request::new(outbound)).await;
    println!("pub response is {response:?}");
}
/// Benchmark entry point: spawn the reporter, the subscribers (sharing one
/// connection), and the publishers (each opening its own connection), then
/// block on the reporter to keep the process alive.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let args = Args::parse();

    // One message counter per subscriber, shared with the progress reporter.
    let mut counters = Vec::with_capacity(args.num_subs as usize);
    for _ in 0..args.num_subs {
        counters.push(Arc::new(AtomicU64::new(0)));
    }
    let h = tokio::spawn(progress_reporter(counters.clone()));

    // Subscribers share this one connection; publishers get None and connect
    // on their own.
    let c = storage_broker::connect(
        DEFAULT_ENDPOINT,
        Duration::from_secs(5),
        storage_broker::ClientTlsConfig::new(),
    )
    .unwrap();
    for i in 0..args.num_subs {
        let c = Some(c.clone());
        tokio::spawn(subscribe(c, counters[i as usize].clone(), i));
    }
    for _i in 0..args.num_pubs {
        let c = None;
        tokio::spawn(publish(c, args.num_subs));
    }
    // progress_reporter never returns; awaiting it keeps the benchmark running.
    h.await?;
    Ok(())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/tenant_size_model/src/lib.rs | libs/tenant_size_model/src/lib.rs | //! Synthetic size calculation
#![deny(unsafe_code)]
#![deny(clippy::undocumented_unsafe_blocks)]
mod calculation;
pub mod svg;
/// StorageModel is the input to the synthetic size calculation.
///
/// It represents a tree of timelines, with just the information that's needed
/// for the calculation. This doesn't track timeline names or where each timeline
/// begins and ends, for example. Instead, it consists of "points of interest"
/// on the timelines. A point of interest could be the timeline start or end point,
/// the oldest point on a timeline that needs to be retained because of PITR
/// cutoff, or snapshot points named by the user. For each such point, and the
/// edge connecting the points (implicit in Segment), we store information about
/// whether we need to be able to recover to the point, and if known, the logical
/// size at the point.
///
/// The segments must form a well-formed tree, with no loops.
#[derive(serde::Serialize)]
pub struct StorageModel {
    /// Points of interest and their incoming edges. Indices into this vector
    /// are used as parent links ([`Segment::parent`]), and the whole set must
    /// form a tree.
    pub segments: Vec<Segment>,
}
/// Segment represents one point in the tree of branches, *and* the edge that leads
/// to it (if any). We don't need separate structs for points and edges, because each
/// point can have only one parent.
///
/// When 'needed' is true, it means that we need to be able to reconstruct
/// any version between 'parent.lsn' and 'lsn'. If you want to represent that only
/// a single point is needed, create two Segments with the same lsn, and mark only
/// the child as needed.
///
#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct Segment {
    /// Previous segment index into [`StorageModel::segments`], if any.
    pub parent: Option<usize>,

    /// LSN at this point
    pub lsn: u64,

    /// Logical size at this node, if known.
    pub size: Option<u64>,

    /// If true, the segment from parent to this node is needed by `retention_period`
    pub needed: bool,
}
/// Result of synthetic size calculation. Returned by StorageModel::calculate()
pub struct SizeResult {
pub total_size: u64,
// This has same length as the StorageModel::segments vector in the input.
// Each entry in this array corresponds to the entry with same index in
// StorageModel::segments.
pub segments: Vec<SegmentSizeResult>,
}
#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct SegmentSizeResult {
pub method: SegmentMethod,
// calculated size of this subtree, using this method
pub accum_size: u64,
}
/// Different methods to retain history from a particular state
#[derive(Clone, Copy, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)]
pub enum SegmentMethod {
SnapshotHere, // A logical snapshot is needed after this segment
Wal, // Keep WAL leading up to this node
Skipped,
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/tenant_size_model/src/svg.rs | libs/tenant_size_model/src/svg.rs | use crate::{SegmentMethod, SegmentSizeResult, SizeResult, StorageModel};
use std::fmt::Write;
const SVG_WIDTH: f32 = 500.0;
/// Different branch kind for SVG drawing.
#[derive(PartialEq)]
pub enum SvgBranchKind {
Timeline,
Lease,
}
struct SvgDraw<'a> {
storage: &'a StorageModel,
branches: &'a [String],
seg_to_branch: &'a [(usize, SvgBranchKind)],
sizes: &'a [SegmentSizeResult],
// layout
xscale: f32,
min_lsn: u64,
seg_coordinates: Vec<(f32, f32)>,
}
fn draw_legend(result: &mut String) -> anyhow::Result<()> {
writeln!(
result,
"<circle cx=\"10\" cy=\"10\" r=\"5\" stroke=\"red\"/>"
)?;
writeln!(result, "<text x=\"20\" y=\"15\">logical snapshot</text>")?;
writeln!(
result,
"<line x1=\"5\" y1=\"30\" x2=\"15\" y2=\"30\" stroke-width=\"6\" stroke=\"black\" />"
)?;
writeln!(
result,
"<text x=\"20\" y=\"35\">WAL within retention period</text>"
)?;
writeln!(
result,
"<line x1=\"5\" y1=\"50\" x2=\"15\" y2=\"50\" stroke-width=\"3\" stroke=\"black\" />"
)?;
writeln!(
result,
"<text x=\"20\" y=\"55\">WAL retained to avoid copy</text>"
)?;
writeln!(
result,
"<line x1=\"5\" y1=\"70\" x2=\"15\" y2=\"70\" stroke-width=\"1\" stroke=\"gray\" />"
)?;
writeln!(result, "<text x=\"20\" y=\"75\">WAL not retained</text>")?;
writeln!(
result,
"<line x1=\"10\" y1=\"85\" x2=\"10\" y2=\"95\" stroke-width=\"3\" stroke=\"blue\" />"
)?;
writeln!(result, "<text x=\"20\" y=\"95\">LSN lease</text>")?;
Ok(())
}
pub fn draw_svg(
storage: &StorageModel,
branches: &[String],
seg_to_branch: &[(usize, SvgBranchKind)],
sizes: &SizeResult,
) -> anyhow::Result<String> {
let mut draw = SvgDraw {
storage,
branches,
seg_to_branch,
sizes: &sizes.segments,
xscale: 0.0,
min_lsn: 0,
seg_coordinates: Vec::new(),
};
let mut result = String::new();
writeln!(
result,
"<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" height=\"300\" width=\"500\">"
)?;
draw.calculate_svg_layout();
// Draw the tree
for (seg_id, _seg) in storage.segments.iter().enumerate() {
draw.draw_seg_phase1(seg_id, &mut result)?;
}
// Draw snapshots
for (seg_id, _seg) in storage.segments.iter().enumerate() {
draw.draw_seg_phase2(seg_id, &mut result)?;
}
draw_legend(&mut result)?;
write!(result, "</svg>")?;
Ok(result)
}
impl SvgDraw<'_> {
fn calculate_svg_layout(&mut self) {
// Find x scale
let segments = &self.storage.segments;
let min_lsn = segments.iter().map(|s| s.lsn).fold(u64::MAX, std::cmp::min);
let max_lsn = segments.iter().map(|s| s.lsn).fold(0, std::cmp::max);
// Start with 1 pixel = 1 byte. Double the scale until it fits into the image
let mut xscale = 1.0;
while (max_lsn - min_lsn) as f32 / xscale > SVG_WIDTH {
xscale *= 2.0;
}
// Layout the timelines on Y dimension.
// TODO
let mut y = 120.0;
let mut branch_y_coordinates = Vec::new();
for _branch in self.branches {
branch_y_coordinates.push(y);
y += 40.0;
}
// Calculate coordinates for each point
let seg_coordinates = std::iter::zip(segments, self.seg_to_branch)
.map(|(seg, (branch_id, _))| {
let x = (seg.lsn - min_lsn) as f32 / xscale;
let y = branch_y_coordinates[*branch_id];
(x, y)
})
.collect();
self.xscale = xscale;
self.min_lsn = min_lsn;
self.seg_coordinates = seg_coordinates;
}
/// Draws lines between points
fn draw_seg_phase1(&self, seg_id: usize, result: &mut String) -> anyhow::Result<()> {
let seg = &self.storage.segments[seg_id];
let wal_bytes = if let Some(parent_id) = seg.parent {
seg.lsn - self.storage.segments[parent_id].lsn
} else {
0
};
let style = match self.sizes[seg_id].method {
SegmentMethod::SnapshotHere => "stroke-width=\"1\" stroke=\"gray\"",
SegmentMethod::Wal if seg.needed && wal_bytes > 0 => {
"stroke-width=\"6\" stroke=\"black\""
}
SegmentMethod::Wal => "stroke-width=\"3\" stroke=\"black\"",
SegmentMethod::Skipped => "stroke-width=\"1\" stroke=\"gray\"",
};
if let Some(parent_id) = seg.parent {
let (x1, y1) = self.seg_coordinates[parent_id];
let (x2, y2) = self.seg_coordinates[seg_id];
writeln!(
result,
"<line x1=\"{x1}\" y1=\"{y1}\" x2=\"{x2}\" y2=\"{y2}\" {style}>",
)?;
writeln!(
result,
" <title>{wal_bytes} bytes of WAL (seg {seg_id})</title>"
)?;
writeln!(result, "</line>")?;
} else {
// draw a little dash to mark the starting point of this branch
let (x, y) = self.seg_coordinates[seg_id];
let (x1, y1) = (x, y - 5.0);
let (x2, y2) = (x, y + 5.0);
writeln!(
result,
"<line x1=\"{x1}\" y1=\"{y1}\" x2=\"{x2}\" y2=\"{y2}\" {style}>",
)?;
writeln!(result, " <title>(seg {seg_id})</title>")?;
writeln!(result, "</line>")?;
}
Ok(())
}
/// Draw circles where snapshots are taken
fn draw_seg_phase2(&self, seg_id: usize, result: &mut String) -> anyhow::Result<()> {
let seg = &self.storage.segments[seg_id];
// draw a snapshot point if it's needed
let (coord_x, coord_y) = self.seg_coordinates[seg_id];
let (_, kind) = &self.seg_to_branch[seg_id];
if kind == &SvgBranchKind::Lease {
let (x1, y1) = (coord_x, coord_y - 10.0);
let (x2, y2) = (coord_x, coord_y + 10.0);
let style = "stroke-width=\"3\" stroke=\"blue\"";
writeln!(
result,
"<line x1=\"{x1}\" y1=\"{y1}\" x2=\"{x2}\" y2=\"{y2}\" {style}>",
)?;
writeln!(result, " <title>leased lsn at {}</title>", seg.lsn)?;
writeln!(result, "</line>")?;
}
if self.sizes[seg_id].method == SegmentMethod::SnapshotHere {
writeln!(
result,
"<circle cx=\"{coord_x}\" cy=\"{coord_y}\" r=\"5\" stroke=\"red\">",
)?;
writeln!(
result,
" <title>logical size {}</title>",
seg.size.unwrap()
)?;
write!(result, "</circle>")?;
}
Ok(())
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/tenant_size_model/src/calculation.rs | libs/tenant_size_model/src/calculation.rs | use crate::{SegmentMethod, SegmentSizeResult, SizeResult, StorageModel};
//
// *-g--*---D--->
// /
// /
// / *---b----*-B--->
// / /
// / /
// -----*--e---*-----f----* C
// E \
// \
// *--a---*---A-->
//
// If A and B need to be retained, is it cheaper to store
// snapshot at C+a+b, or snapshots at A and B ?
//
// If D also needs to be retained, which is cheaper:
//
// 1. E+g+e+f+a+b
// 2. D+C+a+b
// 3. D+A+B
/// `Segment` which has had its size calculated.
#[derive(Clone, Debug)]
struct SegmentSize {
method: SegmentMethod,
// calculated size of this subtree, using this method
accum_size: u64,
seg_id: usize,
children: Vec<SegmentSize>,
}
struct SizeAlternatives {
/// cheapest alternative if parent is available.
incremental: SegmentSize,
/// cheapest alternative if parent node is not available
non_incremental: Option<SegmentSize>,
}
impl StorageModel {
pub fn calculate(&self) -> SizeResult {
// Build adjacency list. 'child_list' is indexed by segment id. Each entry
// contains a list of all child segments of the segment.
let mut roots: Vec<usize> = Vec::new();
let mut child_list: Vec<Vec<usize>> = Vec::new();
child_list.resize(self.segments.len(), Vec::new());
for (seg_id, seg) in self.segments.iter().enumerate() {
if let Some(parent_id) = seg.parent {
child_list[parent_id].push(seg_id);
} else {
roots.push(seg_id);
}
}
let mut segment_results = Vec::new();
segment_results.resize(
self.segments.len(),
SegmentSizeResult {
method: SegmentMethod::Skipped,
accum_size: 0,
},
);
let mut total_size = 0;
for root in roots {
if let Some(selected) = self.size_here(root, &child_list).non_incremental {
StorageModel::fill_selected_sizes(&selected, &mut segment_results);
total_size += selected.accum_size;
} else {
// Couldn't find any way to get this root. Error?
}
}
SizeResult {
// If total_size is 0, it means that the tenant has all timelines offloaded; we need to report 1
// here so that the data point shows up in the s3 files.
total_size: total_size.max(1),
segments: segment_results,
}
}
fn fill_selected_sizes(selected: &SegmentSize, result: &mut Vec<SegmentSizeResult>) {
result[selected.seg_id] = SegmentSizeResult {
method: selected.method,
accum_size: selected.accum_size,
};
// recurse to children
for child in selected.children.iter() {
StorageModel::fill_selected_sizes(child, result);
}
}
//
// This is the core of the sizing calculation.
//
// This is a recursive function, that for each Segment calculates the best way
// to reach all the Segments that are marked as needed in this subtree, under two
// different conditions:
// a) when the parent of this segment is available (as a snaphot or through WAL), and
// b) when the parent of this segment is not available.
//
fn size_here(&self, seg_id: usize, child_list: &Vec<Vec<usize>>) -> SizeAlternatives {
let seg = &self.segments[seg_id];
// First figure out the best way to get each child
let mut children = Vec::new();
for child_id in &child_list[seg_id] {
children.push(self.size_here(*child_id, child_list))
}
// Method 1. If this node is not needed, we can skip it as long as we
// take snapshots later in each sub-tree
let snapshot_later = if !seg.needed {
let mut snapshot_later = SegmentSize {
seg_id,
method: SegmentMethod::Skipped,
accum_size: 0,
children: Vec::new(),
};
let mut possible = true;
for child in children.iter() {
if let Some(non_incremental) = &child.non_incremental {
snapshot_later.accum_size += non_incremental.accum_size;
snapshot_later.children.push(non_incremental.clone())
} else {
possible = false;
break;
}
}
if possible { Some(snapshot_later) } else { None }
} else {
None
};
// Method 2. Get a snapshot here. This assumed to be possible, if the 'size' of
// this Segment was given.
let snapshot_here = if !seg.needed || seg.parent.is_none() {
if let Some(snapshot_size) = seg.size {
let mut snapshot_here = SegmentSize {
seg_id,
method: SegmentMethod::SnapshotHere,
accum_size: snapshot_size,
children: Vec::new(),
};
for child in children.iter() {
snapshot_here.accum_size += child.incremental.accum_size;
snapshot_here.children.push(child.incremental.clone())
}
Some(snapshot_here)
} else {
None
}
} else {
None
};
// Method 3. Use WAL to get here from parent
let wal_here = {
let mut wal_here = SegmentSize {
seg_id,
method: SegmentMethod::Wal,
accum_size: if let Some(parent_id) = seg.parent {
seg.lsn - self.segments[parent_id].lsn
} else {
0
},
children: Vec::new(),
};
for child in children {
wal_here.accum_size += child.incremental.accum_size;
wal_here.children.push(child.incremental)
}
wal_here
};
// If the parent is not available, what's the cheapest method involving
// a snapshot here or later?
let mut cheapest_non_incremental: Option<SegmentSize> = None;
if let Some(snapshot_here) = snapshot_here {
cheapest_non_incremental = Some(snapshot_here);
}
if let Some(snapshot_later) = snapshot_later {
// Use <=, to prefer skipping if the size is equal
if let Some(parent) = &cheapest_non_incremental {
if snapshot_later.accum_size <= parent.accum_size {
cheapest_non_incremental = Some(snapshot_later);
}
} else {
cheapest_non_incremental = Some(snapshot_later);
}
}
// And what's the cheapest method, if the parent is available?
let cheapest_incremental = if let Some(cheapest_non_incremental) = &cheapest_non_incremental
{
// Is it cheaper to use a snapshot here or later, anyway?
// Use <, to prefer Wal over snapshot if the cost is the same
if wal_here.accum_size < cheapest_non_incremental.accum_size {
wal_here
} else {
cheapest_non_incremental.clone()
}
} else {
wal_here
};
SizeAlternatives {
incremental: cheapest_incremental,
non_incremental: cheapest_non_incremental,
}
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/tenant_size_model/tests/tests.rs | libs/tenant_size_model/tests/tests.rs | //! Tenant size model tests.
use tenant_size_model::{Segment, SizeResult, StorageModel};
use std::collections::HashMap;
struct ScenarioBuilder {
segments: Vec<Segment>,
/// Mapping from the branch name to the index of a segment describing its latest state.
branches: HashMap<String, usize>,
}
impl ScenarioBuilder {
/// Creates a new storage with the given default branch name.
pub fn new(initial_branch: &str) -> ScenarioBuilder {
let init_segment = Segment {
parent: None,
lsn: 0,
size: Some(0),
needed: false, // determined later
};
ScenarioBuilder {
segments: vec![init_segment],
branches: HashMap::from([(initial_branch.into(), 0)]),
}
}
/// Advances the branch with the named operation, by the relative LSN and logical size bytes.
pub fn modify_branch(&mut self, branch: &str, lsn_bytes: u64, size_bytes: i64) {
let lastseg_id = *self.branches.get(branch).unwrap();
let newseg_id = self.segments.len();
let lastseg = &mut self.segments[lastseg_id];
let newseg = Segment {
parent: Some(lastseg_id),
lsn: lastseg.lsn + lsn_bytes,
size: Some((lastseg.size.unwrap() as i64 + size_bytes) as u64),
needed: false,
};
self.segments.push(newseg);
*self.branches.get_mut(branch).expect("read already") = newseg_id;
}
pub fn insert(&mut self, branch: &str, bytes: u64) {
self.modify_branch(branch, bytes, bytes as i64);
}
pub fn update(&mut self, branch: &str, bytes: u64) {
self.modify_branch(branch, bytes, 0i64);
}
pub fn _delete(&mut self, branch: &str, bytes: u64) {
self.modify_branch(branch, bytes, -(bytes as i64));
}
/// Panics if the parent branch cannot be found.
pub fn branch(&mut self, parent: &str, name: &str) {
// Find the right segment
let branchseg_id = *self
.branches
.get(parent)
.expect("should had found the parent by key");
let _branchseg = &mut self.segments[branchseg_id];
// Create branch name for it
self.branches.insert(name.to_string(), branchseg_id);
}
pub fn calculate(&mut self, retention_period: u64) -> (StorageModel, SizeResult) {
// Phase 1: Mark all the segments that need to be retained
for (_branch, &last_seg_id) in self.branches.iter() {
let last_seg = &self.segments[last_seg_id];
let cutoff_lsn = last_seg.lsn.saturating_sub(retention_period);
let mut seg_id = last_seg_id;
loop {
let seg = &mut self.segments[seg_id];
if seg.lsn <= cutoff_lsn {
break;
}
seg.needed = true;
if let Some(prev_seg_id) = seg.parent {
seg_id = prev_seg_id;
} else {
break;
}
}
}
// Perform the calculation
let storage_model = StorageModel {
segments: self.segments.clone(),
};
let size_result = storage_model.calculate();
(storage_model, size_result)
}
}
// Main branch only. Some updates on it.
#[test]
fn scenario_1() {
// Create main branch
let mut scenario = ScenarioBuilder::new("main");
// Bulk load 5 GB of data to it
scenario.insert("main", 5_000);
// Stream of updates
for _ in 0..5 {
scenario.update("main", 1_000);
}
// Calculate the synthetic size with retention horizon 1000
let (_model, result) = scenario.calculate(1000);
// The end of the branch is at LSN 10000. Need to retain
// a logical snapshot at LSN 9000, plus the WAL between 9000-10000.
// The logical snapshot has size 5000.
assert_eq!(result.total_size, 5000 + 1000);
}
// Main branch only. Some updates on it.
#[test]
fn scenario_2() {
// Create main branch
let mut scenario = ScenarioBuilder::new("main");
// Bulk load 5 GB of data to it
scenario.insert("main", 5_000);
// Stream of updates
for _ in 0..5 {
scenario.update("main", 1_000);
}
// Branch
scenario.branch("main", "child");
scenario.update("child", 1_000);
// More updates on parent
scenario.update("main", 1_000);
//
// The history looks like this now:
//
// 10000 11000
// *----*----*--------------* main
// |
// | 11000
// +-------------- child
//
//
// With retention horizon 1000, we need to retain logical snapshot
// at the branch point, size 5000, and the WAL from 10000-11000 on
// both branches.
let (_model, result) = scenario.calculate(1000);
assert_eq!(result.total_size, 5000 + 1000 + 1000);
}
// Like 2, but more updates on main
#[test]
fn scenario_3() {
// Create main branch
let mut scenario = ScenarioBuilder::new("main");
// Bulk load 5 GB of data to it
scenario.insert("main", 5_000);
// Stream of updates
for _ in 0..5 {
scenario.update("main", 1_000);
}
// Branch
scenario.branch("main", "child");
scenario.update("child", 1_000);
// More updates on parent
for _ in 0..5 {
scenario.update("main", 1_000);
}
//
// The history looks like this now:
//
// 10000 15000
// *----*----*------------------------------------* main
// |
// | 11000
// +-------------- child
//
//
// With retention horizon 1000, it's still cheapest to retain
// - snapshot at branch point (size 5000)
// - WAL on child between 10000-11000
// - WAL on main between 10000-15000
//
// This is in total 5000 + 1000 + 5000
//
let (_model, result) = scenario.calculate(1000);
assert_eq!(result.total_size, 5000 + 1000 + 5000);
}
// Diverged branches
#[test]
fn scenario_4() {
// Create main branch
let mut scenario = ScenarioBuilder::new("main");
// Bulk load 5 GB of data to it
scenario.insert("main", 5_000);
// Stream of updates
for _ in 0..5 {
scenario.update("main", 1_000);
}
// Branch
scenario.branch("main", "child");
scenario.update("child", 1_000);
// More updates on parent
for _ in 0..8 {
scenario.update("main", 1_000);
}
//
// The history looks like this now:
//
// 10000 18000
// *----*----*------------------------------------* main
// |
// | 11000
// +-------------- child
//
//
// With retention horizon 1000, it's now cheapest to retain
// separate snapshots on both branches:
// - snapshot on main branch at LSN 17000 (size 5000)
// - WAL on main between 17000-18000
// - snapshot on child branch at LSN 10000 (size 5000)
// - WAL on child between 10000-11000
//
// This is in total 5000 + 1000 + 5000 + 1000 = 12000
//
// (If we used the method from the previous scenario, and
// kept only snapshot at the branch point, we'd need to keep
// all the WAL between 10000-18000 on the main branch, so
// the total size would be 5000 + 1000 + 8000 = 14000. The
// calculation always picks the cheapest alternative)
let (_model, result) = scenario.calculate(1000);
assert_eq!(result.total_size, 5000 + 1000 + 5000 + 1000);
}
#[test]
fn scenario_5() {
let mut scenario = ScenarioBuilder::new("a");
scenario.insert("a", 5000);
scenario.branch("a", "b");
scenario.update("b", 4000);
scenario.update("a", 2000);
scenario.branch("a", "c");
scenario.insert("c", 4000);
scenario.insert("a", 2000);
let (_model, result) = scenario.calculate(1000);
assert_eq!(result.total_size, 17000);
}
#[test]
fn scenario_6() {
let branches = [
"7ff1edab8182025f15ae33482edb590a",
"b1719e044db05401a05a2ed588a3ad3f",
"0xb68d6691c895ad0a70809470020929ef",
];
// compared to other scenarios, this one uses bytes instead of kB
let mut scenario = ScenarioBuilder::new("");
scenario.branch("", branches[0]); // at 0
scenario.modify_branch(branches[0], 108951064, 43696128); // at 108951064
scenario.branch(branches[0], branches[1]); // at 108951064
scenario.modify_branch(branches[1], 15560408, -1851392); // at 124511472
scenario.modify_branch(branches[0], 174464360, -1531904); // at 283415424
scenario.branch(branches[0], branches[2]); // at 283415424
scenario.modify_branch(branches[2], 15906192, 8192); // at 299321616
scenario.modify_branch(branches[0], 18909976, 32768); // at 302325400
let (model, result) = scenario.calculate(100_000);
// FIXME: We previously calculated 333_792_000. But with this PR, we get
// a much lower number. At a quick look at the model output and the
// calculations here, the new result seems correct to me.
eprintln!(
" MODEL: {}",
serde_json::to_string(&model.segments).unwrap()
);
eprintln!(
"RESULT: {}",
serde_json::to_string(&result.segments).unwrap()
);
assert_eq!(result.total_size, 136_236_928);
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/walproposer/build.rs | libs/walproposer/build.rs | //! Links with walproposer, pgcommon, pgport and runs bindgen on walproposer.h
//! to generate Rust bindings for it.
use std::env;
use std::path::PathBuf;
use std::process::Command;
use anyhow::{Context, anyhow};
const WALPROPOSER_PG_VERSION: &str = "v17";
fn main() -> anyhow::Result<()> {
// Tell cargo to invalidate the built crate whenever the wrapper changes
println!("cargo:rerun-if-changed=bindgen_deps.h");
let root_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../..");
// Finding the location of built libraries and Postgres C headers:
// - if POSTGRES_INSTALL_DIR is set look into it, otherwise look into `<project_root>/pg_install`
// - if there's a `bin/pg_config` file use it for getting include server, otherwise use `<project_root>/pg_install/{PG_MAJORVERSION}/include/postgresql/server`
let pg_install_dir = if let Some(postgres_install_dir) = env::var_os("POSTGRES_INSTALL_DIR") {
postgres_install_dir.into()
} else {
root_path.join("pg_install")
};
let pg_install_abs = std::fs::canonicalize(pg_install_dir)?;
let walproposer_lib_dir = root_path.join("build/walproposer-lib");
let walproposer_lib_search_str = walproposer_lib_dir
.to_str()
.ok_or(anyhow!("Bad non-UTF path"))?;
let pgxn_neon = root_path.join("pgxn/neon");
let pgxn_neon = std::fs::canonicalize(pgxn_neon)?;
let pgxn_neon = pgxn_neon.to_str().ok_or(anyhow!("Bad non-UTF path"))?;
println!("cargo:rustc-link-lib=static=walproposer");
println!("cargo:rustc-link-lib=static=pgport");
println!("cargo:rustc-link-lib=static=pgcommon");
println!("cargo:rustc-link-search={walproposer_lib_search_str}");
// Rebuild crate when libwalproposer.a changes
println!("cargo:rerun-if-changed={walproposer_lib_search_str}/libwalproposer.a");
let pg_config_bin = pg_install_abs
.join(WALPROPOSER_PG_VERSION)
.join("bin")
.join("pg_config");
let inc_server_path: String = if pg_config_bin.exists() {
let output = Command::new(pg_config_bin)
.arg("--includedir-server")
.output()
.context("failed to execute `pg_config --includedir-server`")?;
if !output.status.success() {
panic!("`pg_config --includedir-server` failed")
}
String::from_utf8(output.stdout)
.context("pg_config output is not UTF-8")?
.trim_end()
.into()
} else {
let server_path = pg_install_abs
.join(WALPROPOSER_PG_VERSION)
.join("include")
.join("postgresql")
.join("server")
.into_os_string();
server_path
.into_string()
.map_err(|s| anyhow!("Bad postgres server path {s:?}"))?
};
let unwind_abi_functions = [
"log_internal",
"recovery_download",
"start_streaming",
"finish_sync_safekeepers",
"wait_event_set",
"WalProposerStart",
];
// The bindgen::Builder is the main entry point
// to bindgen, and lets you build up options for
// the resulting bindings.
let mut builder = bindgen::Builder::default()
// The input header we would like to generate
// bindings for.
.header("bindgen_deps.h")
// Tell cargo to invalidate the built crate whenever any of the
// included header files changed.
.parse_callbacks(Box::new(bindgen::CargoCallbacks::new()))
.allowlist_type("WalProposer")
.allowlist_type("WalProposerConfig")
.allowlist_type("walproposer_api")
.allowlist_function("WalProposerCreate")
.allowlist_function("WalProposerStart")
.allowlist_function("WalProposerBroadcast")
.allowlist_function("WalProposerPoll")
.allowlist_function("WalProposerFree")
.allowlist_function("SafekeeperStateDesiredEvents")
.allowlist_var("DEBUG5")
.allowlist_var("DEBUG4")
.allowlist_var("DEBUG3")
.allowlist_var("DEBUG2")
.allowlist_var("DEBUG1")
.allowlist_var("LOG")
.allowlist_var("INFO")
.allowlist_var("NOTICE")
.allowlist_var("WARNING")
.allowlist_var("ERROR")
.allowlist_var("FATAL")
.allowlist_var("PANIC")
.allowlist_var("PG_VERSION_NUM")
.allowlist_var("WPEVENT")
.allowlist_var("WL_LATCH_SET")
.allowlist_var("WL_SOCKET_READABLE")
.allowlist_var("WL_SOCKET_WRITEABLE")
.allowlist_var("WL_TIMEOUT")
.allowlist_var("WL_SOCKET_CLOSED")
.allowlist_var("WL_SOCKET_MASK")
.clang_arg("-DWALPROPOSER_LIB")
.clang_arg(format!("-I{pgxn_neon}"))
.clang_arg(format!("-I{inc_server_path}"));
for name in unwind_abi_functions {
builder = builder.override_abi(bindgen::Abi::CUnwind, name);
}
let bindings = builder
// Finish the builder and generate the bindings.
.generate()
// Unwrap the Result and panic on failure.
.expect("Unable to generate bindings");
// Write the bindings to the $OUT_DIR/bindings.rs file.
let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()).join("bindings.rs");
bindings
.write_to_file(out_path)
.expect("Couldn't write bindings!");
Ok(())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/walproposer/src/lib.rs | libs/walproposer/src/lib.rs | pub mod bindings {
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
// bindgen creates some unsafe code with no doc comments.
#![allow(clippy::missing_safety_doc)]
// noted at 1.63 that in many cases there's a u32 -> u32 transmutes in bindgen code.
#![allow(clippy::useless_transmute)]
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
}
pub mod api_bindings;
pub mod walproposer;
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/walproposer/src/walproposer.rs | libs/walproposer/src/walproposer.rs | #![allow(clippy::todo)]
use std::ffi::CString;
use std::str::FromStr;
use postgres_ffi::WAL_SEGMENT_SIZE;
use utils::id::TenantTimelineId;
use utils::lsn::Lsn;
use crate::api_bindings::{Level, create_api, take_vec_u8};
use crate::bindings::{
NeonWALReadResult, Safekeeper, WalProposer, WalProposerBroadcast, WalProposerConfig,
WalProposerCreate, WalProposerFree, WalProposerPoll, WalProposerStart,
};
/// Rust high-level wrapper for C walproposer API. Many methods are not required
/// for simple cases, hence todo!() in default implementations.
///
/// Refer to `pgxn/neon/walproposer.h` for documentation.
pub trait ApiImpl {
fn get_shmem_state(&self) -> *mut crate::bindings::WalproposerShmemState {
todo!()
}
fn start_streaming(&self, _startpos: u64, _callback: &StreamingCallback) {
todo!()
}
fn get_flush_rec_ptr(&self) -> u64 {
todo!()
}
fn update_donor(&self, _donor: &mut Safekeeper, _donor_lsn: u64) {
todo!()
}
fn get_current_timestamp(&self) -> i64 {
todo!()
}
fn conn_error_message(&self, _sk: &mut Safekeeper) -> String {
todo!()
}
fn conn_status(&self, _sk: &mut Safekeeper) -> crate::bindings::WalProposerConnStatusType {
todo!()
}
fn conn_connect_start(&self, _sk: &mut Safekeeper) {
todo!()
}
fn conn_connect_poll(
&self,
_sk: &mut Safekeeper,
) -> crate::bindings::WalProposerConnectPollStatusType {
todo!()
}
fn conn_send_query(&self, _sk: &mut Safekeeper, _query: &str) -> bool {
todo!()
}
fn conn_get_query_result(
&self,
_sk: &mut Safekeeper,
) -> crate::bindings::WalProposerExecStatusType {
todo!()
}
fn conn_flush(&self, _sk: &mut Safekeeper) -> i32 {
todo!()
}
fn conn_finish(&self, _sk: &mut Safekeeper) {
todo!()
}
fn conn_async_read(
&self,
_sk: &mut Safekeeper,
_vec: &mut Vec<u8>,
) -> crate::bindings::PGAsyncReadResult {
todo!()
}
fn conn_async_write(
&self,
_sk: &mut Safekeeper,
_buf: &[u8],
) -> crate::bindings::PGAsyncWriteResult {
todo!()
}
fn conn_blocking_write(&self, _sk: &mut Safekeeper, _buf: &[u8]) -> bool {
todo!()
}
fn recovery_download(&self, _wp: &mut WalProposer, _sk: &mut Safekeeper) -> bool {
todo!()
}
fn wal_reader_allocate(&self, _sk: &mut Safekeeper) -> NeonWALReadResult {
todo!()
}
fn wal_read(&self, _sk: &mut Safekeeper, _buf: &mut [u8], _startpos: u64) -> NeonWALReadResult {
todo!()
}
fn wal_reader_events(&self, _sk: &mut Safekeeper) -> u32 {
todo!()
}
fn init_event_set(&self, _wp: &mut WalProposer) {
todo!()
}
fn update_event_set(&self, _sk: &mut Safekeeper, _events_mask: u32) {
todo!()
}
fn active_state_update_event_set(&self, _sk: &mut Safekeeper) {
todo!()
}
fn add_safekeeper_event_set(&self, _sk: &mut Safekeeper, _events_mask: u32) {
todo!()
}
fn rm_safekeeper_event_set(&self, _sk: &mut Safekeeper) {
todo!()
}
fn wait_event_set(&self, _wp: &mut WalProposer, _timeout_millis: i64) -> WaitResult {
todo!()
}
fn strong_random(&self, _buf: &mut [u8]) -> bool {
todo!()
}
fn get_redo_start_lsn(&self) -> u64 {
todo!()
}
fn finish_sync_safekeepers(&self, _lsn: u64) -> ! {
todo!()
}
fn process_safekeeper_feedback(&mut self, _wp: &mut WalProposer, _sk: &mut Safekeeper) {
todo!()
}
fn log_internal(&self, _wp: &mut WalProposer, _level: Level, _msg: &str) {
todo!()
}
fn after_election(&self, _wp: &mut WalProposer) {
todo!()
}
/* BEGIN_HADRON */
fn reset_safekeeper_statuses_for_metrics(&self, _wp: &mut WalProposer, _num_safekeepers: u32) {
// Do nothing for testing purposes.
}
fn update_safekeeper_status_for_metrics(
&self,
_wp: &mut WalProposer,
_sk_index: u32,
_status: u8,
) {
// Do nothing for testing purposes.
}
/* END_HADRON */
}
#[derive(Debug)]
pub enum WaitResult {
Latch,
Timeout,
Network(*mut Safekeeper, u32),
}
#[derive(Clone)]
pub struct Config {
/// Tenant and timeline id
pub ttid: TenantTimelineId,
/// List of safekeepers in format `host:port`
pub safekeepers_list: Vec<String>,
/// libpq connection info options
pub safekeeper_conninfo_options: String,
/// Safekeeper reconnect timeout in milliseconds
pub safekeeper_reconnect_timeout: i32,
/// Safekeeper connection timeout in milliseconds
pub safekeeper_connection_timeout: i32,
/// walproposer mode, finish when all safekeepers are synced or subscribe
/// to WAL streaming
pub sync_safekeepers: bool,
}
/// WalProposer main struct. C methods are reexported as Rust functions.
pub struct Wrapper {
wp: *mut WalProposer,
_safekeepers_list_vec: Vec<u8>,
}
impl Wrapper {
pub fn new(api: Box<dyn ApiImpl>, config: Config) -> Wrapper {
let neon_tenant = CString::new(config.ttid.tenant_id.to_string())
.unwrap()
.into_raw();
let neon_timeline = CString::new(config.ttid.timeline_id.to_string())
.unwrap()
.into_raw();
let mut safekeepers_list_vec = CString::new(config.safekeepers_list.join(","))
.unwrap()
.into_bytes_with_nul();
assert!(safekeepers_list_vec.len() == safekeepers_list_vec.capacity());
let safekeepers_list = safekeepers_list_vec.as_mut_ptr() as *mut std::ffi::c_char;
let safekeeper_conninfo_options = CString::from_str(&config.safekeeper_conninfo_options)
.unwrap()
.into_raw();
let callback_data = Box::into_raw(Box::new(api)) as *mut ::std::os::raw::c_void;
let c_config = WalProposerConfig {
neon_tenant,
neon_timeline,
safekeepers_list,
safekeeper_conninfo_options,
safekeeper_reconnect_timeout: config.safekeeper_reconnect_timeout,
safekeeper_connection_timeout: config.safekeeper_connection_timeout,
wal_segment_size: WAL_SEGMENT_SIZE as i32, // default 16MB
syncSafekeepers: config.sync_safekeepers,
systemId: 0,
pgTimeline: 1,
proto_version: 3,
callback_data,
};
let c_config = Box::into_raw(Box::new(c_config));
let api = create_api();
let wp = unsafe { WalProposerCreate(c_config, api) };
Wrapper {
wp,
_safekeepers_list_vec: safekeepers_list_vec,
}
}
pub fn start(&self) {
unsafe { WalProposerStart(self.wp) }
}
}
impl Drop for Wrapper {
fn drop(&mut self) {
unsafe {
let config = (*self.wp).config;
drop(Box::from_raw(
(*config).callback_data as *mut Box<dyn ApiImpl>,
));
drop(CString::from_raw((*config).neon_tenant));
drop(CString::from_raw((*config).neon_timeline));
drop(Box::from_raw(config));
for i in 0..(*self.wp).n_safekeepers {
let sk = &mut (*self.wp).safekeeper[i as usize];
take_vec_u8(&mut sk.inbuf);
}
WalProposerFree(self.wp);
}
}
}
pub struct StreamingCallback {
wp: *mut WalProposer,
}
impl StreamingCallback {
pub fn new(wp: *mut WalProposer) -> StreamingCallback {
StreamingCallback { wp }
}
pub fn broadcast(&self, startpos: Lsn, endpos: Lsn) {
unsafe { WalProposerBroadcast(self.wp, startpos.0, endpos.0) }
}
pub fn poll(&self) {
unsafe { WalProposerPoll(self.wp) }
}
}
#[cfg(test)]
mod tests {
use core::panic;
use std::cell::{Cell, UnsafeCell};
use std::ffi::CString;
use std::sync::atomic::AtomicUsize;
use std::sync::mpsc::sync_channel;
use utils::id::TenantTimelineId;
use super::ApiImpl;
use crate::api_bindings::Level;
use crate::bindings::{NeonWALReadResult, PG_VERSION_NUM};
use crate::walproposer::Wrapper;
#[derive(Clone, Copy, Debug)]
struct WaitEventsData {
sk: *mut crate::bindings::Safekeeper,
event_mask: u32,
}
struct MockImpl {
// data to return from wait_event_set
wait_events: Cell<WaitEventsData>,
// walproposer->safekeeper messages
expected_messages: Vec<Vec<u8>>,
expected_ptr: AtomicUsize,
// safekeeper->walproposer messages
safekeeper_replies: Vec<Vec<u8>>,
replies_ptr: AtomicUsize,
// channel to send LSN to the main thread
sync_channel: std::sync::mpsc::SyncSender<u64>,
// Shmem state, used for storing donor info
shmem: UnsafeCell<crate::bindings::WalproposerShmemState>,
}
impl MockImpl {
fn check_walproposer_msg(&self, msg: &[u8]) {
let ptr = self
.expected_ptr
.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
if ptr >= self.expected_messages.len() {
panic!("unexpected message from walproposer");
}
let expected_msg = &self.expected_messages[ptr];
assert_eq!(msg, expected_msg.as_slice());
}
fn next_safekeeper_reply(&self) -> &[u8] {
let ptr = self
.replies_ptr
.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
if ptr >= self.safekeeper_replies.len() {
panic!("no more safekeeper replies");
}
&self.safekeeper_replies[ptr]
}
}
impl ApiImpl for MockImpl {
fn get_shmem_state(&self) -> *mut crate::bindings::WalproposerShmemState {
self.shmem.get()
}
fn get_current_timestamp(&self) -> i64 {
println!("get_current_timestamp");
0
}
fn update_donor(&self, donor: &mut crate::bindings::Safekeeper, donor_lsn: u64) {
let mut shmem = unsafe { *self.get_shmem_state() };
shmem.propEpochStartLsn.value = donor_lsn;
shmem.donor_conninfo = donor.conninfo;
shmem.donor_lsn = donor_lsn;
}
fn conn_status(
&self,
_: &mut crate::bindings::Safekeeper,
) -> crate::bindings::WalProposerConnStatusType {
println!("conn_status");
crate::bindings::WalProposerConnStatusType_WP_CONNECTION_OK
}
fn conn_connect_start(&self, _: &mut crate::bindings::Safekeeper) {
println!("conn_connect_start");
}
fn conn_connect_poll(
&self,
_: &mut crate::bindings::Safekeeper,
) -> crate::bindings::WalProposerConnectPollStatusType {
println!("conn_connect_poll");
crate::bindings::WalProposerConnectPollStatusType_WP_CONN_POLLING_OK
}
fn conn_send_query(&self, _: &mut crate::bindings::Safekeeper, query: &str) -> bool {
println!("conn_send_query: {query}");
true
}
fn conn_get_query_result(
&self,
_: &mut crate::bindings::Safekeeper,
) -> crate::bindings::WalProposerExecStatusType {
println!("conn_get_query_result");
crate::bindings::WalProposerExecStatusType_WP_EXEC_SUCCESS_COPYBOTH
}
fn conn_async_read(
&self,
_: &mut crate::bindings::Safekeeper,
vec: &mut Vec<u8>,
) -> crate::bindings::PGAsyncReadResult {
println!("conn_async_read");
let reply = self.next_safekeeper_reply();
println!("conn_async_read result: {reply:?}");
vec.extend_from_slice(reply);
crate::bindings::PGAsyncReadResult_PG_ASYNC_READ_SUCCESS
}
fn conn_blocking_write(&self, _: &mut crate::bindings::Safekeeper, buf: &[u8]) -> bool {
println!("conn_blocking_write: {buf:?}");
self.check_walproposer_msg(buf);
true
}
fn recovery_download(
&self,
_wp: &mut crate::bindings::WalProposer,
_sk: &mut crate::bindings::Safekeeper,
) -> bool {
true
}
fn wal_reader_allocate(&self, _: &mut crate::bindings::Safekeeper) -> NeonWALReadResult {
println!("wal_reader_allocate");
crate::bindings::NeonWALReadResult_NEON_WALREAD_SUCCESS
}
fn init_event_set(&self, _: &mut crate::bindings::WalProposer) {
println!("init_event_set")
}
fn update_event_set(&self, sk: &mut crate::bindings::Safekeeper, event_mask: u32) {
println!(
"update_event_set, sk={:?}, events_mask={:#b}",
sk as *mut crate::bindings::Safekeeper, event_mask
);
self.wait_events.set(WaitEventsData { sk, event_mask });
}
fn add_safekeeper_event_set(&self, sk: &mut crate::bindings::Safekeeper, event_mask: u32) {
println!(
"add_safekeeper_event_set, sk={:?}, events_mask={:#b}",
sk as *mut crate::bindings::Safekeeper, event_mask
);
self.wait_events.set(WaitEventsData { sk, event_mask });
}
fn rm_safekeeper_event_set(&self, sk: &mut crate::bindings::Safekeeper) {
println!(
"rm_safekeeper_event_set, sk={:?}",
sk as *mut crate::bindings::Safekeeper
);
}
fn wait_event_set(
&self,
_: &mut crate::bindings::WalProposer,
timeout_millis: i64,
) -> super::WaitResult {
let data = self.wait_events.get();
println!("wait_event_set, timeout_millis={timeout_millis}, res={data:?}");
super::WaitResult::Network(data.sk, data.event_mask)
}
fn strong_random(&self, buf: &mut [u8]) -> bool {
println!("strong_random");
buf.fill(0);
true
}
fn finish_sync_safekeepers(&self, lsn: u64) -> ! {
self.sync_channel.send(lsn).unwrap();
panic!("sync safekeepers finished at lsn={}", lsn);
}
fn log_internal(&self, _wp: &mut crate::bindings::WalProposer, level: Level, msg: &str) {
println!("wp_log[{level}] {msg}");
}
fn after_election(&self, _wp: &mut crate::bindings::WalProposer) {
println!("after_election");
}
}
/// Test that walproposer can successfully connect to safekeeper and finish
/// sync_safekeepers. API is mocked in MockImpl.
///
/// Run this test with valgrind to detect leaks:
/// `valgrind --leak-check=full target/debug/deps/walproposer-<build>`
#[test]
fn test_simple_sync_safekeepers() -> anyhow::Result<()> {
let ttid = TenantTimelineId::new(
"9e4c8f36063c6c6e93bc20d65a820f3d".parse()?,
"9e4c8f36063c6c6e93bc20d65a820f3d".parse()?,
);
let (sender, receiver) = sync_channel(1);
// Messages definitions are at walproposer.h
// xxx: it would be better to extract them from safekeeper crate and
// use serialization/deserialization here.
let greeting_tag = (b'g').to_be_bytes();
let tenant_id = CString::new(ttid.tenant_id.to_string())
.unwrap()
.into_bytes_with_nul();
let timeline_id = CString::new(ttid.timeline_id.to_string())
.unwrap()
.into_bytes_with_nul();
let mconf_gen = 0_u32.to_be_bytes();
let mconf_members_len = 0_u32.to_be_bytes();
let mconf_members_new_len = 0_u32.to_be_bytes();
let pg_version: [u8; 4] = PG_VERSION_NUM.to_be_bytes();
let system_id = 0_u64.to_be_bytes();
let wal_seg_size = 16777216_u32.to_be_bytes();
let proposer_greeting = [
greeting_tag.as_slice(),
tenant_id.as_slice(),
timeline_id.as_slice(),
mconf_gen.as_slice(),
mconf_members_len.as_slice(),
mconf_members_new_len.as_slice(),
pg_version.as_slice(),
system_id.as_slice(),
wal_seg_size.as_slice(),
]
.concat();
let voting_tag = (b'v').to_be_bytes();
let vote_request_term = 3_u64.to_be_bytes();
let vote_request = [
voting_tag.as_slice(),
mconf_gen.as_slice(),
vote_request_term.as_slice(),
]
.concat();
let acceptor_greeting_term = 2_u64.to_be_bytes();
let acceptor_greeting_node_id = 1_u64.to_be_bytes();
let acceptor_greeting = [
greeting_tag.as_slice(),
acceptor_greeting_node_id.as_slice(),
mconf_gen.as_slice(),
mconf_members_len.as_slice(),
mconf_members_new_len.as_slice(),
acceptor_greeting_term.as_slice(),
]
.concat();
let vote_response_term = 3_u64.to_be_bytes();
let vote_given = 1_u8.to_be_bytes();
let flush_lsn = 0x539_u64.to_be_bytes();
let truncate_lsn = 0x539_u64.to_be_bytes();
let th_len = 1_u32.to_be_bytes();
let th_term = 2_u64.to_be_bytes();
let th_lsn = 0x539_u64.to_be_bytes();
let vote_response = [
voting_tag.as_slice(),
mconf_gen.as_slice(),
vote_response_term.as_slice(),
vote_given.as_slice(),
flush_lsn.as_slice(),
truncate_lsn.as_slice(),
th_len.as_slice(),
th_term.as_slice(),
th_lsn.as_slice(),
]
.concat();
let my_impl: Box<dyn ApiImpl> = Box::new(MockImpl {
wait_events: Cell::new(WaitEventsData {
sk: std::ptr::null_mut(),
event_mask: 0,
}),
expected_messages: vec![proposer_greeting, vote_request],
expected_ptr: AtomicUsize::new(0),
safekeeper_replies: vec![acceptor_greeting, vote_response],
replies_ptr: AtomicUsize::new(0),
sync_channel: sender,
shmem: UnsafeCell::new(crate::api_bindings::empty_shmem()),
});
let config = crate::walproposer::Config {
ttid,
safekeepers_list: vec!["localhost:5000".to_string()],
safekeeper_conninfo_options: String::new(),
safekeeper_reconnect_timeout: 1000,
safekeeper_connection_timeout: 10000,
sync_safekeepers: true,
};
let wp = Wrapper::new(my_impl, config);
// walproposer will panic when it finishes sync_safekeepers
std::panic::catch_unwind(|| wp.start()).unwrap_err();
// validate the resulting LSN
assert_eq!(receiver.try_recv(), Ok(1337));
Ok(())
// drop() will free up resources here
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/walproposer/src/api_bindings.rs | libs/walproposer/src/api_bindings.rs | //! A C-Rust shim: defines implementation of C walproposer API, assuming wp
//! callback_data stores Box to some Rust implementation.
#![allow(dead_code)]
use std::ffi::{CStr, CString};
use crate::bindings::{
NeonWALReadResult, PGAsyncReadResult, PGAsyncWriteResult, Safekeeper, Size, StringInfoData,
TimestampTz, WalProposer, WalProposerConnStatusType, WalProposerConnectPollStatusType,
WalProposerExecStatusType, WalproposerShmemState, XLogRecPtr, uint32, walproposer_api,
};
use crate::walproposer::{ApiImpl, StreamingCallback, WaitResult};
extern "C" fn get_shmem_state(wp: *mut WalProposer) -> *mut WalproposerShmemState {
unsafe {
let callback_data = (*(*wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).get_shmem_state()
}
}
extern "C-unwind" fn start_streaming(wp: *mut WalProposer, startpos: XLogRecPtr) {
unsafe {
let callback_data = (*(*wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
let callback = StreamingCallback::new(wp);
(*api).start_streaming(startpos, &callback);
}
}
extern "C" fn get_flush_rec_ptr(wp: *mut WalProposer) -> XLogRecPtr {
unsafe {
let callback_data = (*(*wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).get_flush_rec_ptr()
}
}
extern "C" fn update_donor(wp: *mut WalProposer, donor: *mut Safekeeper, donor_lsn: XLogRecPtr) {
unsafe {
let callback_data = (*(*wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).update_donor(&mut (*donor), donor_lsn)
}
}
extern "C" fn get_current_timestamp(wp: *mut WalProposer) -> TimestampTz {
unsafe {
let callback_data = (*(*wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).get_current_timestamp()
}
}
extern "C" fn conn_error_message(sk: *mut Safekeeper) -> *mut ::std::os::raw::c_char {
unsafe {
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
let msg = (*api).conn_error_message(&mut (*sk));
let msg = CString::new(msg).unwrap();
// TODO: fix leaking error message
msg.into_raw()
}
}
extern "C" fn conn_status(sk: *mut Safekeeper) -> WalProposerConnStatusType {
unsafe {
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).conn_status(&mut (*sk))
}
}
extern "C" fn conn_connect_start(sk: *mut Safekeeper) {
unsafe {
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).conn_connect_start(&mut (*sk))
}
}
extern "C" fn conn_connect_poll(sk: *mut Safekeeper) -> WalProposerConnectPollStatusType {
unsafe {
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).conn_connect_poll(&mut (*sk))
}
}
extern "C" fn conn_send_query(sk: *mut Safekeeper, query: *mut ::std::os::raw::c_char) -> bool {
let query = unsafe { CStr::from_ptr(query) };
let query = query.to_str().unwrap();
unsafe {
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).conn_send_query(&mut (*sk), query)
}
}
extern "C" fn conn_get_query_result(sk: *mut Safekeeper) -> WalProposerExecStatusType {
unsafe {
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).conn_get_query_result(&mut (*sk))
}
}
extern "C" fn conn_flush(sk: *mut Safekeeper) -> ::std::os::raw::c_int {
unsafe {
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).conn_flush(&mut (*sk))
}
}
extern "C" fn conn_finish(sk: *mut Safekeeper) {
unsafe {
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).conn_finish(&mut (*sk))
}
}
extern "C" fn conn_async_read(
sk: *mut Safekeeper,
buf: *mut *mut ::std::os::raw::c_char,
amount: *mut ::std::os::raw::c_int,
) -> PGAsyncReadResult {
unsafe {
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
// This function has guarantee that returned buf will be valid until
// the next call. So we can store a Vec in each Safekeeper and reuse
// it on the next call.
let mut inbuf = take_vec_u8(&mut (*sk).inbuf).unwrap_or_default();
inbuf.clear();
let result = (*api).conn_async_read(&mut (*sk), &mut inbuf);
// Put a Vec back to sk->inbuf and return data ptr.
*amount = inbuf.len() as i32;
*buf = store_vec_u8(&mut (*sk).inbuf, inbuf);
result
}
}
extern "C" fn conn_async_write(
sk: *mut Safekeeper,
buf: *const ::std::os::raw::c_void,
size: usize,
) -> PGAsyncWriteResult {
unsafe {
let buf = std::slice::from_raw_parts(buf as *const u8, size);
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).conn_async_write(&mut (*sk), buf)
}
}
extern "C" fn conn_blocking_write(
sk: *mut Safekeeper,
buf: *const ::std::os::raw::c_void,
size: usize,
) -> bool {
unsafe {
let buf = std::slice::from_raw_parts(buf as *const u8, size);
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).conn_blocking_write(&mut (*sk), buf)
}
}
extern "C-unwind" fn recovery_download(wp: *mut WalProposer, sk: *mut Safekeeper) -> bool {
unsafe {
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
// currently `recovery_download` is always called right after election
(*api).after_election(&mut (*wp));
(*api).recovery_download(&mut (*wp), &mut (*sk))
}
}
extern "C" fn wal_reader_allocate(sk: *mut Safekeeper) {
unsafe {
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).wal_reader_allocate(&mut (*sk));
}
}
#[allow(clippy::unnecessary_cast)]
extern "C" fn wal_read(
sk: *mut Safekeeper,
buf: *mut ::std::os::raw::c_char,
startptr: XLogRecPtr,
count: Size,
_errmsg: *mut *mut ::std::os::raw::c_char,
) -> NeonWALReadResult {
unsafe {
let buf = std::slice::from_raw_parts_mut(buf as *mut u8, count);
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
// TODO: errmsg is not forwarded
(*api).wal_read(&mut (*sk), buf, startptr)
}
}
extern "C" fn wal_reader_events(sk: *mut Safekeeper) -> uint32 {
unsafe {
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).wal_reader_events(&mut (*sk))
}
}
extern "C" fn init_event_set(wp: *mut WalProposer) {
unsafe {
let callback_data = (*(*wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).init_event_set(&mut (*wp));
}
}
extern "C" fn update_event_set(sk: *mut Safekeeper, events: uint32) {
unsafe {
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).update_event_set(&mut (*sk), events);
}
}
extern "C" fn active_state_update_event_set(sk: *mut Safekeeper) {
unsafe {
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).active_state_update_event_set(&mut (*sk));
}
}
extern "C" fn add_safekeeper_event_set(sk: *mut Safekeeper, events: uint32) {
unsafe {
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).add_safekeeper_event_set(&mut (*sk), events);
}
}
extern "C" fn rm_safekeeper_event_set(sk: *mut Safekeeper) {
unsafe {
let callback_data = (*(*(*sk).wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).rm_safekeeper_event_set(&mut (*sk));
}
}
extern "C-unwind" fn wait_event_set(
wp: *mut WalProposer,
timeout: ::std::os::raw::c_long,
event_sk: *mut *mut Safekeeper,
events: *mut uint32,
) -> ::std::os::raw::c_int {
unsafe {
let callback_data = (*(*wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
let result = (*api).wait_event_set(&mut (*wp), timeout);
match result {
WaitResult::Latch => {
*event_sk = std::ptr::null_mut();
*events = crate::bindings::WL_LATCH_SET;
1
}
WaitResult::Timeout => {
*event_sk = std::ptr::null_mut();
// WaitEventSetWait returns 0 for timeout.
*events = 0;
0
}
WaitResult::Network(sk, event_mask) => {
*event_sk = sk;
*events = event_mask;
1
}
}
}
}
extern "C" fn strong_random(
wp: *mut WalProposer,
buf: *mut ::std::os::raw::c_void,
len: usize,
) -> bool {
unsafe {
let buf = std::slice::from_raw_parts_mut(buf as *mut u8, len);
let callback_data = (*(*wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).strong_random(buf)
}
}
extern "C" fn get_redo_start_lsn(wp: *mut WalProposer) -> XLogRecPtr {
unsafe {
let callback_data = (*(*wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).get_redo_start_lsn()
}
}
unsafe extern "C-unwind" fn finish_sync_safekeepers(wp: *mut WalProposer, lsn: XLogRecPtr) -> ! {
unsafe {
let callback_data = (*(*wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).finish_sync_safekeepers(lsn)
}
}
extern "C" fn process_safekeeper_feedback(wp: *mut WalProposer, sk: *mut Safekeeper) {
unsafe {
let callback_data = (*(*wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
(*api).process_safekeeper_feedback(&mut (*wp), &mut (*sk));
}
}
extern "C-unwind" fn log_internal(
wp: *mut WalProposer,
level: ::std::os::raw::c_int,
line: *const ::std::os::raw::c_char,
) {
unsafe {
let callback_data = (*(*wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
let line = CStr::from_ptr(line);
let line = line.to_str().unwrap();
(*api).log_internal(&mut (*wp), Level::from(level as u32), line)
}
}
/* BEGIN_HADRON */
extern "C" fn reset_safekeeper_statuses_for_metrics(wp: *mut WalProposer, num_safekeepers: u32) {
unsafe {
let callback_data = (*(*wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
if api.is_null() {
return;
}
(*api).reset_safekeeper_statuses_for_metrics(&mut (*wp), num_safekeepers);
}
}
extern "C" fn update_safekeeper_status_for_metrics(
wp: *mut WalProposer,
sk_index: u32,
status: u8,
) {
unsafe {
let callback_data = (*(*wp).config).callback_data;
let api = callback_data as *mut Box<dyn ApiImpl>;
if api.is_null() {
return;
}
(*api).update_safekeeper_status_for_metrics(&mut (*wp), sk_index, status);
}
}
/* END_HADRON */
#[derive(Debug, PartialEq)]
pub enum Level {
Debug5,
Debug4,
Debug3,
Debug2,
Debug1,
Log,
Info,
Notice,
Warning,
Error,
Fatal,
Panic,
WPEvent,
}
impl Level {
pub fn from(elevel: u32) -> Level {
use crate::bindings::*;
match elevel {
DEBUG5 => Level::Debug5,
DEBUG4 => Level::Debug4,
DEBUG3 => Level::Debug3,
DEBUG2 => Level::Debug2,
DEBUG1 => Level::Debug1,
LOG => Level::Log,
INFO => Level::Info,
NOTICE => Level::Notice,
WARNING => Level::Warning,
ERROR => Level::Error,
FATAL => Level::Fatal,
PANIC => Level::Panic,
WPEVENT => Level::WPEvent,
_ => panic!("unknown log level {elevel}"),
}
}
}
pub(crate) fn create_api() -> walproposer_api {
walproposer_api {
get_shmem_state: Some(get_shmem_state),
start_streaming: Some(start_streaming),
get_flush_rec_ptr: Some(get_flush_rec_ptr),
update_donor: Some(update_donor),
get_current_timestamp: Some(get_current_timestamp),
conn_error_message: Some(conn_error_message),
conn_status: Some(conn_status),
conn_connect_start: Some(conn_connect_start),
conn_connect_poll: Some(conn_connect_poll),
conn_send_query: Some(conn_send_query),
conn_get_query_result: Some(conn_get_query_result),
conn_flush: Some(conn_flush),
conn_finish: Some(conn_finish),
conn_async_read: Some(conn_async_read),
conn_async_write: Some(conn_async_write),
conn_blocking_write: Some(conn_blocking_write),
recovery_download: Some(recovery_download),
wal_reader_allocate: Some(wal_reader_allocate),
wal_read: Some(wal_read),
wal_reader_events: Some(wal_reader_events),
init_event_set: Some(init_event_set),
update_event_set: Some(update_event_set),
active_state_update_event_set: Some(active_state_update_event_set),
add_safekeeper_event_set: Some(add_safekeeper_event_set),
rm_safekeeper_event_set: Some(rm_safekeeper_event_set),
wait_event_set: Some(wait_event_set),
strong_random: Some(strong_random),
get_redo_start_lsn: Some(get_redo_start_lsn),
finish_sync_safekeepers: Some(finish_sync_safekeepers),
process_safekeeper_feedback: Some(process_safekeeper_feedback),
log_internal: Some(log_internal),
/* BEGIN_HADRON */
reset_safekeeper_statuses_for_metrics: Some(reset_safekeeper_statuses_for_metrics),
update_safekeeper_status_for_metrics: Some(update_safekeeper_status_for_metrics),
/* END_HADRON */
}
}
pub fn empty_shmem() -> crate::bindings::WalproposerShmemState {
let empty_feedback = crate::bindings::PageserverFeedback {
present: false,
currentClusterSize: 0,
last_received_lsn: 0,
disk_consistent_lsn: 0,
remote_consistent_lsn: 0,
replytime: 0,
shard_number: 0,
corruption_detected: false,
};
let empty_wal_rate_limiter = crate::bindings::WalRateLimiter {
effective_max_wal_bytes_per_second: crate::bindings::pg_atomic_uint32 { value: 0 },
should_limit: crate::bindings::pg_atomic_uint32 { value: 0 },
sent_bytes: 0,
batch_start_time_us: crate::bindings::pg_atomic_uint64 { value: 0 },
batch_end_time_us: crate::bindings::pg_atomic_uint64 { value: 0 },
};
crate::bindings::WalproposerShmemState {
propEpochStartLsn: crate::bindings::pg_atomic_uint64 { value: 0 },
donor_name: [0; 64],
donor_conninfo: [0; 1024],
donor_lsn: 0,
mutex: 0,
mineLastElectedTerm: crate::bindings::pg_atomic_uint64 { value: 0 },
backpressureThrottlingTime: crate::bindings::pg_atomic_uint64 { value: 0 },
currentClusterSize: crate::bindings::pg_atomic_uint64 { value: 0 },
shard_ps_feedback: [empty_feedback; 128],
num_shards: 0,
replica_promote: false,
min_ps_feedback: empty_feedback,
wal_rate_limiter: empty_wal_rate_limiter,
num_safekeepers: 0,
safekeeper_status: [0; 32],
}
}
impl std::fmt::Display for Level {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{self:?}")
}
}
/// Take ownership of `Vec<u8>` from StringInfoData.
#[allow(clippy::unnecessary_cast)]
pub(crate) fn take_vec_u8(pg: &mut StringInfoData) -> Option<Vec<u8>> {
if pg.data.is_null() {
return None;
}
let ptr = pg.data as *mut u8;
let length = pg.len as usize;
let capacity = pg.maxlen as usize;
pg.data = std::ptr::null_mut();
pg.len = 0;
pg.maxlen = 0;
unsafe { Some(Vec::from_raw_parts(ptr, length, capacity)) }
}
/// Store `Vec<u8>` in StringInfoData.
fn store_vec_u8(pg: &mut StringInfoData, vec: Vec<u8>) -> *mut ::std::os::raw::c_char {
let ptr = vec.as_ptr() as *mut ::std::os::raw::c_char;
let length = vec.len();
let capacity = vec.capacity();
assert!(pg.data.is_null());
pg.data = ptr;
pg.len = length as i32;
pg.maxlen = capacity as i32;
std::mem::forget(vec);
ptr
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/postgres-types2/src/lib.rs | libs/proxy/postgres-types2/src/lib.rs | //! Conversions to and from Postgres types.
//!
//! This crate is used by the `tokio-postgres` and `postgres` crates. You normally don't need to depend directly on it
//! unless you want to define your own `ToSql` or `FromSql` definitions.
#![warn(clippy::all, missing_docs)]
use std::any::type_name;
use std::error::Error;
use std::fmt;
use std::sync::Arc;
use fallible_iterator::FallibleIterator;
#[doc(inline)]
pub use postgres_protocol2::Oid;
use postgres_protocol2::types;
use crate::type_gen::{Inner, Other};
/// Generates a simple implementation of `ToSql::accepts` which accepts the
/// types passed to it.
macro_rules! accepts {
($($expected:ident),+) => (
fn accepts(ty: &$crate::Type) -> bool {
matches!(*ty, $($crate::Type::$expected)|+)
}
)
}
// mod pg_lsn;
#[doc(hidden)]
pub mod private;
// mod special;
mod type_gen;
/// A Postgres type.
#[derive(PartialEq, Eq, Clone, Hash)]
pub struct Type(Inner);
impl fmt::Debug for Type {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.0, fmt)
}
}
impl fmt::Display for Type {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.schema() {
"public" | "pg_catalog" => {}
schema => write!(fmt, "{schema}.")?,
}
fmt.write_str(self.name())
}
}
impl Type {
/// Creates a new `Type`.
pub fn new(name: String, oid: Oid, kind: Kind, schema: String) -> Type {
Type(Inner::Other(Arc::new(Other {
name,
oid,
kind,
schema,
})))
}
/// Returns the `Type` corresponding to the provided `Oid` if it
/// corresponds to a built-in type.
pub fn from_oid(oid: Oid) -> Option<Type> {
Inner::from_oid(oid).map(Type)
}
/// Returns the OID of the `Type`.
pub fn oid(&self) -> Oid {
self.0.oid()
}
/// Returns the kind of this type.
pub fn kind(&self) -> &Kind {
self.0.kind()
}
/// Returns the schema of this type.
pub fn schema(&self) -> &str {
match self.0 {
Inner::Other(ref u) => &u.schema,
_ => "pg_catalog",
}
}
/// Returns the name of this type.
pub fn name(&self) -> &str {
self.0.name()
}
}
/// Represents the kind of a Postgres type.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[non_exhaustive]
pub enum Kind {
/// A simple type like `VARCHAR` or `INTEGER`.
Simple,
/// An enumerated type.
Enum,
/// A pseudo-type.
Pseudo,
/// An array type along with the type of its elements.
Array(Type),
/// A range type along with the type of its elements.
Range(Oid),
/// A multirange type along with the type of its elements.
Multirange(Type),
/// A domain type along with its underlying type.
Domain(Oid),
/// A composite type.
Composite(Oid),
}
/// Information about a field of a composite type.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Field {
name: String,
type_: Type,
}
impl Field {
/// Creates a new `Field`.
pub fn new(name: String, type_: Type) -> Field {
Field { name, type_ }
}
/// Returns the name of the field.
pub fn name(&self) -> &str {
&self.name
}
/// Returns the type of the field.
pub fn type_(&self) -> &Type {
&self.type_
}
}
/// An error indicating that a `NULL` Postgres value was passed to a `FromSql`
/// implementation that does not support `NULL` values.
#[derive(Debug, Clone, Copy)]
pub struct WasNull;
impl fmt::Display for WasNull {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.write_str("a Postgres value was `NULL`")
}
}
impl Error for WasNull {}
/// An error indicating that a conversion was attempted between incompatible
/// Rust and Postgres types.
#[derive(Debug)]
pub struct WrongType {
postgres: Type,
rust: &'static str,
}
impl fmt::Display for WrongType {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
fmt,
"cannot convert between the Rust type `{}` and the Postgres type `{}`",
self.rust, self.postgres,
)
}
}
impl Error for WrongType {}
impl WrongType {
/// Creates a new `WrongType` error.
pub fn new<T>(ty: Type) -> WrongType {
WrongType {
postgres: ty,
rust: type_name::<T>(),
}
}
}
/// An error indicating that a as_text conversion was attempted on a binary
/// result.
#[derive(Debug)]
pub struct WrongFormat {}
impl Error for WrongFormat {}
impl fmt::Display for WrongFormat {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
fmt,
"cannot read column as text while it is in binary format"
)
}
}
/// A trait for types that can be created from a Postgres value.
pub trait FromSql<'a>: Sized {
/// Creates a new value of this type from a buffer of data of the specified
/// Postgres `Type` in its binary format.
///
/// The caller of this method is responsible for ensuring that this type
/// is compatible with the Postgres `Type`.
fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<Self, Box<dyn Error + Sync + Send>>;
/// Creates a new value of this type from a `NULL` SQL value.
///
/// The caller of this method is responsible for ensuring that this type
/// is compatible with the Postgres `Type`.
///
/// The default implementation returns `Err(Box::new(WasNull))`.
#[allow(unused_variables)]
fn from_sql_null(ty: &Type) -> Result<Self, Box<dyn Error + Sync + Send>> {
Err(Box::new(WasNull))
}
/// A convenience function that delegates to `from_sql` and `from_sql_null` depending on the
/// value of `raw`.
fn from_sql_nullable(
ty: &Type,
raw: Option<&'a [u8]>,
) -> Result<Self, Box<dyn Error + Sync + Send>> {
match raw {
Some(raw) => Self::from_sql(ty, raw),
None => Self::from_sql_null(ty),
}
}
/// Determines if a value of this type can be created from the specified
/// Postgres `Type`.
fn accepts(ty: &Type) -> bool;
}
/// A trait for types which can be created from a Postgres value without borrowing any data.
///
/// This is primarily useful for trait bounds on functions.
pub trait FromSqlOwned: for<'a> FromSql<'a> {}
impl<T> FromSqlOwned for T where T: for<'a> FromSql<'a> {}
impl<'a, T: FromSql<'a>> FromSql<'a> for Option<T> {
fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<Option<T>, Box<dyn Error + Sync + Send>> {
<T as FromSql>::from_sql(ty, raw).map(Some)
}
fn from_sql_null(_: &Type) -> Result<Option<T>, Box<dyn Error + Sync + Send>> {
Ok(None)
}
fn accepts(ty: &Type) -> bool {
<T as FromSql>::accepts(ty)
}
}
impl<'a, T: FromSql<'a>> FromSql<'a> for Vec<T> {
fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<Vec<T>, Box<dyn Error + Sync + Send>> {
let member_type = match *ty.kind() {
Kind::Array(ref member) => member,
_ => panic!("expected array type"),
};
let array = types::array_from_sql(raw)?;
if array.dimensions().count()? > 1 {
return Err("array contains too many dimensions".into());
}
array
.values()
.map(|v| T::from_sql_nullable(member_type, v))
.collect()
}
fn accepts(ty: &Type) -> bool {
match *ty.kind() {
Kind::Array(ref inner) => T::accepts(inner),
_ => false,
}
}
}
impl<'a> FromSql<'a> for String {
fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<String, Box<dyn Error + Sync + Send>> {
<&str as FromSql>::from_sql(ty, raw).map(ToString::to_string)
}
fn accepts(ty: &Type) -> bool {
<&str as FromSql>::accepts(ty)
}
}
impl<'a> FromSql<'a> for &'a str {
    /// Borrows the text directly out of the row buffer.
    ///
    /// The `ltree` / `lquery` / `ltxtquery` extension types carry their own
    /// wire framing, so they are routed to dedicated decoders; everything
    /// else is treated as plain text.
    fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<&'a str, Box<dyn Error + Sync + Send>> {
        match ty.name() {
            "ltree" => types::ltree_from_sql(raw),
            "lquery" => types::lquery_from_sql(raw),
            "ltxtquery" => types::ltxtquery_from_sql(raw),
            _ => types::text_from_sql(raw),
        }
    }

    /// Accepts the built-in textual types plus a small set of extension
    /// types identified by name.
    fn accepts(ty: &Type) -> bool {
        if matches!(
            *ty,
            Type::VARCHAR | Type::TEXT | Type::BPCHAR | Type::NAME | Type::UNKNOWN
        ) {
            return true;
        }
        matches!(ty.name(), "citext" | "ltree" | "lquery" | "ltxtquery")
    }
}
// Generates a `FromSql` impl for a primitive whose decoding is a single call
// into a `types::*` helper, with the accepted Postgres types listed via the
// `accepts!` macro (defined elsewhere in this crate).
macro_rules! simple_from {
    ($t:ty, $f:ident, $($expected:ident),+) => {
        impl<'a> FromSql<'a> for $t {
            fn from_sql(_: &Type, raw: &'a [u8]) -> Result<$t, Box<dyn Error + Sync + Send>> {
                types::$f(raw)
            }
            accepts!($($expected),+);
        }
    }
}
// Postgres `"char"` is a single byte, exposed as `i8`.
simple_from!(i8, char_from_sql, CHAR);
// Object identifiers are unsigned 32-bit integers.
simple_from!(u32, oid_from_sql, OID);
/// An enum representing the nullability of a Postgres value.
// NOTE(review): presumably returned by `ToSql`-style serialization code to
// signal SQL NULL — the consumer is not visible in this chunk; confirm.
pub enum IsNull {
    /// The value is NULL.
    Yes,
    /// The value is not NULL.
    No,
}
/// Supported Postgres message format types
///
/// Using Text format in a message assumes a Postgres `SERVER_ENCODING` of `UTF8`
// NOTE(review): variant order presumably mirrors the protocol's format codes
// (text = 0, binary = 1) — confirm where this enum is serialized.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Format {
    /// Text format (UTF-8)
    Text,
    /// Compact, typed binary format
    Binary,
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/postgres-types2/src/type_gen.rs | libs/proxy/postgres-types2/src/type_gen.rs | // Autogenerated file - DO NOT EDIT
use std::sync::Arc;
use crate::{Kind, Oid, Type};
/// Describes a Postgres type that is not one of the built-in [`Inner`]
/// variants (e.g. a user-defined type discovered at runtime).
#[derive(PartialEq, Eq, Debug, Hash)]
pub struct Other {
    /// The type's name.
    pub name: String,
    /// The type's OID.
    pub oid: Oid,
    /// The type's kind (simple, array, range, ...).
    pub kind: Kind,
    /// The schema the type belongs to.
    pub schema: String,
}
/// Internal representation of a Postgres type: one variant for each built-in
/// type known at code-generation time, plus [`Inner::Other`] for types only
/// discoverable at runtime. (This file is autogenerated — do not hand-edit.)
#[derive(PartialEq, Eq, Clone, Debug, Hash)]
pub enum Inner {
    Bool,
    Bytea,
    Char,
    Name,
    Int8,
    Int2,
    Int2Vector,
    Int4,
    Regproc,
    Text,
    Oid,
    Tid,
    Xid,
    Cid,
    OidVector,
    PgDdlCommand,
    Json,
    Xml,
    XmlArray,
    PgNodeTree,
    JsonArray,
    TableAmHandler,
    Xid8Array,
    IndexAmHandler,
    Point,
    Lseg,
    Path,
    Box,
    Polygon,
    Line,
    LineArray,
    Cidr,
    CidrArray,
    Float4,
    Float8,
    Unknown,
    Circle,
    CircleArray,
    Macaddr8,
    Macaddr8Array,
    Money,
    MoneyArray,
    Macaddr,
    Inet,
    BoolArray,
    ByteaArray,
    CharArray,
    NameArray,
    Int2Array,
    Int2VectorArray,
    Int4Array,
    RegprocArray,
    TextArray,
    TidArray,
    XidArray,
    CidArray,
    OidVectorArray,
    BpcharArray,
    VarcharArray,
    Int8Array,
    PointArray,
    LsegArray,
    PathArray,
    BoxArray,
    Float4Array,
    Float8Array,
    PolygonArray,
    OidArray,
    Aclitem,
    AclitemArray,
    MacaddrArray,
    InetArray,
    Bpchar,
    Varchar,
    Date,
    Time,
    Timestamp,
    TimestampArray,
    DateArray,
    TimeArray,
    Timestamptz,
    TimestamptzArray,
    Interval,
    IntervalArray,
    NumericArray,
    CstringArray,
    Timetz,
    TimetzArray,
    Bit,
    BitArray,
    Varbit,
    VarbitArray,
    Numeric,
    Refcursor,
    RefcursorArray,
    Regprocedure,
    Regoper,
    Regoperator,
    Regclass,
    Regtype,
    RegprocedureArray,
    RegoperArray,
    RegoperatorArray,
    RegclassArray,
    RegtypeArray,
    Record,
    Cstring,
    Any,
    Anyarray,
    Void,
    Trigger,
    LanguageHandler,
    Internal,
    Anyelement,
    RecordArray,
    Anynonarray,
    TxidSnapshotArray,
    Uuid,
    UuidArray,
    TxidSnapshot,
    FdwHandler,
    PgLsn,
    PgLsnArray,
    TsmHandler,
    PgNdistinct,
    PgDependencies,
    Anyenum,
    TsVector,
    Tsquery,
    GtsVector,
    TsVectorArray,
    GtsVectorArray,
    TsqueryArray,
    Regconfig,
    RegconfigArray,
    Regdictionary,
    RegdictionaryArray,
    Jsonb,
    JsonbArray,
    AnyRange,
    EventTrigger,
    Int4Range,
    Int4RangeArray,
    NumRange,
    NumRangeArray,
    TsRange,
    TsRangeArray,
    TstzRange,
    TstzRangeArray,
    DateRange,
    DateRangeArray,
    Int8Range,
    Int8RangeArray,
    Jsonpath,
    JsonpathArray,
    Regnamespace,
    RegnamespaceArray,
    Regrole,
    RegroleArray,
    Regcollation,
    RegcollationArray,
    Int4multiRange,
    NummultiRange,
    TsmultiRange,
    TstzmultiRange,
    DatemultiRange,
    Int8multiRange,
    AnymultiRange,
    AnycompatiblemultiRange,
    PgBrinBloomSummary,
    PgBrinMinmaxMultiSummary,
    PgMcvList,
    PgSnapshot,
    PgSnapshotArray,
    Xid8,
    Anycompatible,
    Anycompatiblearray,
    Anycompatiblenonarray,
    AnycompatibleRange,
    Int4multiRangeArray,
    NummultiRangeArray,
    TsmultiRangeArray,
    TstzmultiRangeArray,
    DatemultiRangeArray,
    Int8multiRangeArray,
    /// A type not in the generated catalog; carries its metadata at runtime.
    Other(Arc<Other>),
}
impl Inner {
    /// Maps a built-in Postgres type OID to its `Inner` variant.
    ///
    /// Returns `None` for OIDs not known at code-generation time (e.g.
    /// user-defined types); callers represent those with [`Inner::Other`].
    pub fn from_oid(oid: Oid) -> Option<Inner> {
        match oid {
            16 => Some(Inner::Bool),
            17 => Some(Inner::Bytea),
            18 => Some(Inner::Char),
            19 => Some(Inner::Name),
            20 => Some(Inner::Int8),
            21 => Some(Inner::Int2),
            22 => Some(Inner::Int2Vector),
            23 => Some(Inner::Int4),
            24 => Some(Inner::Regproc),
            25 => Some(Inner::Text),
            26 => Some(Inner::Oid),
            27 => Some(Inner::Tid),
            28 => Some(Inner::Xid),
            29 => Some(Inner::Cid),
            30 => Some(Inner::OidVector),
            32 => Some(Inner::PgDdlCommand),
            114 => Some(Inner::Json),
            142 => Some(Inner::Xml),
            143 => Some(Inner::XmlArray),
            194 => Some(Inner::PgNodeTree),
            199 => Some(Inner::JsonArray),
            269 => Some(Inner::TableAmHandler),
            271 => Some(Inner::Xid8Array),
            325 => Some(Inner::IndexAmHandler),
            600 => Some(Inner::Point),
            601 => Some(Inner::Lseg),
            602 => Some(Inner::Path),
            603 => Some(Inner::Box),
            604 => Some(Inner::Polygon),
            628 => Some(Inner::Line),
            629 => Some(Inner::LineArray),
            650 => Some(Inner::Cidr),
            651 => Some(Inner::CidrArray),
            700 => Some(Inner::Float4),
            701 => Some(Inner::Float8),
            705 => Some(Inner::Unknown),
            718 => Some(Inner::Circle),
            719 => Some(Inner::CircleArray),
            774 => Some(Inner::Macaddr8),
            775 => Some(Inner::Macaddr8Array),
            790 => Some(Inner::Money),
            791 => Some(Inner::MoneyArray),
            829 => Some(Inner::Macaddr),
            869 => Some(Inner::Inet),
            1000 => Some(Inner::BoolArray),
            1001 => Some(Inner::ByteaArray),
            1002 => Some(Inner::CharArray),
            1003 => Some(Inner::NameArray),
            1005 => Some(Inner::Int2Array),
            1006 => Some(Inner::Int2VectorArray),
            1007 => Some(Inner::Int4Array),
            1008 => Some(Inner::RegprocArray),
            1009 => Some(Inner::TextArray),
            1010 => Some(Inner::TidArray),
            1011 => Some(Inner::XidArray),
            1012 => Some(Inner::CidArray),
            1013 => Some(Inner::OidVectorArray),
            1014 => Some(Inner::BpcharArray),
            1015 => Some(Inner::VarcharArray),
            1016 => Some(Inner::Int8Array),
            1017 => Some(Inner::PointArray),
            1018 => Some(Inner::LsegArray),
            1019 => Some(Inner::PathArray),
            1020 => Some(Inner::BoxArray),
            1021 => Some(Inner::Float4Array),
            1022 => Some(Inner::Float8Array),
            1027 => Some(Inner::PolygonArray),
            1028 => Some(Inner::OidArray),
            1033 => Some(Inner::Aclitem),
            1034 => Some(Inner::AclitemArray),
            1040 => Some(Inner::MacaddrArray),
            1041 => Some(Inner::InetArray),
            1042 => Some(Inner::Bpchar),
            1043 => Some(Inner::Varchar),
            1082 => Some(Inner::Date),
            1083 => Some(Inner::Time),
            1114 => Some(Inner::Timestamp),
            1115 => Some(Inner::TimestampArray),
            1182 => Some(Inner::DateArray),
            1183 => Some(Inner::TimeArray),
            1184 => Some(Inner::Timestamptz),
            1185 => Some(Inner::TimestamptzArray),
            1186 => Some(Inner::Interval),
            1187 => Some(Inner::IntervalArray),
            1231 => Some(Inner::NumericArray),
            1263 => Some(Inner::CstringArray),
            1266 => Some(Inner::Timetz),
            1270 => Some(Inner::TimetzArray),
            1560 => Some(Inner::Bit),
            1561 => Some(Inner::BitArray),
            1562 => Some(Inner::Varbit),
            1563 => Some(Inner::VarbitArray),
            1700 => Some(Inner::Numeric),
            1790 => Some(Inner::Refcursor),
            2201 => Some(Inner::RefcursorArray),
            2202 => Some(Inner::Regprocedure),
            2203 => Some(Inner::Regoper),
            2204 => Some(Inner::Regoperator),
            2205 => Some(Inner::Regclass),
            2206 => Some(Inner::Regtype),
            2207 => Some(Inner::RegprocedureArray),
            2208 => Some(Inner::RegoperArray),
            2209 => Some(Inner::RegoperatorArray),
            2210 => Some(Inner::RegclassArray),
            2211 => Some(Inner::RegtypeArray),
            2249 => Some(Inner::Record),
            2275 => Some(Inner::Cstring),
            2276 => Some(Inner::Any),
            2277 => Some(Inner::Anyarray),
            2278 => Some(Inner::Void),
            2279 => Some(Inner::Trigger),
            2280 => Some(Inner::LanguageHandler),
            2281 => Some(Inner::Internal),
            2283 => Some(Inner::Anyelement),
            2287 => Some(Inner::RecordArray),
            2776 => Some(Inner::Anynonarray),
            2949 => Some(Inner::TxidSnapshotArray),
            2950 => Some(Inner::Uuid),
            2951 => Some(Inner::UuidArray),
            2970 => Some(Inner::TxidSnapshot),
            3115 => Some(Inner::FdwHandler),
            3220 => Some(Inner::PgLsn),
            3221 => Some(Inner::PgLsnArray),
            3310 => Some(Inner::TsmHandler),
            3361 => Some(Inner::PgNdistinct),
            3402 => Some(Inner::PgDependencies),
            3500 => Some(Inner::Anyenum),
            3614 => Some(Inner::TsVector),
            3615 => Some(Inner::Tsquery),
            3642 => Some(Inner::GtsVector),
            3643 => Some(Inner::TsVectorArray),
            3644 => Some(Inner::GtsVectorArray),
            3645 => Some(Inner::TsqueryArray),
            3734 => Some(Inner::Regconfig),
            3735 => Some(Inner::RegconfigArray),
            3769 => Some(Inner::Regdictionary),
            3770 => Some(Inner::RegdictionaryArray),
            3802 => Some(Inner::Jsonb),
            3807 => Some(Inner::JsonbArray),
            3831 => Some(Inner::AnyRange),
            3838 => Some(Inner::EventTrigger),
            3904 => Some(Inner::Int4Range),
            3905 => Some(Inner::Int4RangeArray),
            3906 => Some(Inner::NumRange),
            3907 => Some(Inner::NumRangeArray),
            3908 => Some(Inner::TsRange),
            3909 => Some(Inner::TsRangeArray),
            3910 => Some(Inner::TstzRange),
            3911 => Some(Inner::TstzRangeArray),
            3912 => Some(Inner::DateRange),
            3913 => Some(Inner::DateRangeArray),
            3926 => Some(Inner::Int8Range),
            3927 => Some(Inner::Int8RangeArray),
            4072 => Some(Inner::Jsonpath),
            4073 => Some(Inner::JsonpathArray),
            4089 => Some(Inner::Regnamespace),
            4090 => Some(Inner::RegnamespaceArray),
            4096 => Some(Inner::Regrole),
            4097 => Some(Inner::RegroleArray),
            4191 => Some(Inner::Regcollation),
            4192 => Some(Inner::RegcollationArray),
            4451 => Some(Inner::Int4multiRange),
            4532 => Some(Inner::NummultiRange),
            4533 => Some(Inner::TsmultiRange),
            4534 => Some(Inner::TstzmultiRange),
            4535 => Some(Inner::DatemultiRange),
            4536 => Some(Inner::Int8multiRange),
            4537 => Some(Inner::AnymultiRange),
            4538 => Some(Inner::AnycompatiblemultiRange),
            4600 => Some(Inner::PgBrinBloomSummary),
            4601 => Some(Inner::PgBrinMinmaxMultiSummary),
            5017 => Some(Inner::PgMcvList),
            5038 => Some(Inner::PgSnapshot),
            5039 => Some(Inner::PgSnapshotArray),
            5069 => Some(Inner::Xid8),
            5077 => Some(Inner::Anycompatible),
            5078 => Some(Inner::Anycompatiblearray),
            5079 => Some(Inner::Anycompatiblenonarray),
            5080 => Some(Inner::AnycompatibleRange),
            6150 => Some(Inner::Int4multiRangeArray),
            6151 => Some(Inner::NummultiRangeArray),
            6152 => Some(Inner::TsmultiRangeArray),
            6153 => Some(Inner::TstzmultiRangeArray),
            6155 => Some(Inner::DatemultiRangeArray),
            6157 => Some(Inner::Int8multiRangeArray),
            _ => None,
        }
    }
    /// Returns the build-time OID of a built-in type variant.
    ///
    /// For [`Inner::Other`] there is no constant OID, so `u32::MAX` is
    /// returned as a sentinel; use [`Inner::oid`] to get the real runtime OID.
    pub const fn const_oid(&self) -> Oid {
        match *self {
            Inner::Bool => 16,
            Inner::Bytea => 17,
            Inner::Char => 18,
            Inner::Name => 19,
            Inner::Int8 => 20,
            Inner::Int2 => 21,
            Inner::Int2Vector => 22,
            Inner::Int4 => 23,
            Inner::Regproc => 24,
            Inner::Text => 25,
            Inner::Oid => 26,
            Inner::Tid => 27,
            Inner::Xid => 28,
            Inner::Cid => 29,
            Inner::OidVector => 30,
            Inner::PgDdlCommand => 32,
            Inner::Json => 114,
            Inner::Xml => 142,
            Inner::XmlArray => 143,
            Inner::PgNodeTree => 194,
            Inner::JsonArray => 199,
            Inner::TableAmHandler => 269,
            Inner::Xid8Array => 271,
            Inner::IndexAmHandler => 325,
            Inner::Point => 600,
            Inner::Lseg => 601,
            Inner::Path => 602,
            Inner::Box => 603,
            Inner::Polygon => 604,
            Inner::Line => 628,
            Inner::LineArray => 629,
            Inner::Cidr => 650,
            Inner::CidrArray => 651,
            Inner::Float4 => 700,
            Inner::Float8 => 701,
            Inner::Unknown => 705,
            Inner::Circle => 718,
            Inner::CircleArray => 719,
            Inner::Macaddr8 => 774,
            Inner::Macaddr8Array => 775,
            Inner::Money => 790,
            Inner::MoneyArray => 791,
            Inner::Macaddr => 829,
            Inner::Inet => 869,
            Inner::BoolArray => 1000,
            Inner::ByteaArray => 1001,
            Inner::CharArray => 1002,
            Inner::NameArray => 1003,
            Inner::Int2Array => 1005,
            Inner::Int2VectorArray => 1006,
            Inner::Int4Array => 1007,
            Inner::RegprocArray => 1008,
            Inner::TextArray => 1009,
            Inner::TidArray => 1010,
            Inner::XidArray => 1011,
            Inner::CidArray => 1012,
            Inner::OidVectorArray => 1013,
            Inner::BpcharArray => 1014,
            Inner::VarcharArray => 1015,
            Inner::Int8Array => 1016,
            Inner::PointArray => 1017,
            Inner::LsegArray => 1018,
            Inner::PathArray => 1019,
            Inner::BoxArray => 1020,
            Inner::Float4Array => 1021,
            Inner::Float8Array => 1022,
            Inner::PolygonArray => 1027,
            Inner::OidArray => 1028,
            Inner::Aclitem => 1033,
            Inner::AclitemArray => 1034,
            Inner::MacaddrArray => 1040,
            Inner::InetArray => 1041,
            Inner::Bpchar => 1042,
            Inner::Varchar => 1043,
            Inner::Date => 1082,
            Inner::Time => 1083,
            Inner::Timestamp => 1114,
            Inner::TimestampArray => 1115,
            Inner::DateArray => 1182,
            Inner::TimeArray => 1183,
            Inner::Timestamptz => 1184,
            Inner::TimestamptzArray => 1185,
            Inner::Interval => 1186,
            Inner::IntervalArray => 1187,
            Inner::NumericArray => 1231,
            Inner::CstringArray => 1263,
            Inner::Timetz => 1266,
            Inner::TimetzArray => 1270,
            Inner::Bit => 1560,
            Inner::BitArray => 1561,
            Inner::Varbit => 1562,
            Inner::VarbitArray => 1563,
            Inner::Numeric => 1700,
            Inner::Refcursor => 1790,
            Inner::RefcursorArray => 2201,
            Inner::Regprocedure => 2202,
            Inner::Regoper => 2203,
            Inner::Regoperator => 2204,
            Inner::Regclass => 2205,
            Inner::Regtype => 2206,
            Inner::RegprocedureArray => 2207,
            Inner::RegoperArray => 2208,
            Inner::RegoperatorArray => 2209,
            Inner::RegclassArray => 2210,
            Inner::RegtypeArray => 2211,
            Inner::Record => 2249,
            Inner::Cstring => 2275,
            Inner::Any => 2276,
            Inner::Anyarray => 2277,
            Inner::Void => 2278,
            Inner::Trigger => 2279,
            Inner::LanguageHandler => 2280,
            Inner::Internal => 2281,
            Inner::Anyelement => 2283,
            Inner::RecordArray => 2287,
            Inner::Anynonarray => 2776,
            Inner::TxidSnapshotArray => 2949,
            Inner::Uuid => 2950,
            Inner::UuidArray => 2951,
            Inner::TxidSnapshot => 2970,
            Inner::FdwHandler => 3115,
            Inner::PgLsn => 3220,
            Inner::PgLsnArray => 3221,
            Inner::TsmHandler => 3310,
            Inner::PgNdistinct => 3361,
            Inner::PgDependencies => 3402,
            Inner::Anyenum => 3500,
            Inner::TsVector => 3614,
            Inner::Tsquery => 3615,
            Inner::GtsVector => 3642,
            Inner::TsVectorArray => 3643,
            Inner::GtsVectorArray => 3644,
            Inner::TsqueryArray => 3645,
            Inner::Regconfig => 3734,
            Inner::RegconfigArray => 3735,
            Inner::Regdictionary => 3769,
            Inner::RegdictionaryArray => 3770,
            Inner::Jsonb => 3802,
            Inner::JsonbArray => 3807,
            Inner::AnyRange => 3831,
            Inner::EventTrigger => 3838,
            Inner::Int4Range => 3904,
            Inner::Int4RangeArray => 3905,
            Inner::NumRange => 3906,
            Inner::NumRangeArray => 3907,
            Inner::TsRange => 3908,
            Inner::TsRangeArray => 3909,
            Inner::TstzRange => 3910,
            Inner::TstzRangeArray => 3911,
            Inner::DateRange => 3912,
            Inner::DateRangeArray => 3913,
            Inner::Int8Range => 3926,
            Inner::Int8RangeArray => 3927,
            Inner::Jsonpath => 4072,
            Inner::JsonpathArray => 4073,
            Inner::Regnamespace => 4089,
            Inner::RegnamespaceArray => 4090,
            Inner::Regrole => 4096,
            Inner::RegroleArray => 4097,
            Inner::Regcollation => 4191,
            Inner::RegcollationArray => 4192,
            Inner::Int4multiRange => 4451,
            Inner::NummultiRange => 4532,
            Inner::TsmultiRange => 4533,
            Inner::TstzmultiRange => 4534,
            Inner::DatemultiRange => 4535,
            Inner::Int8multiRange => 4536,
            Inner::AnymultiRange => 4537,
            Inner::AnycompatiblemultiRange => 4538,
            Inner::PgBrinBloomSummary => 4600,
            Inner::PgBrinMinmaxMultiSummary => 4601,
            Inner::PgMcvList => 5017,
            Inner::PgSnapshot => 5038,
            Inner::PgSnapshotArray => 5039,
            Inner::Xid8 => 5069,
            Inner::Anycompatible => 5077,
            Inner::Anycompatiblearray => 5078,
            Inner::Anycompatiblenonarray => 5079,
            Inner::AnycompatibleRange => 5080,
            Inner::Int4multiRangeArray => 6150,
            Inner::NummultiRangeArray => 6151,
            Inner::TsmultiRangeArray => 6152,
            Inner::TstzmultiRangeArray => 6153,
            Inner::DatemultiRangeArray => 6155,
            Inner::Int8multiRangeArray => 6157,
            // No build-time OID exists for runtime-discovered types.
            Inner::Other(_) => u32::MAX,
        }
    }
    /// Returns the type's OID, consulting the runtime metadata for
    /// [`Inner::Other`] and the generated constant for every other variant.
    pub fn oid(&self) -> Oid {
        match *self {
            Inner::Other(ref u) => u.oid,
            _ => self.const_oid(),
        }
    }
    /// Returns the type's [`Kind`] (simple, array, range, multirange,
    /// pseudo, ...). Built-in variants return references to constants;
    /// [`Inner::Other`] returns its runtime-discovered kind.
    pub fn kind(&self) -> &Kind {
        match *self {
            Inner::Bool => &Kind::Simple,
            Inner::Bytea => &Kind::Simple,
            Inner::Char => &Kind::Simple,
            Inner::Name => &Kind::Simple,
            Inner::Int8 => &Kind::Simple,
            Inner::Int2 => &Kind::Simple,
            Inner::Int2Vector => &Kind::Array(Type(Inner::Int2)),
            Inner::Int4 => &Kind::Simple,
            Inner::Regproc => &Kind::Simple,
            Inner::Text => &Kind::Simple,
            Inner::Oid => &Kind::Simple,
            Inner::Tid => &Kind::Simple,
            Inner::Xid => &Kind::Simple,
            Inner::Cid => &Kind::Simple,
            Inner::OidVector => &Kind::Array(Type(Inner::Oid)),
            Inner::PgDdlCommand => &Kind::Pseudo,
            Inner::Json => &Kind::Simple,
            Inner::Xml => &Kind::Simple,
            Inner::XmlArray => &Kind::Array(Type(Inner::Xml)),
            Inner::PgNodeTree => &Kind::Simple,
            Inner::JsonArray => &Kind::Array(Type(Inner::Json)),
            Inner::TableAmHandler => &Kind::Pseudo,
            Inner::Xid8Array => &Kind::Array(Type(Inner::Xid8)),
            Inner::IndexAmHandler => &Kind::Pseudo,
            Inner::Point => &Kind::Simple,
            Inner::Lseg => &Kind::Simple,
            Inner::Path => &Kind::Simple,
            Inner::Box => &Kind::Simple,
            Inner::Polygon => &Kind::Simple,
            Inner::Line => &Kind::Simple,
            Inner::LineArray => &Kind::Array(Type(Inner::Line)),
            Inner::Cidr => &Kind::Simple,
            Inner::CidrArray => &Kind::Array(Type(Inner::Cidr)),
            Inner::Float4 => &Kind::Simple,
            Inner::Float8 => &Kind::Simple,
            Inner::Unknown => &Kind::Simple,
            Inner::Circle => &Kind::Simple,
            Inner::CircleArray => &Kind::Array(Type(Inner::Circle)),
            Inner::Macaddr8 => &Kind::Simple,
            Inner::Macaddr8Array => &Kind::Array(Type(Inner::Macaddr8)),
            Inner::Money => &Kind::Simple,
            Inner::MoneyArray => &Kind::Array(Type(Inner::Money)),
            Inner::Macaddr => &Kind::Simple,
            Inner::Inet => &Kind::Simple,
            Inner::BoolArray => &Kind::Array(Type(Inner::Bool)),
            Inner::ByteaArray => &Kind::Array(Type(Inner::Bytea)),
            Inner::CharArray => &Kind::Array(Type(Inner::Char)),
            Inner::NameArray => &Kind::Array(Type(Inner::Name)),
            Inner::Int2Array => &Kind::Array(Type(Inner::Int2)),
            Inner::Int2VectorArray => &Kind::Array(Type(Inner::Int2Vector)),
            Inner::Int4Array => &Kind::Array(Type(Inner::Int4)),
            Inner::RegprocArray => &Kind::Array(Type(Inner::Regproc)),
            Inner::TextArray => &Kind::Array(Type(Inner::Text)),
            Inner::TidArray => &Kind::Array(Type(Inner::Tid)),
            Inner::XidArray => &Kind::Array(Type(Inner::Xid)),
            Inner::CidArray => &Kind::Array(Type(Inner::Cid)),
            Inner::OidVectorArray => &Kind::Array(Type(Inner::OidVector)),
            Inner::BpcharArray => &Kind::Array(Type(Inner::Bpchar)),
            Inner::VarcharArray => &Kind::Array(Type(Inner::Varchar)),
            Inner::Int8Array => &Kind::Array(Type(Inner::Int8)),
            Inner::PointArray => &Kind::Array(Type(Inner::Point)),
            Inner::LsegArray => &Kind::Array(Type(Inner::Lseg)),
            Inner::PathArray => &Kind::Array(Type(Inner::Path)),
            Inner::BoxArray => &Kind::Array(Type(Inner::Box)),
            Inner::Float4Array => &Kind::Array(Type(Inner::Float4)),
            Inner::Float8Array => &Kind::Array(Type(Inner::Float8)),
            Inner::PolygonArray => &Kind::Array(Type(Inner::Polygon)),
            Inner::OidArray => &Kind::Array(Type(Inner::Oid)),
            Inner::Aclitem => &Kind::Simple,
            Inner::AclitemArray => &Kind::Array(Type(Inner::Aclitem)),
            Inner::MacaddrArray => &Kind::Array(Type(Inner::Macaddr)),
            Inner::InetArray => &Kind::Array(Type(Inner::Inet)),
            Inner::Bpchar => &Kind::Simple,
            Inner::Varchar => &Kind::Simple,
            Inner::Date => &Kind::Simple,
            Inner::Time => &Kind::Simple,
            Inner::Timestamp => &Kind::Simple,
            Inner::TimestampArray => &Kind::Array(Type(Inner::Timestamp)),
            Inner::DateArray => &Kind::Array(Type(Inner::Date)),
            Inner::TimeArray => &Kind::Array(Type(Inner::Time)),
            Inner::Timestamptz => &Kind::Simple,
            Inner::TimestamptzArray => &Kind::Array(Type(Inner::Timestamptz)),
            Inner::Interval => &Kind::Simple,
            Inner::IntervalArray => &Kind::Array(Type(Inner::Interval)),
            Inner::NumericArray => &Kind::Array(Type(Inner::Numeric)),
            Inner::CstringArray => &Kind::Array(Type(Inner::Cstring)),
            Inner::Timetz => &Kind::Simple,
            Inner::TimetzArray => &Kind::Array(Type(Inner::Timetz)),
            Inner::Bit => &Kind::Simple,
            Inner::BitArray => &Kind::Array(Type(Inner::Bit)),
            Inner::Varbit => &Kind::Simple,
            Inner::VarbitArray => &Kind::Array(Type(Inner::Varbit)),
            Inner::Numeric => &Kind::Simple,
            Inner::Refcursor => &Kind::Simple,
            Inner::RefcursorArray => &Kind::Array(Type(Inner::Refcursor)),
            Inner::Regprocedure => &Kind::Simple,
            Inner::Regoper => &Kind::Simple,
            Inner::Regoperator => &Kind::Simple,
            Inner::Regclass => &Kind::Simple,
            Inner::Regtype => &Kind::Simple,
            Inner::RegprocedureArray => &Kind::Array(Type(Inner::Regprocedure)),
            Inner::RegoperArray => &Kind::Array(Type(Inner::Regoper)),
            Inner::RegoperatorArray => &Kind::Array(Type(Inner::Regoperator)),
            Inner::RegclassArray => &Kind::Array(Type(Inner::Regclass)),
            Inner::RegtypeArray => &Kind::Array(Type(Inner::Regtype)),
            Inner::Record => &Kind::Pseudo,
            Inner::Cstring => &Kind::Pseudo,
            Inner::Any => &Kind::Pseudo,
            Inner::Anyarray => &Kind::Pseudo,
            Inner::Void => &Kind::Pseudo,
            Inner::Trigger => &Kind::Pseudo,
            Inner::LanguageHandler => &Kind::Pseudo,
            Inner::Internal => &Kind::Pseudo,
            Inner::Anyelement => &Kind::Pseudo,
            Inner::RecordArray => &Kind::Pseudo,
            Inner::Anynonarray => &Kind::Pseudo,
            Inner::TxidSnapshotArray => &Kind::Array(Type(Inner::TxidSnapshot)),
            Inner::Uuid => &Kind::Simple,
            Inner::UuidArray => &Kind::Array(Type(Inner::Uuid)),
            Inner::TxidSnapshot => &Kind::Simple,
            Inner::FdwHandler => &Kind::Pseudo,
            Inner::PgLsn => &Kind::Simple,
            Inner::PgLsnArray => &Kind::Array(Type(Inner::PgLsn)),
            Inner::TsmHandler => &Kind::Pseudo,
            Inner::PgNdistinct => &Kind::Simple,
            Inner::PgDependencies => &Kind::Simple,
            Inner::Anyenum => &Kind::Pseudo,
            Inner::TsVector => &Kind::Simple,
            Inner::Tsquery => &Kind::Simple,
            Inner::GtsVector => &Kind::Simple,
            Inner::TsVectorArray => &Kind::Array(Type(Inner::TsVector)),
            Inner::GtsVectorArray => &Kind::Array(Type(Inner::GtsVector)),
            Inner::TsqueryArray => &Kind::Array(Type(Inner::Tsquery)),
            Inner::Regconfig => &Kind::Simple,
            Inner::RegconfigArray => &Kind::Array(Type(Inner::Regconfig)),
            Inner::Regdictionary => &Kind::Simple,
            Inner::RegdictionaryArray => &Kind::Array(Type(Inner::Regdictionary)),
            Inner::Jsonb => &Kind::Simple,
            Inner::JsonbArray => &Kind::Array(Type(Inner::Jsonb)),
            Inner::AnyRange => &Kind::Pseudo,
            Inner::EventTrigger => &Kind::Pseudo,
            Inner::Int4Range => &const { Kind::Range(Inner::Int4.const_oid()) },
            Inner::Int4RangeArray => &Kind::Array(Type(Inner::Int4Range)),
            Inner::NumRange => &const { Kind::Range(Inner::Numeric.const_oid()) },
            Inner::NumRangeArray => &Kind::Array(Type(Inner::NumRange)),
            Inner::TsRange => &const { Kind::Range(Inner::Timestamp.const_oid()) },
            Inner::TsRangeArray => &Kind::Array(Type(Inner::TsRange)),
            Inner::TstzRange => &const { Kind::Range(Inner::Timestamptz.const_oid()) },
            Inner::TstzRangeArray => &Kind::Array(Type(Inner::TstzRange)),
            Inner::DateRange => &const { Kind::Range(Inner::Date.const_oid()) },
            Inner::DateRangeArray => &Kind::Array(Type(Inner::DateRange)),
            Inner::Int8Range => &const { Kind::Range(Inner::Int8.const_oid()) },
            Inner::Int8RangeArray => &Kind::Array(Type(Inner::Int8Range)),
            Inner::Jsonpath => &Kind::Simple,
            Inner::JsonpathArray => &Kind::Array(Type(Inner::Jsonpath)),
            Inner::Regnamespace => &Kind::Simple,
            Inner::RegnamespaceArray => &Kind::Array(Type(Inner::Regnamespace)),
            Inner::Regrole => &Kind::Simple,
            Inner::RegroleArray => &Kind::Array(Type(Inner::Regrole)),
            Inner::Regcollation => &Kind::Simple,
            Inner::RegcollationArray => &Kind::Array(Type(Inner::Regcollation)),
            Inner::Int4multiRange => &Kind::Multirange(Type(Inner::Int4)),
            Inner::NummultiRange => &Kind::Multirange(Type(Inner::Numeric)),
            Inner::TsmultiRange => &Kind::Multirange(Type(Inner::Timestamp)),
            Inner::TstzmultiRange => &Kind::Multirange(Type(Inner::Timestamptz)),
            Inner::DatemultiRange => &Kind::Multirange(Type(Inner::Date)),
            Inner::Int8multiRange => &Kind::Multirange(Type(Inner::Int8)),
            Inner::AnymultiRange => &Kind::Pseudo,
            Inner::AnycompatiblemultiRange => &Kind::Pseudo,
            Inner::PgBrinBloomSummary => &Kind::Simple,
            Inner::PgBrinMinmaxMultiSummary => &Kind::Simple,
            Inner::PgMcvList => &Kind::Simple,
            Inner::PgSnapshot => &Kind::Simple,
            Inner::PgSnapshotArray => &Kind::Array(Type(Inner::PgSnapshot)),
            Inner::Xid8 => &Kind::Simple,
            Inner::Anycompatible => &Kind::Pseudo,
            Inner::Anycompatiblearray => &Kind::Pseudo,
            Inner::Anycompatiblenonarray => &Kind::Pseudo,
            Inner::AnycompatibleRange => &Kind::Pseudo,
            Inner::Int4multiRangeArray => &Kind::Array(Type(Inner::Int4multiRange)),
            Inner::NummultiRangeArray => &Kind::Array(Type(Inner::NummultiRange)),
            Inner::TsmultiRangeArray => &Kind::Array(Type(Inner::TsmultiRange)),
            Inner::TstzmultiRangeArray => &Kind::Array(Type(Inner::TstzmultiRange)),
            Inner::DatemultiRangeArray => &Kind::Array(Type(Inner::DatemultiRange)),
            Inner::Int8multiRangeArray => &Kind::Array(Type(Inner::Int8multiRange)),
            Inner::Other(ref u) => &u.kind,
        }
    }
pub fn name(&self) -> &str {
match *self {
Inner::Bool => "bool",
Inner::Bytea => "bytea",
Inner::Char => "char",
Inner::Name => "name",
Inner::Int8 => "int8",
Inner::Int2 => "int2",
Inner::Int2Vector => "int2vector",
Inner::Int4 => "int4",
Inner::Regproc => "regproc",
Inner::Text => "text",
Inner::Oid => "oid",
Inner::Tid => "tid",
Inner::Xid => "xid",
Inner::Cid => "cid",
Inner::OidVector => "oidvector",
Inner::PgDdlCommand => "pg_ddl_command",
Inner::Json => "json",
Inner::Xml => "xml",
Inner::XmlArray => "_xml",
Inner::PgNodeTree => "pg_node_tree",
Inner::JsonArray => "_json",
Inner::TableAmHandler => "table_am_handler",
Inner::Xid8Array => "_xid8",
Inner::IndexAmHandler => "index_am_handler",
Inner::Point => "point",
Inner::Lseg => "lseg",
Inner::Path => "path",
Inner::Box => "box",
Inner::Polygon => "polygon",
Inner::Line => "line",
Inner::LineArray => "_line",
Inner::Cidr => "cidr",
Inner::CidrArray => "_cidr",
Inner::Float4 => "float4",
Inner::Float8 => "float8",
Inner::Unknown => "unknown",
Inner::Circle => "circle",
Inner::CircleArray => "_circle",
Inner::Macaddr8 => "macaddr8",
Inner::Macaddr8Array => "_macaddr8",
Inner::Money => "money",
Inner::MoneyArray => "_money",
Inner::Macaddr => "macaddr",
Inner::Inet => "inet",
Inner::BoolArray => "_bool",
Inner::ByteaArray => "_bytea",
Inner::CharArray => "_char",
Inner::NameArray => "_name",
Inner::Int2Array => "_int2",
Inner::Int2VectorArray => "_int2vector",
Inner::Int4Array => "_int4",
Inner::RegprocArray => "_regproc",
Inner::TextArray => "_text",
Inner::TidArray => "_tid",
Inner::XidArray => "_xid",
Inner::CidArray => "_cid",
Inner::OidVectorArray => "_oidvector",
Inner::BpcharArray => "_bpchar",
Inner::VarcharArray => "_varchar",
Inner::Int8Array => "_int8",
Inner::PointArray => "_point",
Inner::LsegArray => "_lseg",
Inner::PathArray => "_path",
Inner::BoxArray => "_box",
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | true |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/postgres-types2/src/private.rs | libs/proxy/postgres-types2/src/private.rs | use std::error::Error;
pub use bytes::BytesMut;
use crate::{FromSql, Type};
/// Reads a big-endian `i32` from the front of `buf`, advancing `buf` past
/// the four consumed bytes.
///
/// Returns an error (without consuming anything) when fewer than four bytes
/// remain.
pub fn read_be_i32(buf: &mut &[u8]) -> Result<i32, Box<dyn Error + Sync + Send>> {
    if buf.len() < 4 {
        return Err("invalid buffer size".into());
    }
    let (head, tail) = buf.split_at(4);
    let value = i32::from_be_bytes(head.try_into().expect("split_at(4) yields 4 bytes"));
    *buf = tail;
    Ok(value)
}
/// Reads one length-prefixed value from `buf` and deserializes it as `T`,
/// advancing `buf` past the consumed bytes.
///
/// A negative length prefix on the wire encodes SQL NULL, which is forwarded
/// to `T::from_sql_nullable` as `None`; otherwise exactly `len` bytes are
/// sliced off and passed as `Some(..)`.
pub fn read_value<'a, T>(
    type_: &Type,
    buf: &mut &'a [u8],
) -> Result<T, Box<dyn Error + Sync + Send>>
where
    T: FromSql<'a>,
{
    let len = read_be_i32(buf)?;
    let value = if len < 0 {
        None
    } else {
        let len = len as usize;
        if len > buf.len() {
            return Err("invalid buffer size".into());
        }
        let (value_bytes, rest) = buf.split_at(len);
        *buf = rest;
        Some(value_bytes)
    };
    T::from_sql_nullable(type_, value)
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/config.rs | libs/proxy/tokio-postgres2/src/config.rs | //! Connection configuration.
use std::net::IpAddr;
use std::time::Duration;
use std::{fmt, str};
pub use postgres_protocol2::authentication::sasl::ScramKeys;
use postgres_protocol2::message::frontend::StartupMessageParams;
use serde::{Deserialize, Serialize};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::TcpStream;
use crate::connect::connect;
use crate::connect_raw::{self, StartupStream};
use crate::connect_tls::connect_tls;
use crate::tls::{MakeTlsConnect, TlsConnect, TlsStream};
use crate::{Client, Connection, Error};
/// TLS configuration.
///
/// Selected via [`Config::ssl_mode`]; the default is [`SslMode::Prefer`].
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum SslMode {
    /// Do not use TLS.
    Disable,
    /// Attempt to connect with TLS but allow sessions without.
    Prefer,
    /// Require the use of TLS.
    Require,
}
/// Channel binding configuration.
///
/// Selected via [`Config::channel_binding`]; the default is
/// [`ChannelBinding::Prefer`].
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[non_exhaustive]
pub enum ChannelBinding {
    /// Do not use channel binding.
    Disable,
    /// Attempt to use channel binding but allow sessions without.
    Prefer,
    /// Require the use of channel binding.
    Require,
}
/// Replication mode configuration.
// NOTE(review): not referenced by `Config` in this part of the module —
// presumably consumed elsewhere when opening replication connections; confirm.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[non_exhaustive]
pub enum ReplicationMode {
    /// Physical replication.
    Physical,
    /// Logical replication.
    Logical,
}
/// A host specification.
///
/// Only TCP hostnames are currently representable.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum Host {
    /// A TCP hostname.
    Tcp(String),
}
/// Precomputed keys which may override password during auth.
///
/// Set via [`Config::auth_keys`]; when present it takes precedence over the
/// configured password.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AuthKeys {
    /// A `ClientKey` & `ServerKey` pair for `SCRAM-SHA-256`.
    ScramSha256(ScramKeys<32>),
}
/// Connection configuration.
#[derive(Clone, PartialEq, Eq)]
pub struct Config {
    // Explicit IP address for `host`, set via `set_host_addr`.
    pub(crate) host_addr: Option<IpAddr>,
    // Host and port are fixed at construction time (`Config::new`).
    pub(crate) host: Host,
    pub(crate) port: u16,
    // Password bytes, if configured; redacted from the `Debug` output below.
    pub(crate) password: Option<Vec<u8>>,
    // Boxed because `ScramKeys<32>` is comparatively large.
    pub(crate) auth_keys: Option<Box<AuthKeys>>,
    pub(crate) ssl_mode: SslMode,
    pub(crate) connect_timeout: Option<Duration>,
    pub(crate) channel_binding: ChannelBinding,
    // Parameters for the startup message, including `user` and `database`.
    pub(crate) server_params: StartupMessageParams,
    // Tracks whether "database" was ever provided (`dbname` / `set_param`).
    database: bool,
    // Tracks whether "user" was ever provided (`user` / `set_param`).
    username: bool,
}
impl Config {
    /// Creates a new configuration for the given host and port, with all
    /// other settings at their defaults (TLS and channel binding `Prefer`,
    /// no password, no timeout).
    pub fn new(host: String, port: u16) -> Config {
        Config {
            host_addr: None,
            host: Host::Tcp(host),
            port,
            password: None,
            auth_keys: None,
            ssl_mode: SslMode::Prefer,
            connect_timeout: None,
            channel_binding: ChannelBinding::Prefer,
            server_params: StartupMessageParams::default(),
            database: false,
            username: false,
        }
    }
    /// Sets the user to authenticate with.
    ///
    /// Required.
    pub fn user(&mut self, user: &str) -> &mut Config {
        self.set_param("user", user)
    }
    /// Returns `true` if a user has been configured with the `user` method
    /// (or via `set_param("user", ...)`).
    pub fn user_is_set(&self) -> bool {
        self.username
    }
    /// Sets the password to authenticate with.
    pub fn password<T>(&mut self, password: T) -> &mut Config
    where
        T: AsRef<[u8]>,
    {
        self.password = Some(password.as_ref().to_vec());
        self
    }
    /// Gets the password to authenticate with, if one has been configured with
    /// the `password` method.
    pub fn get_password(&self) -> Option<&[u8]> {
        self.password.as_deref()
    }
    /// Sets precomputed protocol-specific keys to authenticate with.
    /// When set, this option will override `password`.
    /// See [`AuthKeys`] for more information.
    pub fn auth_keys(&mut self, keys: AuthKeys) -> &mut Config {
        self.auth_keys = Some(Box::new(keys));
        self
    }
    /// Gets precomputed protocol-specific keys to authenticate with, if they
    /// have been configured with the `auth_keys` method.
    pub fn get_auth_keys(&self) -> Option<AuthKeys> {
        self.auth_keys.as_deref().copied()
    }
    /// Sets the name of the database to connect to.
    ///
    /// Defaults to the user.
    pub fn dbname(&mut self, dbname: &str) -> &mut Config {
        self.set_param("database", dbname)
    }
    /// Returns `true` if a database name has been configured with the
    /// `dbname` method (or via `set_param("database", ...)`).
    pub fn db_is_set(&self) -> bool {
        self.database
    }
    /// Sets an arbitrary parameter to send in the startup message, recording
    /// whether the special `user` / `database` parameters have been provided
    /// (see [`Config::user_is_set`] / [`Config::db_is_set`]).
    pub fn set_param(&mut self, name: &str, value: &str) -> &mut Config {
        if name == "database" {
            self.database = true;
        } else if name == "user" {
            self.username = true;
        }
        self.server_params.insert(name, value);
        self
    }
    /// Sets an explicit IP address for the host.
    // NOTE(review): presumably used instead of resolving `host` via DNS —
    // confirm against `connect::connect`.
    pub fn set_host_addr(&mut self, addr: IpAddr) -> &mut Config {
        self.host_addr = Some(addr);
        self
    }
    /// Gets the explicit host IP address, if one has been configured with
    /// the `set_host_addr` method.
    pub fn get_host_addr(&self) -> Option<IpAddr> {
        self.host_addr
    }
    /// Sets the SSL configuration.
    ///
    /// Defaults to `prefer`.
    pub fn ssl_mode(&mut self, ssl_mode: SslMode) -> &mut Config {
        self.ssl_mode = ssl_mode;
        self
    }
    /// Gets the SSL configuration.
    pub fn get_ssl_mode(&self) -> SslMode {
        self.ssl_mode
    }
    /// Gets the host to connect to (fixed at construction via [`Config::new`]).
    pub fn get_host(&self) -> &Host {
        &self.host
    }
    /// Gets the port to connect to (fixed at construction via [`Config::new`]).
    pub fn get_port(&self) -> u16 {
        self.port
    }
    /// Sets the timeout applied to socket-level connection attempts.
    ///
    /// Note that hostnames can resolve to multiple IP addresses, and this timeout will apply to each address of each
    /// host separately. Defaults to no limit.
    pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Config {
        self.connect_timeout = Some(connect_timeout);
        self
    }
    /// Gets the connection timeout, if one has been set with the
    /// `connect_timeout` method.
    pub fn get_connect_timeout(&self) -> Option<&Duration> {
        self.connect_timeout.as_ref()
    }
    /// Sets the channel binding behavior.
    ///
    /// Defaults to `prefer`.
    pub fn channel_binding(&mut self, channel_binding: ChannelBinding) -> &mut Config {
        self.channel_binding = channel_binding;
        self
    }
    /// Gets the channel binding behavior.
    pub fn get_channel_binding(&self) -> ChannelBinding {
        self.channel_binding
    }
    /// Opens a connection to a PostgreSQL database.
    ///
    /// Requires the `runtime` Cargo feature (enabled by default).
    pub async fn connect<T>(
        &self,
        tls: &T,
    ) -> Result<(Client, Connection<TcpStream, T::Stream>), Error>
    where
        T: MakeTlsConnect<TcpStream>,
    {
        connect(tls, self).await
    }
    /// Performs the TLS negotiation (per `ssl_mode`) on an already-open
    /// stream, then authenticates, returning the startup stream.
    pub async fn tls_and_authenticate<S, T>(
        &self,
        stream: S,
        tls: T,
    ) -> Result<StartupStream<S, T::Stream>, Error>
    where
        S: AsyncRead + AsyncWrite + Unpin,
        T: TlsConnect<S>,
    {
        let stream = connect_tls(stream, self.ssl_mode, tls).await?;
        let mut stream = StartupStream::new(stream);
        connect_raw::authenticate(&mut stream, self).await?;
        Ok(stream)
    }
    /// Runs the authentication exchange on an existing startup stream.
    pub fn authenticate<S, T>(
        &self,
        stream: &mut StartupStream<S, T>,
    ) -> impl Future<Output = Result<(), Error>>
    where
        S: AsyncRead + AsyncWrite + Unpin,
        T: TlsStream + Unpin,
    {
        connect_raw::authenticate(stream, self)
    }
}
// Omit password from debug output
impl fmt::Debug for Config {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
struct Redaction {}
impl fmt::Debug for Redaction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "_")
}
}
f.debug_struct("Config")
.field("password", &self.password.as_ref().map(|_| Redaction {}))
.field("ssl_mode", &self.ssl_mode)
.field("host", &self.host)
.field("port", &self.port)
.field("connect_timeout", &self.connect_timeout)
.field("channel_binding", &self.channel_binding)
.field("server_params", &self.server_params)
.finish()
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/tls.rs | libs/proxy/tokio-postgres2/src/tls.rs | //! TLS support.
use std::error::Error;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, io};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
pub(crate) mod private {
pub struct ForcePrivateApi;
}
/// Channel binding information returned from a TLS handshake.
pub struct ChannelBinding {
pub(crate) tls_server_end_point: Option<Vec<u8>>,
}
impl ChannelBinding {
/// Creates a `ChannelBinding` containing no information.
pub fn none() -> ChannelBinding {
ChannelBinding {
tls_server_end_point: None,
}
}
/// Creates a `ChannelBinding` containing `tls-server-end-point` channel binding information.
pub fn tls_server_end_point(tls_server_end_point: Vec<u8>) -> ChannelBinding {
ChannelBinding {
tls_server_end_point: Some(tls_server_end_point),
}
}
}
/// A constructor of `TlsConnect`ors.
///
/// Requires the `runtime` Cargo feature (enabled by default).
pub trait MakeTlsConnect<S> {
/// The stream type created by the `TlsConnect` implementation.
type Stream: TlsStream + Unpin;
/// The `TlsConnect` implementation created by this type.
type TlsConnect: TlsConnect<S, Stream = Self::Stream>;
/// The error type returned by the `TlsConnect` implementation.
type Error: Into<Box<dyn Error + Sync + Send>>;
/// Creates a new `TlsConnect`or.
///
/// The domain name is provided for certificate verification and SNI.
fn make_tls_connect(&self, domain: &str) -> Result<Self::TlsConnect, Self::Error>;
}
/// An asynchronous function wrapping a stream in a TLS session.
pub trait TlsConnect<S> {
/// The stream returned by the future.
type Stream: TlsStream + Unpin;
/// The error returned by the future.
type Error: Into<Box<dyn Error + Sync + Send>>;
/// The future returned by the connector.
type Future: Future<Output = Result<Self::Stream, Self::Error>>;
/// Returns a future performing a TLS handshake over the stream.
fn connect(self, stream: S) -> Self::Future;
#[doc(hidden)]
fn can_connect(&self, _: private::ForcePrivateApi) -> bool {
true
}
}
/// A TLS-wrapped connection to a PostgreSQL database.
pub trait TlsStream: AsyncRead + AsyncWrite {
/// Returns channel binding information for the session.
fn channel_binding(&self) -> ChannelBinding;
}
/// A `MakeTlsConnect` and `TlsConnect` implementation which simply returns an error.
///
/// This can be used when `sslmode` is `none` or `prefer`.
#[derive(Debug, Copy, Clone)]
pub struct NoTls;
impl<S> MakeTlsConnect<S> for NoTls {
type Stream = NoTlsStream;
type TlsConnect = NoTls;
type Error = NoTlsError;
fn make_tls_connect(&self, _: &str) -> Result<NoTls, NoTlsError> {
Ok(NoTls)
}
}
impl<S> TlsConnect<S> for NoTls {
type Stream = NoTlsStream;
type Error = NoTlsError;
type Future = NoTlsFuture;
fn connect(self, _: S) -> NoTlsFuture {
NoTlsFuture(())
}
fn can_connect(&self, _: private::ForcePrivateApi) -> bool {
false
}
}
/// The future returned by `NoTls`.
pub struct NoTlsFuture(());
impl Future for NoTlsFuture {
type Output = Result<NoTlsStream, NoTlsError>;
fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(Err(NoTlsError(())))
}
}
/// The TLS "stream" type produced by the `NoTls` connector.
///
/// Since `NoTls` doesn't support TLS, this type is uninhabited.
pub enum NoTlsStream {}
impl AsyncRead for NoTlsStream {
fn poll_read(
self: Pin<&mut Self>,
_: &mut Context<'_>,
_: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
match *self {}
}
}
impl AsyncWrite for NoTlsStream {
fn poll_write(self: Pin<&mut Self>, _: &mut Context<'_>, _: &[u8]) -> Poll<io::Result<usize>> {
match *self {}
}
fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
match *self {}
}
fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
match *self {}
}
}
impl TlsStream for NoTlsStream {
fn channel_binding(&self) -> ChannelBinding {
match *self {}
}
}
/// The error returned by `NoTls`.
#[derive(Debug)]
pub struct NoTlsError(());
impl fmt::Display for NoTlsError {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.write_str("no TLS implementation configured")
}
}
impl Error for NoTlsError {}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/cancel_query.rs | libs/proxy/tokio-postgres2/src/cancel_query.rs | use tokio::net::TcpStream;
use crate::client::SocketConfig;
use crate::config::{Host, SslMode};
use crate::tls::MakeTlsConnect;
use crate::{Error, cancel_query_raw, connect_socket};
pub(crate) async fn cancel_query<T>(
config: SocketConfig,
ssl_mode: SslMode,
tls: T,
process_id: i32,
secret_key: i32,
) -> Result<(), Error>
where
T: MakeTlsConnect<TcpStream>,
{
let hostname = match &config.host {
Host::Tcp(host) => &**host,
};
let tls = tls
.make_tls_connect(hostname)
.map_err(|e| Error::tls(e.into()))?;
let socket = connect_socket::connect_socket(
config.host_addr,
&config.host,
config.port,
config.connect_timeout,
)
.await?;
cancel_query_raw::cancel_query_raw(socket, ssl_mode, tls, process_id, secret_key).await
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/connect_tls.rs | libs/proxy/tokio-postgres2/src/connect_tls.rs | use bytes::BytesMut;
use postgres_protocol2::message::frontend;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use crate::Error;
use crate::config::SslMode;
use crate::maybe_tls_stream::MaybeTlsStream;
use crate::tls::TlsConnect;
use crate::tls::private::ForcePrivateApi;
pub async fn connect_tls<S, T>(
mut stream: S,
mode: SslMode,
tls: T,
) -> Result<MaybeTlsStream<S, T::Stream>, Error>
where
S: AsyncRead + AsyncWrite + Unpin,
T: TlsConnect<S>,
{
match mode {
SslMode::Disable => return Ok(MaybeTlsStream::Raw(stream)),
SslMode::Prefer if !tls.can_connect(ForcePrivateApi) => {
return Ok(MaybeTlsStream::Raw(stream));
}
SslMode::Prefer | SslMode::Require => {}
}
let mut buf = BytesMut::new();
frontend::ssl_request(&mut buf);
stream.write_all(&buf).await.map_err(Error::io)?;
let mut buf = [0];
stream.read_exact(&mut buf).await.map_err(Error::io)?;
if buf[0] != b'S' {
if SslMode::Require == mode {
return Err(Error::tls("server does not support TLS".into()));
} else {
return Ok(MaybeTlsStream::Raw(stream));
}
}
let stream = tls
.connect(stream)
.await
.map_err(|e| Error::tls(e.into()))?;
Ok(MaybeTlsStream::Tls(stream))
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/lib.rs | libs/proxy/tokio-postgres2/src/lib.rs | //! An asynchronous, pipelined, PostgreSQL client.
#![warn(clippy::all)]
use postgres_protocol2::message::backend::ReadyForQueryBody;
pub use crate::cancel_token::{CancelToken, RawCancelToken};
pub use crate::client::{Client, SocketConfig};
pub use crate::config::Config;
pub use crate::connection::Connection;
pub use crate::error::Error;
pub use crate::generic_client::GenericClient;
pub use crate::query::RowStream;
pub use crate::row::{Row, SimpleQueryRow};
pub use crate::simple_query::SimpleQueryStream;
pub use crate::statement::{Column, Statement};
pub use crate::tls::NoTls;
pub use crate::transaction::Transaction;
pub use crate::transaction_builder::{IsolationLevel, TransactionBuilder};
/// After executing a query, the connection will be in one of these states
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum ReadyForQueryStatus {
/// Connection state is unknown
Unknown,
/// Connection is idle (no transactions)
Idle = b'I',
/// Connection is in a transaction block
Transaction = b'T',
/// Connection is in a failed transaction block
FailedTransaction = b'E',
}
impl From<ReadyForQueryBody> for ReadyForQueryStatus {
fn from(value: ReadyForQueryBody) -> Self {
match value.status() {
b'I' => Self::Idle,
b'T' => Self::Transaction,
b'E' => Self::FailedTransaction,
_ => Self::Unknown,
}
}
}
mod cancel_query;
mod cancel_query_raw;
mod cancel_token;
mod client;
mod codec;
pub mod config;
pub mod connect;
pub mod connect_raw;
mod connect_socket;
mod connect_tls;
mod connection;
pub mod error;
mod generic_client;
pub mod maybe_tls_stream;
mod prepare;
mod query;
pub mod row;
mod simple_query;
mod statement;
pub mod tls;
mod transaction;
mod transaction_builder;
pub mod types;
/// An asynchronous notification.
#[derive(Clone, Debug)]
pub struct Notification {
process_id: i32,
channel: String,
payload: String,
}
impl Notification {
/// The process ID of the notifying backend process.
pub fn process_id(&self) -> i32 {
self.process_id
}
/// The name of the channel that the notify has been raised on.
pub fn channel(&self) -> &str {
&self.channel
}
/// The "payload" string passed from the notifying process.
pub fn payload(&self) -> &str {
&self.payload
}
}
/// Message returned by the `SimpleQuery` stream.
#[derive(Debug)]
#[non_exhaustive]
pub enum SimpleQueryMessage {
/// A row of data.
Row(SimpleQueryRow),
/// A statement in the query has completed.
///
/// The number of rows modified or selected is returned.
CommandComplete(u64),
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/generic_client.rs | libs/proxy/tokio-postgres2/src/generic_client.rs | #![allow(async_fn_in_trait)]
use crate::query::RowStream;
use crate::{Client, Error, Transaction};
mod private {
pub trait Sealed {}
}
/// A trait allowing abstraction over connections and transactions.
///
/// This trait is "sealed", and cannot be implemented outside of this crate.
pub trait GenericClient: private::Sealed {
/// Like `Client::query_raw_txt`.
async fn query_raw_txt<S, I>(
&mut self,
statement: &str,
params: I,
) -> Result<RowStream<'_>, Error>
where
S: AsRef<str> + Sync + Send,
I: IntoIterator<Item = Option<S>> + Sync + Send,
I::IntoIter: ExactSizeIterator + Sync + Send;
}
impl private::Sealed for Client {}
impl GenericClient for Client {
async fn query_raw_txt<S, I>(
&mut self,
statement: &str,
params: I,
) -> Result<RowStream<'_>, Error>
where
S: AsRef<str> + Sync + Send,
I: IntoIterator<Item = Option<S>> + Sync + Send,
I::IntoIter: ExactSizeIterator + Sync + Send,
{
self.query_raw_txt(statement, params).await
}
}
impl private::Sealed for Transaction<'_> {}
impl GenericClient for Transaction<'_> {
async fn query_raw_txt<S, I>(
&mut self,
statement: &str,
params: I,
) -> Result<RowStream<'_>, Error>
where
S: AsRef<str> + Sync + Send,
I: IntoIterator<Item = Option<S>> + Sync + Send,
I::IntoIter: ExactSizeIterator + Sync + Send,
{
self.query_raw_txt(statement, params).await
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/simple_query.rs | libs/proxy/tokio-postgres2/src/simple_query.rs | use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use fallible_iterator::FallibleIterator;
use futures_util::{Stream, ready};
use pin_project_lite::pin_project;
use postgres_protocol2::message::backend::Message;
use tracing::debug;
use crate::client::{InnerClient, Responses};
use crate::{Error, ReadyForQueryStatus, SimpleQueryMessage, SimpleQueryRow};
/// Information about a column of a single query row.
#[derive(Debug)]
pub struct SimpleColumn {
name: String,
}
impl SimpleColumn {
pub(crate) fn new(name: String) -> SimpleColumn {
SimpleColumn { name }
}
/// Returns the name of the column.
pub fn name(&self) -> &str {
&self.name
}
}
pub async fn simple_query<'a>(
client: &'a mut InnerClient,
query: &str,
) -> Result<SimpleQueryStream<'a>, Error> {
debug!("executing simple query: {}", query);
let responses = client.send_simple_query(query)?;
Ok(SimpleQueryStream {
responses,
columns: None,
status: ReadyForQueryStatus::Unknown,
})
}
pub async fn batch_execute(
client: &mut InnerClient,
query: &str,
) -> Result<ReadyForQueryStatus, Error> {
debug!("executing statement batch: {}", query);
let responses = client.send_simple_query(query)?;
loop {
match responses.next().await? {
Message::ReadyForQuery(status) => return Ok(status.into()),
Message::CommandComplete(_)
| Message::EmptyQueryResponse
| Message::RowDescription(_)
| Message::DataRow(_) => {}
_ => return Err(Error::unexpected_message()),
}
}
}
pin_project! {
/// A stream of simple query results.
pub struct SimpleQueryStream<'a> {
responses: &'a mut Responses,
columns: Option<Arc<[SimpleColumn]>>,
status: ReadyForQueryStatus,
}
}
impl SimpleQueryStream<'_> {
/// Returns if the connection is ready for querying, with the status of the connection.
///
/// This might be available only after the stream has been exhausted.
pub fn ready_status(&self) -> ReadyForQueryStatus {
self.status
}
}
impl Stream for SimpleQueryStream<'_> {
type Item = Result<SimpleQueryMessage, Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.project();
loop {
match ready!(this.responses.poll_next(cx)?) {
Message::CommandComplete(body) => {
let rows = body
.tag()
.map_err(Error::parse)?
.rsplit(' ')
.next()
.unwrap()
.parse()
.unwrap_or(0);
return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(rows))));
}
Message::EmptyQueryResponse => {
return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(0))));
}
Message::RowDescription(body) => {
let columns = body
.fields()
.map(|f| Ok(SimpleColumn::new(f.name().to_string())))
.collect::<Vec<_>>()
.map_err(Error::parse)?
.into();
*this.columns = Some(columns);
}
Message::DataRow(body) => {
let row = match &this.columns {
Some(columns) => SimpleQueryRow::new(columns.clone(), body)?,
None => return Poll::Ready(Some(Err(Error::unexpected_message()))),
};
return Poll::Ready(Some(Ok(SimpleQueryMessage::Row(row))));
}
Message::ReadyForQuery(s) => {
*this.status = s.into();
return Poll::Ready(None);
}
_ => return Poll::Ready(Some(Err(Error::unexpected_message()))),
}
}
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/cancel_token.rs | libs/proxy/tokio-postgres2/src/cancel_token.rs | use serde::{Deserialize, Serialize};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::TcpStream;
use crate::client::SocketConfig;
use crate::config::SslMode;
use crate::tls::{MakeTlsConnect, TlsConnect};
use crate::{Error, cancel_query, cancel_query_raw};
/// A cancellation token that allows easy cancellation of a query.
#[derive(Clone)]
pub struct CancelToken {
pub socket_config: SocketConfig,
pub raw: RawCancelToken,
}
/// A raw cancellation token that allows cancellation of a query, given a fresh connection to postgres.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RawCancelToken {
pub ssl_mode: SslMode,
pub process_id: i32,
pub secret_key: i32,
}
impl CancelToken {
/// Attempts to cancel the in-progress query on the connection associated
/// with this `CancelToken`.
///
/// The server provides no information about whether a cancellation attempt was successful or not. An error will
/// only be returned if the client was unable to connect to the database.
///
/// Cancellation is inherently racy. There is no guarantee that the
/// cancellation request will reach the server before the query terminates
/// normally, or that the connection associated with this token is still
/// active.
///
/// Requires the `runtime` Cargo feature (enabled by default).
pub async fn cancel_query<T>(&self, tls: T) -> Result<(), Error>
where
T: MakeTlsConnect<TcpStream>,
{
cancel_query::cancel_query(
self.socket_config.clone(),
self.raw.ssl_mode,
tls,
self.raw.process_id,
self.raw.secret_key,
)
.await
}
}
impl RawCancelToken {
/// Like `cancel_query`, but uses a stream which is already connected to the server rather than opening a new
/// connection itself.
pub async fn cancel_query_raw<S, T>(&self, stream: S, tls: T) -> Result<(), Error>
where
S: AsyncRead + AsyncWrite + Unpin,
T: TlsConnect<S>,
{
cancel_query_raw::cancel_query_raw(
stream,
self.ssl_mode,
tls,
self.process_id,
self.secret_key,
)
.await
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/row.rs | libs/proxy/tokio-postgres2/src/row.rs | //! Rows.
use std::ops::Range;
use std::sync::Arc;
use std::{fmt, str};
use fallible_iterator::FallibleIterator;
use postgres_protocol2::message::backend::DataRowBody;
use postgres_types2::{Format, WrongFormat};
use crate::row::sealed::{AsName, Sealed};
use crate::simple_query::SimpleColumn;
use crate::statement::Column;
use crate::types::{FromSql, Type, WrongType};
use crate::{Error, Statement};
mod sealed {
pub trait Sealed {}
pub trait AsName {
fn as_name(&self) -> &str;
}
}
impl AsName for Column {
fn as_name(&self) -> &str {
self.name()
}
}
impl AsName for String {
fn as_name(&self) -> &str {
self
}
}
/// A trait implemented by types that can index into columns of a row.
///
/// This cannot be implemented outside of this crate.
pub trait RowIndex: Sealed {
#[doc(hidden)]
fn __idx<T>(&self, columns: &[T]) -> Option<usize>
where
T: AsName;
}
impl Sealed for usize {}
impl RowIndex for usize {
#[inline]
fn __idx<T>(&self, columns: &[T]) -> Option<usize>
where
T: AsName,
{
if *self >= columns.len() {
None
} else {
Some(*self)
}
}
}
impl Sealed for str {}
impl RowIndex for str {
#[inline]
fn __idx<T>(&self, columns: &[T]) -> Option<usize>
where
T: AsName,
{
if let Some(idx) = columns.iter().position(|d| d.as_name() == self) {
return Some(idx);
};
// FIXME ASCII-only case insensitivity isn't really the right thing to
// do. Postgres itself uses a dubious wrapper around tolower and JDBC
// uses the US locale.
columns
.iter()
.position(|d| d.as_name().eq_ignore_ascii_case(self))
}
}
impl<T> Sealed for &T where T: ?Sized + Sealed {}
impl<T> RowIndex for &T
where
T: ?Sized + RowIndex,
{
#[inline]
fn __idx<U>(&self, columns: &[U]) -> Option<usize>
where
U: AsName,
{
T::__idx(*self, columns)
}
}
/// A row of data returned from the database by a query.
pub struct Row {
statement: Statement,
output_format: Format,
body: DataRowBody,
ranges: Vec<Option<Range<usize>>>,
}
impl fmt::Debug for Row {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Row")
.field("columns", &self.columns())
.finish()
}
}
impl Row {
pub(crate) fn new(
statement: Statement,
body: DataRowBody,
output_format: Format,
) -> Result<Row, Error> {
let ranges = body.ranges().collect().map_err(Error::parse)?;
Ok(Row {
statement,
body,
ranges,
output_format,
})
}
/// Returns information about the columns of data in the row.
pub fn columns(&self) -> &[Column] {
self.statement.columns()
}
/// Determines if the row contains no values.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the number of values in the row.
pub fn len(&self) -> usize {
self.columns().len()
}
/// Deserializes a value from the row.
///
/// The value can be specified either by its numeric index in the row, or by its column name.
///
/// # Panics
///
/// Panics if the index is out of bounds or if the value cannot be converted to the specified type.
pub fn get<'a, I, T>(&'a self, idx: I) -> T
where
I: RowIndex + fmt::Display,
T: FromSql<'a>,
{
match self.get_inner(&idx) {
Ok(ok) => ok,
Err(err) => panic!("error retrieving column {idx}: {err}"),
}
}
/// Like `Row::get`, but returns a `Result` rather than panicking.
pub fn try_get<'a, I, T>(&'a self, idx: I) -> Result<T, Error>
where
I: RowIndex + fmt::Display,
T: FromSql<'a>,
{
self.get_inner(&idx)
}
fn get_inner<'a, I, T>(&'a self, idx: &I) -> Result<T, Error>
where
I: RowIndex + fmt::Display,
T: FromSql<'a>,
{
let idx = match idx.__idx(self.columns()) {
Some(idx) => idx,
None => return Err(Error::column(idx.to_string())),
};
let ty = self.columns()[idx].type_();
if !T::accepts(ty) {
return Err(Error::from_sql(
Box::new(WrongType::new::<T>(ty.clone())),
idx,
));
}
FromSql::from_sql_nullable(ty, self.col_buffer(idx)).map_err(|e| Error::from_sql(e, idx))
}
/// Get the raw bytes for the column at the given index.
fn col_buffer(&self, idx: usize) -> Option<&[u8]> {
let range = self.ranges.get(idx)?.to_owned()?;
Some(&self.body.buffer()[range])
}
/// Interpret the column at the given index as text
///
/// Useful when using query_raw_txt() which sets text transfer mode
pub fn as_text(&self, idx: usize) -> Result<Option<&str>, Error> {
if self.output_format == Format::Text {
match self.col_buffer(idx) {
Some(raw) => {
FromSql::from_sql(&Type::TEXT, raw).map_err(|e| Error::from_sql(e, idx))
}
None => Ok(None),
}
} else {
Err(Error::from_sql(Box::new(WrongFormat {}), idx))
}
}
/// Row byte size
pub fn body_len(&self) -> usize {
self.body.buffer().len()
}
}
impl AsName for SimpleColumn {
fn as_name(&self) -> &str {
self.name()
}
}
/// A row of data returned from the database by a simple query.
#[derive(Debug)]
pub struct SimpleQueryRow {
columns: Arc<[SimpleColumn]>,
body: DataRowBody,
ranges: Vec<Option<Range<usize>>>,
}
impl SimpleQueryRow {
#[allow(clippy::new_ret_no_self)]
pub(crate) fn new(
columns: Arc<[SimpleColumn]>,
body: DataRowBody,
) -> Result<SimpleQueryRow, Error> {
let ranges = body.ranges().collect().map_err(Error::parse)?;
Ok(SimpleQueryRow {
columns,
body,
ranges,
})
}
/// Returns information about the columns of data in the row.
pub fn columns(&self) -> &[SimpleColumn] {
&self.columns
}
/// Determines if the row contains no values.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the number of values in the row.
pub fn len(&self) -> usize {
self.columns.len()
}
/// Returns a value from the row.
///
/// The value can be specified either by its numeric index in the row, or by its column name.
///
/// # Panics
///
/// Panics if the index is out of bounds or if the value cannot be converted to the specified type.
pub fn get<I>(&self, idx: I) -> Option<&str>
where
I: RowIndex + fmt::Display,
{
match self.get_inner(&idx) {
Ok(ok) => ok,
Err(err) => panic!("error retrieving column {idx}: {err}"),
}
}
/// Like `SimpleQueryRow::get`, but returns a `Result` rather than panicking.
pub fn try_get<I>(&self, idx: I) -> Result<Option<&str>, Error>
where
I: RowIndex + fmt::Display,
{
self.get_inner(&idx)
}
fn get_inner<I>(&self, idx: &I) -> Result<Option<&str>, Error>
where
I: RowIndex + fmt::Display,
{
let idx = match idx.__idx(&self.columns) {
Some(idx) => idx,
None => return Err(Error::column(idx.to_string())),
};
let buf = self.ranges[idx].clone().map(|r| &self.body.buffer()[r]);
FromSql::from_sql_nullable(&Type::TEXT, buf).map_err(|e| Error::from_sql(e, idx))
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/codec.rs | libs/proxy/tokio-postgres2/src/codec.rs | use std::io;
use bytes::BytesMut;
use fallible_iterator::FallibleIterator;
use postgres_protocol2::message::backend;
use tokio::sync::mpsc::UnboundedSender;
use tokio_util::codec::{Decoder, Encoder};
pub enum FrontendMessage {
Raw(BytesMut),
RecordNotices(RecordNotices),
}
pub struct RecordNotices {
pub sender: UnboundedSender<Box<str>>,
pub limit: usize,
}
pub enum BackendMessage {
Normal {
messages: BackendMessages,
ready: bool,
},
Async(backend::Message),
}
pub struct BackendMessages(BytesMut);
impl BackendMessages {
pub fn empty() -> BackendMessages {
BackendMessages(BytesMut::new())
}
}
impl FallibleIterator for BackendMessages {
type Item = backend::Message;
type Error = io::Error;
fn next(&mut self) -> io::Result<Option<backend::Message>> {
backend::Message::parse(&mut self.0)
}
}
pub struct PostgresCodec;
impl Encoder<BytesMut> for PostgresCodec {
type Error = io::Error;
fn encode(&mut self, item: BytesMut, dst: &mut BytesMut) -> io::Result<()> {
dst.unsplit(item);
Ok(())
}
}
impl Decoder for PostgresCodec {
type Item = BackendMessage;
type Error = io::Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<BackendMessage>, io::Error> {
let mut idx = 0;
let mut ready = false;
while let Some(header) = backend::Header::parse(&src[idx..])? {
let len = header.len() as usize + 1;
if src[idx..].len() < len {
break;
}
match header.tag() {
backend::NOTICE_RESPONSE_TAG
| backend::NOTIFICATION_RESPONSE_TAG
| backend::PARAMETER_STATUS_TAG => {
if idx == 0 {
let message = backend::Message::parse(src)?.unwrap();
return Ok(Some(BackendMessage::Async(message)));
} else {
break;
}
}
_ => {}
}
idx += len;
if header.tag() == backend::READY_FOR_QUERY_TAG {
ready = true;
break;
}
}
if idx == 0 {
Ok(None)
} else {
Ok(Some(BackendMessage::Normal {
messages: BackendMessages(src.split_to(idx)),
ready,
}))
}
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/connect.rs | libs/proxy/tokio-postgres2/src/connect.rs | use std::net::IpAddr;
use futures_util::TryStreamExt;
use postgres_protocol2::message::backend::Message;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::TcpStream;
use tokio::sync::mpsc;
use crate::client::SocketConfig;
use crate::config::{Host, SslMode};
use crate::connect_raw::StartupStream;
use crate::connect_socket::connect_socket;
use crate::tls::{MakeTlsConnect, TlsConnect};
use crate::{Client, Config, Connection, Error};
pub async fn connect<T>(
tls: &T,
config: &Config,
) -> Result<(Client, Connection<TcpStream, T::Stream>), Error>
where
T: MakeTlsConnect<TcpStream>,
{
let hostname = match &config.host {
Host::Tcp(host) => host.as_str(),
};
let tls = tls
.make_tls_connect(hostname)
.map_err(|e| Error::tls(e.into()))?;
match connect_once(config.host_addr, &config.host, config.port, tls, config).await {
Ok((client, connection)) => Ok((client, connection)),
Err(e) => Err(e),
}
}
async fn connect_once<T>(
host_addr: Option<IpAddr>,
host: &Host,
port: u16,
tls: T,
config: &Config,
) -> Result<(Client, Connection<TcpStream, T::Stream>), Error>
where
T: TlsConnect<TcpStream>,
{
let socket = connect_socket(host_addr, host, port, config.connect_timeout).await?;
let stream = config.tls_and_authenticate(socket, tls).await?;
managed(
stream,
host_addr,
host.clone(),
port,
config.ssl_mode,
config.connect_timeout,
)
.await
}
pub async fn managed<TlsStream>(
mut stream: StartupStream<TcpStream, TlsStream>,
host_addr: Option<IpAddr>,
host: Host,
port: u16,
ssl_mode: SslMode,
connect_timeout: Option<std::time::Duration>,
) -> Result<(Client, Connection<TcpStream, TlsStream>), Error>
where
TlsStream: AsyncRead + AsyncWrite + Unpin,
{
let (process_id, secret_key) = wait_until_ready(&mut stream).await?;
let socket_config = SocketConfig {
host_addr,
host,
port,
connect_timeout,
};
let mut stream = stream.into_framed();
let write_buf = std::mem::take(stream.write_buffer_mut());
let (client_tx, conn_rx) = mpsc::unbounded_channel();
let (conn_tx, client_rx) = mpsc::channel(4);
let client = Client::new(
client_tx,
client_rx,
socket_config,
ssl_mode,
process_id,
secret_key,
write_buf,
);
let connection = Connection::new(stream, conn_tx, conn_rx);
Ok((client, connection))
}
async fn wait_until_ready<S, T>(stream: &mut StartupStream<S, T>) -> Result<(i32, i32), Error>
where
S: AsyncRead + AsyncWrite + Unpin,
T: AsyncRead + AsyncWrite + Unpin,
{
let mut process_id = 0;
let mut secret_key = 0;
loop {
match stream.try_next().await.map_err(Error::io)? {
Some(Message::BackendKeyData(body)) => {
process_id = body.process_id();
secret_key = body.secret_key();
}
// These values are currently not used by `Client`/`Connection`. Ignore them.
Some(Message::ParameterStatus(_)) | Some(Message::NoticeResponse(_)) => {}
Some(Message::ReadyForQuery(_)) => return Ok((process_id, secret_key)),
Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
Some(_) => return Err(Error::unexpected_message()),
None => return Err(Error::closed()),
}
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/connection.rs | libs/proxy/tokio-postgres2/src/connection.rs | use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use bytes::BytesMut;
use fallible_iterator::FallibleIterator;
use futures_util::{Sink, StreamExt, ready};
use postgres_protocol2::message::backend::{Message, NoticeResponseBody};
use postgres_protocol2::message::frontend;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::sync::mpsc;
use tokio_util::codec::Framed;
use tokio_util::sync::PollSender;
use tracing::trace;
use crate::Error;
use crate::codec::{
BackendMessage, BackendMessages, FrontendMessage, PostgresCodec, RecordNotices,
};
use crate::maybe_tls_stream::MaybeTlsStream;
#[derive(PartialEq, Debug)]
enum State {
Active,
Closing,
}
/// A connection to a PostgreSQL database.
///
/// This is one half of what is returned when a new connection is established. It performs the actual IO with the
/// server, and should generally be spawned off onto an executor to run in the background.
///
/// `Connection` implements `Future`, and only resolves when the connection is closed, either because a fatal error has
/// occurred, or because its associated `Client` has dropped and all outstanding work has completed.
#[must_use = "futures do nothing unless polled"]
pub struct Connection<S, T> {
stream: Framed<MaybeTlsStream<S, T>, PostgresCodec>,
sender: PollSender<BackendMessages>,
receiver: mpsc::UnboundedReceiver<FrontendMessage>,
notices: Option<RecordNotices>,
pending_response: Option<BackendMessages>,
state: State,
}
pub const INITIAL_CAPACITY: usize = 2 * 1024;
pub const GC_THRESHOLD: usize = 16 * 1024;
/// Gargabe collect the [`BytesMut`] if it has too much spare capacity.
pub fn gc_bytesmut(buf: &mut BytesMut) {
// We use a different mode to shrink the buf when above the threshold.
// When above the threshold, we only re-allocate when the buf has 2x spare capacity.
let reclaim = GC_THRESHOLD.checked_sub(buf.len()).unwrap_or(buf.len());
// `try_reclaim` tries to get the capacity from any shared `BytesMut`s,
// before then comparing the length against the capacity.
if buf.try_reclaim(reclaim) {
let capacity = usize::max(buf.len(), INITIAL_CAPACITY);
// Allocate a new `BytesMut` so that we deallocate the old version.
let mut new = BytesMut::with_capacity(capacity);
new.extend_from_slice(buf);
*buf = new;
}
}
pub enum Never {}
impl<S, T> Connection<S, T>
where
S: AsyncRead + AsyncWrite + Unpin,
T: AsyncRead + AsyncWrite + Unpin,
{
pub(crate) fn new(
stream: Framed<MaybeTlsStream<S, T>, PostgresCodec>,
sender: mpsc::Sender<BackendMessages>,
receiver: mpsc::UnboundedReceiver<FrontendMessage>,
) -> Connection<S, T> {
Connection {
stream,
sender: PollSender::new(sender),
receiver,
notices: None,
pending_response: None,
state: State::Active,
}
}
/// Read and process messages from the connection to postgres.
/// client <- postgres
fn poll_read(&mut self, cx: &mut Context<'_>) -> Poll<Result<Never, Error>> {
loop {
let messages = match self.pending_response.take() {
Some(messages) => messages,
None => {
let message = match self.stream.poll_next_unpin(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => return Poll::Ready(Err(Error::closed())),
Poll::Ready(Some(Err(e))) => return Poll::Ready(Err(Error::io(e))),
Poll::Ready(Some(Ok(message))) => message,
};
match message {
BackendMessage::Async(Message::NoticeResponse(body)) => {
self.handle_notice(body)?;
continue;
}
BackendMessage::Async(_) => continue,
BackendMessage::Normal { messages, ready } => {
// if we read a ReadyForQuery from postgres, let's try GC the read buffer.
if ready {
gc_bytesmut(self.stream.read_buffer_mut());
}
messages
}
}
}
};
match self.sender.poll_reserve(cx) {
Poll::Ready(Ok(())) => {
let _ = self.sender.send_item(messages);
}
Poll::Ready(Err(_)) => {
return Poll::Ready(Err(Error::closed()));
}
Poll::Pending => {
self.pending_response = Some(messages);
trace!("poll_read: waiting on sender");
return Poll::Pending;
}
}
}
}
fn handle_notice(&mut self, body: NoticeResponseBody) -> Result<(), Error> {
let Some(notices) = &mut self.notices else {
return Ok(());
};
let mut fields = body.fields();
while let Some(field) = fields.next().map_err(Error::parse)? {
// loop until we find the message field
if field.type_() == b'M' {
// if the message field is within the limit, send it.
if let Some(new_limit) = notices.limit.checked_sub(field.value().len()) {
match notices.sender.send(field.value().into()) {
// set the new limit.
Ok(()) => notices.limit = new_limit,
// closed.
Err(_) => self.notices = None,
}
}
break;
}
}
Ok(())
}
/// Fetch the next client request and enqueue the response sender.
fn poll_request(&mut self, cx: &mut Context<'_>) -> Poll<Option<FrontendMessage>> {
if self.receiver.is_closed() {
return Poll::Ready(None);
}
match self.receiver.poll_recv(cx) {
Poll::Ready(Some(request)) => {
trace!("polled new request");
Poll::Ready(Some(request))
}
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
}
}
/// Process client requests and write them to the postgres connection, flushing if necessary.
/// client -> postgres
fn poll_write(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
loop {
if Pin::new(&mut self.stream)
.poll_ready(cx)
.map_err(Error::io)?
.is_pending()
{
trace!("poll_write: waiting on socket");
// poll_ready is self-flushing.
return Poll::Pending;
}
match self.poll_request(cx) {
// send the message to postgres
Poll::Ready(Some(FrontendMessage::Raw(request))) => {
Pin::new(&mut self.stream)
.start_send(request)
.map_err(Error::io)?;
}
Poll::Ready(Some(FrontendMessage::RecordNotices(notices))) => {
self.notices = Some(notices)
}
// No more messages from the client, and no more responses to wait for.
// Send a terminate message to postgres
Poll::Ready(None) => {
trace!("poll_write: at eof, terminating");
frontend::terminate(self.stream.write_buffer_mut());
trace!("poll_write: sent eof, closing");
trace!("poll_write: done");
return Poll::Ready(Ok(()));
}
// Still waiting for a message from the client.
Poll::Pending => {
trace!("poll_write: waiting on request");
ready!(self.poll_flush(cx))?;
return Poll::Pending;
}
}
}
}
fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
match Pin::new(&mut self.stream)
.poll_flush(cx)
.map_err(Error::io)?
{
Poll::Ready(()) => {
trace!("poll_flush: flushed");
// Since our codec prefers to share the buffer with the `Client`,
// if we don't release our share, then the `Client` would have to re-alloc
// the buffer when they next use it.
debug_assert!(self.stream.write_buffer().is_empty());
*self.stream.write_buffer_mut() = BytesMut::new();
Poll::Ready(Ok(()))
}
Poll::Pending => {
trace!("poll_flush: waiting on socket");
Poll::Pending
}
}
}
fn poll_shutdown(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
match Pin::new(&mut self.stream)
.poll_close(cx)
.map_err(Error::io)?
{
Poll::Ready(()) => {
trace!("poll_shutdown: complete");
Poll::Ready(Ok(()))
}
Poll::Pending => {
trace!("poll_shutdown: waiting on socket");
Poll::Pending
}
}
}
fn poll_message(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Never, Error>>> {
if self.state != State::Closing {
// if the state is still active, try read from and write to postgres.
let Poll::Pending = self.poll_read(cx)?;
if self.poll_write(cx)?.is_ready() {
self.state = State::Closing;
}
// poll_read returned Pending.
// poll_write returned Pending or Ready(()).
// if poll_write returned Ready(()), then we are waiting to read more data from postgres.
if self.state != State::Closing {
return Poll::Pending;
}
}
match self.poll_shutdown(cx) {
Poll::Ready(Ok(())) => Poll::Ready(None),
Poll::Ready(Err(e)) => Poll::Ready(Some(Err(e))),
Poll::Pending => Poll::Pending,
}
}
}
impl<S, T> Future for Connection<S, T>
where
S: AsyncRead + AsyncWrite + Unpin,
T: AsyncRead + AsyncWrite + Unpin,
{
type Output = Result<(), Error>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
match self.poll_message(cx)? {
Poll::Ready(None) => Poll::Ready(Ok(())),
Poll::Pending => Poll::Pending,
}
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/maybe_tls_stream.rs | libs/proxy/tokio-postgres2/src/maybe_tls_stream.rs | //! MaybeTlsStream.
//!
//! Represents a stream that may or may not be encrypted with TLS.
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use crate::tls::{ChannelBinding, TlsStream};
/// A stream that may or may not be encrypted with TLS.
pub enum MaybeTlsStream<S, T> {
/// An unencrypted stream.
Raw(S),
/// An encrypted stream.
Tls(T),
}
impl<S, T> AsyncRead for MaybeTlsStream<S, T>
where
S: AsyncRead + Unpin,
T: AsyncRead + Unpin,
{
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
match &mut *self {
MaybeTlsStream::Raw(s) => Pin::new(s).poll_read(cx, buf),
MaybeTlsStream::Tls(s) => Pin::new(s).poll_read(cx, buf),
}
}
}
impl<S, T> AsyncWrite for MaybeTlsStream<S, T>
where
S: AsyncWrite + Unpin,
T: AsyncWrite + Unpin,
{
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
match &mut *self {
MaybeTlsStream::Raw(s) => Pin::new(s).poll_write(cx, buf),
MaybeTlsStream::Tls(s) => Pin::new(s).poll_write(cx, buf),
}
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
match &mut *self {
MaybeTlsStream::Raw(s) => Pin::new(s).poll_flush(cx),
MaybeTlsStream::Tls(s) => Pin::new(s).poll_flush(cx),
}
}
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
match &mut *self {
MaybeTlsStream::Raw(s) => Pin::new(s).poll_shutdown(cx),
MaybeTlsStream::Tls(s) => Pin::new(s).poll_shutdown(cx),
}
}
}
impl<S, T> TlsStream for MaybeTlsStream<S, T>
where
S: AsyncRead + AsyncWrite + Unpin,
T: TlsStream + Unpin,
{
fn channel_binding(&self) -> ChannelBinding {
match self {
MaybeTlsStream::Raw(_) => ChannelBinding::none(),
MaybeTlsStream::Tls(s) => s.channel_binding(),
}
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/client.rs | libs/proxy/tokio-postgres2/src/client.rs | use std::collections::HashMap;
use std::fmt;
use std::net::IpAddr;
use std::task::{Context, Poll};
use std::time::Duration;
use bytes::BytesMut;
use fallible_iterator::FallibleIterator;
use futures_util::{TryStreamExt, future, ready};
use postgres_protocol2::message::backend::Message;
use postgres_protocol2::message::frontend;
use serde::{Deserialize, Serialize};
use tokio::sync::mpsc;
use crate::cancel_token::RawCancelToken;
use crate::codec::{BackendMessages, FrontendMessage, RecordNotices};
use crate::config::{Host, SslMode};
use crate::connection::gc_bytesmut;
use crate::query::RowStream;
use crate::simple_query::SimpleQueryStream;
use crate::types::{Oid, Type};
use crate::{
CancelToken, Error, ReadyForQueryStatus, SimpleQueryMessage, Transaction, TransactionBuilder,
query, simple_query,
};
pub struct Responses {
/// new messages from conn
receiver: mpsc::Receiver<BackendMessages>,
/// current batch of messages
cur: BackendMessages,
/// number of total queries sent.
waiting: usize,
/// number of ReadyForQuery messages received.
received: usize,
}
impl Responses {
pub fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Result<Message, Error>> {
loop {
// get the next saved message
if let Some(message) = self.cur.next().map_err(Error::parse)? {
let received = self.received;
// increase the query head if this is the last message.
if let Message::ReadyForQuery(_) = message {
self.received += 1;
}
// check if the client has skipped this query.
if received + 1 < self.waiting {
// grab the next message.
continue;
}
// convenience: turn the error messaage into a proper error.
let res = match message {
Message::ErrorResponse(body) => Err(Error::db(body)),
message => Ok(message),
};
return Poll::Ready(res);
}
// get the next batch of messages.
match ready!(self.receiver.poll_recv(cx)) {
Some(messages) => self.cur = messages,
None => return Poll::Ready(Err(Error::closed())),
}
}
}
pub async fn next(&mut self) -> Result<Message, Error> {
future::poll_fn(|cx| self.poll_next(cx)).await
}
}
/// A cache of type info and prepared statements for fetching type info
/// (corresponding to the queries in the [crate::prepare] module).
#[derive(Default)]
pub(crate) struct CachedTypeInfo {
/// Cache of types already looked up.
pub(crate) types: HashMap<Oid, Type>,
}
pub struct InnerClient {
sender: mpsc::UnboundedSender<FrontendMessage>,
responses: Responses,
/// A buffer to use when writing out postgres commands.
buffer: BytesMut,
}
impl InnerClient {
pub fn start(&mut self) -> Result<PartialQuery<'_>, Error> {
self.responses.waiting += 1;
Ok(PartialQuery(Some(self)))
}
pub fn send_simple_query(&mut self, query: &str) -> Result<&mut Responses, Error> {
self.responses.waiting += 1;
self.buffer.clear();
// simple queries do not need sync.
frontend::query(query, &mut self.buffer).map_err(Error::encode)?;
let buf = self.buffer.split();
self.send_message(FrontendMessage::Raw(buf))
}
fn send_message(&mut self, messages: FrontendMessage) -> Result<&mut Responses, Error> {
self.sender.send(messages).map_err(|_| Error::closed())?;
Ok(&mut self.responses)
}
}
pub struct PartialQuery<'a>(Option<&'a mut InnerClient>);
impl Drop for PartialQuery<'_> {
fn drop(&mut self) {
if let Some(client) = self.0.take() {
client.buffer.clear();
frontend::sync(&mut client.buffer);
let buf = client.buffer.split();
let _ = client.send_message(FrontendMessage::Raw(buf));
}
}
}
impl<'a> PartialQuery<'a> {
pub fn send_with_flush<F>(&mut self, f: F) -> Result<&mut Responses, Error>
where
F: FnOnce(&mut BytesMut) -> Result<(), Error>,
{
let client = self.0.as_deref_mut().unwrap();
client.buffer.clear();
f(&mut client.buffer)?;
frontend::flush(&mut client.buffer);
let buf = client.buffer.split();
client.send_message(FrontendMessage::Raw(buf))
}
pub fn send_with_sync<F>(mut self, f: F) -> Result<&'a mut Responses, Error>
where
F: FnOnce(&mut BytesMut) -> Result<(), Error>,
{
let client = self.0.as_deref_mut().unwrap();
client.buffer.clear();
f(&mut client.buffer)?;
frontend::sync(&mut client.buffer);
let buf = client.buffer.split();
let _ = client.send_message(FrontendMessage::Raw(buf));
Ok(&mut self.0.take().unwrap().responses)
}
}
#[derive(Clone, Serialize, Deserialize)]
pub struct SocketConfig {
pub host_addr: Option<IpAddr>,
pub host: Host,
pub port: u16,
pub connect_timeout: Option<Duration>,
}
/// An asynchronous PostgreSQL client.
///
/// The client is one half of what is returned when a connection is established. Users interact with the database
/// through this client object.
pub struct Client {
inner: InnerClient,
cached_typeinfo: CachedTypeInfo,
socket_config: SocketConfig,
ssl_mode: SslMode,
process_id: i32,
secret_key: i32,
}
impl Client {
pub(crate) fn new(
sender: mpsc::UnboundedSender<FrontendMessage>,
receiver: mpsc::Receiver<BackendMessages>,
socket_config: SocketConfig,
ssl_mode: SslMode,
process_id: i32,
secret_key: i32,
write_buf: BytesMut,
) -> Client {
Client {
inner: InnerClient {
sender,
responses: Responses {
receiver,
cur: BackendMessages::empty(),
waiting: 0,
received: 0,
},
buffer: write_buf,
},
cached_typeinfo: Default::default(),
socket_config,
ssl_mode,
process_id,
secret_key,
}
}
/// Returns process_id.
pub fn get_process_id(&self) -> i32 {
self.process_id
}
pub(crate) fn inner_mut(&mut self) -> &mut InnerClient {
&mut self.inner
}
pub fn record_notices(&mut self, limit: usize) -> mpsc::UnboundedReceiver<Box<str>> {
let (tx, rx) = mpsc::unbounded_channel();
let notices = RecordNotices { sender: tx, limit };
self.inner
.sender
.send(FrontendMessage::RecordNotices(notices))
.ok();
rx
}
/// Pass text directly to the Postgres backend to allow it to sort out typing itself and
/// to save a roundtrip
pub async fn query_raw_txt<S, I>(
&mut self,
statement: &str,
params: I,
) -> Result<RowStream<'_>, Error>
where
S: AsRef<str>,
I: IntoIterator<Item = Option<S>>,
I::IntoIter: ExactSizeIterator,
{
query::query_txt(
&mut self.inner,
&mut self.cached_typeinfo,
statement,
params,
)
.await
}
/// Executes a sequence of SQL statements using the simple query protocol, returning the resulting rows.
///
/// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that
/// point. The simple query protocol returns the values in rows as strings rather than in their binary encodings,
/// so the associated row type doesn't work with the `FromSql` trait. Rather than simply returning a list of the
/// rows, this method returns a list of an enum which indicates either the completion of one of the commands,
/// or a row of data. This preserves the framing between the separate statements in the request.
///
/// # Warning
///
/// Prepared statements should be use for any query which contains user-specified data, as they provided the
/// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass
/// them to this method!
pub async fn simple_query(&mut self, query: &str) -> Result<Vec<SimpleQueryMessage>, Error> {
self.simple_query_raw(query).await?.try_collect().await
}
pub(crate) async fn simple_query_raw(
&mut self,
query: &str,
) -> Result<SimpleQueryStream<'_>, Error> {
simple_query::simple_query(self.inner_mut(), query).await
}
/// Executes a sequence of SQL statements using the simple query protocol.
///
/// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that
/// point. This is intended for use when, for example, initializing a database schema.
///
/// # Warning
///
/// Prepared statements should be use for any query which contains user-specified data, as they provided the
/// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass
/// them to this method!
pub async fn batch_execute(&mut self, query: &str) -> Result<ReadyForQueryStatus, Error> {
simple_query::batch_execute(self.inner_mut(), query).await
}
/// Similar to `discard_all`, but it does not clear any query plans
///
/// This runs in the background, so it can be executed without `await`ing.
pub fn reset_session_background(&mut self) -> Result<(), Error> {
// "CLOSE ALL": closes any cursors
// "SET SESSION AUTHORIZATION DEFAULT": resets the current_user back to the session_user
// "RESET ALL": resets any GUCs back to their session defaults.
// "DEALLOCATE ALL": deallocates any prepared statements
// "UNLISTEN *": stops listening on all channels
// "SELECT pg_advisory_unlock_all();": unlocks all advisory locks
// "DISCARD TEMP;": drops all temporary tables
// "DISCARD SEQUENCES;": deallocates all cached sequence state
let _responses = self.inner_mut().send_simple_query(
"ROLLBACK;
CLOSE ALL;
SET SESSION AUTHORIZATION DEFAULT;
RESET ALL;
DEALLOCATE ALL;
UNLISTEN *;
SELECT pg_advisory_unlock_all();
DISCARD TEMP;
DISCARD SEQUENCES;",
)?;
// Clean up memory usage.
gc_bytesmut(&mut self.inner_mut().buffer);
Ok(())
}
/// Begins a new database transaction.
///
/// The transaction will roll back by default - use the `commit` method to commit it.
pub async fn transaction(&mut self) -> Result<Transaction<'_>, Error> {
struct RollbackIfNotDone<'me> {
client: &'me mut Client,
done: bool,
}
impl Drop for RollbackIfNotDone<'_> {
fn drop(&mut self) {
if self.done {
return;
}
let _ = self.client.inner.send_simple_query("ROLLBACK");
}
}
// This is done, as `Future` created by this method can be dropped after
// `RequestMessages` is synchronously send to the `Connection` by
// `batch_execute()`, but before `Responses` is asynchronously polled to
// completion. In that case `Transaction` won't be created and thus
// won't be rolled back.
{
let mut cleaner = RollbackIfNotDone {
client: self,
done: false,
};
cleaner.client.batch_execute("BEGIN").await?;
cleaner.done = true;
}
Ok(Transaction::new(self))
}
/// Returns a builder for a transaction with custom settings.
///
/// Unlike the `transaction` method, the builder can be used to control the transaction's isolation level and other
/// attributes.
pub fn build_transaction(&mut self) -> TransactionBuilder<'_> {
TransactionBuilder::new(self)
}
/// Constructs a cancellation token that can later be used to request cancellation of a query running on the
/// connection associated with this client.
pub fn cancel_token(&self) -> CancelToken {
CancelToken {
socket_config: self.socket_config.clone(),
raw: RawCancelToken {
ssl_mode: self.ssl_mode,
process_id: self.process_id,
secret_key: self.secret_key,
},
}
}
/// Determines if the connection to the server has already closed.
///
/// In that case, all future queries will fail.
pub fn is_closed(&self) -> bool {
self.inner.sender.is_closed()
}
}
impl fmt::Debug for Client {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Client").finish()
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/types.rs | libs/proxy/tokio-postgres2/src/types.rs | //! Types.
//!
//! This module is a reexport of the `postgres_types` crate.
#[doc(inline)]
pub use postgres_types2::*;
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/statement.rs | libs/proxy/tokio-postgres2/src/statement.rs | use std::fmt;
use std::sync::Arc;
use crate::types::Type;
use postgres_protocol2::Oid;
use postgres_protocol2::message::backend::Field;
struct StatementInner {
name: &'static str,
columns: Vec<Column>,
}
/// A prepared statement.
///
/// Prepared statements can only be used with the connection that created them.
#[derive(Clone)]
pub struct Statement(Arc<StatementInner>);
impl Statement {
pub(crate) fn new(name: &'static str, columns: Vec<Column>) -> Statement {
Statement(Arc::new(StatementInner { name, columns }))
}
pub(crate) fn name(&self) -> &str {
self.0.name
}
/// Returns information about the columns returned when the statement is queried.
pub fn columns(&self) -> &[Column] {
&self.0.columns
}
}
/// Information about a column of a query.
pub struct Column {
name: String,
pub(crate) type_: Type,
// raw fields from RowDescription
table_oid: Oid,
column_id: i16,
format: i16,
// that better be stored in self.type_, but that is more radical refactoring
type_oid: Oid,
type_size: i16,
type_modifier: i32,
}
impl Column {
pub(crate) fn new(name: String, type_: Type, raw_field: Field<'_>) -> Column {
Column {
name,
type_,
table_oid: raw_field.table_oid(),
column_id: raw_field.column_id(),
format: raw_field.format(),
type_oid: raw_field.type_oid(),
type_size: raw_field.type_size(),
type_modifier: raw_field.type_modifier(),
}
}
/// Returns the name of the column.
pub fn name(&self) -> &str {
&self.name
}
/// Returns the type of the column.
pub fn type_(&self) -> &Type {
&self.type_
}
/// Returns the table OID of the column.
pub fn table_oid(&self) -> Oid {
self.table_oid
}
/// Returns the column ID of the column.
pub fn column_id(&self) -> i16 {
self.column_id
}
/// Returns the format of the column.
pub fn format(&self) -> i16 {
self.format
}
/// Returns the type OID of the column.
pub fn type_oid(&self) -> Oid {
self.type_oid
}
/// Returns the type size of the column.
pub fn type_size(&self) -> i16 {
self.type_size
}
/// Returns the type modifier of the column.
pub fn type_modifier(&self) -> i32 {
self.type_modifier
}
}
impl fmt::Debug for Column {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("Column")
.field("name", &self.name)
.field("type", &self.type_)
.finish()
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/transaction_builder.rs | libs/proxy/tokio-postgres2/src/transaction_builder.rs | use crate::{Client, Error, Transaction};
/// The isolation level of a database transaction.
#[derive(Debug, Copy, Clone)]
#[non_exhaustive]
pub enum IsolationLevel {
/// Equivalent to `ReadCommitted`.
ReadUncommitted,
/// An individual statement in the transaction will see rows committed before it began.
ReadCommitted,
/// All statements in the transaction will see the same view of rows committed before the first query in the
/// transaction.
RepeatableRead,
/// The reads and writes in this transaction must be able to be committed as an atomic "unit" with respect to reads
/// and writes of all other concurrent serializable transactions without interleaving.
Serializable,
}
/// A builder for database transactions.
pub struct TransactionBuilder<'a> {
client: &'a mut Client,
isolation_level: Option<IsolationLevel>,
read_only: Option<bool>,
deferrable: Option<bool>,
}
impl<'a> TransactionBuilder<'a> {
pub(crate) fn new(client: &'a mut Client) -> TransactionBuilder<'a> {
TransactionBuilder {
client,
isolation_level: None,
read_only: None,
deferrable: None,
}
}
/// Sets the isolation level of the transaction.
pub fn isolation_level(mut self, isolation_level: IsolationLevel) -> Self {
self.isolation_level = Some(isolation_level);
self
}
/// Sets the access mode of the transaction.
pub fn read_only(mut self, read_only: bool) -> Self {
self.read_only = Some(read_only);
self
}
/// Sets the deferrability of the transaction.
///
/// If the transaction is also serializable and read only, creation of the transaction may block, but when it
/// completes the transaction is able to run with less overhead and a guarantee that it will not be aborted due to
/// serialization failure.
pub fn deferrable(mut self, deferrable: bool) -> Self {
self.deferrable = Some(deferrable);
self
}
/// Begins the transaction.
///
/// The transaction will roll back by default - use the `commit` method to commit it.
pub async fn start(self) -> Result<Transaction<'a>, Error> {
let mut query = "START TRANSACTION".to_string();
let mut first = true;
if let Some(level) = self.isolation_level {
first = false;
query.push_str(" ISOLATION LEVEL ");
let level = match level {
IsolationLevel::ReadUncommitted => "READ UNCOMMITTED",
IsolationLevel::ReadCommitted => "READ COMMITTED",
IsolationLevel::RepeatableRead => "REPEATABLE READ",
IsolationLevel::Serializable => "SERIALIZABLE",
};
query.push_str(level);
}
if let Some(read_only) = self.read_only {
if !first {
query.push(',');
}
first = false;
let s = if read_only {
" READ ONLY"
} else {
" READ WRITE"
};
query.push_str(s);
}
if let Some(deferrable) = self.deferrable {
if !first {
query.push(',');
}
let s = if deferrable {
" DEFERRABLE"
} else {
" NOT DEFERRABLE"
};
query.push_str(s);
}
self.client.batch_execute(&query).await?;
Ok(Transaction::new(self.client))
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/query.rs | libs/proxy/tokio-postgres2/src/query.rs | use std::pin::Pin;
use std::task::{Context, Poll};
use bytes::BufMut;
use futures_util::{Stream, ready};
use postgres_protocol2::message::backend::Message;
use postgres_protocol2::message::frontend;
use postgres_types2::Format;
use crate::client::{CachedTypeInfo, InnerClient, Responses};
use crate::{Error, ReadyForQueryStatus, Row, Statement};
pub async fn query_txt<'a, S, I>(
client: &'a mut InnerClient,
typecache: &mut CachedTypeInfo,
query: &str,
params: I,
) -> Result<RowStream<'a>, Error>
where
S: AsRef<str>,
I: IntoIterator<Item = Option<S>>,
I::IntoIter: ExactSizeIterator,
{
let params = params.into_iter();
let mut client = client.start()?;
// Flow:
// 1. Parse the query
// 2. Inspect the row description for OIDs
// 3. If there's any OIDs we don't already know about, perform the typeinfo routine
// 4. Execute the query
// 5. Sync.
//
// The typeinfo routine:
// 1. Parse the typeinfo query
// 2. Execute the query on each OID
// 3. If the result does not match an OID we know, repeat 2.
// parse the query and get type info
let responses = client.send_with_flush(|buf| {
frontend::parse(
"", // unnamed prepared statement
query, // query to parse
std::iter::empty(), // give no type info
buf,
)
.map_err(Error::encode)?;
frontend::describe(b'S', "", buf).map_err(Error::encode)?;
Ok(())
})?;
match responses.next().await? {
Message::ParseComplete => {}
_ => return Err(Error::unexpected_message()),
}
match responses.next().await? {
Message::ParameterDescription(_) => {}
_ => return Err(Error::unexpected_message()),
};
let row_description = match responses.next().await? {
Message::RowDescription(body) => Some(body),
Message::NoData => None,
_ => return Err(Error::unexpected_message()),
};
let columns =
crate::prepare::parse_row_description(&mut client, typecache, row_description).await?;
let responses = client.send_with_sync(|buf| {
// Bind, pass params as text, retrieve as text
match frontend::bind(
"", // empty string selects the unnamed portal
"", // unnamed prepared statement
std::iter::empty(), // all parameters use the default format (text)
params,
|param, buf| match param {
Some(param) => {
buf.put_slice(param.as_ref().as_bytes());
Ok(postgres_protocol2::IsNull::No)
}
None => Ok(postgres_protocol2::IsNull::Yes),
},
Some(0), // all text
buf,
) {
Ok(()) => Ok(()),
Err(frontend::BindError::Conversion(e)) => Err(Error::to_sql(e, 0)),
Err(frontend::BindError::Serialization(e)) => Err(Error::encode(e)),
}?;
// Execute
frontend::execute("", 0, buf).map_err(Error::encode)?;
Ok(())
})?;
match responses.next().await? {
Message::BindComplete => {}
_ => return Err(Error::unexpected_message()),
}
Ok(RowStream {
responses,
statement: Statement::new("", columns),
command_tag: None,
status: ReadyForQueryStatus::Unknown,
output_format: Format::Text,
})
}
/// A stream of table rows.
pub struct RowStream<'a> {
responses: &'a mut Responses,
output_format: Format,
pub statement: Statement,
pub command_tag: Option<String>,
pub status: ReadyForQueryStatus,
}
impl Stream for RowStream<'_> {
type Item = Result<Row, Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
loop {
match ready!(this.responses.poll_next(cx)?) {
Message::DataRow(body) => {
return Poll::Ready(Some(Ok(Row::new(
this.statement.clone(),
body,
this.output_format,
)?)));
}
Message::EmptyQueryResponse | Message::PortalSuspended => {}
Message::CommandComplete(body) => {
if let Ok(tag) = body.tag() {
this.command_tag = Some(tag.to_string());
}
}
Message::ReadyForQuery(status) => {
this.status = status.into();
return Poll::Ready(None);
}
_ => return Poll::Ready(Some(Err(Error::unexpected_message()))),
}
}
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/cancel_query_raw.rs | libs/proxy/tokio-postgres2/src/cancel_query_raw.rs | use bytes::BytesMut;
use postgres_protocol2::message::frontend;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use crate::config::SslMode;
use crate::tls::TlsConnect;
use crate::{Error, connect_tls};
pub async fn cancel_query_raw<S, T>(
stream: S,
mode: SslMode,
tls: T,
process_id: i32,
secret_key: i32,
) -> Result<(), Error>
where
S: AsyncRead + AsyncWrite + Unpin,
T: TlsConnect<S>,
{
let mut stream = connect_tls::connect_tls(stream, mode, tls).await?;
let mut buf = BytesMut::new();
frontend::cancel_request(process_id, secret_key, &mut buf);
stream.write_all(&buf).await.map_err(Error::io)?;
stream.flush().await.map_err(Error::io)?;
stream.shutdown().await.map_err(Error::io)?;
Ok(())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/prepare.rs | libs/proxy/tokio-postgres2/src/prepare.rs | use bytes::BytesMut;
use fallible_iterator::FallibleIterator;
use postgres_protocol2::IsNull;
use postgres_protocol2::message::backend::{Message, RowDescriptionBody};
use postgres_protocol2::message::frontend;
use postgres_protocol2::types::oid_to_sql;
use postgres_types2::Format;
use crate::client::{CachedTypeInfo, PartialQuery, Responses};
use crate::types::{Kind, Oid, Type};
use crate::{Column, Error, Row, Statement};
/// Query used to hydrate metadata for a type OID that is not statically
/// known: name, type kind, element/base OIDs, range subtype, schema, and
/// composite relation id. Columns are consumed positionally by `get_type`.
pub(crate) const TYPEINFO_QUERY: &str = "\
SELECT t.typname, t.typtype, t.typelem, r.rngsubtype, t.typbasetype, n.nspname, t.typrelid
FROM pg_catalog.pg_type t
LEFT OUTER JOIN pg_catalog.pg_range r ON r.rngtypid = t.oid
INNER JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid
WHERE t.oid = $1
";
/// we need to make sure we close this prepared statement.
struct CloseStmt<'a, 'b> {
client: Option<&'a mut PartialQuery<'b>>,
name: &'static str,
}
impl<'a> CloseStmt<'a, '_> {
fn close(mut self) -> Result<&'a mut Responses, Error> {
let client = self.client.take().unwrap();
client.send_with_flush(|buf| {
frontend::close(b'S', self.name, buf).map_err(Error::encode)?;
Ok(())
})
}
}
impl Drop for CloseStmt<'_, '_> {
fn drop(&mut self) {
if let Some(client) = self.client.take() {
let _ = client.send_with_flush(|buf| {
frontend::close(b'S', self.name, buf).map_err(Error::encode)?;
Ok(())
});
}
}
}
/// Prepares `query` under `name`, driving the Parse/Describe exchange and
/// building a `Statement` from the returned row description.
///
/// Only used for the internal typeinfo statement, so every column OID must be
/// statically known; an unknown OID is treated as a protocol error.
async fn prepare_typecheck(
    client: &mut PartialQuery<'_>,
    name: &'static str,
    query: &str,
) -> Result<Statement, Error> {
    // Parse + Describe are sent in a single flush; the responses below arrive
    // in the same order.
    let responses = client.send_with_flush(|buf| {
        frontend::parse(name, query, [], buf).map_err(Error::encode)?;
        frontend::describe(b'S', name, buf).map_err(Error::encode)?;
        Ok(())
    })?;

    match responses.next().await? {
        Message::ParseComplete => {}
        _ => return Err(Error::unexpected_message()),
    }

    // Parameter types are not needed here; just consume the message.
    match responses.next().await? {
        Message::ParameterDescription(_) => {}
        _ => return Err(Error::unexpected_message()),
    };

    // `NoData` means the statement produces no rows.
    let row_description = match responses.next().await? {
        Message::RowDescription(body) => Some(body),
        Message::NoData => None,
        _ => return Err(Error::unexpected_message()),
    };

    let mut columns = vec![];
    if let Some(row_description) = row_description {
        let mut it = row_description.fields();
        while let Some(field) = it.next().map_err(Error::parse)? {
            let type_ = Type::from_oid(field.type_oid()).ok_or_else(Error::unexpected_message)?;
            let column = Column::new(field.name().to_string(), type_, field);
            columns.push(column);
        }
    }

    Ok(Statement::new(name, columns))
}
/// Looks up a type by OID without touching the wire: first among the
/// statically known types, then in the per-connection cache.
fn try_from_cache(typecache: &CachedTypeInfo, oid: Oid) -> Option<Type> {
    Type::from_oid(oid).or_else(|| typecache.types.get(&oid).cloned())
}
/// Resolves the column list for a freshly-described statement.
///
/// Columns whose type OIDs are statically known or already cached are
/// resolved locally; only if at least one column remains `UNKNOWN` do we
/// prepare the typeinfo statement and ask the server.
pub async fn parse_row_description(
    client: &mut PartialQuery<'_>,
    typecache: &mut CachedTypeInfo,
    row_description: Option<RowDescriptionBody>,
) -> Result<Vec<Column>, Error> {
    let mut columns = vec![];
    if let Some(row_description) = row_description {
        let mut it = row_description.fields();
        while let Some(field) = it.next().map_err(Error::parse)? {
            // Types not resolvable locally are deferred to the server round
            // trip below.
            let type_ = try_from_cache(typecache, field.type_oid()).unwrap_or(Type::UNKNOWN);
            let column = Column::new(field.name().to_string(), type_, field);
            columns.push(column);
        }
    }

    let all_known = columns.iter().all(|c| c.type_ != Type::UNKNOWN);
    if all_known {
        // all known, return early.
        return Ok(columns);
    }

    let typeinfo = "neon_proxy_typeinfo";

    // make sure to close the typeinfo statement before exiting.
    let mut guard = CloseStmt {
        name: typeinfo,
        client: None,
    };
    let client = guard.client.insert(client);

    // get the typeinfo statement.
    let stmt = prepare_typecheck(client, typeinfo, TYPEINFO_QUERY).await?;

    for column in &mut columns {
        column.type_ = get_type(client, typecache, &stmt, column.type_oid()).await?;
    }

    // cancel the close guard.
    let responses = guard.close()?;
    match responses.next().await? {
        Message::CloseComplete => {}
        _ => return Err(Error::unexpected_message()),
    }

    Ok(columns)
}
/// Resolves `oid` to a `Type`, querying the server via the prepared typeinfo
/// statement for anything not already cached.
///
/// Array types are followed iteratively: each array layer is pushed onto
/// `stack` while its element type is resolved, then unwound afterwards into
/// nested `Kind::Array` types.
async fn get_type(
    client: &mut PartialQuery<'_>,
    typecache: &mut CachedTypeInfo,
    stmt: &Statement,
    mut oid: Oid,
) -> Result<Type, Error> {
    let mut stack = vec![];
    let mut type_ = loop {
        if let Some(type_) = try_from_cache(typecache, oid) {
            break type_;
        }

        let row = exec(client, stmt, oid).await?;

        // Depth limit guards against pathological element-type chains.
        if stack.len() > 8 {
            return Err(Error::unexpected_message());
        }

        // Column order matches TYPEINFO_QUERY's select list.
        let name: String = row.try_get(0)?;
        let type_: i8 = row.try_get(1)?;
        let elem_oid: Oid = row.try_get(2)?;
        let rngsubtype: Option<Oid> = row.try_get(3)?;
        let basetype: Oid = row.try_get(4)?;
        let schema: String = row.try_get(5)?;
        let relid: Oid = row.try_get(6)?;

        let kind = if type_ == b'e' as i8 {
            Kind::Enum
        } else if type_ == b'p' as i8 {
            Kind::Pseudo
        } else if basetype != 0 {
            Kind::Domain(basetype)
        } else if elem_oid != 0 {
            // Array type: remember this layer and resolve the element next.
            stack.push((name, oid, schema));
            oid = elem_oid;
            continue;
        } else if relid != 0 {
            Kind::Composite(relid)
        } else if let Some(rngsubtype) = rngsubtype {
            Kind::Range(rngsubtype)
        } else {
            Kind::Simple
        };

        let type_ = Type::new(name, oid, kind, schema);
        typecache.types.insert(oid, type_.clone());
        break type_;
    };

    // Re-wrap the resolved element type in the array layers recorded above,
    // caching each intermediate array type as well.
    while let Some((name, oid, schema)) = stack.pop() {
        type_ = Type::new(name, oid, Kind::Array(type_), schema);
        typecache.types.insert(oid, type_.clone());
    }

    Ok(type_)
}
/// Executes the typeinfo statement with a single OID parameter, expecting
/// exactly one data row back.
async fn exec(
    client: &mut PartialQuery<'_>,
    statement: &Statement,
    param: Oid,
) -> Result<Row, Error> {
    // Bind (to the unnamed portal) + Execute, flushed together.
    let responses = client.send_with_flush(|buf| {
        encode_bind(statement, param, "", buf);
        frontend::execute("", 0, buf).map_err(Error::encode)?;
        Ok(())
    })?;

    match responses.next().await? {
        Message::BindComplete => {}
        _ => return Err(Error::unexpected_message()),
    }

    // Exactly one row is expected; anything else is a protocol error.
    let row = match responses.next().await? {
        Message::DataRow(body) => Row::new(statement.clone(), body, Format::Binary)?,
        _ => return Err(Error::unexpected_message()),
    };

    match responses.next().await? {
        Message::CommandComplete(_) => {}
        _ => return Err(Error::unexpected_message()),
    };

    Ok(row)
}
/// Encodes a Bind message for `statement` with a single binary-format OID
/// parameter, requesting binary-format results as well.
fn encode_bind(statement: &Statement, param: Oid, portal: &str, buf: &mut BytesMut) {
    frontend::bind(
        portal,
        statement.name(),
        [Format::Binary as i16],
        [param],
        |param, buf| {
            oid_to_sql(param, buf);
            Ok(IsNull::No)
        },
        [Format::Binary as i16],
        buf,
    )
    // NOTE(review): expected to be infallible for a single fixed-size OID
    // parameter — encoding errors would indicate a bug, hence the unwrap.
    .unwrap();
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/connect_socket.rs | libs/proxy/tokio-postgres2/src/connect_socket.rs | use std::future::Future;
use std::io;
use std::net::{IpAddr, SocketAddr};
use std::time::Duration;
use tokio::net::{self, TcpStream};
use tokio::time;
use crate::Error;
use crate::config::Host;
pub(crate) async fn connect_socket(
host_addr: Option<IpAddr>,
host: &Host,
port: u16,
connect_timeout: Option<Duration>,
) -> Result<TcpStream, Error> {
match host {
Host::Tcp(host) => {
let addrs = match host_addr {
Some(addr) => vec![SocketAddr::new(addr, port)],
None => net::lookup_host((&**host, port))
.await
.map_err(Error::connect)?
.collect(),
};
let mut last_err = None;
for addr in addrs {
let stream =
match connect_with_timeout(TcpStream::connect(addr), connect_timeout).await {
Ok(stream) => stream,
Err(e) => {
last_err = Some(e);
continue;
}
};
stream.set_nodelay(true).map_err(Error::connect)?;
return Ok(stream);
}
Err(last_err.unwrap_or_else(|| {
Error::connect(io::Error::new(
io::ErrorKind::InvalidInput,
"could not resolve any addresses",
))
}))
}
}
}
async fn connect_with_timeout<F, T>(connect: F, timeout: Option<Duration>) -> Result<T, Error>
where
F: Future<Output = io::Result<T>>,
{
match timeout {
Some(timeout) => match time::timeout(timeout, connect).await {
Ok(Ok(socket)) => Ok(socket),
Ok(Err(e)) => Err(Error::connect(e)),
Err(_) => Err(Error::connect(io::Error::new(
io::ErrorKind::TimedOut,
"connection timed out",
))),
},
None => match connect.await {
Ok(socket) => Ok(socket),
Err(e) => Err(Error::connect(e)),
},
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/connect_raw.rs | libs/proxy/tokio-postgres2/src/connect_raw.rs | use std::io;
use std::pin::Pin;
use std::task::{Context, Poll, ready};
use bytes::BytesMut;
use fallible_iterator::FallibleIterator;
use futures_util::{SinkExt, Stream, TryStreamExt};
use postgres_protocol2::authentication::sasl;
use postgres_protocol2::authentication::sasl::ScramSha256;
use postgres_protocol2::message::backend::{AuthenticationSaslBody, Message};
use postgres_protocol2::message::frontend;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio_util::codec::{Framed, FramedParts};
use crate::Error;
use crate::codec::PostgresCodec;
use crate::config::{self, AuthKeys, Config};
use crate::connection::{GC_THRESHOLD, INITIAL_CAPACITY};
use crate::maybe_tls_stream::MaybeTlsStream;
use crate::tls::TlsStream;
/// Wrapper around the framed connection used only during the startup /
/// authentication phase.
pub struct StartupStream<S, T> {
    inner: Framed<MaybeTlsStream<S, T>, PostgresCodec>,
    // Private read buffer: startup reads are byte-exact so no
    // post-authentication data is pulled off the socket prematurely (see the
    // `Stream` impl below).
    read_buf: BytesMut,
}
impl<S, T> Stream for StartupStream<S, T>
where
    S: AsyncRead + AsyncWrite + Unpin,
    T: AsyncRead + AsyncWrite + Unpin,
{
    type Item = io::Result<Message>;

    /// Reads exactly one backend message, validating its declared length
    /// before pulling the body off the socket.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // We don't use `self.inner.poll_next()` as that might over-read into the read buffer.

        // read 1 byte tag, 4 bytes length.
        let header = ready!(self.as_mut().poll_fill_buf_exact(cx, 5)?);
        let len = u32::from_be_bytes(header[1..5].try_into().unwrap());

        // The length field counts itself (4 bytes) but not the tag byte.
        if len < 4 {
            return Poll::Ready(Some(Err(std::io::Error::other(
                "postgres message too small",
            ))));
        }
        if len >= 65536 {
            return Poll::Ready(Some(Err(std::io::Error::other(
                "postgres message too large",
            ))));
        }

        // the tag is an additional byte.
        let _message = ready!(self.as_mut().poll_fill_buf_exact(cx, len as usize + 1)?);

        // Message::parse will remove the all the bytes from the buffer.
        Poll::Ready(Message::parse(&mut self.read_buf).transpose())
    }
}
impl<S, T> StartupStream<S, T>
where
    S: AsyncRead + AsyncWrite + Unpin,
    T: AsyncRead + AsyncWrite + Unpin,
{
    /// Fill the buffer until it's the exact length provided. No additional data will be read from the socket.
    ///
    /// If the current buffer length is greater, nothing happens.
    fn poll_fill_buf_exact(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        len: usize,
    ) -> Poll<Result<&[u8], std::io::Error>> {
        let this = self.get_mut();
        let mut stream = Pin::new(this.inner.get_mut());

        let mut n = this.read_buf.len();
        while n < len {
            // Zero-extend the buffer to `len` so it can serve as the read
            // target, then mark the first `n` bytes as already filled.
            this.read_buf.resize(len, 0);
            let mut buf = ReadBuf::new(&mut this.read_buf[..]);
            buf.set_filled(n);

            if stream.as_mut().poll_read(cx, &mut buf)?.is_pending() {
                // Drop the zero padding before yielding so `read_buf.len()`
                // always reflects real bytes only.
                this.read_buf.truncate(n);
                return Poll::Pending;
            }

            // A ready read that made no progress means the peer closed early.
            if buf.filled().len() == n {
                return Poll::Ready(Err(std::io::Error::new(
                    std::io::ErrorKind::UnexpectedEof,
                    "early eof",
                )));
            }

            n = buf.filled().len();
            this.read_buf.truncate(n);
        }

        Poll::Ready(Ok(&this.read_buf[..len]))
    }

    /// Converts back into the framed transport, carrying over any bytes
    /// already buffered during startup.
    pub fn into_framed(mut self) -> Framed<MaybeTlsStream<S, T>, PostgresCodec> {
        *self.inner.read_buffer_mut() = self.read_buf;
        self.inner
    }

    pub fn new(io: MaybeTlsStream<S, T>) -> Self {
        let mut parts = FramedParts::new(io, PostgresCodec);
        parts.write_buf = BytesMut::with_capacity(INITIAL_CAPACITY);
        let mut inner = Framed::from_parts(parts);

        // This is the default already, but nice to be explicit.
        // We divide by two because writes will overshoot the boundary.
        // We don't want constant overshoots to cause us to constantly re-shrink the buffer.
        inner.set_backpressure_boundary(GC_THRESHOLD / 2);

        Self {
            inner,
            read_buf: BytesMut::with_capacity(INITIAL_CAPACITY),
        }
    }
}
/// Sends the startup message and drives the authentication exchange until the
/// server reports `AuthenticationOk`.
///
/// Supported methods: trust (no auth), cleartext password, and SASL (SCRAM);
/// md5/kerberos/SCM/GSS/SSPI are rejected.
pub(crate) async fn authenticate<S, T>(
    stream: &mut StartupStream<S, T>,
    config: &Config,
) -> Result<(), Error>
where
    S: AsyncRead + AsyncWrite + Unpin,
    T: TlsStream + Unpin,
{
    frontend::startup_message(&config.server_params, stream.inner.write_buffer_mut())
        .map_err(Error::encode)?;
    stream.inner.flush().await.map_err(Error::io)?;

    match stream.try_next().await.map_err(Error::io)? {
        Some(Message::AuthenticationOk) => {
            // No credential exchange happened, so channel binding could not
            // have been used; fail if the config requires it.
            can_skip_channel_binding(config)?;
            return Ok(());
        }
        Some(Message::AuthenticationCleartextPassword) => {
            can_skip_channel_binding(config)?;

            let pass = config
                .password
                .as_ref()
                .ok_or_else(|| Error::config("password missing".into()))?;

            frontend::password_message(pass, stream.inner.write_buffer_mut())
                .map_err(Error::encode)?;
        }
        Some(Message::AuthenticationSasl(body)) => {
            authenticate_sasl(stream, body, config).await?;
        }
        Some(Message::AuthenticationMd5Password)
        | Some(Message::AuthenticationKerberosV5)
        | Some(Message::AuthenticationScmCredential)
        | Some(Message::AuthenticationGss)
        | Some(Message::AuthenticationSspi) => {
            return Err(Error::authentication(
                "unsupported authentication method".into(),
            ));
        }
        Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
        Some(_) => return Err(Error::unexpected_message()),
        None => return Err(Error::closed()),
    }

    // Flush any pending response (e.g. the password message) and wait for the
    // final verdict.
    stream.inner.flush().await.map_err(Error::io)?;

    match stream.try_next().await.map_err(Error::io)? {
        Some(Message::AuthenticationOk) => Ok(()),
        Some(Message::ErrorResponse(body)) => Err(Error::db(body)),
        Some(_) => Err(Error::unexpected_message()),
        None => Err(Error::closed()),
    }
}
fn can_skip_channel_binding(config: &Config) -> Result<(), Error> {
match config.channel_binding {
config::ChannelBinding::Disable | config::ChannelBinding::Prefer => Ok(()),
config::ChannelBinding::Require => Err(Error::authentication(
"server did not use channel binding".into(),
)),
}
}
/// Runs the SCRAM-SHA-256(-PLUS) SASL authentication exchange.
async fn authenticate_sasl<S, T>(
    stream: &mut StartupStream<S, T>,
    body: AuthenticationSaslBody,
    config: &Config,
) -> Result<(), Error>
where
    S: AsyncRead + AsyncWrite + Unpin,
    T: TlsStream + Unpin,
{
    // Which SCRAM flavours does the server offer?
    let mut has_scram = false;
    let mut has_scram_plus = false;
    let mut mechanisms = body.mechanisms();
    while let Some(mechanism) = mechanisms.next().map_err(Error::parse)? {
        match mechanism {
            sasl::SCRAM_SHA_256 => has_scram = true,
            sasl::SCRAM_SHA_256_PLUS => has_scram_plus = true,
            _ => {}
        }
    }

    // Channel binding data comes from the TLS layer (if any), and is only
    // used when the config does not disable it.
    let channel_binding = stream
        .inner
        .get_ref()
        .channel_binding()
        .tls_server_end_point
        .filter(|_| config.channel_binding != config::ChannelBinding::Disable)
        .map(sasl::ChannelBinding::tls_server_end_point);

    // Prefer SCRAM-SHA-256-PLUS (with binding) when both sides support it;
    // otherwise fall back to plain SCRAM with the appropriate gs2 flag.
    let (channel_binding, mechanism) = if has_scram_plus {
        match channel_binding {
            Some(channel_binding) => (channel_binding, sasl::SCRAM_SHA_256_PLUS),
            None => (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256),
        }
    } else if has_scram {
        match channel_binding {
            Some(_) => (sasl::ChannelBinding::unrequested(), sasl::SCRAM_SHA_256),
            None => (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256),
        }
    } else {
        return Err(Error::authentication("unsupported SASL mechanism".into()));
    };

    if mechanism != sasl::SCRAM_SHA_256_PLUS {
        can_skip_channel_binding(config)?;
    }

    // Stored SCRAM keys take precedence over a raw password.
    let mut scram = if let Some(AuthKeys::ScramSha256(keys)) = config.get_auth_keys() {
        ScramSha256::new_with_keys(keys, channel_binding)
    } else if let Some(password) = config.get_password() {
        ScramSha256::new(password, channel_binding)
    } else {
        return Err(Error::config("password or auth keys missing".into()));
    };

    // client-first-message
    frontend::sasl_initial_response(mechanism, scram.message(), stream.inner.write_buffer_mut())
        .map_err(Error::encode)?;
    stream.inner.flush().await.map_err(Error::io)?;

    // server-first-message
    let body = match stream.try_next().await.map_err(Error::io)? {
        Some(Message::AuthenticationSaslContinue(body)) => body,
        Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
        Some(_) => return Err(Error::unexpected_message()),
        None => return Err(Error::closed()),
    };

    scram
        .update(body.data())
        .await
        .map_err(|e| Error::authentication(e.into()))?;

    // client-final-message
    frontend::sasl_response(scram.message(), stream.inner.write_buffer_mut())
        .map_err(Error::encode)?;
    stream.inner.flush().await.map_err(Error::io)?;

    // server-final-message
    let body = match stream.try_next().await.map_err(Error::io)? {
        Some(Message::AuthenticationSaslFinal(body)) => body,
        Some(Message::ErrorResponse(body)) => return Err(Error::db(body)),
        Some(_) => return Err(Error::unexpected_message()),
        None => return Err(Error::closed()),
    };

    scram
        .finish(body.data())
        .map_err(|e| Error::authentication(e.into()))?;

    Ok(())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/transaction.rs | libs/proxy/tokio-postgres2/src/transaction.rs | use crate::query::RowStream;
use crate::{CancelToken, Client, Error, ReadyForQueryStatus};
/// A representation of a PostgreSQL database transaction.
///
/// A transaction that is dropped without being consumed issues a best-effort
/// `ROLLBACK`; call [`Transaction::commit`] to persist its changes instead.
/// Transactions can be nested, with inner transactions implemented via
/// savepoints.
pub struct Transaction<'a> {
    client: &'a mut Client,
    done: bool,
}

impl Drop for Transaction<'_> {
    fn drop(&mut self) {
        // Only roll back if neither `commit` nor `rollback` ran. The result
        // is intentionally ignored — the connection may already be gone.
        if !self.done {
            let _ = self.client.inner_mut().send_simple_query("ROLLBACK");
        }
    }
}

impl<'a> Transaction<'a> {
    pub(crate) fn new(client: &'a mut Client) -> Transaction<'a> {
        Transaction {
            client,
            done: false,
        }
    }

    /// Consumes the transaction, committing all changes made within it.
    pub async fn commit(mut self) -> Result<ReadyForQueryStatus, Error> {
        self.done = true;
        self.client.batch_execute("COMMIT").await
    }

    /// Rolls the transaction back, discarding all changes made within it.
    ///
    /// Equivalent to dropping the transaction, but surfaces any error to the
    /// caller.
    pub async fn rollback(mut self) -> Result<ReadyForQueryStatus, Error> {
        self.done = true;
        self.client.batch_execute("ROLLBACK").await
    }

    /// Like `Client::query_raw_txt`.
    pub async fn query_raw_txt<S, I>(
        &mut self,
        statement: &str,
        params: I,
    ) -> Result<RowStream<'_>, Error>
    where
        S: AsRef<str>,
        I: IntoIterator<Item = Option<S>>,
        I::IntoIter: ExactSizeIterator,
    {
        self.client.query_raw_txt(statement, params).await
    }

    /// Like `Client::cancel_token`.
    pub fn cancel_token(&self) -> CancelToken {
        self.client.cancel_token()
    }

    /// Returns a shared reference to the underlying `Client`.
    pub fn client(&self) -> &Client {
        self.client
    }

    /// Returns a mutable reference to the underlying `Client`.
    pub fn client_mut(&mut self) -> &mut Client {
        self.client
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/error/mod.rs | libs/proxy/tokio-postgres2/src/error/mod.rs | //! Errors.
use std::error::{self, Error as _Error};
use std::{fmt, io};
use fallible_iterator::FallibleIterator;
use postgres_protocol2::message::backend::{ErrorFields, ErrorResponseBody};
pub use self::sqlstate::*;
#[allow(clippy::unreadable_literal)]
pub mod sqlstate;
/// The severity of a Postgres error or notice.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Severity {
    /// PANIC
    Panic,
    /// FATAL
    Fatal,
    /// ERROR
    Error,
    /// WARNING
    Warning,
    /// NOTICE
    Notice,
    /// DEBUG
    Debug,
    /// INFO
    Info,
    /// LOG
    Log,
}

impl fmt::Display for Severity {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.write_str(match self {
            Severity::Panic => "PANIC",
            Severity::Fatal => "FATAL",
            Severity::Error => "ERROR",
            Severity::Warning => "WARNING",
            Severity::Notice => "NOTICE",
            Severity::Debug => "DEBUG",
            Severity::Info => "INFO",
            Severity::Log => "LOG",
        })
    }
}

impl Severity {
    /// Parses the nonlocalized severity tag reported by the server, returning
    /// `None` for anything unrecognized.
    fn from_str(s: &str) -> Option<Severity> {
        let severity = match s {
            "PANIC" => Severity::Panic,
            "FATAL" => Severity::Fatal,
            "ERROR" => Severity::Error,
            "WARNING" => Severity::Warning,
            "NOTICE" => Severity::Notice,
            "DEBUG" => Severity::Debug,
            "INFO" => Severity::Info,
            "LOG" => Severity::Log,
            _ => return None,
        };
        Some(severity)
    }
}
/// A Postgres error or notice.
///
/// Fields mirror the single-letter-tagged fields of the wire-format
/// ErrorResponse / NoticeResponse messages (see `DbError::parse`).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DbError {
    severity: String,
    parsed_severity: Option<Severity>,
    code: SqlState,
    message: String,
    detail: Option<String>,
    hint: Option<String>,
    position: Option<ErrorPosition>,
    where_: Option<String>,
    schema: Option<String>,
    table: Option<String>,
    column: Option<String>,
    datatype: Option<String>,
    constraint: Option<String>,
    file: Option<String>,
    line: Option<u32>,
    routine: Option<String>,
}
impl DbError {
    /// Builds a `DbError` with only a code and message populated; intended
    /// for constructing synthetic errors in tests.
    pub fn new_test_error(code: SqlState, message: String) -> Self {
        DbError {
            severity: "ERROR".to_string(),
            parsed_severity: Some(Severity::Error),
            code,
            message,
            detail: None,
            hint: None,
            position: None,
            where_: None,
            schema: None,
            table: None,
            column: None,
            datatype: None,
            constraint: None,
            file: None,
            line: None,
            routine: None,
        }
    }

    /// Parses the single-letter-tagged fields of an ErrorResponse /
    /// NoticeResponse body into a structured `DbError`.
    ///
    /// `S` (severity), `C` (code) and `M` (message) are mandatory; everything
    /// else is optional and unknown tags are skipped.
    pub(crate) fn parse(fields: &mut ErrorFields<'_>) -> io::Result<DbError> {
        let mut severity = None;
        let mut parsed_severity = None;
        let mut code = None;
        let mut message = None;
        let mut detail = None;
        let mut hint = None;
        let mut normal_position = None;
        let mut internal_position = None;
        let mut internal_query = None;
        let mut where_ = None;
        let mut schema = None;
        let mut table = None;
        let mut column = None;
        let mut datatype = None;
        let mut constraint = None;
        let mut file = None;
        let mut line = None;
        let mut routine = None;

        while let Some(field) = fields.next()? {
            match field.type_() {
                b'S' => severity = Some(field.value().to_owned()),
                b'C' => code = Some(SqlState::from_code(field.value())),
                b'M' => message = Some(field.value().to_owned()),
                b'D' => detail = Some(field.value().to_owned()),
                b'H' => hint = Some(field.value().to_owned()),
                b'P' => {
                    normal_position = Some(field.value().parse::<u32>().map_err(|_| {
                        io::Error::new(
                            io::ErrorKind::InvalidInput,
                            "`P` field did not contain an integer",
                        )
                    })?);
                }
                b'p' => {
                    internal_position = Some(field.value().parse::<u32>().map_err(|_| {
                        io::Error::new(
                            io::ErrorKind::InvalidInput,
                            "`p` field did not contain an integer",
                        )
                    })?);
                }
                b'q' => internal_query = Some(field.value().to_owned()),
                b'W' => where_ = Some(field.value().to_owned()),
                b's' => schema = Some(field.value().to_owned()),
                b't' => table = Some(field.value().to_owned()),
                b'c' => column = Some(field.value().to_owned()),
                b'd' => datatype = Some(field.value().to_owned()),
                b'n' => constraint = Some(field.value().to_owned()),
                b'F' => file = Some(field.value().to_owned()),
                b'L' => {
                    line = Some(field.value().parse::<u32>().map_err(|_| {
                        io::Error::new(
                            io::ErrorKind::InvalidInput,
                            "`L` field did not contain an integer",
                        )
                    })?);
                }
                b'R' => routine = Some(field.value().to_owned()),
                b'V' => {
                    parsed_severity = Some(Severity::from_str(field.value()).ok_or_else(|| {
                        io::Error::new(
                            io::ErrorKind::InvalidInput,
                            "`V` field contained an invalid value",
                        )
                    })?);
                }
                _ => {}
            }
        }

        Ok(DbError {
            severity: severity
                .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "`S` field missing"))?,
            parsed_severity,
            code: code
                .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "`C` field missing"))?,
            message: message
                .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "`M` field missing"))?,
            detail,
            hint,
            // A "normal" (`P`) position wins; an internal (`p`) position is
            // only valid together with the internal query (`q`).
            position: match normal_position {
                Some(position) => Some(ErrorPosition::Original(position)),
                None => match internal_position {
                    Some(position) => Some(ErrorPosition::Internal {
                        position,
                        query: internal_query.ok_or_else(|| {
                            io::Error::new(
                                io::ErrorKind::InvalidInput,
                                "`q` field missing but `p` field present",
                            )
                        })?,
                    }),
                    None => None,
                },
            },
            where_,
            schema,
            table,
            column,
            datatype,
            constraint,
            file,
            line,
            routine,
        })
    }

    /// The field contents are ERROR, FATAL, or PANIC (in an error message),
    /// or WARNING, NOTICE, DEBUG, INFO, or LOG (in a notice message), or a
    /// localized translation of one of these.
    pub fn severity(&self) -> &str {
        &self.severity
    }

    /// A parsed, nonlocalized version of `severity`. (PostgreSQL 9.6+)
    pub fn parsed_severity(&self) -> Option<Severity> {
        self.parsed_severity
    }

    /// The SQLSTATE code for the error.
    pub fn code(&self) -> &SqlState {
        &self.code
    }

    /// The primary human-readable error message.
    ///
    /// This should be accurate but terse (typically one line).
    pub fn message(&self) -> &str {
        &self.message
    }

    /// An optional secondary error message carrying more detail about the
    /// problem.
    ///
    /// Might run to multiple lines.
    pub fn detail(&self) -> Option<&str> {
        self.detail.as_deref()
    }

    /// An optional suggestion what to do about the problem.
    ///
    /// This is intended to differ from `detail` in that it offers advice
    /// (potentially inappropriate) rather than hard facts. Might run to
    /// multiple lines.
    pub fn hint(&self) -> Option<&str> {
        self.hint.as_deref()
    }

    /// An optional error cursor position into either the original query string
    /// or an internally generated query.
    pub fn position(&self) -> Option<&ErrorPosition> {
        self.position.as_ref()
    }

    /// An indication of the context in which the error occurred.
    ///
    /// Presently this includes a call stack traceback of active procedural
    /// language functions and internally-generated queries. The trace is one
    /// entry per line, most recent first.
    pub fn where_(&self) -> Option<&str> {
        self.where_.as_deref()
    }

    /// If the error was associated with a specific database object, the name
    /// of the schema containing that object, if any. (PostgreSQL 9.3+)
    pub fn schema(&self) -> Option<&str> {
        self.schema.as_deref()
    }

    /// If the error was associated with a specific table, the name of the
    /// table. (Refer to the schema name field for the name of the table's
    /// schema.) (PostgreSQL 9.3+)
    pub fn table(&self) -> Option<&str> {
        self.table.as_deref()
    }

    /// If the error was associated with a specific table column, the name of
    /// the column.
    ///
    /// (Refer to the schema and table name fields to identify the table.)
    /// (PostgreSQL 9.3+)
    pub fn column(&self) -> Option<&str> {
        self.column.as_deref()
    }

    /// If the error was associated with a specific data type, the name of the
    /// data type. (Refer to the schema name field for the name of the data
    /// type's schema.) (PostgreSQL 9.3+)
    pub fn datatype(&self) -> Option<&str> {
        self.datatype.as_deref()
    }

    /// If the error was associated with a specific constraint, the name of the
    /// constraint.
    ///
    /// Refer to fields listed above for the associated table or domain.
    /// (For this purpose, indexes are treated as constraints, even if they
    /// weren't created with constraint syntax.) (PostgreSQL 9.3+)
    pub fn constraint(&self) -> Option<&str> {
        self.constraint.as_deref()
    }

    /// The file name of the source-code location where the error was reported.
    pub fn file(&self) -> Option<&str> {
        self.file.as_deref()
    }

    /// The line number of the source-code location where the error was
    /// reported.
    pub fn line(&self) -> Option<u32> {
        self.line
    }

    /// The name of the source-code routine reporting the error.
    pub fn routine(&self) -> Option<&str> {
        self.routine.as_deref()
    }
}
impl fmt::Display for DbError {
    /// Formats as "SEVERITY: message", followed by optional DETAIL and HINT
    /// lines.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(fmt, "{}: {}", self.severity, self.message)?;
        if let Some(detail) = &self.detail {
            write!(fmt, "\nDETAIL: {detail}")?;
        }
        if let Some(hint) = &self.hint {
            write!(fmt, "\nHINT: {hint}")?;
        }
        Ok(())
    }
}

impl error::Error for DbError {}
/// Represents the position of an error in a query.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum ErrorPosition {
    /// A position in the original query.
    Original(u32),
    /// A position in an internally generated query (e.g. inside a function
    /// body, carried in the `q` error field).
    Internal {
        /// The byte position.
        position: u32,
        /// A query generated by the Postgres server.
        query: String,
    },
}
/// Internal classification of an [`Error`]; the variant selects the
/// human-readable prefix used by the `Display` impl below.
#[derive(Debug, PartialEq)]
enum Kind {
    Io,
    UnexpectedMessage,
    Tls,
    /// Failed to serialize the parameter at this index.
    ToSql(usize),
    /// Failed to deserialize the column at this index.
    FromSql(usize),
    /// Reference to an unknown column name.
    Column(String),
    Closed,
    Db,
    Parse,
    Encode,
    Authentication,
    Config,
    Connect,
    Timeout,
}

// Boxed payload of `Error`: keeps `Error` itself a single pointer wide so
// `Result<T, Error>` stays small.
struct ErrorInner {
    kind: Kind,
    cause: Option<Box<dyn error::Error + Sync + Send>>,
}

/// An error communicating with the Postgres server.
pub struct Error(Box<ErrorInner>);
impl fmt::Debug for Error {
    // Manual impl (rather than derive) so the output shows the inner kind and
    // cause without exposing the `ErrorInner` wrapper.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("Error")
            .field("kind", &self.0.kind)
            .field("cause", &self.0.cause)
            .finish()
    }
}
impl fmt::Display for Error {
    /// Writes a per-kind description, followed by `": <cause>"` when a cause
    /// is attached.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        match &self.0.kind {
            Kind::Io => fmt.write_str("error communicating with the server")?,
            Kind::UnexpectedMessage => fmt.write_str("unexpected message from server")?,
            Kind::Tls => fmt.write_str("error performing TLS handshake")?,
            Kind::ToSql(idx) => write!(fmt, "error serializing parameter {idx}")?,
            Kind::FromSql(idx) => write!(fmt, "error deserializing column {idx}")?,
            Kind::Column(column) => write!(fmt, "invalid column `{column}`")?,
            Kind::Closed => fmt.write_str("connection closed")?,
            Kind::Db => fmt.write_str("db error")?,
            Kind::Parse => fmt.write_str("error parsing response from server")?,
            Kind::Encode => fmt.write_str("error encoding message to server")?,
            Kind::Authentication => fmt.write_str("authentication error")?,
            Kind::Config => fmt.write_str("invalid configuration")?,
            Kind::Connect => fmt.write_str("error connecting to server")?,
            Kind::Timeout => fmt.write_str("timeout waiting for server")?,
        };
        if let Some(ref cause) = self.0.cause {
            write!(fmt, ": {cause}")?;
        }
        Ok(())
    }
}
impl error::Error for Error {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        // Expose the boxed cause (if any) so callers can `downcast_ref`,
        // e.g. `as_db_error` below.
        self.0.cause.as_ref().map(|e| &**e as _)
    }
}
impl Error {
    /// Consumes the error, returning its cause.
    pub fn into_source(self) -> Option<Box<dyn error::Error + Sync + Send>> {
        self.0.cause
    }

    /// Returns the source of this error if it was a `DbError`.
    ///
    /// This is a simple convenience method.
    pub fn as_db_error(&self) -> Option<&DbError> {
        self.source().and_then(|e| e.downcast_ref::<DbError>())
    }

    /// Determines if the error was associated with closed connection.
    pub fn is_closed(&self) -> bool {
        self.0.kind == Kind::Closed
    }

    /// Returns the SQLSTATE error code associated with the error.
    ///
    /// This is a convenience method that downcasts the cause to a `DbError` and returns its code.
    pub fn code(&self) -> Option<&SqlState> {
        self.as_db_error().map(DbError::code)
    }

    // Internal constructor; all constructors below funnel through here.
    fn new(kind: Kind, cause: Option<Box<dyn error::Error + Sync + Send>>) -> Error {
        Error(Box::new(ErrorInner { kind, cause }))
    }

    /// Error for a connection that has been closed.
    pub fn closed() -> Error {
        Error::new(Kind::Closed, None)
    }

    /// Error for a protocol message that was not expected in the current
    /// state.
    pub fn unexpected_message() -> Error {
        Error::new(Kind::UnexpectedMessage, None)
    }

    /// Wraps a server ErrorResponse body; falls back to a parse error if the
    /// body itself is malformed.
    #[allow(clippy::needless_pass_by_value)]
    pub fn db(error: ErrorResponseBody) -> Error {
        match DbError::parse(&mut error.fields()) {
            Ok(e) => Error::new(Kind::Db, Some(Box::new(e))),
            Err(e) => Error::new(Kind::Parse, Some(Box::new(e))),
        }
    }

    pub(crate) fn parse(e: io::Error) -> Error {
        Error::new(Kind::Parse, Some(Box::new(e)))
    }

    pub(crate) fn encode(e: io::Error) -> Error {
        Error::new(Kind::Encode, Some(Box::new(e)))
    }

    #[allow(clippy::wrong_self_convention)]
    pub(crate) fn to_sql(e: Box<dyn error::Error + Sync + Send>, idx: usize) -> Error {
        Error::new(Kind::ToSql(idx), Some(e))
    }

    pub(crate) fn from_sql(e: Box<dyn error::Error + Sync + Send>, idx: usize) -> Error {
        Error::new(Kind::FromSql(idx), Some(e))
    }

    pub(crate) fn column(column: String) -> Error {
        Error::new(Kind::Column(column), None)
    }

    pub(crate) fn tls(e: Box<dyn error::Error + Sync + Send>) -> Error {
        Error::new(Kind::Tls, Some(e))
    }

    /// Wraps a transport-level I/O failure.
    pub fn io(e: io::Error) -> Error {
        Error::new(Kind::Io, Some(Box::new(e)))
    }

    pub(crate) fn authentication(e: Box<dyn error::Error + Sync + Send>) -> Error {
        Error::new(Kind::Authentication, Some(e))
    }

    pub(crate) fn config(e: Box<dyn error::Error + Sync + Send>) -> Error {
        Error::new(Kind::Config, Some(e))
    }

    pub(crate) fn connect(e: io::Error) -> Error {
        Error::new(Kind::Connect, Some(Box::new(e)))
    }

    #[doc(hidden)]
    pub fn __private_api_timeout() -> Error {
        Error::new(Kind::Timeout, None)
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/tokio-postgres2/src/error/sqlstate.rs | libs/proxy/tokio-postgres2/src/error/sqlstate.rs | //! Rust repr for <https://www.postgresql.org/docs/current/errcodes-appendix.html>
/// A SQLSTATE error code
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct SqlState([u8; 5]);

impl SqlState {
    /// Creates a `SqlState` from its error code.
    ///
    /// Codes that are not exactly five bytes long collapse to `"00000"`.
    pub fn from_code(s: &str) -> SqlState {
        match <[u8; 5]>::try_from(s.as_bytes()) {
            Ok(code) => SqlState(code),
            Err(_) => SqlState(*b"00000"),
        }
    }

    /// Returns the error code corresponding to the `SqlState`.
    pub fn code(&self) -> &str {
        // Always ASCII: constructed either from a 5-byte &str or from the
        // byte-string constants below.
        std::str::from_utf8(&self.0).unwrap()
    }

    // Class 08 - Connection Exception

    /// 08000
    pub const CONNECTION_EXCEPTION: SqlState = SqlState(*b"08000");
    /// 08003
    pub const CONNECTION_DOES_NOT_EXIST: SqlState = SqlState(*b"08003");
    /// 08006
    pub const CONNECTION_FAILURE: SqlState = SqlState(*b"08006");
    /// 08001
    pub const SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION: SqlState = SqlState(*b"08001");
    /// 08P01
    pub const PROTOCOL_VIOLATION: SqlState = SqlState(*b"08P01");

    // Class 22 - Data Exception

    /// 22023
    pub const INVALID_PARAMETER_VALUE: SqlState = SqlState(*b"22023");

    // Class 3D - Invalid Catalog Name

    /// 3D000
    pub const INVALID_CATALOG_NAME: SqlState = SqlState(*b"3D000");

    // Class 3F - Invalid Schema Name

    /// 3F000
    pub const INVALID_SCHEMA_NAME: SqlState = SqlState(*b"3F000");

    // Class 40 - Transaction Rollback

    /// 40001
    pub const T_R_SERIALIZATION_FAILURE: SqlState = SqlState(*b"40001");

    // Class 42 - Syntax Error or Access Rule Violation

    /// 42601
    pub const SYNTAX_ERROR: SqlState = SqlState(*b"42601");

    // Class 53 - Insufficient Resources

    /// 53200
    pub const OUT_OF_MEMORY: SqlState = SqlState(*b"53200");
    /// 53300
    pub const TOO_MANY_CONNECTIONS: SqlState = SqlState(*b"53300");

    // Class 57 - Operator Intervention

    /// 57014
    pub const QUERY_CANCELED: SqlState = SqlState(*b"57014");
}
#[cfg(test)]
mod tests {
    use super::SqlState;

    // `from_code`/`code` must round-trip a five-byte SQLSTATE, and the
    // result must compare equal to the matching named constant.
    #[test]
    fn round_trip() {
        let state = SqlState::from_code("08P01");
        assert_eq!(state, SqlState::PROTOCOL_VIOLATION);
        assert_eq!(state.code(), "08P01");
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/postgres-protocol2/src/lib.rs | libs/proxy/postgres-protocol2/src/lib.rs | //! Low level Postgres protocol APIs.
//!
//! This crate implements the low level components of Postgres's communication
//! protocol, including message and value serialization and deserialization.
//! It is designed to be used as a building block by higher level APIs such as
//! `rust-postgres`, and should not typically be used directly.
//!
//! # Note
//!
//! This library assumes that the `client_encoding` backend parameter has been
//! set to `UTF8`. It will most likely not behave properly if that is not the case.
#![warn(missing_docs, clippy::all)]
use std::io;
use byteorder::{BigEndian, ByteOrder};
use bytes::{BufMut, BytesMut};
pub mod authentication;
pub mod escape;
pub mod message;
pub mod password;
pub mod types;
/// A Postgres OID.
pub type Oid = u32;

/// A Postgres Log Sequence Number (LSN).
pub type Lsn = u64;

/// An enum indicating if a value is `NULL` or not.
///
/// Returned by value serializers so `write_nullable` can set the wire
/// length prefix to `-1` for NULL values.
pub enum IsNull {
    /// The value is `NULL`.
    Yes,
    /// The value is not `NULL`.
    No,
}
/// Writes a length-prefixed, possibly-NULL value into `buf`.
///
/// Reserves a 4-byte big-endian length prefix, runs `serializer` to append
/// the value bytes, then back-patches the prefix with the number of bytes
/// written — or `-1` when the serializer reports `IsNull::Yes`.
fn write_nullable<F, E>(serializer: F, buf: &mut BytesMut) -> Result<(), E>
where
    F: FnOnce(&mut BytesMut) -> Result<IsNull, E>,
    E: From<io::Error>,
{
    // Remember where the length prefix starts so it can be patched later.
    let base = buf.len();
    buf.put_i32(0);
    let size = match serializer(buf)? {
        // Bytes appended by `serializer`, excluding the 4-byte prefix itself.
        IsNull::No => i32::from_usize(buf.len() - base - 4)?,
        IsNull::Yes => -1,
    };
    BigEndian::write_i32(&mut buf[base..], size);
    Ok(())
}

/// Checked conversion from `usize` to a wire-format length type.
trait FromUsize: Sized {
    fn from_usize(x: usize) -> Result<Self, io::Error>;
}

macro_rules! from_usize {
    ($t:ty) => {
        impl FromUsize for $t {
            #[inline]
            fn from_usize(x: usize) -> io::Result<$t> {
                // A length that does not fit the wire type is a hard error,
                // never a silent truncation.
                if x > <$t>::MAX as usize {
                    Err(io::Error::new(
                        io::ErrorKind::InvalidInput,
                        "value too large to transmit",
                    ))
                } else {
                    Ok(x as $t)
                }
            }
        }
    };
}

from_usize!(i16);
from_usize!(i32);
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/postgres-protocol2/src/password/test.rs | libs/proxy/postgres-protocol2/src/password/test.rs | use crate::password;
// Pins the full SCRAM-SHA-256 verifier output for a fixed password/salt.
// The expected string follows the format built by `scram_sha_256_salt`:
// SCRAM-SHA-256$<iterations>:<salt b64>$<stored key b64>:<server key b64>.
#[tokio::test]
async fn test_encrypt_scram_sha_256() {
    // Specify the salt to make the test deterministic. Any bytes will do.
    let salt: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
    assert_eq!(
        password::scram_sha_256_salt(b"secret", salt).await,
        "SCRAM-SHA-256$4096:AQIDBAUGBwgJCgsMDQ4PEA==$8rrDg00OqaiWXJ7p+sCgHEIaBSHY89ZJl3mfIsf32oY=:05L1f+yZbiN8O0AnO40Og85NNRhvzTS57naKRWCcsIA="
    );
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/postgres-protocol2/src/password/mod.rs | libs/proxy/postgres-protocol2/src/password/mod.rs | //! Functions to encrypt a password in the client.
//!
//! This is intended to be used by client applications that wish to
//! send commands like `ALTER USER joe PASSWORD 'pwd'`. The password
//! need not be sent in cleartext if it is encrypted on the client
//! side. This is good because it ensures the cleartext password won't
//! end up in logs pg_stat displays, etc.
use base64::Engine as _;
use base64::prelude::BASE64_STANDARD;
use hmac::{Hmac, Mac};
use rand::RngCore;
use sha2::digest::FixedOutput;
use sha2::{Digest, Sha256};
use crate::authentication::sasl;
#[cfg(test)]
mod test;
const SCRAM_DEFAULT_ITERATIONS: u32 = 4096;
const SCRAM_DEFAULT_SALT_LEN: usize = 16;

/// Hash password using SCRAM-SHA-256 with a randomly-generated
/// salt.
///
/// The client may assume the returned string doesn't contain any
/// special characters that would require escaping in an SQL command.
pub async fn scram_sha_256(password: &[u8]) -> String {
    // Draw a fresh random salt, then delegate to the salt-taking variant.
    let salt = {
        let mut bytes = [0u8; SCRAM_DEFAULT_SALT_LEN];
        rand::rng().fill_bytes(&mut bytes);
        bytes
    };
    scram_sha_256_salt(password, salt).await
}
// Internal implementation of scram_sha_256 with a caller-provided
// salt. This is useful for testing.
//
// Produces the stored SCRAM verifier string:
// SCRAM-SHA-256$<iterations>:<salt b64>$<stored key b64>:<server key b64>.
pub(crate) async fn scram_sha_256_salt(
    password: &[u8],
    salt: [u8; SCRAM_DEFAULT_SALT_LEN],
) -> String {
    // Prepare the password, per [RFC
    // 4013](https://tools.ietf.org/html/rfc4013), if possible.
    //
    // Postgres treats passwords as byte strings (without embedded NUL
    // bytes), but SASL expects passwords to be valid UTF-8.
    //
    // Follow the behavior of libpq's PQencryptPasswordConn(), and
    // also the backend. If the password is not valid UTF-8, or if it
    // contains prohibited characters (such as non-ASCII whitespace),
    // just skip the SASLprep step and use the original byte
    // sequence.
    let prepared: Vec<u8> = match std::str::from_utf8(password) {
        Ok(password_str) => {
            match stringprep::saslprep(password_str) {
                Ok(p) => p.into_owned().into_bytes(),
                // contains invalid characters; skip saslprep
                Err(_) => Vec::from(password),
            }
        }
        // not valid UTF-8; skip saslprep
        Err(_) => Vec::from(password),
    };
    // salt password: iterated, XOR-folded HMAC (see `sasl::hi`)
    let salted_password = sasl::hi(&prepared, &salt, SCRAM_DEFAULT_ITERATIONS).await;
    // client key: HMAC(salted_password, "Client Key")
    let mut hmac = Hmac::<Sha256>::new_from_slice(&salted_password)
        .expect("HMAC is able to accept all key sizes");
    hmac.update(b"Client Key");
    let client_key = hmac.finalize().into_bytes();
    // stored key: SHA-256(client_key)
    let mut hash = Sha256::default();
    hash.update(client_key.as_slice());
    let stored_key = hash.finalize_fixed();
    // server key: HMAC(salted_password, "Server Key")
    let mut hmac = Hmac::<Sha256>::new_from_slice(&salted_password)
        .expect("HMAC is able to accept all key sizes");
    hmac.update(b"Server Key");
    let server_key = hmac.finalize().into_bytes();
    format!(
        "SCRAM-SHA-256${}:{}${}:{}",
        SCRAM_DEFAULT_ITERATIONS,
        BASE64_STANDARD.encode(salt),
        BASE64_STANDARD.encode(stored_key),
        BASE64_STANDARD.encode(server_key)
    )
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/postgres-protocol2/src/escape/test.rs | libs/proxy/postgres-protocol2/src/escape/test.rs | use crate::escape::{escape_identifier, escape_literal};
// Identifiers are wrapped in double quotes; only embedded double quotes
// are doubled (backslashes and single quotes pass through untouched).
// NOTE(review): "idenifier" typo in the test name; left as-is.
#[test]
fn test_escape_idenifier() {
    assert_eq!(escape_identifier("foo"), String::from("\"foo\""));
    assert_eq!(escape_identifier("f\\oo"), String::from("\"f\\oo\""));
    assert_eq!(escape_identifier("f'oo"), String::from("\"f'oo\""));
    assert_eq!(escape_identifier("f\"oo"), String::from("\"f\"\"oo\""));
}

// Literals are wrapped in single quotes; embedded single quotes are
// doubled, and a backslash switches output to the ` E'...'` escape-string
// form with the backslash itself doubled.
#[test]
fn test_escape_literal() {
    assert_eq!(escape_literal("foo"), String::from("'foo'"));
    assert_eq!(escape_literal("f\\oo"), String::from(" E'f\\\\oo'"));
    assert_eq!(escape_literal("f'oo"), String::from("'f''oo'"));
    assert_eq!(escape_literal("f\"oo"), String::from("'f\"oo'"));
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/postgres-protocol2/src/escape/mod.rs | libs/proxy/postgres-protocol2/src/escape/mod.rs | //! Provides functions for escaping literals and identifiers for use
//! in SQL queries.
//!
//! Prefer parameterized queries where possible. Do not escape
//! parameters in a parameterized query.
#[cfg(test)]
mod test;
/// Escape a literal and surround result with single quotes. Not
/// recommended in most cases.
///
/// If input contains backslashes, result will be of the form `
/// E'...'` so it is safe to use regardless of the setting of
/// standard_conforming_strings.
pub fn escape_literal(input: &str) -> String {
    escape_internal(input, false)
}

/// Escape an identifier and surround result with double quotes.
pub fn escape_identifier(input: &str) -> String {
    escape_internal(input, true)
}

// Port of PostgreSQL libpq's PQescapeInternal(). No connection is needed
// because the input is already known to be valid UTF-8.
//
// When `as_ident` is true the input is quoted as an identifier with `"`;
// otherwise as a literal with `'`. Quote characters are escaped by
// doubling; a literal containing backslashes additionally doubles each
// backslash and uses the ` E'...'` escape-string syntax so the result is
// valid under either setting of standard_conforming_strings.
fn escape_internal(input: &str, as_ident: bool) -> String {
    let quote = if as_ident { '"' } else { '\'' };

    // First pass: count characters that will need doubling.
    let num_quotes = input.chars().filter(|&c| c == quote).count();
    let num_backslashes = input.chars().filter(|&c| c == '\\').count();
    let escape_syntax = !as_ident && num_backslashes > 0;

    // Two surrounding quotes plus one doubled char per quote; the
    // escape-string form also doubles backslashes and adds ` E`.
    let mut capacity = input.len() + num_quotes + 3;
    if escape_syntax {
        capacity += num_backslashes + 2;
    }
    let mut output = String::with_capacity(capacity);

    // The leading space guards against the result being interpolated
    // immediately after an identifier.
    if escape_syntax {
        output.push_str(" E");
    }
    output.push(quote);

    if num_quotes == 0 && !escape_syntax {
        // Fast path: nothing needs doubling, copy verbatim.
        output.push_str(input);
    } else {
        for c in input.chars() {
            // Escape by doubling: the quote character always, and (for
            // literals) every backslash.
            if c == quote || (!as_ident && c == '\\') {
                output.push(c);
            }
            output.push(c);
        }
    }

    output.push(quote);
    output
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/postgres-protocol2/src/authentication/mod.rs | libs/proxy/postgres-protocol2/src/authentication/mod.rs | //! Authentication protocol support.
pub mod sasl;
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/postgres-protocol2/src/authentication/sasl.rs | libs/proxy/postgres-protocol2/src/authentication/sasl.rs | //! SASL-based authentication support.
use std::fmt::Write;
use std::{io, iter, mem, str};
use base64::Engine as _;
use base64::prelude::BASE64_STANDARD;
use hmac::{Hmac, Mac};
use rand::{self, Rng};
use sha2::digest::FixedOutput;
use sha2::{Digest, Sha256};
use tokio::task::yield_now;
const NONCE_LENGTH: usize = 24;

/// The identifier of the SCRAM-SHA-256 SASL authentication mechanism.
pub const SCRAM_SHA_256: &str = "SCRAM-SHA-256";

/// The identifier of the SCRAM-SHA-256-PLUS SASL authentication mechanism.
pub const SCRAM_SHA_256_PLUS: &str = "SCRAM-SHA-256-PLUS";

// Postgres passwords are arbitrary byte strings: they may be invalid
// UTF-8 or contain characters SASLprep prohibits. Apply SASLprep when
// possible; otherwise fall back to the raw bytes unchanged.
fn normalize(pass: &[u8]) -> Vec<u8> {
    let Ok(utf8) = str::from_utf8(pass) else {
        return pass.to_vec();
    };
    match stringprep::saslprep(utf8) {
        Ok(prepped) => prepped.into_owned().into_bytes(),
        Err(_) => pass.to_vec(),
    }
}
/// Iterated, XOR-folded HMAC-SHA-256 — the `Hi` function of SCRAM
/// (RFC 5802): U1 = HMAC(str, salt || INT(1)), Un = HMAC(str, Un-1),
/// result = U1 ^ U2 ^ ... ^ U_iterations.
pub(crate) async fn hi(str: &[u8], salt: &[u8], iterations: u32) -> [u8; 32] {
    let mut hmac =
        Hmac::<Sha256>::new_from_slice(str).expect("HMAC is able to accept all key sizes");
    hmac.update(salt);
    // INT(1): 4-byte big-endian block index.
    hmac.update(&[0, 0, 0, 1]);
    let mut prev = hmac.finalize().into_bytes();
    let mut hi = prev;
    for i in 1..iterations {
        let mut hmac = Hmac::<Sha256>::new_from_slice(str).expect("already checked above");
        hmac.update(&prev);
        prev = hmac.finalize().into_bytes();
        // XOR-fold this round's digest into the accumulator.
        for (hi, prev) in hi.iter_mut().zip(prev) {
            *hi ^= prev;
        }
        // yield every ~250us
        // hopefully reduces tail latencies
        if i.is_multiple_of(1024) {
            yield_now().await
        }
    }
    hi.into()
}
enum ChannelBindingInner {
    Unrequested,
    Unsupported,
    TlsServerEndPoint(Vec<u8>),
}

/// The channel binding configuration for a SCRAM authentication exchange.
pub struct ChannelBinding(ChannelBindingInner);

impl ChannelBinding {
    /// The server did not request channel binding.
    pub fn unrequested() -> ChannelBinding {
        ChannelBinding(ChannelBindingInner::Unrequested)
    }

    /// The server requested channel binding but the client is unable to provide it.
    pub fn unsupported() -> ChannelBinding {
        ChannelBinding(ChannelBindingInner::Unsupported)
    }

    /// The server requested channel binding and the client will use the
    /// `tls-server-end-point` method.
    pub fn tls_server_end_point(signature: Vec<u8>) -> ChannelBinding {
        ChannelBinding(ChannelBindingInner::TlsServerEndPoint(signature))
    }

    /// The GS2 header placed at the start of the client-first message.
    fn gs2_header(&self) -> &'static str {
        match &self.0 {
            ChannelBindingInner::Unrequested => "y,,",
            ChannelBindingInner::Unsupported => "n,,",
            ChannelBindingInner::TlsServerEndPoint(_) => "p=tls-server-end-point,,",
        }
    }

    /// Raw channel-binding payload appended to the GS2 header when computing
    /// the `c=` attribute; empty unless `tls-server-end-point` is in use.
    fn cbind_data(&self) -> &[u8] {
        if let ChannelBindingInner::TlsServerEndPoint(sig) = &self.0 {
            sig
        } else {
            &[]
        }
    }
}
/// A pair of keys for the SCRAM-SHA-256 mechanism.
/// See <https://datatracker.ietf.org/doc/html/rfc5802#section-3> for details.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ScramKeys<const N: usize> {
    /// Used by server to authenticate client.
    pub client_key: [u8; N],
    /// Used by client to verify server's signature.
    pub server_key: [u8; N],
}

/// Password or keys which were derived from it.
enum Credentials<const N: usize> {
    /// A regular password as a vector of bytes.
    Password(Vec<u8>),
    /// A precomputed pair of keys.
    Keys(ScramKeys<N>),
}

/// Progress of the client-side SCRAM exchange; advanced by
/// `ScramSha256::update` and `ScramSha256::finish`.
enum State {
    /// Waiting for the server-first message.
    Update {
        nonce: String,
        password: Credentials<32>,
        channel_binding: ChannelBinding,
    },
    /// Waiting for the server-final message.
    Finish {
        server_key: [u8; 32],
        auth_message: String,
    },
    /// Exchange complete (or consumed after an error).
    Done,
}
/// A type which handles the client side of the SCRAM-SHA-256/SCRAM-SHA-256-PLUS authentication
/// process.
///
/// During the authentication process, if the backend sends an `AuthenticationSASL` message which
/// includes `SCRAM-SHA-256` as an authentication mechanism, this type can be used.
///
/// After a `ScramSha256` is constructed, the buffer returned by the `message()` method should be
/// sent to the backend in a `SASLInitialResponse` message along with the mechanism name.
///
/// The server will reply with an `AuthenticationSASLContinue` message. Its contents should be
/// passed to the `update()` method, after which the buffer returned by the `message()` method
/// should be sent to the backend in a `SASLResponse` message.
///
/// The server will reply with an `AuthenticationSASLFinal` message. Its contents should be passed
/// to the `finish()` method, after which the authentication process is complete.
pub struct ScramSha256 {
    // The next client message to transmit; rewritten in place by `update`.
    message: String,
    // Current position in the exchange; see `State`.
    state: State,
}
// Generates a NONCE_LENGTH-character printable-ASCII nonce.
fn nonce() -> String {
    // rand's ThreadRng is cryptographically secure.
    let mut rng = rand::rng();
    let mut out = String::with_capacity(NONCE_LENGTH);
    for _ in 0..NONCE_LENGTH {
        // Printable ASCII; ',' (0x2c) delimits SCRAM attributes, so it is
        // remapped to '~' (0x7e).
        let b = rng.random_range(0x21u8..0x7e);
        out.push(if b == 0x2c { '\x7e' } else { b as char });
    }
    out
}
impl ScramSha256 {
    /// Constructs a new instance which will use the provided password for authentication.
    pub fn new(password: &[u8], channel_binding: ChannelBinding) -> ScramSha256 {
        let password = Credentials::Password(normalize(password));
        ScramSha256::new_inner(password, channel_binding, nonce())
    }

    /// Constructs a new instance which will use the provided key pair for authentication.
    pub fn new_with_keys(keys: ScramKeys<32>, channel_binding: ChannelBinding) -> ScramSha256 {
        let password = Credentials::Keys(keys);
        ScramSha256::new_inner(password, channel_binding, nonce())
    }

    // Shared constructor; takes the nonce explicitly so tests can pin it.
    fn new_inner(
        password: Credentials<32>,
        channel_binding: ChannelBinding,
        nonce: String,
    ) -> ScramSha256 {
        ScramSha256 {
            // client-first-message: GS2 header, empty username (`n=`), nonce.
            message: format!("{}n=,r={}", channel_binding.gs2_header(), nonce),
            state: State::Update {
                nonce,
                password,
                channel_binding,
            },
        }
    }

    /// Returns the message which should be sent to the backend in an `SASLResponse` message.
    ///
    /// Panics once the exchange has completed (`State::Done`).
    pub fn message(&self) -> &[u8] {
        if let State::Done = self.state {
            panic!("invalid SCRAM state");
        }
        self.message.as_bytes()
    }

    /// Updates the state machine with the response from the backend.
    ///
    /// This should be called when an `AuthenticationSASLContinue` message is received.
    pub async fn update(&mut self, message: &[u8]) -> io::Result<()> {
        // Take the state out; any early return leaves us in `Done`.
        let (client_nonce, password, channel_binding) =
            match mem::replace(&mut self.state, State::Done) {
                State::Update {
                    nonce,
                    password,
                    channel_binding,
                } => (nonce, password, channel_binding),
                _ => return Err(io::Error::other("invalid SCRAM state")),
            };
        let message =
            str::from_utf8(message).map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?;
        let parsed = Parser::new(message).server_first_message()?;
        // The server nonce must extend the nonce we sent.
        if !parsed.nonce.starts_with(&client_nonce) {
            return Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid nonce"));
        }
        let (client_key, server_key) = match password {
            Credentials::Password(password) => {
                let salt = match BASE64_STANDARD.decode(parsed.salt) {
                    Ok(salt) => salt,
                    Err(e) => return Err(io::Error::new(io::ErrorKind::InvalidInput, e)),
                };
                // Derive both keys from the salted password (RFC 5802 key
                // derivation, as in `password::scram_sha_256_salt`).
                let salted_password = hi(&password, &salt, parsed.iteration_count).await;
                let make_key = |name| {
                    let mut hmac = Hmac::<Sha256>::new_from_slice(&salted_password)
                        .expect("HMAC is able to accept all key sizes");
                    hmac.update(name);
                    let mut key = [0u8; 32];
                    key.copy_from_slice(hmac.finalize().into_bytes().as_slice());
                    key
                };
                (make_key(b"Client Key"), make_key(b"Server Key"))
            }
            Credentials::Keys(keys) => (keys.client_key, keys.server_key),
        };
        // StoredKey = SHA-256(ClientKey)
        let mut hash = Sha256::default();
        hash.update(client_key);
        let stored_key = hash.finalize_fixed();
        // c= attribute: base64(GS2 header || channel-binding data)
        let mut cbind_input = vec![];
        cbind_input.extend(channel_binding.gs2_header().as_bytes());
        cbind_input.extend(channel_binding.cbind_data());
        let cbind_input = BASE64_STANDARD.encode(&cbind_input);
        self.message.clear();
        write!(&mut self.message, "c={},r={}", cbind_input, parsed.nonce).unwrap();
        // AuthMessage = client-first-bare , server-first , client-final-without-proof
        let auth_message = format!("n=,r={},{},{}", client_nonce, message, self.message);
        let mut hmac = Hmac::<Sha256>::new_from_slice(&stored_key)
            .expect("HMAC is able to accept all key sizes");
        hmac.update(auth_message.as_bytes());
        let client_signature = hmac.finalize().into_bytes();
        // ClientProof = ClientKey XOR ClientSignature
        let mut client_proof = client_key;
        for (proof, signature) in client_proof.iter_mut().zip(client_signature) {
            *proof ^= signature;
        }
        write!(
            &mut self.message,
            ",p={}",
            BASE64_STANDARD.encode(client_proof)
        )
        .unwrap();
        self.state = State::Finish {
            server_key,
            auth_message,
        };
        Ok(())
    }

    /// Finalizes the authentication process.
    ///
    /// This should be called when the backend sends an `AuthenticationSASLFinal` message.
    /// Authentication has only succeeded if this method returns `Ok(())`.
    pub fn finish(&mut self, message: &[u8]) -> io::Result<()> {
        let (server_key, auth_message) = match mem::replace(&mut self.state, State::Done) {
            State::Finish {
                server_key,
                auth_message,
            } => (server_key, auth_message),
            _ => return Err(io::Error::other("invalid SCRAM state")),
        };
        let message =
            str::from_utf8(message).map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?;
        let parsed = Parser::new(message).server_final_message()?;
        let verifier = match parsed {
            ServerFinalMessage::Error(e) => {
                return Err(io::Error::other(format!("SCRAM error: {e}")));
            }
            ServerFinalMessage::Verifier(verifier) => verifier,
        };
        let verifier = match BASE64_STANDARD.decode(verifier) {
            Ok(verifier) => verifier,
            Err(e) => return Err(io::Error::new(io::ErrorKind::InvalidInput, e)),
        };
        // The server proves possession of ServerKey:
        // v must equal HMAC(ServerKey, AuthMessage).
        let mut hmac = Hmac::<Sha256>::new_from_slice(&server_key)
            .expect("HMAC is able to accept all key sizes");
        hmac.update(auth_message.as_bytes());
        hmac.verify_slice(&verifier)
            .map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "SCRAM verification error"))
    }
}
struct Parser<'a> {
s: &'a str,
it: iter::Peekable<str::CharIndices<'a>>,
}
impl<'a> Parser<'a> {
fn new(s: &'a str) -> Parser<'a> {
Parser {
s,
it: s.char_indices().peekable(),
}
}
fn eat(&mut self, target: char) -> io::Result<()> {
match self.it.next() {
Some((_, c)) if c == target => Ok(()),
Some((i, c)) => {
let m =
format!("unexpected character at byte {i}: expected `{target}` but got `{c}");
Err(io::Error::new(io::ErrorKind::InvalidInput, m))
}
None => Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"unexpected EOF",
)),
}
}
fn take_while<F>(&mut self, f: F) -> io::Result<&'a str>
where
F: Fn(char) -> bool,
{
let start = match self.it.peek() {
Some(&(i, _)) => i,
None => return Ok(""),
};
loop {
match self.it.peek() {
Some(&(_, c)) if f(c) => {
self.it.next();
}
Some(&(i, _)) => return Ok(&self.s[start..i]),
None => return Ok(&self.s[start..]),
}
}
}
fn printable(&mut self) -> io::Result<&'a str> {
self.take_while(|c| matches!(c, '\x21'..='\x2b' | '\x2d'..='\x7e'))
}
fn nonce(&mut self) -> io::Result<&'a str> {
self.eat('r')?;
self.eat('=')?;
self.printable()
}
fn base64(&mut self) -> io::Result<&'a str> {
self.take_while(|c| matches!(c, 'a'..='z' | 'A'..='Z' | '0'..='9' | '/' | '+' | '='))
}
fn salt(&mut self) -> io::Result<&'a str> {
self.eat('s')?;
self.eat('=')?;
self.base64()
}
fn posit_number(&mut self) -> io::Result<u32> {
let n = self.take_while(|c| c.is_ascii_digit())?;
n.parse()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))
}
fn iteration_count(&mut self) -> io::Result<u32> {
self.eat('i')?;
self.eat('=')?;
self.posit_number()
}
fn eof(&mut self) -> io::Result<()> {
match self.it.peek() {
Some(&(i, _)) => Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!("unexpected trailing data at byte {i}"),
)),
None => Ok(()),
}
}
fn server_first_message(&mut self) -> io::Result<ServerFirstMessage<'a>> {
let nonce = self.nonce()?;
self.eat(',')?;
let salt = self.salt()?;
self.eat(',')?;
let iteration_count = self.iteration_count()?;
self.eof()?;
Ok(ServerFirstMessage {
nonce,
salt,
iteration_count,
})
}
fn value(&mut self) -> io::Result<&'a str> {
self.take_while(|c| matches!(c, '\0' | '=' | ','))
}
fn server_error(&mut self) -> io::Result<Option<&'a str>> {
match self.it.peek() {
Some(&(_, 'e')) => {}
_ => return Ok(None),
}
self.eat('e')?;
self.eat('=')?;
self.value().map(Some)
}
fn verifier(&mut self) -> io::Result<&'a str> {
self.eat('v')?;
self.eat('=')?;
self.base64()
}
fn server_final_message(&mut self) -> io::Result<ServerFinalMessage<'a>> {
let message = match self.server_error()? {
Some(error) => ServerFinalMessage::Error(error),
None => ServerFinalMessage::Verifier(self.verifier()?),
};
self.eof()?;
Ok(message)
}
}
struct ServerFirstMessage<'a> {
nonce: &'a str,
salt: &'a str,
iteration_count: u32,
}
enum ServerFinalMessage<'a> {
Error(&'a str),
Verifier(&'a str),
}
#[cfg(test)]
mod test {
    use super::*;

    // Example exchange from RFC 5802's own test vector.
    #[test]
    fn parse_server_first_message() {
        let message = "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096";
        let message = Parser::new(message).server_first_message().unwrap();
        assert_eq!(message.nonce, "fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j");
        assert_eq!(message.salt, "QSXCR+Q6sek8bf92");
        assert_eq!(message.iteration_count, 4096);
    }

    // recorded auth exchange from psql
    //
    // Drives the full client state machine with a pinned nonce and checks
    // every message against the recorded transcript.
    #[tokio::test]
    async fn exchange() {
        let password = "foobar";
        let nonce = "9IZ2O01zb9IgiIZ1WJ/zgpJB";
        let client_first = "n,,n=,r=9IZ2O01zb9IgiIZ1WJ/zgpJB";
        let server_first = "r=9IZ2O01zb9IgiIZ1WJ/zgpJBjx/oIRLs02gGSHcw1KEty3eY,s=fs3IXBy7U7+IvVjZ,i\
                            =4096";
        let client_final = "c=biws,r=9IZ2O01zb9IgiIZ1WJ/zgpJBjx/oIRLs02gGSHcw1KEty3eY,p=AmNKosjJzS3\
                            1NTlQYNs5BTeQjdHdk7lOflDo5re2an8=";
        let server_final = "v=U+ppxD5XUKtradnv8e2MkeupiA8FU87Sg8CXzXHDAzw=";
        let mut scram = ScramSha256::new_inner(
            Credentials::Password(normalize(password.as_bytes())),
            ChannelBinding::unsupported(),
            nonce.to_string(),
        );
        assert_eq!(str::from_utf8(scram.message()).unwrap(), client_first);
        scram.update(server_first.as_bytes()).await.unwrap();
        assert_eq!(str::from_utf8(scram.message()).unwrap(), client_final);
        scram.finish(server_final.as_bytes()).unwrap();
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/postgres-protocol2/src/types/test.rs | libs/proxy/postgres-protocol2/src/types/test.rs | use bytes::{Buf, BytesMut};
use super::*;
// ltree serialization: version byte 1 followed by the label path.
#[test]
fn ltree_sql() {
    let mut query = vec![1u8];
    query.extend_from_slice("A.B.C".as_bytes());
    let mut buf = BytesMut::new();
    ltree_to_sql("A.B.C", &mut buf);
    assert_eq!(query.as_slice(), buf.chunk());
}

// ltree deserialization accepts version 1.
#[test]
fn ltree_str() {
    let mut query = vec![1u8];
    query.extend_from_slice("A.B.C".as_bytes());
    assert!(ltree_from_sql(query.as_slice()).is_ok())
}

// ltree deserialization rejects any other version byte.
#[test]
fn ltree_wrong_version() {
    let mut query = vec![2u8];
    query.extend_from_slice("A.B.C".as_bytes());
    assert!(ltree_from_sql(query.as_slice()).is_err())
}

// lquery serialization: version byte 1 followed by the pattern.
#[test]
fn lquery_sql() {
    let mut query = vec![1u8];
    query.extend_from_slice("A.B.C".as_bytes());
    let mut buf = BytesMut::new();
    lquery_to_sql("A.B.C", &mut buf);
    assert_eq!(query.as_slice(), buf.chunk());
}

// lquery deserialization accepts version 1.
#[test]
fn lquery_str() {
    let mut query = vec![1u8];
    query.extend_from_slice("A.B.C".as_bytes());
    assert!(lquery_from_sql(query.as_slice()).is_ok())
}

// lquery deserialization rejects any other version byte.
#[test]
fn lquery_wrong_version() {
    let mut query = vec![2u8];
    query.extend_from_slice("A.B.C".as_bytes());
    assert!(lquery_from_sql(query.as_slice()).is_err())
}

// ltxtquery uses the same framing as ltree (version byte 1 + text).
#[test]
fn ltxtquery_sql() {
    let mut query = vec![1u8];
    query.extend_from_slice("a & b*".as_bytes());
    let mut buf = BytesMut::new();
    ltree_to_sql("a & b*", &mut buf);
    assert_eq!(query.as_slice(), buf.chunk());
}

#[test]
fn ltxtquery_str() {
    let mut query = vec![1u8];
    query.extend_from_slice("a & b*".as_bytes());
    assert!(ltree_from_sql(query.as_slice()).is_ok())
}

#[test]
fn ltxtquery_wrong_version() {
    let mut query = vec![2u8];
    query.extend_from_slice("a & b*".as_bytes());
    assert!(ltree_from_sql(query.as_slice()).is_err())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/postgres-protocol2/src/types/mod.rs | libs/proxy/postgres-protocol2/src/types/mod.rs | //! Conversions to and from Postgres's binary format for various types.
use std::boxed::Box as StdBox;
use std::error::Error;
use std::str;
use byteorder::{BigEndian, ReadBytesExt};
use bytes::{BufMut, BytesMut};
use fallible_iterator::FallibleIterator;
use crate::Oid;
#[cfg(test)]
mod test;
/// Serializes a `TEXT`, `VARCHAR`, `CHAR(n)`, `NAME`, or `CITEXT` value.
///
/// The wire format is simply the UTF-8 bytes with no terminator.
#[inline]
pub fn text_to_sql(v: &str, buf: &mut BytesMut) {
    buf.extend_from_slice(v.as_bytes());
}
/// Deserializes a `TEXT`, `VARCHAR`, `CHAR(n)`, `NAME`, or `CITEXT` value.
///
/// Fails if the buffer is not valid UTF-8.
#[inline]
pub fn text_from_sql(buf: &[u8]) -> Result<&str, StdBox<dyn Error + Sync + Send>> {
    str::from_utf8(buf).map_err(Into::into)
}
/// Deserializes a `"char"` value.
///
/// The wire format is exactly one byte; trailing data is rejected.
#[inline]
pub fn char_from_sql(mut buf: &[u8]) -> Result<i8, StdBox<dyn Error + Sync + Send>> {
    let v = buf.read_i8()?;
    if !buf.is_empty() {
        return Err("invalid buffer size".into());
    }
    Ok(v)
}
/// Serializes an `OID` value.
///
/// OIDs travel as a 4-byte big-endian unsigned integer.
#[inline]
pub fn oid_to_sql(v: Oid, buf: &mut BytesMut) {
    buf.extend_from_slice(&v.to_be_bytes());
}
/// Deserializes an `OID` value.
///
/// Expects exactly four big-endian bytes; trailing data is rejected.
#[inline]
pub fn oid_from_sql(mut buf: &[u8]) -> Result<Oid, StdBox<dyn Error + Sync + Send>> {
    let v = buf.read_u32::<BigEndian>()?;
    if !buf.is_empty() {
        return Err("invalid buffer size".into());
    }
    Ok(v)
}
/// A fallible iterator over `HSTORE` entries.
pub struct HstoreEntries<'a> {
    // Number of key/value pairs still to be yielded.
    remaining: i32,
    // Unparsed tail of the wire buffer.
    buf: &'a [u8],
}

impl<'a> FallibleIterator for HstoreEntries<'a> {
    type Item = (&'a str, Option<&'a str>);
    type Error = StdBox<dyn Error + Sync + Send>;

    // Yields the next (key, value) pair; `value` is `None` for SQL NULL,
    // which is encoded as a negative value length on the wire.
    #[inline]
    #[allow(clippy::type_complexity)]
    fn next(
        &mut self,
    ) -> Result<Option<(&'a str, Option<&'a str>)>, StdBox<dyn Error + Sync + Send>> {
        if self.remaining == 0 {
            // All pairs consumed: the buffer must be fully drained too.
            if !self.buf.is_empty() {
                return Err("invalid buffer size".into());
            }
            return Ok(None);
        }
        self.remaining -= 1;
        let key_len = self.buf.read_i32::<BigEndian>()?;
        if key_len < 0 {
            return Err("invalid key length".into());
        }
        // NOTE(review): `split_at` panics if key_len/value_len exceed the
        // remaining buffer (unlike `ArrayValues::next`, which checks first).
        // Presumably input lengths are validated upstream — confirm.
        let (key, buf) = self.buf.split_at(key_len as usize);
        let key = str::from_utf8(key)?;
        self.buf = buf;
        let value_len = self.buf.read_i32::<BigEndian>()?;
        let value = if value_len < 0 {
            None
        } else {
            let (value, buf) = self.buf.split_at(value_len as usize);
            let value = str::from_utf8(value)?;
            self.buf = buf;
            Some(value)
        };
        Ok(Some((key, value)))
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact: `remaining` counts the pairs left.
        let len = self.remaining as usize;
        (len, Some(len))
    }
}
/// Deserializes an array value.
///
/// Reads the dimension count, then validates each per-dimension header and
/// computes the total element count with overflow checking. The returned
/// `Array` borrows the buffer starting at the dimension headers.
#[inline]
pub fn array_from_sql(mut buf: &[u8]) -> Result<Array<'_>, StdBox<dyn Error + Sync + Send>> {
    let dimensions = buf.read_i32::<BigEndian>()?;
    if dimensions < 0 {
        return Err("invalid dimension count".into());
    }
    // Walk the headers on a copy (`r`) so `buf` stays positioned at the
    // first header for `Array::dimensions()`/`values()` to re-slice.
    let mut r = buf;
    let mut elements = 1i32;
    for _ in 0..dimensions {
        let len = r.read_i32::<BigEndian>()?;
        if len < 0 {
            return Err("invalid dimension size".into());
        }
        let _lower_bound = r.read_i32::<BigEndian>()?;
        elements = match elements.checked_mul(len) {
            Some(elements) => elements,
            None => return Err("too many array elements".into()),
        };
    }
    // A zero-dimensional array has no elements; the `1i32` seed above is
    // only meaningful when at least one dimension was multiplied in.
    if dimensions == 0 {
        elements = 0;
    }
    Ok(Array {
        dimensions,
        elements,
        buf,
    })
}
/// A Postgres array.
pub struct Array<'a> {
    // Number of dimensions (and of 8-byte headers at the front of `buf`).
    dimensions: i32,
    // Total element count across all dimensions.
    elements: i32,
    // Dimension headers followed by the element data.
    buf: &'a [u8],
}

impl<'a> Array<'a> {
    /// Returns an iterator over the dimensions of the array.
    #[inline]
    pub fn dimensions(&self) -> ArrayDimensions<'a> {
        // Each dimension header is 8 bytes: i32 length + i32 lower bound.
        ArrayDimensions(&self.buf[..self.dimensions as usize * 8])
    }

    /// Returns an iterator over the values of the array.
    #[inline]
    pub fn values(&self) -> ArrayValues<'a> {
        ArrayValues {
            remaining: self.elements,
            // Element data begins immediately after the dimension headers.
            buf: &self.buf[self.dimensions as usize * 8..],
        }
    }
}
/// An iterator over the dimensions of an array.
pub struct ArrayDimensions<'a>(&'a [u8]);

impl FallibleIterator for ArrayDimensions<'_> {
    type Item = ArrayDimension;
    type Error = StdBox<dyn Error + Sync + Send>;

    // Decodes the next 8-byte header as (len, lower_bound).
    #[inline]
    fn next(&mut self) -> Result<Option<ArrayDimension>, StdBox<dyn Error + Sync + Send>> {
        if self.0.is_empty() {
            return Ok(None);
        }
        let len = self.0.read_i32::<BigEndian>()?;
        let lower_bound = self.0.read_i32::<BigEndian>()?;
        Ok(Some(ArrayDimension { len, lower_bound }))
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact: 8 bytes per remaining dimension header.
        let len = self.0.len() / 8;
        (len, Some(len))
    }
}
/// Information about a dimension of an array.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct ArrayDimension {
    /// The length of this dimension.
    pub len: i32,
    /// The base value used to index into this dimension.
    pub lower_bound: i32,
}
/// An iterator over the values of an array, in row-major order.
pub struct ArrayValues<'a> {
    // Number of elements still to be yielded; seeded from `Array::elements`.
    remaining: i32,
    // Remaining element bytes (each element: i32 length, then payload).
    buf: &'a [u8],
}
impl<'a> FallibleIterator for ArrayValues<'a> {
    type Item = Option<&'a [u8]>;
    type Error = StdBox<dyn Error + Sync + Send>;

    /// Yields the next element payload; `None` encodes SQL NULL, which is
    /// a negative length on the wire.
    #[inline]
    fn next(&mut self) -> Result<Option<Option<&'a [u8]>>, StdBox<dyn Error + Sync + Send>> {
        if self.remaining == 0 {
            // All elements consumed; leftover bytes mean a malformed message.
            return if self.buf.is_empty() {
                Ok(None)
            } else {
                Err("invalid message length: arrayvalue not drained".into())
            };
        }
        self.remaining -= 1;

        let len = self.buf.read_i32::<BigEndian>()?;
        if len < 0 {
            // Negative length marks a NULL element.
            return Ok(Some(None));
        }
        let len = len as usize;
        if self.buf.len() < len {
            return Err("invalid value length".into());
        }
        let (payload, rest) = self.buf.split_at(len);
        self.buf = rest;
        Ok(Some(Some(payload)))
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let n = self.remaining as usize;
        (n, Some(n))
    }
}
/// Serializes a Postgres ltree string.
#[inline]
pub fn ltree_to_sql(v: &str, buf: &mut BytesMut) {
    // The binary encoding is a single version byte (currently 1) followed
    // by the label path verbatim.
    buf.put_u8(1);
    buf.put_slice(v.as_bytes());
}
/// Deserialize a Postgres ltree string
#[inline]
pub fn ltree_from_sql(buf: &[u8]) -> Result<&str, StdBox<dyn Error + Sync + Send>> {
    // Strip the leading version byte; only version 1 is understood.
    let Some((&1u8, rest)) = buf.split_first() else {
        return Err("ltree version 1 only supported".into());
    };
    Ok(str::from_utf8(rest)?)
}
/// Serializes a Postgres lquery string.
#[inline]
pub fn lquery_to_sql(v: &str, buf: &mut BytesMut) {
    // One version byte (currently 1), then the query text verbatim.
    buf.put_u8(1);
    buf.put_slice(v.as_bytes());
}
/// Deserialize a Postgres lquery string
#[inline]
pub fn lquery_from_sql(buf: &[u8]) -> Result<&str, StdBox<dyn Error + Sync + Send>> {
    // Accept only version 1; the tag byte is removed before UTF-8 validation.
    match buf.split_first() {
        Some((&1u8, rest)) => Ok(str::from_utf8(rest)?),
        _ => Err("lquery version 1 only supported".into()),
    }
}
/// Serializes a Postgres ltxtquery string.
#[inline]
pub fn ltxtquery_to_sql(v: &str, buf: &mut BytesMut) {
    // One version byte (currently 1), then the query text verbatim.
    buf.put_u8(1);
    buf.put_slice(v.as_bytes());
}
/// Deserialize a Postgres ltxtquery string
#[inline]
pub fn ltxtquery_from_sql(buf: &[u8]) -> Result<&str, StdBox<dyn Error + Sync + Send>> {
    // Accept only version 1; strip the tag byte before UTF-8 validation.
    if let [1u8, rest @ ..] = buf {
        Ok(str::from_utf8(rest)?)
    } else {
        Err("ltxtquery version 1 only supported".into())
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/postgres-protocol2/src/message/backend.rs | libs/proxy/postgres-protocol2/src/message/backend.rs | #![allow(missing_docs)]
use std::io::{self, Read};
use std::ops::Range;
use std::{cmp, str};
use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
use bytes::{Bytes, BytesMut};
use fallible_iterator::FallibleIterator;
use memchr::memchr;
use crate::Oid;
// top-level message tags
//
// Single-byte tags that identify each backend message; `Message::parse`
// dispatches on these. The `pub` ones are exported, presumably so callers
// can match on raw tags without fully parsing — TODO confirm against users.
const PARSE_COMPLETE_TAG: u8 = b'1';
const BIND_COMPLETE_TAG: u8 = b'2';
const CLOSE_COMPLETE_TAG: u8 = b'3';
pub const NOTIFICATION_RESPONSE_TAG: u8 = b'A';
const COPY_DONE_TAG: u8 = b'c';
const COMMAND_COMPLETE_TAG: u8 = b'C';
const COPY_DATA_TAG: u8 = b'd';
const DATA_ROW_TAG: u8 = b'D';
const ERROR_RESPONSE_TAG: u8 = b'E';
const COPY_IN_RESPONSE_TAG: u8 = b'G';
const COPY_OUT_RESPONSE_TAG: u8 = b'H';
const COPY_BOTH_RESPONSE_TAG: u8 = b'W';
const EMPTY_QUERY_RESPONSE_TAG: u8 = b'I';
const BACKEND_KEY_DATA_TAG: u8 = b'K';
pub const NO_DATA_TAG: u8 = b'n';
pub const NOTICE_RESPONSE_TAG: u8 = b'N';
const AUTHENTICATION_TAG: u8 = b'R';
const PORTAL_SUSPENDED_TAG: u8 = b's';
pub const PARAMETER_STATUS_TAG: u8 = b'S';
const PARAMETER_DESCRIPTION_TAG: u8 = b't';
const ROW_DESCRIPTION_TAG: u8 = b'T';
pub const READY_FOR_QUERY_TAG: u8 = b'Z';
/// A parsed 5-byte backend message header: one tag byte plus a big-endian
/// i32 length (which includes the length field itself but not the tag).
#[derive(Debug, Copy, Clone)]
pub struct Header {
    tag: u8,
    len: i32,
}

#[allow(clippy::len_without_is_empty)]
impl Header {
    /// Attempts to parse a header from the front of `buf`.
    ///
    /// Returns `Ok(None)` when fewer than 5 bytes are buffered, and an
    /// `InvalidData` error when the length field is below the minimum of 4.
    #[inline]
    pub fn parse(buf: &[u8]) -> io::Result<Option<Header>> {
        if buf.len() < 5 {
            // Not enough bytes buffered yet; the caller should read more.
            return Ok(None);
        }
        let len = BigEndian::read_i32(&buf[1..]);
        if len < 4 {
            // The length includes its own 4 bytes, so anything smaller is corrupt.
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "invalid message length: header length < 4",
            ));
        }
        Ok(Some(Header { tag: buf[0], len }))
    }

    /// The message's tag byte.
    #[inline]
    pub fn tag(self) -> u8 {
        self.tag
    }

    /// The message's declared length (excluding the tag byte).
    #[inline]
    pub fn len(self) -> i32 {
        self.len
    }
}
/// An enum representing Postgres backend messages.
///
/// Variants without a body correspond to messages whose payload this
/// implementation discards during `parse` (e.g. `CopyData`) or that carry
/// no payload on the wire.
pub enum Message {
    AuthenticationCleartextPassword,
    AuthenticationGss,
    AuthenticationKerberosV5,
    AuthenticationMd5Password,
    AuthenticationOk,
    AuthenticationScmCredential,
    AuthenticationSspi,
    AuthenticationGssContinue,
    AuthenticationSasl(AuthenticationSaslBody),
    AuthenticationSaslContinue(AuthenticationSaslContinueBody),
    AuthenticationSaslFinal(AuthenticationSaslFinalBody),
    BackendKeyData(BackendKeyDataBody),
    BindComplete,
    CloseComplete,
    CommandComplete(CommandCompleteBody),
    CopyData,
    CopyDone,
    CopyInResponse,
    CopyOutResponse,
    CopyBothResponse,
    DataRow(DataRowBody),
    EmptyQueryResponse,
    ErrorResponse(ErrorResponseBody),
    NoData,
    NoticeResponse(NoticeResponseBody),
    NotificationResponse(NotificationResponseBody),
    ParameterDescription(ParameterDescriptionBody),
    ParameterStatus(ParameterStatusBody),
    ParseComplete,
    PortalSuspended,
    ReadyForQuery(ReadyForQueryBody),
    RowDescription(RowDescriptionBody),
}
impl Message {
    /// Attempts to parse one complete backend message from the front of `buf`.
    ///
    /// Returns `Ok(None)` if the buffer does not yet hold a full message; in
    /// that case the needed capacity is `reserve`d so the caller's next read
    /// can fetch the remainder. On success the message's bytes are split off
    /// the buffer. Errors on a bad length field, unknown tags, or a body
    /// that was not fully consumed.
    #[inline]
    pub fn parse(buf: &mut BytesMut) -> io::Result<Option<Message>> {
        if buf.len() < 5 {
            // Not even the 5-byte header (tag + length) is available yet.
            let to_read = 5 - buf.len();
            buf.reserve(to_read);
            return Ok(None);
        }
        let tag = buf[0];
        // 4 bytes are known to be present, so this read cannot fail.
        let len = (&buf[1..5]).read_u32::<BigEndian>().unwrap();
        if len < 4 {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "invalid message length: parsing u32",
            ));
        }
        // `len` excludes the tag byte, so the full frame is len + 1 bytes.
        let total_len = len as usize + 1;
        if buf.len() < total_len {
            let to_read = total_len - buf.len();
            buf.reserve(to_read);
            return Ok(None);
        }
        // Detach the frame; idx = 5 positions the cursor just past the header.
        let mut buf = Buffer {
            bytes: buf.split_to(total_len).freeze(),
            idx: 5,
        };
        let message = match tag {
            PARSE_COMPLETE_TAG => Message::ParseComplete,
            BIND_COMPLETE_TAG => Message::BindComplete,
            CLOSE_COMPLETE_TAG => Message::CloseComplete,
            NOTIFICATION_RESPONSE_TAG => Message::NotificationResponse(NotificationResponseBody {}),
            COPY_DONE_TAG => Message::CopyDone,
            COMMAND_COMPLETE_TAG => {
                let tag = buf.read_cstr()?;
                Message::CommandComplete(CommandCompleteBody { tag })
            }
            COPY_DATA_TAG => Message::CopyData,
            DATA_ROW_TAG => {
                // u16 column count, then the raw column data.
                let len = buf.read_u16::<BigEndian>()?;
                let storage = buf.read_all();
                Message::DataRow(DataRowBody { storage, len })
            }
            ERROR_RESPONSE_TAG => {
                let storage = buf.read_all();
                Message::ErrorResponse(ErrorResponseBody { storage })
            }
            COPY_IN_RESPONSE_TAG => Message::CopyInResponse,
            COPY_OUT_RESPONSE_TAG => Message::CopyOutResponse,
            COPY_BOTH_RESPONSE_TAG => Message::CopyBothResponse,
            EMPTY_QUERY_RESPONSE_TAG => Message::EmptyQueryResponse,
            BACKEND_KEY_DATA_TAG => {
                let process_id = buf.read_i32::<BigEndian>()?;
                let secret_key = buf.read_i32::<BigEndian>()?;
                Message::BackendKeyData(BackendKeyDataBody {
                    process_id,
                    secret_key,
                })
            }
            NO_DATA_TAG => Message::NoData,
            NOTICE_RESPONSE_TAG => {
                let storage = buf.read_all();
                Message::NoticeResponse(NoticeResponseBody { storage })
            }
            // Authentication messages share tag 'R' and are distinguished by
            // an i32 sub-type code.
            AUTHENTICATION_TAG => match buf.read_i32::<BigEndian>()? {
                0 => Message::AuthenticationOk,
                2 => Message::AuthenticationKerberosV5,
                3 => Message::AuthenticationCleartextPassword,
                5 => Message::AuthenticationMd5Password,
                6 => Message::AuthenticationScmCredential,
                7 => Message::AuthenticationGss,
                8 => Message::AuthenticationGssContinue,
                9 => Message::AuthenticationSspi,
                10 => {
                    let storage = buf.read_all();
                    Message::AuthenticationSasl(AuthenticationSaslBody(storage))
                }
                11 => {
                    let storage = buf.read_all();
                    Message::AuthenticationSaslContinue(AuthenticationSaslContinueBody(storage))
                }
                12 => {
                    let storage = buf.read_all();
                    Message::AuthenticationSaslFinal(AuthenticationSaslFinalBody(storage))
                }
                tag => {
                    return Err(io::Error::new(
                        io::ErrorKind::InvalidInput,
                        format!("unknown authentication tag `{tag}`"),
                    ));
                }
            },
            PORTAL_SUSPENDED_TAG => Message::PortalSuspended,
            PARAMETER_STATUS_TAG => {
                let name = buf.read_cstr()?;
                let value = buf.read_cstr()?;
                Message::ParameterStatus(ParameterStatusBody { name, value })
            }
            PARAMETER_DESCRIPTION_TAG => {
                let len = buf.read_u16::<BigEndian>()?;
                let storage = buf.read_all();
                Message::ParameterDescription(ParameterDescriptionBody { storage, len })
            }
            ROW_DESCRIPTION_TAG => {
                let len = buf.read_u16::<BigEndian>()?;
                let storage = buf.read_all();
                Message::RowDescription(RowDescriptionBody { storage, len })
            }
            READY_FOR_QUERY_TAG => {
                let status = buf.read_u8()?;
                Message::ReadyForQuery(ReadyForQueryBody { status })
            }
            tag => {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!("unknown message tag `{tag}`"),
                ));
            }
        };
        // Every arm must consume its entire body; leftovers indicate a
        // length/parse mismatch.
        if !buf.is_empty() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "invalid message length: expected buffer to be empty",
            ));
        }
        Ok(Some(message))
    }
}
/// Cursor over a single detached message frame. `idx` tracks how much of
/// `bytes` has been consumed; slices handed out are zero-copy `Bytes`.
struct Buffer {
    bytes: Bytes,
    idx: usize,
}

impl Buffer {
    /// The not-yet-consumed portion of the frame.
    #[inline]
    fn slice(&self) -> &[u8] {
        &self.bytes[self.idx..]
    }

    /// True once every byte of the frame has been consumed.
    #[inline]
    fn is_empty(&self) -> bool {
        self.idx == self.bytes.len()
    }

    /// Consumes and returns the bytes up to (but excluding) the next NUL
    /// terminator, advancing past the terminator itself. Errors if no NUL
    /// remains.
    #[inline]
    fn read_cstr(&mut self) -> io::Result<Bytes> {
        let Some(pos) = memchr(0, self.slice()) else {
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                "unexpected EOF",
            ));
        };
        let cstr = self.bytes.slice(self.idx..self.idx + pos);
        self.idx += pos + 1;
        Ok(cstr)
    }

    /// Consumes and returns everything that remains of the frame.
    #[inline]
    fn read_all(&mut self) -> Bytes {
        let rest = self.bytes.slice(self.idx..);
        self.idx = self.bytes.len();
        rest
    }
}
impl Read for Buffer {
    /// Copies as many unread bytes as fit into `buf` and advances the cursor.
    /// Never fails; returns 0 once the frame is exhausted.
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let src = self.slice();
        let n = cmp::min(src.len(), buf.len());
        buf[..n].copy_from_slice(&src[..n]);
        self.idx += n;
        Ok(n)
    }
}
/// Body of an AuthenticationMD5Password message.
///
/// NOTE(review): `Message::parse` never constructs this type (the MD5 arm
/// returns the unit variant), so it appears to be dead code here — confirm.
pub struct AuthenticationMd5PasswordBody {
    // 4-byte salt to mix into the MD5 password hash.
    salt: [u8; 4],
}
impl AuthenticationMd5PasswordBody {
    /// Returns the salt bytes.
    #[inline]
    pub fn salt(&self) -> [u8; 4] {
        self.salt
    }
}
/// Body of an AuthenticationSASL message: the raw mechanism list.
pub struct AuthenticationSaslBody(Bytes);
impl AuthenticationSaslBody {
    /// Iterates the NUL-separated SASL mechanism names.
    #[inline]
    pub fn mechanisms(&self) -> SaslMechanisms<'_> {
        SaslMechanisms(&self.0)
    }
}
/// Iterator over NUL-terminated SASL mechanism names; the list itself is
/// terminated by an extra trailing NUL (an empty name).
pub struct SaslMechanisms<'a>(&'a [u8]);
impl<'a> FallibleIterator for SaslMechanisms<'a> {
    type Item = &'a str;
    type Error = io::Error;
    #[inline]
    fn next(&mut self) -> io::Result<Option<&'a str>> {
        let value_end = find_null(self.0, 0)?;
        if value_end == 0 {
            // Empty name = list terminator; exactly one byte (the NUL) must remain.
            if self.0.len() != 1 {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    "invalid message length: expected to be at end of iterator for sasl",
                ));
            }
            Ok(None)
        } else {
            let value = get_str(&self.0[..value_end])?;
            // Skip past the name and its NUL terminator.
            self.0 = &self.0[value_end + 1..];
            Ok(Some(value))
        }
    }
}
/// Body of an AuthenticationSASLContinue message: opaque challenge data.
pub struct AuthenticationSaslContinueBody(Bytes);
impl AuthenticationSaslContinueBody {
    /// Raw server challenge bytes.
    #[inline]
    pub fn data(&self) -> &[u8] {
        &self.0
    }
}
/// Body of an AuthenticationSASLFinal message: opaque final server data.
pub struct AuthenticationSaslFinalBody(Bytes);
impl AuthenticationSaslFinalBody {
    /// Raw final server data bytes.
    #[inline]
    pub fn data(&self) -> &[u8] {
        &self.0
    }
}
/// Body of a BackendKeyData message; used to issue cancel requests.
pub struct BackendKeyDataBody {
    process_id: i32,
    secret_key: i32,
}
impl BackendKeyDataBody {
    /// The backend process ID.
    #[inline]
    pub fn process_id(&self) -> i32 {
        self.process_id
    }
    /// The per-connection secret key.
    #[inline]
    pub fn secret_key(&self) -> i32 {
        self.secret_key
    }
}
/// Body of a CommandComplete message: the command tag (e.g. stored as raw bytes).
pub struct CommandCompleteBody {
    tag: Bytes,
}
impl CommandCompleteBody {
    /// The command tag as UTF-8; errors if the bytes are not valid UTF-8.
    #[inline]
    pub fn tag(&self) -> io::Result<&str> {
        get_str(&self.tag)
    }
}
/// Body of a DataRow message: the raw column data plus the column count.
#[derive(Debug)]
pub struct DataRowBody {
    storage: Bytes,
    // Number of columns, as read from the message header.
    len: u16,
}
impl DataRowBody {
    /// Iterates the byte ranges of each column within `buffer()`.
    #[inline]
    pub fn ranges(&self) -> DataRowRanges<'_> {
        DataRowRanges {
            buf: &self.storage,
            // Total length, used by the iterator to compute absolute offsets.
            len: self.storage.len(),
            remaining: self.len,
        }
    }
    /// The raw column data the ranges index into.
    #[inline]
    pub fn buffer(&self) -> &[u8] {
        &self.storage
    }
}
/// Iterator over per-column byte ranges of a DataRow body.
pub struct DataRowRanges<'a> {
    // Unconsumed portion of the row data.
    buf: &'a [u8],
    // Total length of the original buffer; lets us convert the shrinking
    // `buf` back into absolute offsets.
    len: usize,
    // Columns still to be yielded.
    remaining: u16,
}
impl FallibleIterator for DataRowRanges<'_> {
    type Item = Option<Range<usize>>;
    type Error = io::Error;
    /// Yields the absolute range of the next column's bytes, or `None`
    /// (inner) for a SQL NULL column (negative length on the wire).
    #[inline]
    fn next(&mut self) -> io::Result<Option<Option<Range<usize>>>> {
        if self.remaining == 0 {
            if self.buf.is_empty() {
                return Ok(None);
            } else {
                // Trailing bytes after the declared column count: corrupt frame.
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "invalid message length: datarowrange is not empty",
                ));
            }
        }
        self.remaining -= 1;
        let len = self.buf.read_i32::<BigEndian>()?;
        if len < 0 {
            Ok(Some(None))
        } else {
            let len = len as usize;
            if self.buf.len() < len {
                return Err(io::Error::new(
                    io::ErrorKind::UnexpectedEof,
                    "unexpected EOF",
                ));
            }
            // Absolute start = bytes consumed so far.
            let base = self.len - self.buf.len();
            self.buf = &self.buf[len..];
            Ok(Some(Some(base..base + len)))
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.remaining as usize;
        (len, Some(len))
    }
}
/// Body of an ErrorResponse message: a sequence of typed text fields.
pub struct ErrorResponseBody {
    storage: Bytes,
}
impl ErrorResponseBody {
    /// Iterates the error fields (type byte + NUL-terminated value).
    #[inline]
    pub fn fields(&self) -> ErrorFields<'_> {
        ErrorFields { buf: &self.storage }
    }
}
/// Iterator over the fields of an error/notice body.
pub struct ErrorFields<'a> {
    buf: &'a [u8],
}
impl<'a> FallibleIterator for ErrorFields<'a> {
    type Item = ErrorField<'a>;
    type Error = io::Error;
    #[inline]
    fn next(&mut self) -> io::Result<Option<ErrorField<'a>>> {
        let type_ = self.buf.read_u8()?;
        if type_ == 0 {
            // Type byte 0 terminates the field list; nothing may follow it.
            if self.buf.is_empty() {
                return Ok(None);
            } else {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "invalid message length: error fields is not drained",
                ));
            }
        }
        let value_end = find_null(self.buf, 0)?;
        let value = get_str(&self.buf[..value_end])?;
        // Skip past the value and its NUL terminator.
        self.buf = &self.buf[value_end + 1..];
        Ok(Some(ErrorField { type_, value }))
    }
}
/// A single typed field of an error or notice message.
pub struct ErrorField<'a> {
    type_: u8,
    value: &'a str,
}
impl ErrorField<'_> {
    /// The field's type byte.
    #[inline]
    pub fn type_(&self) -> u8 {
        self.type_
    }
    /// The field's text value.
    #[inline]
    pub fn value(&self) -> &str {
        self.value
    }
}
/// Body of a NoticeResponse message; same field layout as an error response.
pub struct NoticeResponseBody {
    storage: Bytes,
}
impl NoticeResponseBody {
    /// Iterates the notice fields (shared with error responses).
    #[inline]
    pub fn fields(&self) -> ErrorFields<'_> {
        ErrorFields { buf: &self.storage }
    }
    /// The raw, unparsed body bytes.
    pub fn as_bytes(&self) -> &[u8] {
        &self.storage
    }
}
/// Body of a NotificationResponse; payload is discarded by `Message::parse`.
pub struct NotificationResponseBody {}
/// Body of a ParameterDescription message: parameter type OIDs.
pub struct ParameterDescriptionBody {
    storage: Bytes,
    // Number of parameters declared in the header.
    len: u16,
}
impl ParameterDescriptionBody {
    /// Iterates the parameter type OIDs.
    #[inline]
    pub fn parameters(&self) -> Parameters<'_> {
        Parameters {
            buf: &self.storage,
            remaining: self.len,
        }
    }
}
/// Iterator over parameter type OIDs of a ParameterDescription body.
pub struct Parameters<'a> {
    buf: &'a [u8],
    remaining: u16,
}
impl FallibleIterator for Parameters<'_> {
    type Item = Oid;
    type Error = io::Error;

    /// Reads the next parameter type OID, erroring if bytes remain after
    /// the declared count has been exhausted.
    #[inline]
    fn next(&mut self) -> io::Result<Option<Oid>> {
        if self.remaining == 0 {
            return if self.buf.is_empty() {
                Ok(None)
            } else {
                Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "invalid message length: parameters is not drained",
                ))
            };
        }
        self.remaining -= 1;
        let oid = self.buf.read_u32::<BigEndian>()?;
        Ok(Some(oid))
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let n = self.remaining as usize;
        (n, Some(n))
    }
}
/// Body of a ParameterStatus message: a name/value settings pair.
pub struct ParameterStatusBody {
    name: Bytes,
    value: Bytes,
}
impl ParameterStatusBody {
    /// The parameter name; errors if not valid UTF-8.
    #[inline]
    pub fn name(&self) -> io::Result<&str> {
        get_str(&self.name)
    }
    /// The parameter value; errors if not valid UTF-8.
    #[inline]
    pub fn value(&self) -> io::Result<&str> {
        get_str(&self.value)
    }
}
/// Body of a ReadyForQuery message: the transaction status byte.
pub struct ReadyForQueryBody {
    status: u8,
}
impl ReadyForQueryBody {
    /// The raw status byte reported by the backend.
    #[inline]
    pub fn status(&self) -> u8 {
        self.status
    }
}
/// Body of a RowDescription message: field metadata for a result set.
pub struct RowDescriptionBody {
    storage: Bytes,
    // Number of fields declared in the header.
    len: u16,
}
impl RowDescriptionBody {
    /// Iterates the per-column field descriptions.
    #[inline]
    pub fn fields(&self) -> Fields<'_> {
        Fields {
            buf: &self.storage,
            remaining: self.len,
        }
    }
}
/// Iterator over the field descriptions of a RowDescription body.
pub struct Fields<'a> {
    buf: &'a [u8],
    remaining: u16,
}
impl<'a> FallibleIterator for Fields<'a> {
    type Item = Field<'a>;
    type Error = io::Error;
    /// Decodes the next field description. The reads below follow the wire
    /// layout exactly: NUL-terminated name, then table OID, column id,
    /// type OID, type size, type modifier, and format code.
    #[inline]
    fn next(&mut self) -> io::Result<Option<Field<'a>>> {
        if self.remaining == 0 {
            if self.buf.is_empty() {
                return Ok(None);
            } else {
                // Bytes left over after the declared field count: corrupt frame.
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "invalid message length: field is not drained",
                ));
            }
        }
        self.remaining -= 1;
        let name_end = find_null(self.buf, 0)?;
        let name = get_str(&self.buf[..name_end])?;
        self.buf = &self.buf[name_end + 1..];
        let table_oid = self.buf.read_u32::<BigEndian>()?;
        let column_id = self.buf.read_i16::<BigEndian>()?;
        let type_oid = self.buf.read_u32::<BigEndian>()?;
        let type_size = self.buf.read_i16::<BigEndian>()?;
        let type_modifier = self.buf.read_i32::<BigEndian>()?;
        let format = self.buf.read_i16::<BigEndian>()?;
        Ok(Some(Field {
            name,
            table_oid,
            column_id,
            type_oid,
            type_size,
            type_modifier,
            format,
        }))
    }
}
/// Metadata for one column of a result set, borrowed from the row
/// description body.
pub struct Field<'a> {
    name: &'a str,
    table_oid: Oid,
    column_id: i16,
    type_oid: Oid,
    type_size: i16,
    type_modifier: i32,
    format: i16,
}
impl<'a> Field<'a> {
    /// The column name.
    #[inline]
    pub fn name(&self) -> &'a str {
        self.name
    }
    /// OID of the originating table (0 if not a simple table column).
    #[inline]
    pub fn table_oid(&self) -> Oid {
        self.table_oid
    }
    /// Attribute number within the originating table.
    #[inline]
    pub fn column_id(&self) -> i16 {
        self.column_id
    }
    /// OID of the column's data type.
    #[inline]
    pub fn type_oid(&self) -> Oid {
        self.type_oid
    }
    /// Declared size of the data type (negative for variable-length types).
    #[inline]
    pub fn type_size(&self) -> i16 {
        self.type_size
    }
    /// Type-specific modifier value.
    #[inline]
    pub fn type_modifier(&self) -> i32 {
        self.type_modifier
    }
    /// Format code of the column data (text or binary).
    #[inline]
    pub fn format(&self) -> i16 {
        self.format
    }
}
/// Returns the index (relative to the whole buffer) of the first NUL byte
/// at or after `start`, or `UnexpectedEof` if none remains.
#[inline]
fn find_null(buf: &[u8], start: usize) -> io::Result<usize> {
    memchr(0, &buf[start..])
        .map(|pos| pos + start)
        .ok_or_else(|| io::Error::new(io::ErrorKind::UnexpectedEof, "unexpected EOF"))
}
/// Interprets `buf` as UTF-8, mapping encoding failures to an
/// `InvalidInput` I/O error.
#[inline]
fn get_str(buf: &[u8]) -> io::Result<&str> {
    match str::from_utf8(buf) {
        Ok(s) => Ok(s),
        Err(e) => Err(io::Error::new(io::ErrorKind::InvalidInput, e)),
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/postgres-protocol2/src/message/mod.rs | libs/proxy/postgres-protocol2/src/message/mod.rs | //! Postgres message protocol support.
//!
//! See [Postgres's documentation][docs] for more information on message flow.
//!
//! [docs]: https://www.postgresql.org/docs/9.5/static/protocol-flow.html
pub mod backend;
pub mod frontend;
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/postgres-protocol2/src/message/frontend.rs | libs/proxy/postgres-protocol2/src/message/frontend.rs | //! Frontend message serialization.
#![allow(missing_docs)]
use std::error::Error;
use std::{io, marker};
use byteorder::{BigEndian, ByteOrder};
use bytes::{Buf, BufMut, BytesMut};
use crate::{FromUsize, IsNull, Oid, write_nullable};
/// Writes a length-prefixed message body: reserves 4 bytes, runs `f` to
/// append the payload, then back-patches the big-endian i32 length
/// (which, per protocol, includes the length field itself).
#[inline]
fn write_body<F, E>(buf: &mut BytesMut, f: F) -> Result<(), E>
where
    F: FnOnce(&mut BytesMut) -> Result<(), E>,
    E: From<io::Error>,
{
    let base = buf.len();
    // Placeholder for the length, patched below.
    buf.extend_from_slice(&[0; 4]);
    f(buf)?;
    // Errors if the body exceeds i32::MAX bytes.
    let size = i32::from_usize(buf.len() - base)?;
    BigEndian::write_i32(&mut buf[base..], size);
    Ok(())
}
/// Errors from serializing a Bind message: either the user-supplied value
/// serializer failed, or writing the message itself did.
#[derive(Debug)]
pub enum BindError {
    Conversion(Box<dyn Error + marker::Sync + Send>),
    Serialization(io::Error),
}
impl From<Box<dyn Error + marker::Sync + Send>> for BindError {
    #[inline]
    fn from(e: Box<dyn Error + marker::Sync + Send>) -> BindError {
        BindError::Conversion(e)
    }
}
impl From<io::Error> for BindError {
    #[inline]
    fn from(e: io::Error) -> BindError {
        BindError::Serialization(e)
    }
}
/// Serializes a Bind ('B') message: binds `values` to the prepared
/// `statement` under the named `portal`.
///
/// `formats`/`result_formats` are the parameter and result format codes;
/// `serializer` encodes each value (returning whether it was NULL) and may
/// fail with a conversion error, surfaced as `BindError::Conversion`.
#[inline]
pub fn bind<I, J, F, T, K>(
    portal: &str,
    statement: &str,
    formats: I,
    values: J,
    mut serializer: F,
    result_formats: K,
    buf: &mut BytesMut,
) -> Result<(), BindError>
where
    I: IntoIterator<Item = i16>,
    J: IntoIterator<Item = T>,
    F: FnMut(T, &mut BytesMut) -> Result<IsNull, Box<dyn Error + marker::Sync + Send>>,
    K: IntoIterator<Item = i16>,
{
    buf.put_u8(b'B');
    write_body(buf, |buf| {
        write_cstr(portal.as_bytes(), buf)?;
        write_cstr(statement.as_bytes(), buf)?;
        // Parameter format codes, prefixed with their i16 count.
        write_counted(
            formats,
            |f, buf| {
                buf.put_i16(f);
                Ok::<_, io::Error>(())
            },
            buf,
        )?;
        // Parameter values; write_nullable emits the length prefix (or -1 for NULL).
        write_counted(
            values,
            |v, buf| write_nullable(|buf| serializer(v, buf), buf),
            buf,
        )?;
        // Result column format codes.
        write_counted(
            result_formats,
            |f, buf| {
                buf.put_i16(f);
                Ok::<_, io::Error>(())
            },
            buf,
        )?;
        Ok(())
    })
}
/// Writes `items` via `serializer`, prefixed by their i16 count.
///
/// The count slot is reserved first and back-patched after the iterator is
/// drained, so the iterator's length need not be known up front. Errors if
/// the count exceeds i16::MAX.
#[inline]
fn write_counted<I, T, F, E>(items: I, mut serializer: F, buf: &mut BytesMut) -> Result<(), E>
where
    I: IntoIterator<Item = T>,
    F: FnMut(T, &mut BytesMut) -> Result<(), E>,
    E: From<io::Error>,
{
    let base = buf.len();
    // Placeholder for the i16 count, patched below.
    buf.extend_from_slice(&[0; 2]);
    let mut count = 0;
    for item in items {
        serializer(item, buf)?;
        count += 1;
    }
    let count = i16::from_usize(count)?;
    BigEndian::write_i16(&mut buf[base..], count);
    Ok(())
}
/// Serializes a CancelRequest message (no tag byte; sent on a fresh
/// connection) using the process id / secret key from BackendKeyData.
#[inline]
pub fn cancel_request(process_id: i32, secret_key: i32, buf: &mut BytesMut) {
    write_body(buf, |buf| {
        // 80877102 is the protocol's cancel-request code.
        buf.put_i32(80_877_102);
        buf.put_i32(process_id);
        buf.put_i32(secret_key);
        Ok::<_, io::Error>(())
    })
    // Writing into a BytesMut cannot fail.
    .unwrap();
}
/// Serializes a Close ('C') message for the named statement or portal;
/// `variant` selects which kind of object is being closed.
#[inline]
pub fn close(variant: u8, name: &str, buf: &mut BytesMut) -> io::Result<()> {
    buf.put_u8(b'C');
    write_body(buf, |body| {
        body.put_u8(variant);
        write_cstr(name.as_bytes(), body)
    })
}
/// A prepared CopyData ('d') message whose length has been validated.
pub struct CopyData<T> {
    buf: T,
    // Total frame length (payload + 4-byte length field), precomputed in `new`.
    len: i32,
}
impl<T> CopyData<T>
where
    T: Buf,
{
    /// Validates that the payload plus the 4-byte length field fits in an
    /// i32, erroring with `InvalidInput` on overflow.
    pub fn new(buf: T) -> io::Result<CopyData<T>> {
        let len = buf
            .remaining()
            .checked_add(4)
            .and_then(|l| i32::try_from(l).ok())
            .ok_or_else(|| {
                io::Error::new(io::ErrorKind::InvalidInput, "message length overflow")
            })?;
        Ok(CopyData { buf, len })
    }
    /// Appends the tag, length, and payload to `out`.
    pub fn write(self, out: &mut BytesMut) {
        out.put_u8(b'd');
        out.put_i32(self.len);
        out.put(self.buf);
    }
}
/// Serializes a CopyDone ('c') message (empty body).
#[inline]
pub fn copy_done(buf: &mut BytesMut) {
    buf.put_u8(b'c');
    // Writing into a BytesMut cannot fail.
    write_body(buf, |_| Ok::<(), io::Error>(())).unwrap();
}
/// Serializes a CopyFail ('f') message carrying an error message.
#[inline]
pub fn copy_fail(message: &str, buf: &mut BytesMut) -> io::Result<()> {
    buf.put_u8(b'f');
    write_body(buf, |buf| write_cstr(message.as_bytes(), buf))
}
/// Serializes a Describe ('D') message for the named statement or portal;
/// `variant` selects which kind of object is described.
#[inline]
pub fn describe(variant: u8, name: &str, buf: &mut BytesMut) -> io::Result<()> {
    buf.put_u8(b'D');
    write_body(buf, |body| {
        body.put_u8(variant);
        write_cstr(name.as_bytes(), body)
    })
}

/// Serializes an Execute ('E') message for the named portal with a cap on
/// the number of rows to return.
#[inline]
pub fn execute(portal: &str, max_rows: i32, buf: &mut BytesMut) -> io::Result<()> {
    buf.put_u8(b'E');
    write_body(buf, |body| {
        write_cstr(portal.as_bytes(), body)?;
        body.put_i32(max_rows);
        Ok(())
    })
}
/// Serializes a Parse ('P') message: prepares `query` under `name` with
/// the given parameter type OIDs (OID 0 leaves a type unspecified).
#[inline]
pub fn parse<I>(name: &str, query: &str, param_types: I, buf: &mut BytesMut) -> io::Result<()>
where
    I: IntoIterator<Item = Oid>,
{
    buf.put_u8(b'P');
    write_body(buf, |buf| {
        write_cstr(name.as_bytes(), buf)?;
        write_cstr(query.as_bytes(), buf)?;
        // Parameter type OIDs, prefixed by their i16 count.
        write_counted(
            param_types,
            |t, buf| {
                buf.put_u32(t);
                Ok::<_, io::Error>(())
            },
            buf,
        )?;
        Ok(())
    })
}
/// Serializes a PasswordMessage ('p') containing `password` as a C string.
#[inline]
pub fn password_message(password: &[u8], buf: &mut BytesMut) -> io::Result<()> {
    buf.put_u8(b'p');
    write_body(buf, |body| write_cstr(password, body))
}

/// Serializes a simple Query ('Q') message.
#[inline]
pub fn query(query: &str, buf: &mut BytesMut) -> io::Result<()> {
    buf.put_u8(b'Q');
    write_body(buf, |body| write_cstr(query.as_bytes(), body))
}
/// Serializes a SASLInitialResponse ('p') message: the chosen mechanism
/// name followed by the length-prefixed initial client response.
#[inline]
pub fn sasl_initial_response(mechanism: &str, data: &[u8], buf: &mut BytesMut) -> io::Result<()> {
    buf.put_u8(b'p');
    write_body(buf, |body| {
        write_cstr(mechanism.as_bytes(), body)?;
        // Initial response is length-prefixed (i32), not NUL-terminated.
        body.put_i32(i32::from_usize(data.len())?);
        body.put_slice(data);
        Ok(())
    })
}

/// Serializes a SASLResponse ('p') message carrying raw response bytes.
#[inline]
pub fn sasl_response(data: &[u8], buf: &mut BytesMut) -> io::Result<()> {
    buf.put_u8(b'p');
    write_body(buf, |body| {
        body.put_slice(data);
        Ok(())
    })
}
/// Serializes an SSLRequest message (no tag byte; sent before the startup
/// message to negotiate TLS).
#[inline]
pub fn ssl_request(buf: &mut BytesMut) {
    write_body(buf, |buf| {
        // 80877103 is the protocol's SSL-request code.
        buf.put_i32(80_877_103);
        Ok::<_, io::Error>(())
    })
    // Writing into a BytesMut cannot fail.
    .unwrap();
}
/// Serializes a StartupMessage (no tag byte): protocol version followed by
/// the pre-encoded NUL-separated parameter pairs and a final terminator.
#[inline]
pub fn startup_message(parameters: &StartupMessageParams, buf: &mut BytesMut) -> io::Result<()> {
    write_body(buf, |buf| {
        // postgres protocol version 3.0 (196608), big-endian
        buf.put_i32(0x00_03_00_00);
        buf.put_slice(&parameters.params);
        // Terminating NUL after the last parameter pair.
        buf.put_u8(0);
        Ok(())
    })
}
/// Startup parameters, pre-encoded as alternating NUL-terminated
/// name/value byte strings ready to splice into a StartupMessage.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct StartupMessageParams {
    pub params: BytesMut,
}
impl StartupMessageParams {
    /// Set parameter's value by its name.
    ///
    /// # Panics
    ///
    /// Panics if `name` or `value` contains a NUL byte, since NUL is the
    /// wire-format delimiter.
    pub fn insert(&mut self, name: &str, value: &str) {
        if name.contains('\0') || value.contains('\0') {
            panic!("startup parameter name or value contained a null")
        }
        self.params.put_slice(name.as_bytes());
        self.params.put_u8(0);
        self.params.put_slice(value.as_bytes());
        self.params.put_u8(0);
    }
}
/// Serializes a Sync ('S') message (empty body).
#[inline]
pub fn sync(buf: &mut BytesMut) {
    buf.put_u8(b'S');
    // Writing into a BytesMut cannot fail.
    write_body(buf, |_| Ok::<(), io::Error>(())).unwrap();
}
/// Serializes a Flush ('H') message (empty body).
#[inline]
pub fn flush(buf: &mut BytesMut) {
    buf.put_u8(b'H');
    write_body(buf, |_| Ok::<(), io::Error>(())).unwrap();
}
/// Serializes a Terminate ('X') message (empty body).
#[inline]
pub fn terminate(buf: &mut BytesMut) {
    buf.put_u8(b'X');
    write_body(buf, |_| Ok::<(), io::Error>(())).unwrap();
}
/// Appends `s` plus a NUL terminator, rejecting strings that already
/// contain an embedded NUL byte (which would corrupt the wire format).
#[inline]
fn write_cstr(s: &[u8], buf: &mut BytesMut) -> Result<(), io::Error> {
    if s.iter().any(|&b| b == 0) {
        return Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            "string contains embedded null",
        ));
    }
    buf.put_slice(s);
    buf.put_u8(0);
    Ok(())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/json/src/lib.rs | libs/proxy/json/src/lib.rs | //! A JSON serialization lib, designed for more flexibility than `serde_json` offers.
//!
//! Features:
//!
//! ## Dynamic construction
//!
//! Sometimes you have dynamic values you want to serialize, that are not already in a serde-aware model like a struct or a Vec etc.
//! To achieve this with serde, you need to implement a lot of different traits on a lot of different new-types.
//! Because of this, it's often easier to give-in and pull all the data into a serde-aware model (`serde_json::Value` or some intermediate struct),
//! but that is often not very efficient.
//!
//! This crate allows full control over the JSON encoding without needing to implement any extra traits. Just call the
//! relevant functions, and it will guarantee a correctly encoded JSON value.
//!
//! ## Async construction
//!
//! Similar to the above, sometimes the values arrive asynchronously. Often collecting those values in memory
//! is more expensive than writing them as JSON, since the overheads of `Vec` and `String` is much higher, however
//! there are exceptions.
//!
//! Serializing to JSON all in one go is also more CPU intensive and can cause lag spikes,
//! whereas serializing values incrementally spreads out the CPU load and reduces lag.
//!
//! ## Examples
//!
//! To represent the following JSON as a compact string
//!
//! ```json
//! {
//! "results": {
//! "rows": [
//! {
//! "id": 1,
//! "value": null
//! },
//! {
//! "id": 2,
//! "value": "hello"
//! }
//! ]
//! }
//! }
//! ```
//!
//! We can use the following code:
//!
//! ```
//! // create the outer object
//! let s = json::value_to_string!(|v| json::value_as_object!(|v| {
//! // create an entry with key "results" and start an object value associated with it.
//! let results = v.key("results");
//! json::value_as_object!(|results| {
//! // create an entry with key "rows" and start a list value associated with it.
//! let rows = results.key("rows");
//! json::value_as_list!(|rows| {
//! // create a list entry and start an object value associated with it.
//! let row = rows.entry();
//! json::value_as_object!(|row| {
//! // add entry "id": 1
//! row.entry("id", 1);
//! // add entry "value": null
//! row.entry("value", json::Null);
//! });
//!
//! // create a list entry and start an object value associated with it.
//! let row = rows.entry();
//! json::value_as_object!(|row| {
//! // add entry "id": 2
//! row.entry("id", 2);
//! // add entry "value": "hello"
//! row.entry("value", "hello");
//! });
//! });
//! });
//! }));
//!
//! assert_eq!(s, r#"{"results":{"rows":[{"id":1,"value":null},{"id":2,"value":"hello"}]}}"#);
//! ```
mod macros;
mod str;
mod value;
pub use value::{Null, ValueEncoder};
#[must_use]
/// Serialize a single json value.
///
/// Transactional: if dropped without `finish`ing (directly or via one of
/// the object/list/value methods), the buffer is rolled back to `start`,
/// erasing any partial output.
pub struct ValueSer<'buf> {
    buf: &'buf mut Vec<u8>,
    // Buffer length to restore on rollback (see Drop).
    start: usize,
}
impl<'buf> ValueSer<'buf> {
    /// Create a new json value serializer.
    pub fn new(buf: &'buf mut Vec<u8>) -> Self {
        Self { buf, start: 0 }
    }
    /// Borrow the underlying buffer
    pub fn as_buffer(&self) -> &[u8] {
        self.buf
    }
    /// Encode `e` as this value and commit it.
    #[inline]
    pub fn value(self, e: impl ValueEncoder) {
        e.encode(self);
    }
    /// Write raw bytes to the buf. This must be already JSON encoded.
    #[inline]
    pub fn write_raw_json(self, data: &[u8]) {
        self.buf.extend_from_slice(data);
        self.finish();
    }
    /// Start a new object serializer.
    #[inline]
    pub fn object(self) -> ObjectSer<'buf> {
        ObjectSer::new(self)
    }
    /// Start a new list serializer.
    #[inline]
    pub fn list(self) -> ListSer<'buf> {
        ListSer::new(self)
    }
    /// Finish the value ser.
    #[inline]
    fn finish(self) {
        // don't trigger the drop handler which triggers a rollback.
        // this won't cause memory leaks because `ValueSet` owns no allocations.
        std::mem::forget(self);
    }
}
impl Drop for ValueSer<'_> {
    // Rollback: an abandoned value erases everything it wrote.
    fn drop(&mut self) {
        self.buf.truncate(self.start);
    }
}
#[must_use]
/// Serialize a json object.
pub struct ObjectSer<'buf> {
    value: ValueSer<'buf>,
    // Buffer length just after the opening `{`; used to detect whether a
    // `,` separator is needed and as the rollback anchor.
    start: usize,
}
impl<'buf> ObjectSer<'buf> {
    /// Start a new object serializer.
    #[inline]
    pub fn new(value: ValueSer<'buf>) -> Self {
        value.buf.push(b'{');
        let start = value.buf.len();
        Self { value, start }
    }
    /// Borrow the underlying buffer
    pub fn as_buffer(&self) -> &[u8] {
        self.value.as_buffer()
    }
    /// Start a new object entry with the given string key, returning a [`ValueSer`] for the associated value.
    #[inline]
    pub fn key(&mut self, key: impl KeyEncoder) -> ValueSer<'_> {
        key.write_key(self)
    }
    /// Write an entry (key-value pair) to the object.
    #[inline]
    pub fn entry(&mut self, key: impl KeyEncoder, val: impl ValueEncoder) {
        self.key(key).value(val);
    }
    // Shared by KeyEncoder impls: writes the separator (if needed) and key,
    // returning a ValueSer for the entry's value.
    #[inline]
    fn entry_inner(&mut self, f: impl FnOnce(&mut Vec<u8>)) -> ValueSer<'_> {
        // track before the separator so that rolling back the value also removes the separator.
        let start = self.value.buf.len();
        // push separator if necessary (the object already has an entry).
        if self.value.buf.len() > self.start {
            self.value.buf.push(b',');
        }
        // push key
        f(self.value.buf);
        // push value separator
        self.value.buf.push(b':');
        // return value writer.
        ValueSer {
            buf: self.value.buf,
            start,
        }
    }
    /// Reset the buffer back to before this object was started.
    #[inline]
    pub fn rollback(self) -> ValueSer<'buf> {
        // Do not fully reset the value, only reset it to before the `{`.
        // This ensures any `,` before this value are not clobbered.
        self.value.buf.truncate(self.start - 1);
        self.value
    }
    /// Finish the object ser.
    #[inline]
    pub fn finish(self) {
        self.value.buf.push(b'}');
        self.value.finish();
    }
}
/// Types usable as JSON object keys: writes the encoded key (plus any
/// needed separator) into `obj` and returns the serializer for its value.
pub trait KeyEncoder {
    fn write_key<'a>(self, obj: &'a mut ObjectSer) -> ValueSer<'a>;
}
#[must_use]
/// Serialize a json list.
pub struct ListSer<'buf> {
    value: ValueSer<'buf>,
    // Buffer length just after the opening `[`; used to detect whether a
    // `,` separator is needed and as the rollback anchor.
    start: usize,
}
impl<'buf> ListSer<'buf> {
    /// Start a new list serializer.
    #[inline]
    pub fn new(value: ValueSer<'buf>) -> Self {
        value.buf.push(b'[');
        let start = value.buf.len();
        Self { value, start }
    }
    /// Borrow the underlying buffer
    pub fn as_buffer(&self) -> &[u8] {
        self.value.as_buffer()
    }
    /// Write a value to the list.
    #[inline]
    pub fn push(&mut self, val: impl ValueEncoder) {
        self.entry().value(val);
    }
    /// Start a new value entry in this list.
    #[inline]
    pub fn entry(&mut self) -> ValueSer<'_> {
        // track before the separator so that rolling back the value also removes the separator.
        let start = self.value.buf.len();
        // push separator if necessary (the list already has an entry).
        if self.value.buf.len() > self.start {
            self.value.buf.push(b',');
        }
        // return value writer.
        ValueSer {
            buf: self.value.buf,
            start,
        }
    }
    /// Reset the buffer back to before this list was started.
    #[inline]
    pub fn rollback(self) -> ValueSer<'buf> {
        // Do not fully reset the value, only reset it to before the `[`.
        // This ensures any `,` before this value are not clobbered.
        self.value.buf.truncate(self.start - 1);
        self.value
    }
    /// Finish the list ser.
    #[inline]
    pub fn finish(self) {
        self.value.buf.push(b']');
        self.value.finish();
    }
}
#[cfg(test)]
mod tests {
use crate::{Null, ValueSer};
#[test]
fn object() {
    // Low-level object API produces compact JSON with correct separators.
    let mut buf = vec![];
    let mut object = ValueSer::new(&mut buf).object();
    object.entry("foo", "bar");
    object.entry("baz", Null);
    object.finish();
    assert_eq!(buf, br#"{"foo":"bar","baz":null}"#);
}
#[test]
fn list() {
    // Low-level list API produces compact JSON with correct separators.
    let mut buf = vec![];
    let mut list = ValueSer::new(&mut buf).list();
    list.entry().value("bar");
    list.entry().value(Null);
    list.finish();
    assert_eq!(buf, br#"["bar",null]"#);
}
#[test]
fn object_macro() {
let res = crate::value_to_string!(|obj| {
crate::value_as_object!(|obj| {
obj.entry("foo", "bar");
obj.entry("baz", Null);
})
});
assert_eq!(res, r#"{"foo":"bar","baz":null}"#);
}
#[test]
fn list_macro() {
let res = crate::value_to_string!(|list| {
crate::value_as_list!(|list| {
list.entry().value("bar");
list.entry().value(Null);
})
});
assert_eq!(res, r#"["bar",null]"#);
}
#[test]
fn rollback_on_drop() {
let res = crate::value_to_string!(|list| {
crate::value_as_list!(|list| {
list.entry().value("bar");
'cancel: {
let nested_list = list.entry();
crate::value_as_list!(|nested_list| {
nested_list.entry().value(1);
assert_eq!(nested_list.as_buffer(), br#"["bar",[1"#);
if true {
break 'cancel;
}
})
}
assert_eq!(list.as_buffer(), br#"["bar""#);
list.entry().value(Null);
})
});
assert_eq!(res, r#"["bar",null]"#);
}
#[test]
fn rollback_object() {
let res = crate::value_to_string!(|obj| {
crate::value_as_object!(|obj| {
let entry = obj.key("1");
entry.value(1_i32);
let entry = obj.key("2");
let entry = {
let mut nested_obj = entry.object();
nested_obj.entry("foo", "bar");
nested_obj.rollback()
};
entry.value(2_i32);
})
});
assert_eq!(res, r#"{"1":1,"2":2}"#);
}
#[test]
fn rollback_list() {
let res = crate::value_to_string!(|list| {
crate::value_as_list!(|list| {
let entry = list.entry();
entry.value(1_i32);
let entry = list.entry();
let entry = {
let mut nested_list = entry.list();
nested_list.push("foo");
nested_list.rollback()
};
entry.value(2_i32);
})
});
assert_eq!(res, r#"[1,2]"#);
}
#[test]
fn string_escaping() {
let mut buf = vec![];
let mut object = ValueSer::new(&mut buf).object();
let key = "hello";
let value = "\n world";
object.entry(format_args!("{key:?}"), value);
object.finish();
assert_eq!(buf, br#"{"\"hello\"":"\n world"}"#);
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/json/src/value.rs | libs/proxy/json/src/value.rs | use core::fmt;
use std::collections::{BTreeMap, HashMap};
use crate::str::{format_escaped_fmt, format_escaped_str};
use crate::{KeyEncoder, ObjectSer, ValueSer, value_as_list, value_as_object};
/// Write a value to the underlying json representation.
pub trait ValueEncoder {
fn encode(self, v: ValueSer<'_>);
}
pub(crate) fn write_int(x: impl itoa::Integer, b: &mut Vec<u8>) {
b.extend_from_slice(itoa::Buffer::new().format(x).as_bytes());
}
pub(crate) fn write_float(x: impl ryu::Float, b: &mut Vec<u8>) {
b.extend_from_slice(ryu::Buffer::new().format(x).as_bytes());
}
impl<T: Copy + ValueEncoder> ValueEncoder for &T {
#[inline]
fn encode(self, v: ValueSer<'_>) {
T::encode(*self, v);
}
}
impl ValueEncoder for &str {
#[inline]
fn encode(self, v: ValueSer<'_>) {
format_escaped_str(v.buf, self);
v.finish();
}
}
impl ValueEncoder for fmt::Arguments<'_> {
#[inline]
fn encode(self, v: ValueSer<'_>) {
if let Some(s) = self.as_str() {
format_escaped_str(v.buf, s);
} else {
format_escaped_fmt(v.buf, self);
}
v.finish();
}
}
macro_rules! int {
[$($t:ty),*] => {
$(
impl ValueEncoder for $t {
#[inline]
fn encode(self, v: ValueSer<'_>) {
write_int(self, v.buf);
v.finish();
}
}
)*
};
}
int![u8, u16, u32, u64, usize, u128];
int![i8, i16, i32, i64, isize, i128];
macro_rules! float {
[$($t:ty),*] => {
$(
impl ValueEncoder for $t {
#[inline]
fn encode(self, v: ValueSer<'_>) {
write_float(self, v.buf);
v.finish();
}
}
)*
};
}
float![f32, f64];
impl ValueEncoder for bool {
#[inline]
fn encode(self, v: ValueSer<'_>) {
v.write_raw_json(if self { b"true" } else { b"false" });
}
}
impl<T: ValueEncoder> ValueEncoder for Option<T> {
#[inline]
fn encode(self, v: ValueSer<'_>) {
match self {
Some(value) => value.encode(v),
None => Null.encode(v),
}
}
}
impl KeyEncoder for &str {
#[inline]
fn write_key<'a>(self, obj: &'a mut ObjectSer) -> ValueSer<'a> {
let obj = &mut *obj;
obj.entry_inner(|b| format_escaped_str(b, self))
}
}
impl KeyEncoder for fmt::Arguments<'_> {
#[inline]
fn write_key<'a>(self, obj: &'a mut ObjectSer) -> ValueSer<'a> {
if let Some(key) = self.as_str() {
obj.entry_inner(|b| format_escaped_str(b, key))
} else {
obj.entry_inner(|b| format_escaped_fmt(b, self))
}
}
}
/// Represents the JSON null value.
pub struct Null;
impl ValueEncoder for Null {
#[inline]
fn encode(self, v: ValueSer<'_>) {
v.write_raw_json(b"null");
}
}
impl<T: ValueEncoder> ValueEncoder for Vec<T> {
#[inline]
fn encode(self, v: ValueSer<'_>) {
value_as_list!(|v| {
for t in self {
v.entry().value(t);
}
});
}
}
impl<T: Copy + ValueEncoder> ValueEncoder for &[T] {
#[inline]
fn encode(self, v: ValueSer<'_>) {
value_as_list!(|v| {
for t in self {
v.entry().value(t);
}
});
}
}
impl<K: KeyEncoder, V: ValueEncoder, S> ValueEncoder for HashMap<K, V, S> {
#[inline]
fn encode(self, o: ValueSer<'_>) {
value_as_object!(|o| {
for (k, v) in self {
o.entry(k, v);
}
});
}
}
impl<K: KeyEncoder, V: ValueEncoder> ValueEncoder for BTreeMap<K, V> {
#[inline]
fn encode(self, o: ValueSer<'_>) {
value_as_object!(|o| {
for (k, v) in self {
o.entry(k, v);
}
});
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/json/src/macros.rs | libs/proxy/json/src/macros.rs | //! # Examples
//!
//! ```
//! use futures::{StreamExt, TryStream, TryStreamExt};
//!
//! async fn stream_to_json_list<S, T, E>(mut s: S) -> Result<String, E>
//! where
//! S: TryStream<Ok = T, Error = E> + Unpin,
//! T: json::ValueEncoder
//! {
//! Ok(json::value_to_string!(|val| json::value_as_list!(|val| {
//! // note how we can use `.await` and `?` in here.
//! while let Some(value) = s.try_next().await? {
//! val.push(value);
//! }
//! })))
//! }
//!
//! let stream = futures::stream::iter([1, 2, 3]).map(Ok::<i32, ()>);
//! let json_string = futures::executor::block_on(stream_to_json_list(stream)).unwrap();
//! assert_eq!(json_string, "[1,2,3]");
//! ```
/// A helper to create a new JSON vec.
///
/// Implemented as a macro to preserve all control flow.
#[macro_export]
macro_rules! value_to_vec {
(|$val:ident| $body:expr) => {{
let mut buf = vec![];
let $val = $crate::ValueSer::new(&mut buf);
let _: () = $body;
buf
}};
}
/// A helper to create a new JSON string.
///
/// Implemented as a macro to preserve all control flow.
#[macro_export]
macro_rules! value_to_string {
(|$val:ident| $body:expr) => {{
::std::string::String::from_utf8($crate::value_to_vec!(|$val| $body))
.expect("json should be valid utf8")
}};
}
/// A helper that ensures the [`ObjectSer::finish`](crate::ObjectSer::finish) method is called on completion.
///
/// Consumes `$val` and assigns it as an [`ObjectSer`](crate::ObjectSer) serializer.
/// The serializer is only 'finished' if the body completes.
/// The serializer is rolled back if `break`/`return` escapes the body.
///
/// Implemented as a macro to preserve all control flow.
#[macro_export]
macro_rules! value_as_object {
(|$val:ident| $body:expr) => {{
let mut obj = $crate::ObjectSer::new($val);
let $val = &mut obj;
let res = $body;
obj.finish();
res
}};
}
/// A helper that ensures the [`ListSer::finish`](crate::ListSer::finish) method is called on completion.
///
/// Consumes `$val` and assigns it as an [`ListSer`](crate::ListSer) serializer.
/// The serializer is only 'finished' if the body completes.
/// The serializer is rolled back if `break`/`return` escapes the body.
///
/// Implemented as a macro to preserve all control flow.
#[macro_export]
macro_rules! value_as_list {
(|$val:ident| $body:expr) => {{
let mut list = $crate::ListSer::new($val);
let $val = &mut list;
let res = $body;
list.finish();
res
}};
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/json/src/str.rs | libs/proxy/json/src/str.rs | //! Helpers for serializing escaped strings.
//!
//! ## License
//!
//! <https://github.com/serde-rs/json/blob/c1826ebcccb1a520389c6b78ad3da15db279220d/src/ser.rs#L1514-L1552>
//! <https://github.com/serde-rs/json/blob/c1826ebcccb1a520389c6b78ad3da15db279220d/src/ser.rs#L2081-L2157>
//! Licensed by David Tolnay under MIT or Apache-2.0.
//!
//! With modifications by Conrad Ludgate on behalf of Databricks.
use std::fmt::{self, Write};
/// Represents a character escape code in a type-safe manner.
pub enum CharEscape {
/// An escaped quote `"`
Quote,
/// An escaped reverse solidus `\`
ReverseSolidus,
// /// An escaped solidus `/`
// Solidus,
/// An escaped backspace character (usually escaped as `\b`)
Backspace,
/// An escaped form feed character (usually escaped as `\f`)
FormFeed,
/// An escaped line feed character (usually escaped as `\n`)
LineFeed,
/// An escaped carriage return character (usually escaped as `\r`)
CarriageReturn,
/// An escaped tab character (usually escaped as `\t`)
Tab,
/// An escaped ASCII plane control character (usually escaped as
/// `\u00XX` where `XX` are two hex characters)
AsciiControl(u8),
}
impl CharEscape {
#[inline]
fn from_escape_table(escape: u8, byte: u8) -> CharEscape {
match escape {
self::BB => CharEscape::Backspace,
self::TT => CharEscape::Tab,
self::NN => CharEscape::LineFeed,
self::FF => CharEscape::FormFeed,
self::RR => CharEscape::CarriageReturn,
self::QU => CharEscape::Quote,
self::BS => CharEscape::ReverseSolidus,
self::UU => CharEscape::AsciiControl(byte),
_ => unreachable!(),
}
}
}
pub(crate) fn format_escaped_str(writer: &mut Vec<u8>, value: &str) {
writer.reserve(2 + value.len());
writer.push(b'"');
let rest = format_escaped_str_contents(writer, value);
writer.extend_from_slice(rest);
writer.push(b'"');
}
pub(crate) fn format_escaped_fmt(writer: &mut Vec<u8>, args: fmt::Arguments) {
writer.push(b'"');
Collect { buf: writer }
.write_fmt(args)
.expect("formatting should not error");
writer.push(b'"');
}
struct Collect<'buf> {
buf: &'buf mut Vec<u8>,
}
impl fmt::Write for Collect<'_> {
fn write_str(&mut self, s: &str) -> fmt::Result {
let last = format_escaped_str_contents(self.buf, s);
self.buf.extend(last);
Ok(())
}
}
// writes any escape sequences, and returns the suffix still needed to be written.
fn format_escaped_str_contents<'a>(writer: &mut Vec<u8>, value: &'a str) -> &'a [u8] {
let bytes = value.as_bytes();
let mut start = 0;
for (i, &byte) in bytes.iter().enumerate() {
let escape = ESCAPE[byte as usize];
if escape == 0 {
continue;
}
writer.extend_from_slice(&bytes[start..i]);
let char_escape = CharEscape::from_escape_table(escape, byte);
write_char_escape(writer, char_escape);
start = i + 1;
}
&bytes[start..]
}
const BB: u8 = b'b'; // \x08
const TT: u8 = b't'; // \x09
const NN: u8 = b'n'; // \x0A
const FF: u8 = b'f'; // \x0C
const RR: u8 = b'r'; // \x0D
const QU: u8 = b'"'; // \x22
const BS: u8 = b'\\'; // \x5C
const UU: u8 = b'u'; // \x00...\x1F except the ones above
const __: u8 = 0;
// Lookup table of escape sequences. A value of b'x' at index i means that byte
// i is escaped as "\x" in JSON. A value of 0 means that byte i is not escaped.
static ESCAPE: [u8; 256] = [
// 1 2 3 4 5 6 7 8 9 A B C D E F
UU, UU, UU, UU, UU, UU, UU, UU, BB, TT, NN, UU, FF, RR, UU, UU, // 0
UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, // 1
__, __, QU, __, __, __, __, __, __, __, __, __, __, __, __, __, // 2
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 3
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 4
__, __, __, __, __, __, __, __, __, __, __, __, BS, __, __, __, // 5
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 6
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 7
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 8
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 9
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // A
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // B
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // C
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // D
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // E
__, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // F
];
fn write_char_escape(writer: &mut Vec<u8>, char_escape: CharEscape) {
let s = match char_escape {
CharEscape::Quote => b"\\\"",
CharEscape::ReverseSolidus => b"\\\\",
// CharEscape::Solidus => b"\\/",
CharEscape::Backspace => b"\\b",
CharEscape::FormFeed => b"\\f",
CharEscape::LineFeed => b"\\n",
CharEscape::CarriageReturn => b"\\r",
CharEscape::Tab => b"\\t",
CharEscape::AsciiControl(byte) => {
static HEX_DIGITS: [u8; 16] = *b"0123456789abcdef";
let bytes = &[
b'\\',
b'u',
b'0',
b'0',
HEX_DIGITS[(byte >> 4) as usize],
HEX_DIGITS[(byte & 0xF) as usize],
];
return writer.extend_from_slice(bytes);
}
};
writer.extend_from_slice(s);
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/proxy/subzero_core/src/lib.rs | libs/proxy/subzero_core/src/lib.rs | // This is a stub for the subzero-core crate.
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/consumption_metrics/src/lib.rs | libs/consumption_metrics/src/lib.rs | //! Shared code for consumption metics collection
#![deny(unsafe_code)]
#![deny(clippy::undocumented_unsafe_blocks)]
use chrono::{DateTime, Utc};
use rand::Rng;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
#[serde(tag = "type")]
pub enum EventType {
#[serde(rename = "absolute")]
Absolute { time: DateTime<Utc> },
#[serde(rename = "incremental")]
Incremental {
start_time: DateTime<Utc>,
stop_time: DateTime<Utc>,
},
}
impl EventType {
pub fn absolute_time(&self) -> Option<&DateTime<Utc>> {
use EventType::*;
match self {
Absolute { time } => Some(time),
_ => None,
}
}
pub fn incremental_timerange(&self) -> Option<std::ops::Range<&DateTime<Utc>>> {
// these can most likely be thought of as Range or RangeFull, at least pageserver creates
// incremental ranges where the stop and next start are equal.
use EventType::*;
match self {
Incremental {
start_time,
stop_time,
} => Some(start_time..stop_time),
_ => None,
}
}
pub fn is_incremental(&self) -> bool {
matches!(self, EventType::Incremental { .. })
}
/// Returns the absolute time, or for incremental ranges, the stop time.
pub fn recorded_at(&self) -> &DateTime<Utc> {
use EventType::*;
match self {
Absolute { time } => time,
Incremental { stop_time, .. } => stop_time,
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub struct Event<Extra, Metric> {
#[serde(flatten)]
#[serde(rename = "type")]
pub kind: EventType,
pub metric: Metric,
pub idempotency_key: String,
pub value: u64,
#[serde(flatten)]
pub extra: Extra,
}
pub fn idempotency_key(node_id: &str) -> String {
IdempotencyKey::generate(node_id).to_string()
}
/// Downstream users will use these to detect upload retries.
pub struct IdempotencyKey<'a> {
now: chrono::DateTime<Utc>,
node_id: &'a str,
nonce: u16,
}
impl std::fmt::Display for IdempotencyKey<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}-{}-{:04}", self.now, self.node_id, self.nonce)
}
}
impl<'a> IdempotencyKey<'a> {
pub fn generate(node_id: &'a str) -> Self {
IdempotencyKey {
now: Utc::now(),
node_id,
nonce: rand::rng().random_range(0..=9999),
}
}
pub fn for_tests(now: DateTime<Utc>, node_id: &'a str, nonce: u16) -> Self {
IdempotencyKey {
now,
node_id,
nonce,
}
}
}
/// Split into chunks of 1000 metrics to avoid exceeding the max request size.
pub const CHUNK_SIZE: usize = 1000;
// Just a wrapper around a slice of events
// to serialize it as `{"events" : [ ] }
#[derive(Debug, serde::Serialize, serde::Deserialize, PartialEq)]
pub struct EventChunk<'a, T: Clone + PartialEq> {
pub events: std::borrow::Cow<'a, [T]>,
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/posthog_client_lite/src/lib.rs | libs/posthog_client_lite/src/lib.rs | //! A lite version of the PostHog client that only supports local evaluation of feature flags.
mod background_loop;
pub use background_loop::FeatureResolverBackgroundLoop;
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use serde_json::json;
use sha2::Digest;
#[derive(Debug, thiserror::Error)]
pub enum PostHogEvaluationError {
/// The feature flag is not available, for example, because the local evaluation data is not populated yet.
#[error("Feature flag not available: {0}")]
NotAvailable(String),
#[error("No condition group is matched")]
NoConditionGroupMatched,
/// Real errors, e.g., the rollout percentage does not add up to 100.
#[error("Failed to evaluate feature flag: {0}")]
Internal(String),
}
impl PostHogEvaluationError {
pub fn as_variant_str(&self) -> &'static str {
match self {
PostHogEvaluationError::NotAvailable(_) => "not_available",
PostHogEvaluationError::NoConditionGroupMatched => "no_condition_group_matched",
PostHogEvaluationError::Internal(_) => "internal",
}
}
}
#[derive(Deserialize)]
pub struct LocalEvaluationResponse {
pub flags: Vec<LocalEvaluationFlag>,
}
#[derive(Deserialize)]
pub struct LocalEvaluationFlag {
#[allow(dead_code)]
id: u64,
team_id: u64,
key: String,
filters: LocalEvaluationFlagFilters,
active: bool,
}
#[derive(Deserialize)]
pub struct LocalEvaluationFlagFilters {
groups: Vec<LocalEvaluationFlagFilterGroup>,
multivariate: Option<LocalEvaluationFlagMultivariate>,
}
#[derive(Deserialize)]
pub struct LocalEvaluationFlagFilterGroup {
variant: Option<String>,
properties: Option<Vec<LocalEvaluationFlagFilterProperty>>,
rollout_percentage: i64,
}
#[derive(Deserialize)]
pub struct LocalEvaluationFlagFilterProperty {
key: String,
value: PostHogFlagFilterPropertyValue,
operator: String,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(untagged)]
pub enum PostHogFlagFilterPropertyValue {
String(String),
Number(f64),
Boolean(bool),
List(Vec<String>),
}
#[derive(Deserialize)]
pub struct LocalEvaluationFlagMultivariate {
variants: Vec<LocalEvaluationFlagMultivariateVariant>,
}
#[derive(Deserialize)]
pub struct LocalEvaluationFlagMultivariateVariant {
key: String,
rollout_percentage: i64,
}
pub struct FeatureStore {
flags: HashMap<String, LocalEvaluationFlag>,
}
impl Default for FeatureStore {
fn default() -> Self {
Self::new()
}
}
enum GroupEvaluationResult {
MatchedAndOverride(String),
MatchedAndEvaluate,
Unmatched,
}
impl FeatureStore {
pub fn new() -> Self {
Self {
flags: HashMap::new(),
}
}
pub fn new_with_flags(
flags: Vec<LocalEvaluationFlag>,
project_id: Option<u64>,
) -> Result<Self, &'static str> {
let mut store = Self::new();
store.set_flags(flags, project_id)?;
Ok(store)
}
pub fn set_flags(
&mut self,
flags: Vec<LocalEvaluationFlag>,
project_id: Option<u64>,
) -> Result<(), &'static str> {
self.flags.clear();
for flag in flags {
if let Some(project_id) = project_id {
if flag.team_id != project_id {
return Err(
"Retrieved a spec with different project id, wrong config? Discarding the feature flags.",
);
}
}
self.flags.insert(flag.key.clone(), flag);
}
Ok(())
}
/// Generate a consistent hash for a user ID (e.g., tenant ID).
///
/// The implementation is different from PostHog SDK. In PostHog SDK, it is sha1 of `user_id.distinct_id.salt`.
/// However, as we do not upload all of our tenant IDs to PostHog, we do not have the PostHog distinct_id for a
/// tenant. Therefore, the way we compute it is sha256 of `user_id.feature_id.salt`.
fn consistent_hash(user_id: &str, flag_key: &str, salt: &str) -> f64 {
let mut hasher = sha2::Sha256::new();
hasher.update(user_id);
hasher.update(".");
hasher.update(flag_key);
hasher.update(".");
hasher.update(salt);
let hash = hasher.finalize();
let hash_int = u64::from_le_bytes(hash[..8].try_into().unwrap());
hash_int as f64 / u64::MAX as f64
}
/// Evaluate a condition. Returns an error if the condition cannot be evaluated due to parsing error or missing
/// property.
fn evaluate_condition(
&self,
operator: &str,
provided: &PostHogFlagFilterPropertyValue,
requested: &PostHogFlagFilterPropertyValue,
) -> Result<bool, PostHogEvaluationError> {
match operator {
"exact" => {
let PostHogFlagFilterPropertyValue::String(provided) = provided else {
// Left should be a string
return Err(PostHogEvaluationError::Internal(format!(
"The left side of the condition is not a string: {provided:?}"
)));
};
let PostHogFlagFilterPropertyValue::List(requested) = requested else {
// Right should be a list of string
return Err(PostHogEvaluationError::Internal(format!(
"The right side of the condition is not a list: {requested:?}"
)));
};
Ok(requested.contains(provided))
}
"lt" | "gt" => {
let PostHogFlagFilterPropertyValue::String(requested) = requested else {
// Right should be a string
return Err(PostHogEvaluationError::Internal(format!(
"The right side of the condition is not a string: {requested:?}"
)));
};
let Ok(requested) = requested.parse::<f64>() else {
return Err(PostHogEvaluationError::Internal(format!(
"Can not parse the right side of the condition as a number: {requested:?}"
)));
};
// Left can either be a number or a string
let provided = match provided {
PostHogFlagFilterPropertyValue::Number(provided) => *provided,
PostHogFlagFilterPropertyValue::String(provided) => {
let Ok(provided) = provided.parse::<f64>() else {
return Err(PostHogEvaluationError::Internal(format!(
"Can not parse the left side of the condition as a number: {provided:?}"
)));
};
provided
}
_ => {
return Err(PostHogEvaluationError::Internal(format!(
"The left side of the condition is not a number or a string: {provided:?}"
)));
}
};
match operator {
"lt" => Ok(provided < requested),
"gt" => Ok(provided > requested),
op => Err(PostHogEvaluationError::Internal(format!(
"Unsupported operator: {op}"
))),
}
}
_ => Err(PostHogEvaluationError::Internal(format!(
"Unsupported operator: {operator}"
))),
}
}
/// Evaluate a percentage.
fn evaluate_percentage(&self, mapped_user_id: f64, percentage: i64) -> bool {
mapped_user_id <= percentage as f64 / 100.0
}
/// Evaluate a filter group for a feature flag. Returns an error if there are errors during the evaluation.
///
/// Return values:
/// Ok(GroupEvaluationResult::MatchedAndOverride(variant)): matched and evaluated to this value
/// Ok(GroupEvaluationResult::MatchedAndEvaluate): condition matched but no variant override, use the global rollout percentage
/// Ok(GroupEvaluationResult::Unmatched): condition unmatched
fn evaluate_group(
&self,
group: &LocalEvaluationFlagFilterGroup,
hash_on_group_rollout_percentage: f64,
provided_properties: &HashMap<String, PostHogFlagFilterPropertyValue>,
) -> Result<GroupEvaluationResult, PostHogEvaluationError> {
if let Some(ref properties) = group.properties {
for property in properties {
if let Some(value) = provided_properties.get(&property.key) {
// The user provided the property value
if !self.evaluate_condition(
property.operator.as_ref(),
value,
&property.value,
)? {
return Ok(GroupEvaluationResult::Unmatched);
}
} else {
// We cannot evaluate, the property is not available
return Err(PostHogEvaluationError::NotAvailable(format!(
"The required property in the condition is not available: {}",
property.key
)));
}
}
}
// The group has no condition matchers or we matched the properties
if self.evaluate_percentage(hash_on_group_rollout_percentage, group.rollout_percentage) {
if let Some(ref variant_override) = group.variant {
Ok(GroupEvaluationResult::MatchedAndOverride(
variant_override.clone(),
))
} else {
Ok(GroupEvaluationResult::MatchedAndEvaluate)
}
} else {
Ok(GroupEvaluationResult::Unmatched)
}
}
/// Evaluate a multivariate feature flag. Returns an error if the flag is not available or if there are errors
/// during the evaluation.
///
/// The parsing logic is as follows:
///
/// * Match each filter group.
/// - If a group is matched, it will first determine whether the user is in the range of the group's rollout
/// percentage. We will generate a consistent hash for the user ID on the group rollout percentage. This hash
/// is shared across all groups.
/// - If the hash falls within the group's rollout percentage, return the variant if it's overridden, or
/// - Evaluate the variant using the global config and the global rollout percentage.
/// * Otherwise, continue with the next group until all groups are evaluated and no group is within the
/// rollout percentage.
/// * If there are no matching groups, return an error.
///
/// Example: we have a multivariate flag with 3 groups of the configured global rollout percentage: A (10%), B (20%), C (70%).
/// There is a single group with a condition that has a rollout percentage of 10% and it does not have a variant override.
/// Then, we will have 1% of the users evaluated to A, 2% to B, and 7% to C.
///
/// Error handling: the caller should inspect the error and decide the behavior when a feature flag
/// cannot be evaluated (i.e., default to false if it cannot be resolved). The error should *not* be
/// propagated beyond where the feature flag gets resolved.
pub fn evaluate_multivariate(
&self,
flag_key: &str,
user_id: &str,
properties: &HashMap<String, PostHogFlagFilterPropertyValue>,
) -> Result<String, PostHogEvaluationError> {
let hash_on_global_rollout_percentage =
Self::consistent_hash(user_id, flag_key, "multivariate");
let hash_on_group_rollout_percentage =
Self::consistent_hash(user_id, flag_key, "within_group");
self.evaluate_multivariate_inner(
flag_key,
hash_on_global_rollout_percentage,
hash_on_group_rollout_percentage,
properties,
)
}
/// Evaluate a boolean feature flag. Returns an error if the flag is not available or if there are errors
/// during the evaluation.
///
/// The parsing logic is as follows:
///
/// * Generate a consistent hash for the tenant-feature.
/// * Match each filter group.
/// - If a group is matched, it will first determine whether the user is in the range of the rollout
/// percentage.
/// - If the hash falls within the group's rollout percentage, return true.
/// * Otherwise, continue with the next group until all groups are evaluated and no group is within the
/// rollout percentage.
/// * If there are no matching groups, return an error.
///
/// Returns `Ok(())` if the feature flag evaluates to true. In the future, it will return a payload.
///
/// Error handling: the caller should inspect the error and decide the behavior when a feature flag
/// cannot be evaluated (i.e., default to false if it cannot be resolved). The error should *not* be
/// propagated beyond where the feature flag gets resolved.
pub fn evaluate_boolean(
&self,
flag_key: &str,
user_id: &str,
properties: &HashMap<String, PostHogFlagFilterPropertyValue>,
) -> Result<(), PostHogEvaluationError> {
let hash_on_global_rollout_percentage = Self::consistent_hash(user_id, flag_key, "boolean");
self.evaluate_boolean_inner(flag_key, hash_on_global_rollout_percentage, properties)
}
/// Evaluate a multivariate feature flag. Note that we directly take the mapped user ID
/// (a consistent hash ranging from 0 to 1) so that it is easier to use it in the tests
/// and avoid duplicate computations.
///
/// Use a different consistent hash for evaluating the group rollout percentage.
/// The behavior: if the condition is set to rolling out to 10% of the users, and
/// we set the variant A to 20% in the global config, then 2% of the total users will
/// be evaluated to variant A.
///
/// Note that the hash to determine group rollout percentage is shared across all groups. So if we have two
/// exactly-the-same conditions with 10% and 20% rollout percentage respectively, a total of 20% of the users
/// will be evaluated (versus 30% if group evaluation is done independently).
pub(crate) fn evaluate_multivariate_inner(
&self,
flag_key: &str,
hash_on_global_rollout_percentage: f64,
hash_on_group_rollout_percentage: f64,
properties: &HashMap<String, PostHogFlagFilterPropertyValue>,
) -> Result<String, PostHogEvaluationError> {
if let Some(flag_config) = self.flags.get(flag_key) {
if !flag_config.active {
return Err(PostHogEvaluationError::NotAvailable(format!(
"The feature flag is not active: {flag_key}"
)));
}
let Some(ref multivariate) = flag_config.filters.multivariate else {
return Err(PostHogEvaluationError::Internal(format!(
"No multivariate available, should use evaluate_boolean?: {flag_key}"
)));
};
// TODO: sort the groups so that variant overrides always get evaluated first and it follows the PostHog
// Python SDK behavior; for now we do not configure conditions without variant overrides in Neon so it
// does not matter.
for group in &flag_config.filters.groups {
match self.evaluate_group(group, hash_on_group_rollout_percentage, properties)? {
GroupEvaluationResult::MatchedAndOverride(variant) => return Ok(variant),
GroupEvaluationResult::MatchedAndEvaluate => {
let mut percentage = 0;
for variant in &multivariate.variants {
percentage += variant.rollout_percentage;
if self
.evaluate_percentage(hash_on_global_rollout_percentage, percentage)
{
return Ok(variant.key.clone());
}
}
// This should not happen because the rollout percentage always adds up to 100, but just in case that PostHog
// returned invalid spec, we return an error.
return Err(PostHogEvaluationError::Internal(format!(
"Rollout percentage does not add up to 100: {flag_key}"
)));
}
GroupEvaluationResult::Unmatched => continue,
}
}
// If no group is matched, the feature is not available, and up to the caller to decide what to do.
Err(PostHogEvaluationError::NoConditionGroupMatched)
} else {
// The feature flag is not available yet
Err(PostHogEvaluationError::NotAvailable(format!(
"Not found in the local evaluation spec: {flag_key}"
)))
}
}
/// Evaluate a multivariate feature flag. Note that we directly take the mapped user ID
/// (a consistent hash ranging from 0 to 1) so that it is easier to use it in the tests
/// and avoid duplicate computations.
///
/// Use a different consistent hash for evaluating the group rollout percentage.
/// The behavior: if the condition is set to rolling out to 10% of the users, and
/// we set the variant A to 20% in the global config, then 2% of the total users will
/// be evaluated to variant A.
///
/// Note that the hash to determine group rollout percentage is shared across all groups. So if we have two
/// exactly-the-same conditions with 10% and 20% rollout percentage respectively, a total of 20% of the users
/// will be evaluated (versus 30% if group evaluation is done independently).
pub(crate) fn evaluate_boolean_inner(
&self,
flag_key: &str,
hash_on_global_rollout_percentage: f64,
properties: &HashMap<String, PostHogFlagFilterPropertyValue>,
) -> Result<(), PostHogEvaluationError> {
if let Some(flag_config) = self.flags.get(flag_key) {
if !flag_config.active {
return Err(PostHogEvaluationError::NotAvailable(format!(
"The feature flag is not active: {flag_key}"
)));
}
if flag_config.filters.multivariate.is_some() {
return Err(PostHogEvaluationError::Internal(format!(
"This looks like a multivariate flag, should use evaluate_multivariate?: {flag_key}"
)));
};
// TODO: sort the groups so that variant overrides always get evaluated first and it follows the PostHog
// Python SDK behavior; for now we do not configure conditions without variant overrides in Neon so it
// does not matter.
for group in &flag_config.filters.groups {
match self.evaluate_group(group, hash_on_global_rollout_percentage, properties)? {
GroupEvaluationResult::MatchedAndOverride(_) => {
return Err(PostHogEvaluationError::Internal(format!(
"Boolean flag cannot have overrides: {flag_key}"
)));
}
GroupEvaluationResult::MatchedAndEvaluate => {
return Ok(());
}
GroupEvaluationResult::Unmatched => continue,
}
}
// If no group is matched, the feature is not available, and up to the caller to decide what to do.
Err(PostHogEvaluationError::NoConditionGroupMatched)
} else {
// The feature flag is not available yet
Err(PostHogEvaluationError::NotAvailable(format!(
"Not found in the local evaluation spec: {flag_key}"
)))
}
}
/// Infer whether a feature flag is a boolean flag by checking if it has a multivariate filter.
pub fn is_feature_flag_boolean(&self, flag_key: &str) -> Result<bool, PostHogEvaluationError> {
if let Some(flag_config) = self.flags.get(flag_key) {
Ok(flag_config.filters.multivariate.is_none())
} else {
Err(PostHogEvaluationError::NotAvailable(format!(
"Not found in the local evaluation spec: {flag_key}"
)))
}
}
}
pub struct PostHogClientConfig {
/// The server API key.
pub server_api_key: String,
/// The client API key.
pub client_api_key: String,
/// The project ID.
pub project_id: String,
/// The private API URL.
pub private_api_url: String,
/// The public API URL.
pub public_api_url: String,
}
/// A lite PostHog client.
///
/// At the point of writing this code, PostHog does not have a functional Rust client with feature flag support.
/// This is a lite version that only supports local evaluation of feature flags and only supports those JSON specs
/// that will be used within Neon.
///
/// PostHog is designed as a browser-server system: the browser (client) side uses the client key and is exposed
/// to the end users; the server side uses a server key and is not exposed to the end users. The client and the
/// server has different API keys and provide a different set of APIs. In Neon, we only have the server (that is
/// pageserver), and it will use both the client API and the server API. So we need to store two API keys within
/// our PostHog client.
///
/// The server API is used to fetch the feature flag specs. The client API is used to capture events in case we
/// want to report the feature flag usage back to PostHog. The current plan is to use PostHog only as an UI to
/// configure feature flags so it is very likely that the client API will not be used.
pub struct PostHogClient {
/// The config.
config: PostHogClientConfig,
/// The HTTP client.
client: reqwest::Client,
}
#[derive(Serialize, Debug)]
pub struct CaptureEvent {
pub event: String,
pub distinct_id: String,
pub properties: serde_json::Value,
}
impl PostHogClient {
pub fn new(config: PostHogClientConfig) -> Self {
let client = reqwest::Client::new();
Self { config, client }
}
pub fn new_with_us_region(
server_api_key: String,
client_api_key: String,
project_id: String,
) -> Self {
Self::new(PostHogClientConfig {
server_api_key,
client_api_key,
project_id,
private_api_url: "https://us.posthog.com".to_string(),
public_api_url: "https://us.i.posthog.com".to_string(),
})
}
/// Check if the server API key is a feature flag secure API key. This key can only be
/// used to fetch the feature flag specs and can only be used on a undocumented API
/// endpoint.
fn is_feature_flag_secure_api_key(&self) -> bool {
self.config.server_api_key.starts_with("phs_")
}
/// Get the raw JSON spec, same as `get_feature_flags_local_evaluation` but without parsing.
pub async fn get_feature_flags_local_evaluation_raw(&self) -> anyhow::Result<String> {
// BASE_URL/api/projects/:project_id/feature_flags/local_evaluation
// with bearer token of self.server_api_key
// OR
// BASE_URL/api/feature_flag/local_evaluation/
// with bearer token of feature flag specific self.server_api_key
let url = if self.is_feature_flag_secure_api_key() {
// The new feature local evaluation secure API token
format!(
"{}/api/feature_flag/local_evaluation",
self.config.private_api_url
)
} else {
// The old personal API token
format!(
"{}/api/projects/{}/feature_flags/local_evaluation",
self.config.private_api_url, self.config.project_id
)
};
let response = self
.client
.get(url)
.bearer_auth(&self.config.server_api_key)
.send()
.await?;
let status = response.status();
let body = response.text().await?;
if !status.is_success() {
return Err(anyhow::anyhow!(
"Failed to get feature flags: {}, {}",
status,
body
));
}
Ok(body)
}
/// Fetch the feature flag specs from the server.
///
/// This is unfortunately an undocumented API at:
/// - <https://posthog.com/docs/api/feature-flags#get-api-projects-project_id-feature_flags-local_evaluation>
/// - <https://posthog.com/docs/feature-flags/local-evaluation>
///
/// The handling logic in [`FeatureStore`] mostly follows the Python API implementation.
/// See `_compute_flag_locally` in <https://github.com/PostHog/posthog-python/blob/master/posthog/client.py>
pub async fn get_feature_flags_local_evaluation(
&self,
) -> Result<LocalEvaluationResponse, anyhow::Error> {
let raw = self.get_feature_flags_local_evaluation_raw().await?;
Ok(serde_json::from_str(&raw)?)
}
/// Capture an event. This will only be used to report the feature flag usage back to PostHog, though
/// it also support a lot of other functionalities.
///
/// <https://posthog.com/docs/api/capture>
pub async fn capture_event(
&self,
event: &str,
distinct_id: &str,
properties: &serde_json::Value,
) -> anyhow::Result<()> {
// PUBLIC_URL/capture/
let url = format!("{}/capture/", self.config.public_api_url);
let response = self
.client
.post(url)
.body(serde_json::to_string(&json!({
"api_key": self.config.client_api_key,
"distinct_id": distinct_id,
"event": event,
"properties": properties,
}))?)
.send()
.await?;
let status = response.status();
let body = response.text().await?;
if !status.is_success() {
return Err(anyhow::anyhow!(
"Failed to capture events: {}, {}",
status,
body
));
}
Ok(())
}
pub async fn capture_event_batch(&self, events: &[CaptureEvent]) -> anyhow::Result<()> {
// PUBLIC_URL/batch/
let url = format!("{}/batch/", self.config.public_api_url);
let response = self
.client
.post(url)
.body(serde_json::to_string(&json!({
"api_key": self.config.client_api_key,
"batch": events,
}))?)
.send()
.await?;
let status = response.status();
let body = response.text().await?;
if !status.is_success() {
return Err(anyhow::anyhow!(
"Failed to capture events: {}, {}",
status,
body
));
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
fn data() -> &'static str {
r#"{
"flags": [
{
"id": 141807,
"team_id": 152860,
"name": "",
"key": "image-compaction-boundary",
"filters": {
"groups": [
{
"variant": null,
"properties": [
{
"key": "plan_type",
"type": "person",
"value": [
"free"
],
"operator": "exact"
}
],
"rollout_percentage": 40
},
{
"variant": null,
"properties": [],
"rollout_percentage": 10
}
],
"payloads": {},
"multivariate": null
},
"deleted": false,
"active": true,
"ensure_experience_continuity": false,
"has_encrypted_payloads": false,
"version": 1
},
{
"id": 135586,
"team_id": 152860,
"name": "",
"key": "boolean-flag",
"filters": {
"groups": [
{
"variant": null,
"properties": [
{
"key": "plan_type",
"type": "person",
"value": [
"free"
],
"operator": "exact"
}
],
"rollout_percentage": 47
}
],
"payloads": {},
"multivariate": null
},
"deleted": false,
"active": true,
"ensure_experience_continuity": false,
"has_encrypted_payloads": false,
"version": 1
},
{
"id": 132794,
"team_id": 152860,
"name": "",
"key": "gc-compaction",
"filters": {
"groups": [
{
"variant": "enabled-stage-2",
"properties": [
{
"key": "plan_type",
"type": "person",
"value": [
"free"
],
"operator": "exact"
},
{
"key": "pageserver_remote_size",
"type": "person",
"value": "10000000",
"operator": "lt"
}
],
"rollout_percentage": 50
},
{
"properties": [
{
"key": "plan_type",
"type": "person",
"value": [
"free"
],
"operator": "exact"
},
{
"key": "pageserver_remote_size",
"type": "person",
"value": "10000000",
"operator": "lt"
}
],
"rollout_percentage": 80
}
],
"payloads": {},
"multivariate": {
"variants": [
{
"key": "disabled",
"name": "",
"rollout_percentage": 90
},
{
"key": "enabled-stage-1",
"name": "",
"rollout_percentage": 10
},
{
"key": "enabled-stage-2",
"name": "",
"rollout_percentage": 0
},
{
"key": "enabled-stage-3",
"name": "",
"rollout_percentage": 0
},
{
"key": "enabled",
"name": "",
"rollout_percentage": 0
}
]
}
},
"deleted": false,
"active": true,
"ensure_experience_continuity": false,
"has_encrypted_payloads": false,
"version": 7
}
],
"group_type_mapping": {},
"cohorts": {}
}"#
}
#[test]
fn parse_local_evaluation() {
let data = data();
let _: LocalEvaluationResponse = serde_json::from_str(data).unwrap();
}
#[test]
fn evaluate_multivariate() {
let mut store = FeatureStore::new();
let response: LocalEvaluationResponse = serde_json::from_str(data()).unwrap();
store.set_flags(response.flags, None).unwrap();
// This lacks the required properties and cannot be evaluated.
let variant =
store.evaluate_multivariate_inner("gc-compaction", 1.00, 0.40, &HashMap::new());
assert!(matches!(
variant,
Err(PostHogEvaluationError::NotAvailable(_))
),);
let properties_unmatched = HashMap::from([
(
"plan_type".to_string(),
PostHogFlagFilterPropertyValue::String("paid".to_string()),
),
(
"pageserver_remote_size".to_string(),
PostHogFlagFilterPropertyValue::Number(1000.0),
),
]);
// This does not match any group so there will be an error.
let variant =
store.evaluate_multivariate_inner("gc-compaction", 1.00, 0.40, &properties_unmatched);
assert!(matches!(
variant,
Err(PostHogEvaluationError::NoConditionGroupMatched)
),);
let variant =
store.evaluate_multivariate_inner("gc-compaction", 0.80, 0.80, &properties_unmatched);
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | true |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/posthog_client_lite/src/background_loop.rs | libs/posthog_client_lite/src/background_loop.rs | //! A background loop that fetches feature flags from PostHog and updates the feature store.
use std::{
sync::Arc,
time::{Duration, SystemTime},
};
use arc_swap::ArcSwap;
use tokio_util::sync::CancellationToken;
use tracing::{Instrument, info_span};
use crate::{
CaptureEvent, FeatureStore, LocalEvaluationResponse, PostHogClient, PostHogClientConfig,
};
/// A background loop that fetches feature flags from PostHog and updates the feature store.
pub struct FeatureResolverBackgroundLoop {
posthog_client: PostHogClient,
feature_store: ArcSwap<(SystemTime, Arc<FeatureStore>)>,
cancel: CancellationToken,
}
impl FeatureResolverBackgroundLoop {
pub fn new(config: PostHogClientConfig, shutdown_pageserver: CancellationToken) -> Self {
Self {
posthog_client: PostHogClient::new(config),
feature_store: ArcSwap::new(Arc::new((
SystemTime::UNIX_EPOCH,
Arc::new(FeatureStore::new()),
))),
cancel: shutdown_pageserver,
}
}
/// Update the feature store with a new feature flag spec bypassing the normal refresh loop.
pub fn update(&self, spec: String) -> anyhow::Result<()> {
let resp: LocalEvaluationResponse = serde_json::from_str(&spec)?;
self.update_feature_store_nofail(resp, "http_propagate");
Ok(())
}
fn update_feature_store_nofail(&self, resp: LocalEvaluationResponse, source: &'static str) {
let project_id = self.posthog_client.config.project_id.parse::<u64>().ok();
match FeatureStore::new_with_flags(resp.flags, project_id) {
Ok(feature_store) => {
self.feature_store
.store(Arc::new((SystemTime::now(), Arc::new(feature_store))));
tracing::info!("Feature flag updated from {}", source);
}
Err(e) => {
tracing::warn!("Cannot process feature flag spec from {}: {}", source, e);
}
}
}
pub fn spawn(
self: Arc<Self>,
handle: &tokio::runtime::Handle,
refresh_period: Duration,
fake_tenants: Vec<CaptureEvent>,
) {
let this = self.clone();
let cancel = self.cancel.clone();
// Main loop of updating the feature flags.
handle.spawn(
async move {
tracing::info!(
"Starting PostHog feature resolver with refresh period: {:?}",
refresh_period
);
let mut ticker = tokio::time::interval(refresh_period);
ticker.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
loop {
tokio::select! {
_ = ticker.tick() => {}
_ = cancel.cancelled() => break
}
{
let last_update = this.feature_store.load().0;
if let Ok(elapsed) = last_update.elapsed() {
if elapsed < refresh_period {
tracing::debug!(
"Skipping feature flag refresh because it's too soon"
);
continue;
}
}
}
let resp = match this
.posthog_client
.get_feature_flags_local_evaluation()
.await
{
Ok(resp) => resp,
Err(e) => {
tracing::warn!("Cannot get feature flags: {}", e);
continue;
}
};
this.update_feature_store_nofail(resp, "refresh_loop");
}
tracing::info!("PostHog feature resolver stopped");
}
.instrument(info_span!("posthog_feature_resolver")),
);
// Report fake tenants to PostHog so that we have the combination of all the properties in the UI.
// Do one report per pageserver restart.
let this = self.clone();
handle.spawn(
async move {
tracing::info!("Starting PostHog feature reporter");
for tenant in &fake_tenants {
tracing::info!("Reporting fake tenant: {:?}", tenant);
}
if let Err(e) = this.posthog_client.capture_event_batch(&fake_tenants).await {
tracing::warn!("Cannot report fake tenants: {}", e);
}
}
.instrument(info_span!("posthog_feature_reporter")),
);
}
pub fn feature_store(&self) -> Arc<FeatureStore> {
self.feature_store.load().1.clone()
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/vm_monitor/src/filecache.rs | libs/vm_monitor/src/filecache.rs | //! Logic for configuring and scaling the Postgres file cache.
use std::num::NonZeroU64;
use anyhow::{Context, anyhow};
use tokio_postgres::types::ToSql;
use tokio_postgres::{Client, NoTls, Row};
use tokio_util::sync::CancellationToken;
use tracing::{error, info};
use crate::MiB;
/// Manages Postgres' file cache by keeping a connection open.
#[derive(Debug)]
pub struct FileCacheState {
client: Client,
conn_str: String,
pub(crate) config: FileCacheConfig,
/// A token for cancelling spawned threads during shutdown.
token: CancellationToken,
}
#[derive(Debug)]
pub struct FileCacheConfig {
/// The size of the file cache, in terms of the size of the resource it consumes
/// (currently: only memory)
///
/// For example, setting `resource_multipler = 0.75` gives the cache a target size of 75% of total
/// resources.
///
/// This value must be strictly between 0 and 1.
resource_multiplier: f64,
/// The required minimum amount of memory, in bytes, that must remain available
/// after subtracting the file cache.
///
/// This value must be non-zero.
min_remaining_after_cache: NonZeroU64,
/// Controls the rate of increase in the file cache's size as it grows from zero
/// (when total resources equals min_remaining_after_cache) to the desired size based on
/// `resource_multiplier`.
///
/// A `spread_factor` of zero means that all additional resources will go to the cache until it
/// reaches the desired size. Setting `spread_factor` to N roughly means "for every 1 byte added to
/// the cache's size, N bytes are reserved for the rest of the system, until the cache gets to
/// its desired size".
///
/// This value must be >= 0, and must retain an increase that is more than what would be given by
/// `resource_multiplier`. For example, setting `resource_multiplier` = 0.75 but `spread_factor` = 1
/// would be invalid, because `spread_factor` would induce only 50% usage - never reaching the 75%
/// as desired by `resource_multiplier`.
///
/// `spread_factor` is too large if `(spread_factor + 1) * resource_multiplier >= 1`.
spread_factor: f64,
}
impl Default for FileCacheConfig {
fn default() -> Self {
Self {
resource_multiplier: 0.75,
// 256 MiB - lower than when in memory because overcommitting is safe; if we don't have
// memory, the kernel will just evict from its page cache, rather than e.g. killing
// everything.
min_remaining_after_cache: NonZeroU64::new(256 * MiB).unwrap(),
spread_factor: 0.1,
}
}
}
impl FileCacheConfig {
/// Make sure fields of the config are consistent.
pub fn validate(&self) -> anyhow::Result<()> {
// Single field validity
anyhow::ensure!(
0.0 < self.resource_multiplier && self.resource_multiplier < 1.0,
"resource_multiplier must be between 0.0 and 1.0 exclusive, got {}",
self.resource_multiplier
);
anyhow::ensure!(
self.spread_factor >= 0.0,
"spread_factor must be >= 0, got {}",
self.spread_factor
);
// Check that `resource_multiplier` and `spread_factor` are valid w.r.t. each other.
//
// As shown in `calculate_cache_size`, we have two lines resulting from `resource_multiplier` and
// `spread_factor`, respectively. They are:
//
// `total` `min_remaining_after_cache`
// size = ————————————————————— - —————————————————————————————
// `spread_factor` + 1 `spread_factor` + 1
//
// and
//
// size = `resource_multiplier` × total
//
// .. where `total` is the total resources. These are isomorphic to the typical 'y = mx + b'
// form, with y = "size" and x = "total".
//
// These lines intersect at:
//
// `min_remaining_after_cache`
// ———————————————————————————————————————————————————
// 1 - `resource_multiplier` × (`spread_factor` + 1)
//
// We want to ensure that this value (a) exists, and (b) is >= `min_remaining_after_cache`. This is
// guaranteed when '`resource_multiplier` × (`spread_factor` + 1)' is less than 1.
// (We also need it to be >= 0, but that's already guaranteed.)
let intersect_factor = self.resource_multiplier * (self.spread_factor + 1.0);
anyhow::ensure!(
intersect_factor < 1.0,
"incompatible resource_multipler and spread_factor"
);
Ok(())
}
/// Calculate the desired size of the cache, given the total memory
pub fn calculate_cache_size(&self, total: u64) -> u64 {
// *Note*: all units are in bytes, until the very last line.
let available = total.saturating_sub(self.min_remaining_after_cache.get());
if available == 0 {
return 0;
}
// Conversions to ensure we don't overflow from floating-point ops
let size_from_spread =
i64::max(0, (available as f64 / (1.0 + self.spread_factor)) as i64) as u64;
let size_from_normal = (total as f64 * self.resource_multiplier) as u64;
let byte_size = u64::min(size_from_spread, size_from_normal);
// The file cache operates in units of mebibytes, so the sizes we produce should
// be rounded to a mebibyte. We round down to be conservative.
byte_size / MiB * MiB
}
}
impl FileCacheState {
/// Connect to the file cache.
#[tracing::instrument(skip_all, fields(%conn_str, ?config))]
pub async fn new(
conn_str: &str,
config: FileCacheConfig,
token: CancellationToken,
) -> anyhow::Result<Self> {
config.validate().context("file cache config is invalid")?;
info!(conn_str, "connecting to Postgres file cache");
let client = FileCacheState::connect(conn_str, token.clone())
.await
.context("failed to connect to postgres file cache")?;
let conn_str = conn_str.to_string();
Ok(Self {
client,
config,
conn_str,
token,
})
}
/// Connect to Postgres.
///
/// Aborts the spawned thread if the kill signal is received. This is not
/// a method as it is called in [`FileCacheState::new`].
#[tracing::instrument(skip_all, fields(%conn_str))]
async fn connect(conn_str: &str, token: CancellationToken) -> anyhow::Result<Client> {
let (client, conn) = tokio_postgres::connect(conn_str, NoTls)
.await
.context("failed to connect to pg client")?;
// The connection object performs the actual communication with the database,
// so spawn it off to run on its own. See tokio-postgres docs.
crate::spawn_with_cancel(
token,
|res| {
if let Err(e) = res {
error!(error = format_args!("{e:#}"), "postgres error");
}
},
conn,
);
Ok(client)
}
/// Execute a query with a retry if necessary.
///
/// If the initial query fails, we restart the database connection and attempt
/// if again.
#[tracing::instrument(skip_all, fields(%statement))]
pub async fn query_with_retry(
&mut self,
statement: &str,
params: &[&(dyn ToSql + Sync)],
) -> anyhow::Result<Vec<Row>> {
match self
.client
.query(statement, params)
.await
.context("failed to execute query")
{
Ok(rows) => Ok(rows),
Err(e) => {
error!(error = format_args!("{e:#}"), "postgres error -> retrying");
let client = FileCacheState::connect(&self.conn_str, self.token.clone())
.await
.context("failed to connect to postgres file cache")?;
info!("successfully reconnected to postgres client");
// Replace the old client and attempt the query with the new one
self.client = client;
self.client
.query(statement, params)
.await
.context("failed to execute query a second time")
}
}
}
/// Get the current size of the file cache.
#[tracing::instrument(skip_all)]
pub async fn get_file_cache_size(&mut self) -> anyhow::Result<u64> {
self.query_with_retry(
// The file cache GUC variable is in MiB, but the conversion with
// pg_size_bytes means that the end result we get is in bytes.
"SELECT pg_size_bytes(current_setting('neon.file_cache_size_limit'));",
&[],
)
.await
.context("failed to query pg for file cache size")?
.first()
.ok_or_else(|| anyhow!("file cache size query returned no rows"))?
// pg_size_bytes returns a bigint which is the same as an i64.
.try_get::<_, i64>(0)
// Since the size of the table is not negative, the cast is sound.
.map(|bytes| bytes as u64)
.context("failed to extract file cache size from query result")
}
/// Attempt to set the file cache size, returning the size it was actually
/// set to.
#[tracing::instrument(skip_all, fields(%num_bytes))]
pub async fn set_file_cache_size(&mut self, num_bytes: u64) -> anyhow::Result<u64> {
let max_bytes = self
// The file cache GUC variable is in MiB, but the conversion with pg_size_bytes
// means that the end result we get is in bytes.
.query_with_retry(
"SELECT pg_size_bytes(current_setting('neon.max_file_cache_size'));",
&[],
)
.await
.context("failed to query pg for max file cache size")?
.first()
.ok_or_else(|| anyhow!("max file cache size query returned no rows"))?
.try_get::<_, i64>(0)
.map(|bytes| bytes as u64)
.context("failed to extract max file cache size from query result")?;
let max_mb = max_bytes / MiB;
let num_mb = u64::min(num_bytes, max_bytes) / MiB;
let capped = if num_bytes > max_bytes {
" (capped by maximum size)"
} else {
""
};
info!(
size = num_mb,
max = max_mb,
"updating file cache size {capped}",
);
// note: even though the normal ways to get the cache size produce values with trailing "MB"
// (hence why we call pg_size_bytes in `get_file_cache_size`'s query), the format
// it expects to set the value is "integer number of MB" without trailing units.
// For some reason, this *really* wasn't working with normal arguments, so that's
// why we're constructing the query here.
self.client
.query(
&format!("ALTER SYSTEM SET neon.file_cache_size_limit = {num_mb};"),
&[],
)
.await
.context("failed to change file cache size limit")?;
// must use pg_reload_conf to have the settings change take effect
self.client
.execute("SELECT pg_reload_conf();", &[])
.await
.context("failed to reload config")?;
Ok(num_mb * MiB)
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/vm_monitor/src/lib.rs | libs/vm_monitor/src/lib.rs | #![deny(unsafe_code)]
#![deny(clippy::undocumented_unsafe_blocks)]
#![cfg(target_os = "linux")]
use std::fmt::Debug;
use std::net::SocketAddr;
use std::time::Duration;
use anyhow::Context;
use axum::Router;
use axum::extract::ws::WebSocket;
use axum::extract::{State, WebSocketUpgrade};
use axum::response::Response;
use axum::routing::get;
use clap::Parser;
use futures::Future;
use runner::Runner;
use sysinfo::{RefreshKind, System, SystemExt};
use tokio::net::TcpListener;
use tokio::sync::broadcast;
use tokio::task::JoinHandle;
use tokio_util::sync::CancellationToken;
use tracing::{error, info};
// Code that interfaces with agent
pub mod dispatcher;
pub mod protocol;
pub mod cgroup;
pub mod filecache;
pub mod runner;
/// The vm-monitor is an autoscaling component started by compute_ctl.
///
/// It carries out autoscaling decisions (upscaling/downscaling) and responds to
/// memory pressure by making requests to the autoscaler-agent.
#[derive(Debug, Parser)]
pub struct Args {
/// The name of the cgroup we should monitor for memory.high events. This
/// is the cgroup that postgres should be running in.
#[arg(short, long)]
pub cgroup: Option<String>,
/// The connection string for the Postgres file cache we should manage.
#[arg(short, long)]
pub pgconnstr: Option<String>,
/// The address we should listen on for connection requests. For the
/// agent, this is 0.0.0.0:10301. For the informant, this is 127.0.0.1:10369.
#[arg(short, long)]
pub addr: String,
}
impl Args {
pub fn addr(&self) -> &str {
&self.addr
}
}
/// The number of bytes in one mebibyte.
#[allow(non_upper_case_globals)]
const MiB: u64 = 1 << 20;
/// Convert a quantity in bytes to a quantity in mebibytes, generally for display
/// purposes. (Most calculations in this crate use bytes directly)
pub fn bytes_to_mebibytes(bytes: u64) -> f32 {
(bytes as f32) / (MiB as f32)
}
pub fn get_total_system_memory() -> u64 {
System::new_with_specifics(RefreshKind::new().with_memory()).total_memory()
}
/// Global app state for the Axum server
#[derive(Debug, Clone)]
pub struct ServerState {
/// Used to close old connections.
///
/// When a new connection is made, we send a message signalling to the old
/// connection to close.
pub sender: broadcast::Sender<()>,
/// Used to cancel all spawned threads in the monitor.
pub token: CancellationToken,
// The CLI args
pub args: &'static Args,
}
/// Spawn a thread that may get cancelled by the provided [`CancellationToken`].
///
/// This is mainly meant to be called with futures that will be pending for a very
/// long time, or are not mean to return. If it is not desirable for the future to
/// ever resolve, such as in the case of [`cgroup::CgroupWatcher::watch`], the error can
/// be logged with `f`.
pub fn spawn_with_cancel<T, F>(
token: CancellationToken,
f: F,
future: T,
) -> JoinHandle<Option<T::Output>>
where
T: Future + Send + 'static,
T::Output: Send + 'static,
F: FnOnce(&T::Output) + Send + 'static,
{
tokio::spawn(async move {
tokio::select! {
_ = token.cancelled() => {
info!("received global kill signal");
None
}
res = future => {
f(&res);
Some(res)
}
}
})
}
/// The entrypoint to the binary.
///
/// Set up tracing, parse arguments, and start an http server.
pub async fn start(args: &'static Args, token: CancellationToken) -> anyhow::Result<()> {
// This channel is used to close old connections. When a new connection is
// made, we send a message signalling to the old connection to close.
let (sender, _) = tokio::sync::broadcast::channel::<()>(1);
let app = Router::new()
// This route gets upgraded to a websocket connection. We only support
// one connection at a time, which we enforce by killing old connections
// when we receive a new one.
.route("/monitor", get(ws_handler))
.with_state(ServerState {
sender,
token,
args,
});
let addr_str = args.addr();
let addr: SocketAddr = addr_str.parse().expect("parsing address should not fail");
let listener = TcpListener::bind(&addr)
.await
.with_context(|| format!("failed to bind to {addr}"))?;
info!(addr_str, "server bound");
axum::serve(listener, app.into_make_service())
.await
.context("server exited")?;
Ok(())
}
/// Handles incoming websocket connections.
///
/// If we are already to connected to an agent, we kill that old connection
/// and accept the new one.
#[tracing::instrument(name = "/monitor", skip_all, fields(?args))]
pub async fn ws_handler(
ws: WebSocketUpgrade,
State(ServerState {
sender,
token,
args,
}): State<ServerState>,
) -> Response {
// Kill the old monitor
info!("closing old connection if there is one");
let _ = sender.send(());
// Start the new one. Wow, the cycle of death and rebirth
let closer = sender.subscribe();
ws.on_upgrade(|ws| start_monitor(ws, args, closer, token))
}
/// Starts the monitor. If startup fails or the monitor exits, an error will
/// be logged and our internal state will be reset to allow for new connections.
#[tracing::instrument(skip_all)]
async fn start_monitor(
ws: WebSocket,
args: &Args,
kill: broadcast::Receiver<()>,
token: CancellationToken,
) {
info!(
?args,
"accepted new websocket connection -> starting monitor"
);
let timeout = Duration::from_secs(4);
let monitor = tokio::time::timeout(
timeout,
Runner::new(Default::default(), args, ws, kill, token),
)
.await;
let mut monitor = match monitor {
Ok(Ok(monitor)) => monitor,
Ok(Err(e)) => {
error!(error = format_args!("{e:#}"), "failed to create monitor");
return;
}
Err(_) => {
error!(?timeout, "creating monitor timed out");
return;
}
};
info!("connected to agent");
match monitor.run().await {
Ok(()) => info!("monitor was killed due to new connection"),
Err(e) => error!(
error = format_args!("{e:#}"),
"monitor terminated unexpectedly"
),
}
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/vm_monitor/src/runner.rs | libs/vm_monitor/src/runner.rs | //! Exposes the `Runner`, which handles messages received from agent and
//! sends upscale requests.
//!
//! This is the "Monitor" part of the monitor binary and is the main entrypoint for
//! all functionality.
use std::fmt::Debug;
use std::time::{Duration, Instant};
use anyhow::{Context, bail};
use axum::extract::ws::{Message, WebSocket};
use futures::StreamExt;
use tokio::sync::{broadcast, watch};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn};
use crate::cgroup::{self, CgroupWatcher};
use crate::dispatcher::Dispatcher;
use crate::filecache::{FileCacheConfig, FileCacheState};
use crate::protocol::{InboundMsg, InboundMsgKind, OutboundMsg, OutboundMsgKind, Resources};
use crate::{Args, MiB, bytes_to_mebibytes, get_total_system_memory, spawn_with_cancel};
/// Central struct that interacts with agent, dispatcher, and cgroup to handle
/// signals from the agent.
#[derive(Debug)]
pub struct Runner {
config: Config,
filecache: Option<FileCacheState>,
cgroup: Option<CgroupState>,
dispatcher: Dispatcher,
/// We "mint" new message ids by incrementing this counter and taking the value.
///
/// **Note**: This counter is always odd, so that we avoid collisions between the IDs generated
/// by us vs the autoscaler-agent.
counter: usize,
last_upscale_request_at: Option<Instant>,
/// A signal to kill the main thread produced by `self.run()`. This is triggered
/// when the server receives a new connection. When the thread receives the
/// signal off this channel, it will gracefully shutdown.
kill: broadcast::Receiver<()>,
}
#[derive(Debug)]
struct CgroupState {
watcher: watch::Receiver<(Instant, cgroup::MemoryHistory)>,
/// If [`cgroup::MemoryHistory::avg_non_reclaimable`] exceeds `threshold`, we send upscale
/// requests.
threshold: u64,
}
/// Configuration for a `Runner`
#[derive(Debug)]
pub struct Config {
    /// `sys_buffer_bytes` gives the estimated amount of memory, in bytes, that the kernel uses before
    /// handing out the rest to userspace. This value is the estimated difference between the
    /// *actual* physical memory and the amount reported by `grep MemTotal /proc/meminfo`.
    ///
    /// For more information, refer to `man 5 proc`, which defines MemTotal as "Total usable RAM
    /// (i.e., physical RAM minus a few reserved bits and the kernel binary code)".
    ///
    /// We only use `sys_buffer_bytes` when calculating the system memory from the *external* memory
    /// size, rather than the self-reported memory size, according to the kernel.
    ///
    /// TODO: this field is only necessary while we still have to trust the autoscaler-agent's
    /// upscale resource amounts (because we might not *actually* have been upscaled yet). This field
    /// should be removed once we have a better solution there.
    sys_buffer_bytes: u64,
    /// Minimum fraction of total system memory reserved *before* the cgroup threshold; in
    /// other words, providing a ceiling for the highest value of the threshold by enforcing that
    /// there's at least `cgroup_min_overhead_fraction` of the total memory remaining beyond the
    /// threshold.
    ///
    /// For example, a value of `0.1` means that 10% of total memory must remain after exceeding
    /// the threshold, so the value of the cgroup threshold would always be capped at 90% of total
    /// memory.
    ///
    /// The default value of `0.15` means that we *guarantee* sending upscale requests if the
    /// cgroup is using more than 85% of total memory.
    cgroup_min_overhead_fraction: f64,
    /// Extra headroom required between current non-reclaimable usage and the would-be
    /// new cgroup threshold when deciding whether a downscale is safe; if
    /// `new_threshold < current_usage + this buffer`, the downscale is denied
    /// (see `Runner::try_downscale`).
    cgroup_downscale_threshold_buffer_bytes: u64,
}
impl Default for Config {
fn default() -> Self {
Self {
sys_buffer_bytes: 100 * MiB,
cgroup_min_overhead_fraction: 0.15,
cgroup_downscale_threshold_buffer_bytes: 100 * MiB,
}
}
}
impl Config {
    /// Compute the cgroup memory threshold for a given total memory size.
    ///
    /// We want the threshold to be met gracefully instead of letting postgres get
    /// OOM-killed (or if there's room, spilling to swap), so we guarantee that at
    /// least `cgroup_min_overhead_fraction` of total memory remains above the
    /// returned threshold.
    fn cgroup_threshold(&self, total_mem: u64) -> u64 {
        let usable_fraction = 1.0 - self.cgroup_min_overhead_fraction;
        (total_mem as f64 * usable_fraction) as u64
    }
}
impl Runner {
    /// Create a new monitor.
    ///
    /// Sets up the dispatcher over the provided websocket, then optionally
    /// initializes and sizes the file cache (when `args.pgconnstr` is set) and
    /// starts the cgroup memory watcher with an initial threshold (when
    /// `args.cgroup` is set).
    #[tracing::instrument(skip_all, fields(?config, ?args))]
    pub async fn new(
        config: Config,
        args: &Args,
        ws: WebSocket,
        kill: broadcast::Receiver<()>,
        token: CancellationToken,
    ) -> anyhow::Result<Runner> {
        anyhow::ensure!(
            config.sys_buffer_bytes != 0,
            "invalid monitor Config: sys_buffer_bytes cannot be 0"
        );
        let dispatcher = Dispatcher::new(ws)
            .await
            .context("error creating new dispatcher")?;
        let mut state = Runner {
            config,
            filecache: None,
            cgroup: None,
            dispatcher,
            counter: 1, // NB: must be odd, see the comment about the field for more.
            last_upscale_request_at: None,
            kill,
        };
        let mem = get_total_system_memory();
        if let Some(connstr) = &args.pgconnstr {
            info!("initializing file cache");
            let config = FileCacheConfig::default();
            let mut file_cache = FileCacheState::new(connstr, config, token.clone())
                .await
                .context("failed to create file cache")?;
            let size = file_cache
                .get_file_cache_size()
                .await
                .context("error getting file cache size")?;
            let new_size = file_cache.config.calculate_cache_size(mem);
            info!(
                initial = bytes_to_mebibytes(size),
                new = bytes_to_mebibytes(new_size),
                "setting initial file cache size",
            );
            // note: even if size == new_size, we want to explicitly set it, just
            // to make sure that we have the permissions to do so
            let actual_size = file_cache
                .set_file_cache_size(new_size)
                .await
                .context("failed to set file cache size, possibly due to inadequate permissions")?;
            if actual_size != new_size {
                info!("file cache size actually got set to {actual_size}")
            }
            state.filecache = Some(file_cache);
        }
        if let Some(name) = &args.cgroup {
            // Best not to set up cgroup stuff more than once, so we'll initialize cgroup state
            // now, and then set limits later.
            info!("initializing cgroup");
            let cgroup =
                CgroupWatcher::new(name.clone()).context("failed to create cgroup manager")?;
            // Seed the watch channel with an empty history; real samples arrive as
            // the spawned task polls the cgroup.
            let init_value = cgroup::MemoryHistory {
                avg_non_reclaimable: 0,
                samples_count: 0,
                samples_span: Duration::ZERO,
            };
            let (hist_tx, hist_rx) = watch::channel((Instant::now(), init_value));
            // The watcher is expected to run until cancellation; log if it exits.
            spawn_with_cancel(token, |_| error!("cgroup watcher terminated"), async move {
                cgroup.watch(hist_tx).await
            });
            let threshold = state.config.cgroup_threshold(mem);
            info!(threshold, "set initial cgroup threshold",);
            state.cgroup = Some(CgroupState {
                watcher: hist_rx,
                threshold,
            });
        }
        Ok(state)
    }
    /// Attempt to downscale filecache + cgroup
    ///
    /// Returns `(true, status)` when the downscale was applied, or `(false, reason)`
    /// when it was denied (not enough samples yet, or memory usage too close to the
    /// would-be new threshold). Errors if cgroup stats are too stale to decide.
    #[tracing::instrument(skip_all, fields(?target))]
    pub async fn try_downscale(&mut self, target: Resources) -> anyhow::Result<(bool, String)> {
        // Nothing to adjust
        if self.cgroup.is_none() && self.filecache.is_none() {
            info!("no action needed for downscale (no cgroup or file cache enabled)");
            return Ok((
                true,
                "monitor is not managing cgroup or file cache".to_string(),
            ));
        }
        let requested_mem = target.mem;
        let usable_system_memory = requested_mem.saturating_sub(self.config.sys_buffer_bytes);
        let expected_file_cache_size = self
            .filecache
            .as_ref()
            .map(|file_cache| file_cache.config.calculate_cache_size(usable_system_memory))
            .unwrap_or(0);
        if let Some(cgroup) = &self.cgroup {
            let (last_time, last_history) = *cgroup.watcher.borrow();
            // NB: The ordering of these conditions is intentional. During startup, we should deny
            // downscaling until we have enough information to determine that it's safe to do so
            // (i.e. enough samples have come in). But if it's been a while and we *still* haven't
            // received any information, we should *fail* instead of just denying downscaling.
            //
            // `last_time` is set to `Instant::now()` on startup, so checking `last_time.elapsed()`
            // serves double-duty: it trips if we haven't received *any* metrics for long enough,
            // OR if we haven't received metrics *recently enough*.
            //
            // TODO: make the duration here configurable.
            if last_time.elapsed() > Duration::from_secs(5) {
                bail!(
                    "haven't gotten cgroup memory stats recently enough to determine downscaling information"
                );
            } else if last_history.samples_count <= 1 {
                let status = "haven't received enough cgroup memory stats yet";
                info!(status, "discontinuing downscale");
                return Ok((false, status.to_owned()));
            }
            // Deny the downscale if the new threshold wouldn't leave a comfortable
            // buffer above current non-reclaimable usage.
            let new_threshold = self.config.cgroup_threshold(usable_system_memory);
            let current = last_history.avg_non_reclaimable;
            if new_threshold < current + self.config.cgroup_downscale_threshold_buffer_bytes {
                let status = format!(
                    "{}: {} MiB (new threshold) < {} (current usage) + {} (downscale buffer)",
                    "calculated memory threshold too low",
                    bytes_to_mebibytes(new_threshold),
                    bytes_to_mebibytes(current),
                    bytes_to_mebibytes(self.config.cgroup_downscale_threshold_buffer_bytes)
                );
                info!(status, "discontinuing downscale");
                return Ok((false, status));
            }
        }
        // The downscaling has been approved. Downscale the file cache, then the cgroup.
        let mut status = vec![];
        if let Some(file_cache) = &mut self.filecache {
            let actual_usage = file_cache
                .set_file_cache_size(expected_file_cache_size)
                .await
                .context("failed to set file cache size")?;
            let message = format!(
                "set file cache size to {} MiB",
                bytes_to_mebibytes(actual_usage),
            );
            info!("downscale: {message}");
            status.push(message);
        }
        if let Some(cgroup) = &mut self.cgroup {
            let new_threshold = self.config.cgroup_threshold(usable_system_memory);
            let message = format!(
                "set cgroup memory threshold from {} MiB to {} MiB, of new total {} MiB",
                bytes_to_mebibytes(cgroup.threshold),
                bytes_to_mebibytes(new_threshold),
                bytes_to_mebibytes(usable_system_memory)
            );
            cgroup.threshold = new_threshold;
            info!("downscale: {message}");
            status.push(message);
        }
        // TODO: make this status thing less jank
        let status = status.join("; ");
        Ok((true, status))
    }
    /// Handle new resources
    ///
    /// Resizes the file cache and raises the cgroup threshold to match the new
    /// (post-upscale) memory size.
    #[tracing::instrument(skip_all, fields(?resources))]
    pub async fn handle_upscale(&mut self, resources: Resources) -> anyhow::Result<()> {
        if self.filecache.is_none() && self.cgroup.is_none() {
            info!("no action needed for upscale (no cgroup or file cache enabled)");
            return Ok(());
        }
        let new_mem = resources.mem;
        let usable_system_memory = new_mem.saturating_sub(self.config.sys_buffer_bytes);
        if let Some(file_cache) = &mut self.filecache {
            let expected_usage = file_cache.config.calculate_cache_size(usable_system_memory);
            info!(
                target = bytes_to_mebibytes(expected_usage),
                total = bytes_to_mebibytes(new_mem),
                "updating file cache size",
            );
            let actual_usage = file_cache
                .set_file_cache_size(expected_usage)
                .await
                .context("failed to set file cache size")?;
            if actual_usage != expected_usage {
                warn!(
                    "file cache was set to a different size that we wanted: target = {} Mib, actual= {} Mib",
                    bytes_to_mebibytes(expected_usage),
                    bytes_to_mebibytes(actual_usage)
                )
            }
        }
        if let Some(cgroup) = &mut self.cgroup {
            let new_threshold = self.config.cgroup_threshold(usable_system_memory);
            info!(
                "set cgroup memory threshold from {} MiB to {} MiB of new total {} MiB",
                bytes_to_mebibytes(cgroup.threshold),
                bytes_to_mebibytes(new_threshold),
                bytes_to_mebibytes(usable_system_memory)
            );
            cgroup.threshold = new_threshold;
        }
        Ok(())
    }
    /// Take in a message and perform some action, such as downscaling or upscaling,
    /// and return a message to be sent back.
    ///
    /// Returns `Ok(None)` for notifications that require no reply.
    #[tracing::instrument(skip_all, fields(%id, message = ?inner))]
    pub async fn process_message(
        &mut self,
        InboundMsg { inner, id }: InboundMsg,
    ) -> anyhow::Result<Option<OutboundMsg>> {
        match inner {
            InboundMsgKind::UpscaleNotification { granted } => {
                self.handle_upscale(granted)
                    .await
                    .context("failed to handle upscale")?;
                Ok(Some(OutboundMsg::new(
                    OutboundMsgKind::UpscaleConfirmation {},
                    id,
                )))
            }
            InboundMsgKind::DownscaleRequest { target } => self
                .try_downscale(target)
                .await
                .context("failed to downscale")
                .map(|(ok, status)| {
                    Some(OutboundMsg::new(
                        OutboundMsgKind::DownscaleResult { ok, status },
                        id,
                    ))
                }),
            InboundMsgKind::InvalidMessage { error } => {
                warn!(
                    error = format_args!("{error:#}"),
                    id, "received notification of an invalid message we sent"
                );
                Ok(None)
            }
            InboundMsgKind::InternalError { error } => {
                warn!(
                    error = format_args!("{error:#}"),
                    id, "agent experienced an internal error"
                );
                Ok(None)
            }
            InboundMsgKind::HealthCheck {} => {
                Ok(Some(OutboundMsg::new(OutboundMsgKind::HealthCheck {}, id)))
            }
        }
    }
    // TODO: don't propagate errors, probably just warn!?
    /// Main event loop: reacts to the kill signal, new cgroup memory stats (possibly
    /// requesting upscale), and incoming agent messages (dispatching replies).
    #[tracing::instrument(skip_all)]
    pub async fn run(&mut self) -> anyhow::Result<()> {
        info!("starting dispatcher");
        loop {
            tokio::select! {
                signal = self.kill.recv() => {
                    match signal {
                        Ok(()) => return Ok(()),
                        Err(e) => bail!("failed to receive kill signal: {e}")
                    }
                }
                // New memory stats from the cgroup, *may* need to request upscaling, if we've
                // exceeded the threshold
                //
                // NOTE(review): tokio's `select!` docs state that a branch's async expression
                // is evaluated even when its `if` precondition is false (only the resulting
                // future isn't polled), so the `unwrap()` below looks like it would panic
                // when `self.cgroup` is `None` — TODO: confirm and guard if so.
                result = self.cgroup.as_mut().unwrap().watcher.changed(), if self.cgroup.is_some() => {
                    result.context("failed to receive from cgroup memory stats watcher")?;
                    let cgroup = self.cgroup.as_ref().unwrap();
                    let (_time, cgroup_mem_stat) = *cgroup.watcher.borrow();
                    // If we haven't exceeded the threshold, then we're all ok
                    if cgroup_mem_stat.avg_non_reclaimable < cgroup.threshold {
                        continue;
                    }
                    // Otherwise, we generally want upscaling. But, if it's been less than 1 second
                    // since the last time we requested upscaling, ignore the event, to avoid
                    // spamming the agent.
                    if let Some(t) = self.last_upscale_request_at {
                        let elapsed = t.elapsed();
                        if elapsed < Duration::from_secs(1) {
                            // *Ideally* we'd like to log here that we're ignoring the fact the
                            // memory stats are too high, but in practice this can result in
                            // spamming the logs with repetitive messages about ignoring the signal
                            //
                            // See https://github.com/neondatabase/neon/issues/5865 for more.
                            continue;
                        }
                    }
                    self.last_upscale_request_at = Some(Instant::now());
                    info!(
                        avg_non_reclaimable = bytes_to_mebibytes(cgroup_mem_stat.avg_non_reclaimable),
                        threshold = bytes_to_mebibytes(cgroup.threshold),
                        "cgroup memory stats are high enough to upscale, requesting upscale",
                    );
                    self.counter += 2; // Increment, preserving parity (i.e. keep the
                                       // counter odd). See the field comment for more.
                    self.dispatcher
                        .send(OutboundMsg::new(OutboundMsgKind::UpscaleRequest {}, self.counter))
                        .await
                        .context("failed to send message")?;
                },
                // there is a message from the agent
                msg = self.dispatcher.source.next() => {
                    if let Some(msg) = msg {
                        match &msg {
                            Ok(msg) => {
                                let message: InboundMsg = match msg {
                                    Message::Text(text) => {
                                        serde_json::from_str(text).context("failed to deserialize text message")?
                                    }
                                    other => {
                                        warn!(
                                            // Don't use 'message' as a key as the
                                            // string also uses that for its key
                                            msg = ?other,
                                            "problem processing incoming message: agent should only send text messages but received different type"
                                        );
                                        continue
                                    },
                                };
                                // Health checks are periodic and noisy; log them at debug only.
                                if matches!(&message.inner, InboundMsgKind::HealthCheck { .. }) {
                                    debug!(?msg, "received message");
                                } else {
                                    info!(?msg, "received message");
                                }
                                let out = match self.process_message(message.clone()).await {
                                    Ok(Some(out)) => out,
                                    Ok(None) => continue,
                                    Err(e) => {
                                        // use {:#} for our logging because the display impl only
                                        // gives the outermost cause, and the debug impl
                                        // pretty-prints the error, whereas {:#} contains all the
                                        // causes, but is compact (no newlines).
                                        warn!(error = format_args!("{e:#}"), "error handling message");
                                        OutboundMsg::new(
                                            OutboundMsgKind::InternalError {
                                                error: e.to_string(),
                                            },
                                            message.id
                                        )
                                    }
                                };
                                self.dispatcher
                                    .send(out)
                                    .await
                                    .context("failed to send message")?;
                            }
                            Err(e) => warn!(
                                error = format_args!("{e:#}"),
                                msg = ?msg,
                                "received error message"
                            ),
                        }
                    } else {
                        // Stream ended: the agent hung up on us.
                        anyhow::bail!("dispatcher connection closed")
                    }
                }
            }
        }
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/vm_monitor/src/dispatcher.rs | libs/vm_monitor/src/dispatcher.rs | //! Managing the websocket connection and other signals in the monitor.
//!
//! Contains types that manage the interaction (not data interchange, see `protocol`)
//! between agent and monitor, allowing us to process and send messages in a
//! straightforward way. The dispatcher also manages the signals that come from
//! the cgroup (requesting upscale), and the signals that go to the cgroup
//! (notifying it of upscale).
use anyhow::{Context, bail};
use axum::extract::ws::{Message, Utf8Bytes, WebSocket};
use futures::stream::{SplitSink, SplitStream};
use futures::{SinkExt, StreamExt};
use tracing::{debug, info};
use crate::protocol::{
OutboundMsg, OutboundMsgKind, PROTOCOL_MAX_VERSION, PROTOCOL_MIN_VERSION, ProtocolRange,
ProtocolResponse, ProtocolVersion,
};
/// The central handler for all communications in the monitor.
///
/// The dispatcher has two purposes:
/// 1. Manage the connection to the agent, sending and receiving messages.
/// 2. Communicate with the cgroup manager, notifying it when upscale is received,
///    and sending a message to the agent when the cgroup manager requests
///    upscale.
#[derive(Debug)]
pub struct Dispatcher {
    /// We read agent messages off of `source`
    pub(crate) source: SplitStream<WebSocket>,
    /// We send messages to the agent through `sink`
    sink: SplitSink<WebSocket, Message>,
    /// The protocol version we have agreed to use with the agent. This is negotiated
    /// during the creation of the dispatcher, and should be the highest shared protocol
    /// version.
    ///
    // NOTE: currently unused, but will almost certainly be used in the future
    // as the protocol changes
    #[allow(unused)]
    pub(crate) proto_version: ProtocolVersion,
}
impl Dispatcher {
    /// Creates a new dispatcher using the passed-in connection.
    ///
    /// Performs a negotiation with the agent to determine the highest protocol
    /// version that both support. This consists of two steps:
    /// 1. Wait for the agent to send the range of protocols it supports.
    /// 2. Send a protocol version that works for us as well, or an error if there
    ///    is no compatible version.
    pub async fn new(stream: WebSocket) -> anyhow::Result<Self> {
        let (mut sink, mut source) = stream.split();
        // Figure out the highest protocol version we both support
        info!("waiting for agent to send protocol version range");
        let Some(message) = source.next().await else {
            bail!("websocket connection closed while performing protocol handshake")
        };
        let message = message.context("failed to read protocol version range off connection")?;
        let Message::Text(message_text) = message else {
            // All messages should be in text form, since we don't do any
            // pinging/ponging. See nhooyr/websocket's implementation and the
            // agent for more info
            // (typo fix: previously said "proocol handshake")
            bail!("received non-text message during protocol handshake: {message:?}")
        };
        let monitor_range = ProtocolRange {
            min: PROTOCOL_MIN_VERSION,
            max: PROTOCOL_MAX_VERSION,
        };
        let agent_range: ProtocolRange = serde_json::from_str(&message_text)
            .context("failed to deserialize protocol version range")?;
        info!(range = ?agent_range, "received protocol version range");
        let highest_shared_version = match monitor_range.highest_shared_version(&agent_range) {
            Ok(version) => {
                // Success: tell the agent which version we'll both be speaking.
                sink.send(Message::Text(Utf8Bytes::from(
                    serde_json::to_string(&ProtocolResponse::Version(version)).unwrap(),
                )))
                .await
                .context("failed to notify agent of negotiated protocol version")?;
                version
            }
            Err(e) => {
                // No overlap: report the failure to the agent before bailing out.
                sink.send(Message::Text(Utf8Bytes::from(
                    serde_json::to_string(&ProtocolResponse::Error(format!(
                        "Received protocol version range {agent_range} which does not overlap with {monitor_range}"
                    )))
                    .unwrap(),
                )))
                .await
                .context("failed to notify agent of no overlap between protocol version ranges")?;
                Err(e).context("error determining suitable protocol version range")?
            }
        };
        Ok(Self {
            sink,
            source,
            proto_version: highest_shared_version,
        })
    }
    /// Send a message to the agent.
    ///
    /// Although this function is small, it has one major benefit: it is the only
    /// way to send data across the connection, and you can only pass in a proper
    /// `MonitorMessage`. Without safeguards like this, it's easy to accidentally
    /// serialize the wrong thing and send it, since `self.sink.send` will take
    /// any string.
    pub async fn send(&mut self, message: OutboundMsg) -> anyhow::Result<()> {
        // Health checks are periodic and noisy, so log them at debug level only.
        if matches!(&message.inner, OutboundMsgKind::HealthCheck { .. }) {
            debug!(?message, "sending message");
        } else {
            info!(?message, "sending message");
        }
        let json = serde_json::to_string(&message).context("failed to serialize message")?;
        self.sink
            .send(Message::Text(Utf8Bytes::from(json)))
            .await
            .context("stream error sending message")
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/vm_monitor/src/cgroup.rs | libs/vm_monitor/src/cgroup.rs | use std::fmt::{self, Debug, Formatter};
use std::time::{Duration, Instant};
use anyhow::{Context, anyhow};
use cgroups_rs::Subsystem;
use cgroups_rs::hierarchies::{self, is_cgroup2_unified_mode};
use cgroups_rs::memory::MemController;
use tokio::sync::watch;
use tracing::{info, warn};
/// Configuration for a `CgroupWatcher`
#[derive(Debug, Clone)]
pub struct Config {
    /// Interval at which we should be fetching memory statistics
    memory_poll_interval: Duration,
    /// The number of samples used in constructing aggregated memory statistics
    memory_history_len: usize,
    /// The number of most recent samples that will be periodically logged.
    ///
    /// Each sample is logged exactly once. Increasing this value means that recent samples will be
    /// logged less frequently, and vice versa.
    ///
    /// For simplicity, this value must be greater than or equal to `memory_history_len`.
    memory_history_log_interval: usize,
    /// The longest we will go without logging the memory history, even when usage
    /// hasn't meaningfully changed (see the skip logic in `CgroupWatcher::watch`).
    memory_history_log_noskip_interval: Duration,
}
impl Default for Config {
fn default() -> Self {
Self {
memory_poll_interval: Duration::from_millis(100),
memory_history_len: 5, // use 500ms of history for decision-making
memory_history_log_interval: 20, // but only log every ~2s (otherwise it's spammy)
memory_history_log_noskip_interval: Duration::from_secs(15), // but only if it's changed, or 60 seconds have passed
}
}
}
/// Responds to `MonitorEvents` to manage the cgroup: preventing it from being
/// OOM killed or throttling.
///
/// The `CgroupWatcher` primarily achieves this by reading from a stream of
/// `MonitorEvent`s. See `main_signals_loop` for details on how to keep the
/// cgroup happy.
#[derive(Debug)]
pub struct CgroupWatcher {
    /// Polling/logging cadence; see [`Config`].
    pub config: Config,
    /// The actual cgroup we are watching and managing.
    cgroup: cgroups_rs::Cgroup,
}
impl CgroupWatcher {
    /// Create a new `CgroupWatcher`.
    #[tracing::instrument(skip_all, fields(%name))]
    pub fn new(name: String) -> anyhow::Result<Self> {
        // TODO: clarify exactly why we need v2
        // Make sure cgroups v2 (aka unified) are supported
        if !is_cgroup2_unified_mode() {
            anyhow::bail!("cgroups v2 not supported");
        }
        let cgroup = cgroups_rs::Cgroup::load(hierarchies::auto(), &name);
        Ok(Self {
            cgroup,
            config: Default::default(),
        })
    }
    /// The entrypoint for the `CgroupWatcher`.
    ///
    /// Polls the cgroup's memory statistics on a fixed interval forever, pushing a
    /// rolling summary of recent non-reclaimable usage into `updates` after every
    /// sample, and periodically logging the raw sample history.
    #[tracing::instrument(skip_all)]
    pub async fn watch(
        &self,
        updates: watch::Sender<(Instant, MemoryHistory)>,
    ) -> anyhow::Result<()> {
        // this requirement makes the code a bit easier to work with; see the config for more.
        assert!(self.config.memory_history_len <= self.config.memory_history_log_interval);
        let mut ticker = tokio::time::interval(self.config.memory_poll_interval);
        ticker.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
        // ticker.reset_immediately(); // FIXME: enable this once updating to tokio >= 1.30.0
        let mem_controller = self.memory()?;
        // buffer for samples that will be logged. once full, it remains so.
        let history_log_len = self.config.memory_history_log_interval;
        let max_skip = self.config.memory_history_log_noskip_interval;
        let mut history_log_buf = vec![MemoryStatus::zeroed(); history_log_len];
        let mut last_logged_memusage = MemoryStatus::zeroed();
        // Ensure that we're tracking a value that's definitely in the past, as Instant::now is only guaranteed to be non-decreasing on Rust's T1-supported systems.
        let mut can_skip_logs_until = Instant::now() - max_skip;
        for t in 0_u64.. {
            ticker.tick().await;
            let now = Instant::now();
            let mem = Self::memory_usage(mem_controller);
            // `history_log_buf` is used as a ring buffer; `i` is this sample's slot.
            let i = t as usize % history_log_len;
            history_log_buf[i] = mem;
            // We're taking *at most* memory_history_len values; we may be bounded by the total
            // number of samples that have come in so far.
            let samples_count = (t + 1).min(self.config.memory_history_len as u64) as usize;
            // NB: in `ring_buf_recent_values_iter`, `i` is *inclusive*, which matches the fact
            // that we just inserted a value there, so the end of the iterator will *include* the
            // value at i, rather than stopping just short of it.
            let samples = ring_buf_recent_values_iter(&history_log_buf, i, samples_count);
            let summary = MemoryHistory {
                avg_non_reclaimable: samples.map(|h| h.non_reclaimable).sum::<u64>()
                    / samples_count as u64,
                samples_count,
                samples_span: self.config.memory_poll_interval * (samples_count - 1) as u32,
            };
            // Log the current history if it's time to do so. Because `history_log_buf` has length
            // equal to the logging interval, we can just log the entire buffer every time we set
            // the last entry, which also means that for this log line, we can ignore that it's a
            // ring buffer (because all the entries are in order of increasing time).
            //
            // We skip logging the data if data hasn't meaningfully changed in a while, unless
            // we've already ignored previous iterations for the last max_skip period.
            if i == history_log_len - 1
                && (now > can_skip_logs_until
                    || !history_log_buf
                        .iter()
                        .all(|usage| last_logged_memusage.status_is_close_or_similar(usage)))
            {
                info!(
                    history = ?MemoryStatus::debug_slice(&history_log_buf),
                    summary = ?summary,
                    "Recent cgroup memory statistics history"
                );
                can_skip_logs_until = now + max_skip;
                last_logged_memusage = *history_log_buf.last().unwrap();
            }
            updates
                .send((now, summary))
                .context("failed to send MemoryHistory")?;
        }
        // The loop above iterates over all u64 values; it never finishes in practice.
        unreachable!()
    }
    /// Get a handle on the memory subsystem.
    fn memory(&self) -> anyhow::Result<&MemController> {
        self.cgroup
            .subsystems()
            .iter()
            .find_map(|sub| match sub {
                Subsystem::Mem(c) => Some(c),
                _ => None,
            })
            .ok_or_else(|| anyhow!("could not find memory subsystem"))
    }
    /// Given a handle on the memory subsystem, returns the current memory information
    fn memory_usage(mem_controller: &MemController) -> MemoryStatus {
        let stat = mem_controller.memory_stat().stat;
        MemoryStatus {
            // anonymous memory (active + inactive) is what we treat as non-reclaimable
            non_reclaimable: stat.active_anon + stat.inactive_anon,
        }
    }
}
// Helper function for `CgroupWatcher::watch`: iterates over the `count` most
// recent values of a ring buffer, ending (inclusively) at `last_value_idx`.
fn ring_buf_recent_values_iter<T>(
    buf: &[T],
    last_value_idx: usize,
    count: usize,
) -> impl '_ + Iterator<Item = &T> {
    // Assertion carried over from `CgroupWatcher::watch`; it also guarantees that
    // the start index computed below is a valid slicing point.
    assert!(count <= buf.len());
    // First index of the window: walk `count - 1` steps back (cyclically) from
    // `last_value_idx`. `buf.len()` is pre-added to avoid usize underflow, and the
    // `+ 1` is because `last_value_idx` is inclusive.
    let start = (buf.len() + last_value_idx + 1 - count) % buf.len();
    // Traverse the buffer once starting at `start`, wrapping around the end, and
    // stop after `count` items.
    buf[start..].iter().chain(buf[..start].iter()).take(count)
}
/// Summary of recent memory usage
#[derive(Debug, Copy, Clone)]
pub struct MemoryHistory {
    /// Rolling average of non-reclaimable memory usage samples over the last `history_period`
    pub avg_non_reclaimable: u64,
    /// The number of samples used to construct this summary.
    ///
    /// May be less than `Config::memory_history_len` shortly after startup, while
    /// fewer samples than that have been collected.
    pub samples_count: usize,
    /// Total timespan between the first and last sample used for this summary
    pub samples_span: Duration,
}
/// A single sample of cgroup memory usage.
#[derive(Debug, Copy, Clone)]
pub struct MemoryStatus {
    // bytes of anonymous (active + inactive) memory; see `CgroupWatcher::memory_usage`
    non_reclaimable: u64,
}
impl MemoryStatus {
    /// A `MemoryStatus` with no usage recorded; used to pre-fill history buffers.
    fn zeroed() -> Self {
        MemoryStatus { non_reclaimable: 0 }
    }
    /// Returns an adapter that Debug-formats a slice of `MemoryStatus` compactly,
    /// rendering each `non_reclaimable` value in GiB with 3 decimal places.
    fn debug_slice(slice: &[Self]) -> impl '_ + Debug {
        // Top-level wrapper: formats as `[MemoryStatus] { non_reclaimable[..]: [...] }`.
        struct DS<'a>(&'a [MemoryStatus]);
        impl Debug for DS<'_> {
            fn fmt(&self, f: &mut Formatter) -> fmt::Result {
                f.debug_struct("[MemoryStatus]")
                    .field(
                        "non_reclaimable[..]",
                        &Fields(self.0, |stat: &MemoryStatus| {
                            BytesToGB(stat.non_reclaimable)
                        }),
                    )
                    .finish()
            }
        }
        // Formats each slice element through the provided projection function.
        struct Fields<'a, F>(&'a [MemoryStatus], F);
        impl<F: Fn(&MemoryStatus) -> T, T: Debug> Debug for Fields<'_, F> {
            fn fmt(&self, f: &mut Formatter) -> fmt::Result {
                f.debug_list().entries(self.0.iter().map(&self.1)).finish()
            }
        }
        // Renders a byte count as fractional GiB, e.g. `1.500Gi`.
        struct BytesToGB(u64);
        impl Debug for BytesToGB {
            fn fmt(&self, f: &mut Formatter) -> fmt::Result {
                f.write_fmt(format_args!(
                    "{:.3}Gi",
                    self.0 as f64 / (1_u64 << 30) as f64
                ))
            }
        }
        DS(slice)
    }
    /// Check if the other memory status is a close or similar result.
    /// Returns true if the larger value is not larger than the smaller value
    /// by 1/8 of the smaller value, and the difference is strictly under 128MiB.
    /// See tests::check_similarity_behaviour for examples of behaviour
    fn status_is_close_or_similar(&self, other: &MemoryStatus) -> bool {
        // Symmetric comparison: the margin is 1/8 of the *smaller* of the two values.
        let margin;
        let diff;
        if self.non_reclaimable >= other.non_reclaimable {
            margin = other.non_reclaimable / 8;
            diff = self.non_reclaimable - other.non_reclaimable;
        } else {
            margin = self.non_reclaimable / 8;
            diff = other.non_reclaimable - self.non_reclaimable;
        }
        diff < margin && diff < 128 * 1024 * 1024
    }
}
#[cfg(test)]
mod tests {
    /// Exercises `ring_buf_recent_values_iter` at the boundaries and across the
    /// wraparound point of the ring buffer.
    #[test]
    fn ring_buf_iter() {
        let buf = vec![0_i32, 1, 2, 3, 4, 5, 6, 7, 8, 9];
        // `values(offset, count)` collects the `count` values ending at `offset`.
        let values = |offset, count| {
            super::ring_buf_recent_values_iter(&buf, offset, count)
                .copied()
                .collect::<Vec<i32>>()
        };
        // Boundary conditions: start, end, and entire thing:
        assert_eq!(values(0, 1), [0]);
        assert_eq!(values(3, 4), [0, 1, 2, 3]);
        assert_eq!(values(9, 4), [6, 7, 8, 9]);
        assert_eq!(values(9, 10), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
        // "normal" operation: no wraparound
        assert_eq!(values(7, 4), [4, 5, 6, 7]);
        // wraparound:
        assert_eq!(values(0, 4), [7, 8, 9, 0]);
        assert_eq!(values(1, 4), [8, 9, 0, 1]);
        assert_eq!(values(2, 4), [9, 0, 1, 2]);
        assert_eq!(values(2, 10), [3, 4, 5, 6, 7, 8, 9, 0, 1, 2]);
    }
    /// Documents the behaviour of `MemoryStatus::status_is_close_or_similar`
    /// around the 1/8-margin and 128MiB-difference boundaries.
    #[test]
    fn check_similarity_behaviour() {
        // This all accesses private methods, so we can't actually run this
        // as doctests, because doctests run as an external crate.
        let mut small = super::MemoryStatus {
            non_reclaimable: 1024,
        };
        let mut large = super::MemoryStatus {
            non_reclaimable: 1024 * 1024 * 1024 * 1024,
        };
        // objects are self-similar, no matter the size
        assert!(small.status_is_close_or_similar(&small));
        assert!(large.status_is_close_or_similar(&large));
        // inequality is symmetric
        assert!(!small.status_is_close_or_similar(&large));
        assert!(!large.status_is_close_or_similar(&small));
        small.non_reclaimable = 64;
        large.non_reclaimable = (small.non_reclaimable / 8) * 9;
        // objects are self-similar, no matter the size
        assert!(small.status_is_close_or_similar(&small));
        assert!(large.status_is_close_or_similar(&large));
        // values are similar if the larger value is larger by less than
        // 12.5%, i.e. 1/8 of the smaller value.
        // In the example above, large is exactly 12.5% larger, so this doesn't
        // match.
        assert!(!small.status_is_close_or_similar(&large));
        assert!(!large.status_is_close_or_similar(&small));
        large.non_reclaimable -= 1;
        assert!(large.status_is_close_or_similar(&large));
        assert!(small.status_is_close_or_similar(&large));
        assert!(large.status_is_close_or_similar(&small));
        // The 1/8 rule only applies up to 128MiB of difference
        small.non_reclaimable = 1024 * 1024 * 1024 * 1024;
        large.non_reclaimable = small.non_reclaimable / 8 * 9;
        assert!(small.status_is_close_or_similar(&small));
        assert!(large.status_is_close_or_similar(&large));
        assert!(!small.status_is_close_or_similar(&large));
        assert!(!large.status_is_close_or_similar(&small));
        // the large value is put just above the threshold
        large.non_reclaimable = small.non_reclaimable + 128 * 1024 * 1024;
        assert!(large.status_is_close_or_similar(&large));
        assert!(!small.status_is_close_or_similar(&large));
        assert!(!large.status_is_close_or_similar(&small));
        // now below
        large.non_reclaimable -= 1;
        assert!(large.status_is_close_or_similar(&large));
        assert!(small.status_is_close_or_similar(&large));
        assert!(large.status_is_close_or_similar(&small));
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/vm_monitor/src/protocol.rs | libs/vm_monitor/src/protocol.rs | //! Types representing protocols and actual agent-monitor messages.
//!
//! The pervasive use of serde modifiers throughout this module is to ease
//! serialization on the go side. Because go does not have enums (which model
//! messages well), it is harder to model messages, and we accommodate that with
//! serde.
//!
//! *Note*: the agent sends and receives messages in different ways.
//!
//! The agent serializes messages in the following form and then sends them. The use
//! of `#[serde(tag = "type", content = "content")]` allows us to use `Type`
//! to determine how to deserialize `Content`.
//! ```ignore
//! struct {
//! Content any
//! Type string
//! Id uint64
//! }
//! ```
//! and receives messages in the form:
//! ```ignore
//! struct {
//! {fields embedded}
//! Type string
//! Id uint64
//! }
//! ```
//! After reading the type field, the agent will decode the entire message
//! again, this time into the correct type using the embedded fields.
//! Because the agent cannot just extract the json contained in a certain field
//! (it initially deserializes to `map[string]interface{}`), we keep the fields
//! at the top level, so the entire piece of json can be deserialized into a struct,
//! such as a `DownscaleResult`, with the `Type` and `Id` fields ignored.
use core::fmt;
use std::cmp;
use serde::de::Error;
use serde::{Deserialize, Serialize};
/// A Message we send to the agent.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct OutboundMsg {
    /// The message payload; flattened so its fields appear at the top level of the
    /// serialized JSON (see the module docs for why).
    #[serde(flatten)]
    pub(crate) inner: OutboundMsgKind,
    /// Id pairing this message with its counterpart on the agent side.
    pub(crate) id: usize,
}
impl OutboundMsg {
pub fn new(inner: OutboundMsgKind, id: usize) -> Self {
Self { inner, id }
}
}
/// The different underlying message types we can send to the agent.
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(tag = "type")]
pub enum OutboundMsgKind {
    /// Indicates that the agent sent an invalid message, i.e., we couldn't
    /// properly deserialize it.
    InvalidMessage { error: String },
    /// Indicates that we experienced an internal error while processing a message.
    /// For example, if a cgroup operation fails while trying to handle an upscale,
    /// we return `InternalError`.
    InternalError { error: String },
    /// Returned to the agent once we have finished handling an upscale. If the
    /// handling was unsuccessful, an `InternalError` will get returned instead.
    /// *Note*: this is a struct variant because of the way go serializes struct{}
    UpscaleConfirmation {},
    /// Indicates to the agent that we are urgently requesting resources.
    /// *Note*: this is a struct variant because of the way go serializes struct{}
    UpscaleRequest {},
    /// Returned to the agent once we have finished attempting to downscale. If
    /// an error occurred trying to do so, an `InternalError` will get returned instead.
    /// However, if we are simply unsuccessful (for example, due to needing the resources),
    /// that gets included in the `DownscaleResult`.
    DownscaleResult {
        // FIXME for the future (once the informant is deprecated)
        // As of the time of writing, the agent/informant version of this struct is
        // called api.DownscaleResult. This struct has uppercase fields which are
        // serialized as such. Thus, we serialize using uppercase names so we don't
        // have to make a breaking change to the agent<->informant protocol. Once
        // the informant has been superseded by the monitor, we can add the correct
        // struct tags to api.DownscaleResult without causing a breaking change,
        // since we don't need to support the agent<->informant protocol anymore.
        #[serde(rename = "Ok")]
        ok: bool,
        #[serde(rename = "Status")]
        status: String,
    },
    /// Part of the bidirectional heartbeat. The heartbeat is initiated by the
    /// agent.
    /// *Note*: this is a struct variant because of the way go serializes struct{}
    HealthCheck {},
}
/// A message received from the agent.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct InboundMsg {
    /// The message payload; flattened for the same reason as `OutboundMsg`.
    #[serde(flatten)]
    pub(crate) inner: InboundMsgKind,
    /// Message id; corresponds to the `Id` field of the agent's message
    /// structs (see module docs).
    pub(crate) id: usize,
}

/// The different underlying message types we can receive from the agent.
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(tag = "type", content = "content")]
pub enum InboundMsgKind {
    /// Indicates that the we sent an invalid message, i.e, we couldn't
    /// properly deserialize it.
    InvalidMessage { error: String },
    /// Indicates that the informant experienced an internal error while processing
    /// a message. For example, if it failed to request upscale from the agent, it
    /// would return an `InternalError`.
    InternalError { error: String },
    /// Indicates to us that we have been granted more resources. We should respond
    /// with an `UpscaleConfirmation` when done handling the resources (increasing
    /// file cache size, cgroup memory limits).
    UpscaleNotification { granted: Resources },
    /// A request to reduce resource usage. We should response with a `DownscaleResult`,
    /// when done.
    DownscaleRequest { target: Resources },
    /// Part of the bidirectional heartbeat. The heartbeat is initiated by the
    /// agent.
    /// *Note*: this is a struct variant because of the way go serializes struct{}
    HealthCheck {},
}
/// Represents the resources granted to a VM.
#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
// Renamed because the agent has multiple resources types:
// `Resources` (milliCPU/memory slots)
// `Allocation` (vCPU/bytes) <- what we correspond to
#[serde(rename(serialize = "Allocation", deserialize = "Allocation"))]
pub struct Resources {
    /// Number of vCPUs
    pub(crate) cpu: f64,
    /// Bytes of memory
    pub(crate) mem: u64,
}

impl Resources {
    /// Construct a `Resources` from a vCPU count and a memory size in bytes.
    pub fn new(cpu: f64, mem: u64) -> Self {
        Self { cpu, mem }
    }
}
/// Lowest agent<->monitor protocol version we can speak.
pub const PROTOCOL_MIN_VERSION: ProtocolVersion = ProtocolVersion::V1_0;
/// Highest agent<->monitor protocol version we can speak.
pub const PROTOCOL_MAX_VERSION: ProtocolVersion = ProtocolVersion::V1_0;

/// A version of the agent<->monitor protocol, stored as a single `u8`.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Ord, Eq, Serialize, Deserialize)]
pub struct ProtocolVersion(u8);

impl ProtocolVersion {
    /// Represents v1.0 of the agent<-> monitor protocol - the initial version
    ///
    /// Currently the latest version.
    const V1_0: ProtocolVersion = ProtocolVersion(1);
}
impl fmt::Display for ProtocolVersion {
    /// Human-readable rendering of a protocol version.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            ProtocolVersion(0) => f.write_str("<invalid: zero>"),
            ProtocolVersion::V1_0 => f.write_str("v1.0"),
            // Print the raw inner byte here. Formatting the whole value with
            // `{other}` (as the code previously did) invokes this same
            // `Display` impl and recurses forever for any unknown version,
            // overflowing the stack.
            ProtocolVersion(other) => write!(f, "<unknown: {other}>"),
        }
    }
}
/// A set of protocol bounds that determines what we are speaking.
///
/// These bounds are inclusive.
#[derive(Debug)]
pub struct ProtocolRange {
    /// Lowest supported version (inclusive).
    pub min: ProtocolVersion,
    /// Highest supported version (inclusive).
    pub max: ProtocolVersion,
}
// Use a custom deserialize impl to ensure that `self.min <= self.max`
impl<'de> Deserialize<'de> for ProtocolRange {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        /// Mirror of [`ProtocolRange`] without the ordering invariant;
        /// we decode into this first and validate afterwards.
        #[derive(Deserialize)]
        struct InnerProtocolRange {
            min: ProtocolVersion,
            max: ProtocolVersion,
        }

        let InnerProtocolRange { min, max } = InnerProtocolRange::deserialize(deserializer)?;
        if min <= max {
            Ok(ProtocolRange { min, max })
        } else {
            Err(D::Error::custom(format!(
                "min version = {min} is greater than max version = {max}",
            )))
        }
    }
}
impl fmt::Display for ProtocolRange {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Collapse a degenerate range ("v1.0 to v1.0") to the single version.
        if self.min == self.max {
            write!(f, "{}", self.max)
        } else {
            write!(f, "{} to {}", self.min, self.max)
        }
    }
}
impl ProtocolRange {
    /// Find the highest shared version between two `ProtocolRange`'s.
    ///
    /// # Errors
    /// Fails if the two ranges do not overlap.
    pub fn highest_shared_version(&self, other: &Self) -> anyhow::Result<ProtocolVersion> {
        // We first have to make sure the ranges are overlapping. Once we know
        // this, we can merge the ranges by taking the max of the mins and the
        // mins of the maxes; the min of the maxes is the highest shared version.
        if self.min > other.max {
            anyhow::bail!(
                "Non-overlapping bounds: other.max = {} was less than self.min = {}",
                other.max,
                self.min,
            )
        } else if self.max < other.min {
            // (typo fix: this message previously read "Non-overlappinng")
            anyhow::bail!(
                "Non-overlapping bounds: self.max = {} was less than other.min = {}",
                self.max,
                other.min
            )
        } else {
            Ok(cmp::min(self.max, other.max))
        }
    }
}
/// We send this to the agent after negotiating which protocol to use.
/// (NOTE(review): the original comment said "to the monitor", but this file
/// is the monitor and `OutboundMsg` above documents that we send to the
/// agent — confirm.)
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub enum ProtocolResponse {
    /// Negotiation failed; carries a human-readable explanation.
    Error(String),
    /// Negotiation succeeded; the version both sides will speak.
    Version(ProtocolVersion),
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/vm_monitor/src/bin/monitor.rs | libs/vm_monitor/src/bin/monitor.rs | // We expose a standalone binary _and_ start the monitor in `compute_ctl` so that
// we can test the monitor as part of the entire autoscaling system in
// neondatabase/autoscaling.
//
// The monitor was previously started by vm-builder, and for testing purposes,
// we can mimic that setup with this binary.
/// Standalone entry point for the monitor (Linux only — it needs cgroups).
#[cfg(target_os = "linux")]
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    use clap::Parser;
    use tokio_util::sync::CancellationToken;
    use tracing_subscriber::EnvFilter;

    use vm_monitor::Args;

    // Structured JSON logs with file/line/span context; verbosity comes from
    // the default env filter (i.e. the `RUST_LOG` environment variable).
    let subscriber = tracing_subscriber::fmt::Subscriber::builder()
        .json()
        .with_file(true)
        .with_line_number(true)
        .with_span_list(true)
        .with_env_filter(EnvFilter::from_default_env())
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;

    // Leak the parsed args to obtain a `'static` reference; the monitor runs
    // for the life of the process, so the allocation is never reclaimed anyway.
    let args: &'static Args = Box::leak(Box::new(Args::parse()));

    // The token is created but never cancelled in this binary.
    let token = CancellationToken::new();

    vm_monitor::start(args, token).await
}
/// Fallback entry point for non-Linux targets: fail fast at startup.
#[cfg(not(target_os = "linux"))]
fn main() {
    panic!("the monitor requires cgroups, which are only available on linux")
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/pq_proto/src/lib.rs | libs/pq_proto/src/lib.rs | //! Postgres protocol messages serialization-deserialization. See
//! <https://www.postgresql.org/docs/devel/protocol-message-formats.html>
//! on message formats.
#![deny(clippy::undocumented_unsafe_blocks)]
pub mod framed;
use std::borrow::Cow;
use std::{fmt, io, str};
use byteorder::{BigEndian, ReadBytesExt};
use bytes::{Buf, BufMut, Bytes, BytesMut};
use itertools::Itertools;
// re-export for use in utils pageserver_feedback.rs
pub use postgres_protocol::PG_EPOCH;
use serde::{Deserialize, Serialize};
/// Postgres object identifier (OID).
pub type Oid = u32;
/// Postgres system identifier.
pub type SystemId = u64;

// OIDs of a few builtin Postgres types, used when describing result columns.
pub const INT8_OID: Oid = 20;
pub const INT4_OID: Oid = 23;
pub const TEXT_OID: Oid = 25;
/// Frontend (client -> server) protocol messages, as parsed off the wire.
#[derive(Debug)]
pub enum FeMessage {
    // Simple query.
    Query(Bytes),
    // Extended query protocol.
    Parse(FeParseMessage),
    Describe(FeDescribeMessage),
    Bind(FeBindMessage),
    Execute(FeExecuteMessage),
    Close(FeCloseMessage),
    Sync,
    Terminate,
    // COPY sub-protocol.
    CopyData(Bytes),
    CopyDone,
    CopyFail,
    // Password / SASL response (tag 'p').
    PasswordMessage(Bytes),
}
/// A Postgres wire-protocol version: major in the high 16 bits, minor in the
/// low 16 bits of a single `u32`, as it appears in the startup packet.
#[derive(Clone, Copy, PartialEq, PartialOrd)]
pub struct ProtocolVersion(u32);

impl ProtocolVersion {
    /// Pack a `(major, minor)` pair into the wire encoding.
    pub const fn new(major: u16, minor: u16) -> Self {
        Self(((major as u32) << 16) | (minor as u32))
    }

    /// The minor version: the low 16 bits of the packed value.
    pub const fn minor(self) -> u16 {
        (self.0 & 0xffff) as u16
    }

    /// The major version: the high 16 bits of the packed value.
    pub const fn major(self) -> u16 {
        (self.0 >> 16) as u16
    }
}

impl fmt::Debug for ProtocolVersion {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render as `[major, minor]`, e.g. `[3, 0]` for protocol 3.0.
        f.debug_list()
            .entries([self.major(), self.minor()])
            .finish()
    }
}
/// The very first packet a frontend sends, before the regular message flow.
#[derive(Debug)]
pub enum FeStartupPacket {
    /// Out-of-band request to cancel a query running on another connection.
    CancelRequest(CancelKeyData),
    /// TLS upgrade request. `direct` is true when the client opened with a
    /// raw TLS handshake (first byte 0x16) instead of the SSLRequest code.
    SslRequest {
        direct: bool,
    },
    /// GSSAPI encryption upgrade request.
    GssEncRequest,
    /// Regular startup: protocol version plus key/value session parameters.
    StartupMessage {
        version: ProtocolVersion,
        params: StartupMessageParams,
    },
}
/// Incrementally builds the NUL-separated parameter blob of a startup message.
#[derive(Debug, Clone, Default)]
pub struct StartupMessageParamsBuilder {
    params: BytesMut,
}

impl StartupMessageParamsBuilder {
    /// Set parameter's value by its name.
    /// name and value must not contain a \0 byte
    pub fn insert(&mut self, name: &str, value: &str) {
        // Wire layout is a flat sequence of NUL-terminated strings:
        // name \0 value \0 name \0 value \0 ...
        self.params.extend_from_slice(name.as_bytes());
        self.params.put_u8(0);
        self.params.extend_from_slice(value.as_bytes());
        self.params.put_u8(0);
    }

    /// Finalize the accumulated parameters into an immutable view.
    pub fn freeze(self) -> StartupMessageParams {
        let params = self.params.freeze();
        StartupMessageParams { params }
    }
}
/// Key/value session parameters from a startup message, stored as the raw
/// NUL-separated blob (`name \0 value \0 ...`).
#[derive(Debug, Clone, Default)]
pub struct StartupMessageParams {
    pub params: Bytes,
}

impl StartupMessageParams {
    /// Get parameter's value by its name.
    pub fn get(&self, name: &str) -> Option<&str> {
        self.iter().find_map(|(k, v)| (k == name).then_some(v))
    }

    /// Split command-line options according to PostgreSQL's logic,
    /// taking into account all escape sequences but leaving them as-is.
    /// [`None`] means that there's no `options` in [`Self`].
    pub fn options_raw(&self) -> Option<impl Iterator<Item = &str>> {
        self.get("options").map(Self::parse_options_raw)
    }

    /// Split command-line options according to PostgreSQL's logic,
    /// applying all escape sequences (using owned strings as needed).
    /// [`None`] means that there's no `options` in [`Self`].
    pub fn options_escaped(&self) -> Option<impl Iterator<Item = Cow<'_, str>>> {
        self.get("options").map(Self::parse_options_escaped)
    }

    /// Split command-line options according to PostgreSQL's logic,
    /// taking into account all escape sequences but leaving them as-is.
    pub fn parse_options_raw(input: &str) -> impl Iterator<Item = &str> {
        // See `postgres: pg_split_opts`.
        // The closure carries one character of look-behind state so that a
        // backslash-escaped whitespace character does not split the option.
        let mut last_was_escape = false;
        input
            .split(move |c: char| {
                // We split by non-escaped whitespace symbols.
                let should_split = c.is_ascii_whitespace() && !last_was_escape;
                last_was_escape = c == '\\' && !last_was_escape;
                should_split
            })
            .filter(|s| !s.is_empty())
    }

    /// Split command-line options according to PostgreSQL's logic,
    /// applying all escape sequences (using owned strings as needed).
    pub fn parse_options_escaped(input: &str) -> impl Iterator<Item = Cow<'_, str>> {
        // See `postgres: pg_split_opts`.
        Self::parse_options_raw(input).map(|s| {
            // Drop each unescaped backslash; `\\` keeps one backslash.
            let mut preserve_next_escape = false;
            let escape = |c| {
                // We should remove '\\' unless it's preceded by '\\'.
                let should_remove = c == '\\' && !preserve_next_escape;
                preserve_next_escape = should_remove;
                should_remove
            };

            // Only allocate when there is actually something to unescape.
            match s.contains('\\') {
                true => Cow::Owned(s.replace(escape, "")),
                false => Cow::Borrowed(s),
            }
        })
    }

    /// Iterate through key-value pairs in an arbitrary order.
    pub fn iter(&self) -> impl Iterator<Item = (&str, &str)> {
        // utf-8 validity was checked when the startup packet was parsed
        // (see `FeStartupPacket::parse`), hence the expect.
        let params =
            std::str::from_utf8(&self.params).expect("should be validated as utf8 already");
        params.split_terminator('\0').tuples()
    }

    // This function is mostly useful in tests.
    #[doc(hidden)]
    pub fn new<'a, const N: usize>(pairs: [(&'a str, &'a str); N]) -> Self {
        let mut b = StartupMessageParamsBuilder::default();
        for (k, v) in pairs {
            b.insert(k, v)
        }
        b.freeze()
    }
}
/// Backend PID plus cancellation key, as exchanged in BackendKeyData and
/// CancelRequest messages.
#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)]
pub struct CancelKeyData {
    pub backend_pid: i32,
    pub cancel_key: i32,
}
/// Split a 64-bit id into a [`CancelKeyData`]: the high 32 bits become the
/// backend PID and the low 32 bits the cancellation key.
pub fn id_to_cancel_key(id: u64) -> CancelKeyData {
    let backend_pid = (id >> 32) as i32;
    let cancel_key = (id & 0xffffffff) as i32;
    CancelKeyData {
        backend_pid,
        cancel_key,
    }
}
impl fmt::Display for CancelKeyData {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Reassemble the 64-bit id: PID in the high word, key in the low word
        // (mask strips the sign-extension of the i32 -> u64 cast).
        let id = ((self.backend_pid as u64) << 32) | ((self.cancel_key as u64) & 0xffffffff);

        // This format is more compact and might work better for logs.
        f.debug_tuple("CancelKeyData")
            .field(&format_args!("{id:x}"))
            .finish()
    }
}
use rand::distr::{Distribution, StandardUniform};
/// Allows drawing a uniformly random `CancelKeyData` via `rng.random()`.
impl Distribution<CancelKeyData> for StandardUniform {
    fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> CancelKeyData {
        CancelKeyData {
            backend_pid: rng.random(),
            cancel_key: rng.random(),
        }
    }
}
// We only support the simple case of Parse on unnamed prepared statement and
// no params
/// Body of a Parse ('P') message.
#[derive(Debug)]
pub struct FeParseMessage {
    pub query_string: Bytes,
}

/// Body of a Describe ('D') message.
#[derive(Debug)]
pub struct FeDescribeMessage {
    pub kind: u8, // 'S' to describe a prepared statement; or 'P' to describe a portal.
    // we only support unnamed prepared stmt or portal
}

// we only support unnamed prepared stmt and portal
/// Body of a Bind ('B') message (no payload retained).
#[derive(Debug)]
pub struct FeBindMessage;

// we only support unnamed prepared stmt or portal
/// Body of an Execute ('E') message.
#[derive(Debug)]
pub struct FeExecuteMessage {
    /// max # of rows
    pub maxrows: i32,
}

// we only support unnamed prepared stmt and portal
/// Body of a Close ('C') message (no payload retained).
#[derive(Debug)]
pub struct FeCloseMessage;
/// An error occurred while parsing or serializing raw stream into Postgres
/// messages.
#[derive(thiserror::Error, Debug)]
pub enum ProtocolError {
    /// Invalid packet was received from the client (e.g. unexpected message
    /// type or broken len).
    #[error("Protocol error: {0}")]
    Protocol(String),
    /// Failed to parse or, (unlikely), serialize a protocol message.
    #[error("Message parse error: {0}")]
    BadMessage(String),
}

impl ProtocolError {
    /// Proxy stream.rs uses only io::Error; provide it.
    pub fn into_io_error(self) -> io::Error {
        // The variant distinction is flattened into the Display string.
        io::Error::other(self.to_string())
    }
}
impl FeMessage {
    /// Read and parse one message from the `buf` input buffer. If there is at
    /// least one valid message, returns it, advancing `buf`; redundant copies
    /// are avoided, as thanks to `bytes` crate ptrs in parsed message point
    /// directly into the `buf` (processed data is garbage collected after
    /// parsed message is dropped).
    ///
    /// Returns None if `buf` doesn't contain enough data for a single message.
    /// For efficiency, tries to reserve large enough space in `buf` for the
    /// next message in this case to save the repeated calls.
    ///
    /// Returns Error if message is malformed, the only possible ErrorKind is
    /// InvalidInput.
    //
    // Inspired by rust-postgres Message::parse.
    pub fn parse(buf: &mut BytesMut) -> Result<Option<FeMessage>, ProtocolError> {
        // Every message contains message type byte and 4 bytes len; can't do
        // much without them.
        if buf.len() < 5 {
            let to_read = 5 - buf.len();
            buf.reserve(to_read);
            return Ok(None);
        }

        // We shouldn't advance `buf` as probably full message is not there yet,
        // so can't directly use Bytes::get_u32 etc.
        let tag = buf[0];
        let len = (&buf[1..5]).read_u32::<BigEndian>().unwrap();
        if len < 4 {
            return Err(ProtocolError::Protocol(format!(
                "invalid message length {len}"
            )));
        }

        // length field includes itself, but not message type.
        let total_len = len as usize + 1;
        if buf.len() < total_len {
            // Don't have full message yet.
            let to_read = total_len - buf.len();
            buf.reserve(to_read);
            return Ok(None);
        }

        // got the message, advance buffer
        let mut msg = buf.split_to(total_len).freeze();
        msg.advance(5); // consume message type and len

        // Dispatch on the one-byte tag; extended-protocol messages get their
        // bodies parsed further, the rest carry the raw body (or nothing).
        match tag {
            b'Q' => Ok(Some(FeMessage::Query(msg))),
            b'P' => Ok(Some(FeParseMessage::parse(msg)?)),
            b'D' => Ok(Some(FeDescribeMessage::parse(msg)?)),
            b'E' => Ok(Some(FeExecuteMessage::parse(msg)?)),
            b'B' => Ok(Some(FeBindMessage::parse(msg)?)),
            b'C' => Ok(Some(FeCloseMessage::parse(msg)?)),
            b'S' => Ok(Some(FeMessage::Sync)),
            b'X' => Ok(Some(FeMessage::Terminate)),
            b'd' => Ok(Some(FeMessage::CopyData(msg))),
            b'c' => Ok(Some(FeMessage::CopyDone)),
            b'f' => Ok(Some(FeMessage::CopyFail)),
            b'p' => Ok(Some(FeMessage::PasswordMessage(msg))),
            tag => Err(ProtocolError::Protocol(format!(
                "unknown message tag: {tag},'{msg:?}'"
            ))),
        }
    }
}
impl FeStartupPacket {
    /// Read and parse startup message from the `buf` input buffer. It is
    /// different from [`FeMessage::parse`] because startup messages don't have
    /// message type byte; otherwise, its comments apply.
    pub fn parse(buf: &mut BytesMut) -> Result<Option<FeStartupPacket>, ProtocolError> {
        /// <https://github.com/postgres/postgres/blob/ca481d3c9ab7bf69ff0c8d71ad3951d407f6a33c/src/include/libpq/pqcomm.h#L118>
        const MAX_STARTUP_PACKET_LENGTH: usize = 10000;
        /// Major version 1234 is reserved for the special request codes below.
        const RESERVED_INVALID_MAJOR_VERSION: u16 = 1234;
        /// <https://github.com/postgres/postgres/blob/ca481d3c9ab7bf69ff0c8d71ad3951d407f6a33c/src/include/libpq/pqcomm.h#L132>
        const CANCEL_REQUEST_CODE: ProtocolVersion = ProtocolVersion::new(1234, 5678);
        /// <https://github.com/postgres/postgres/blob/ca481d3c9ab7bf69ff0c8d71ad3951d407f6a33c/src/include/libpq/pqcomm.h#L166>
        const NEGOTIATE_SSL_CODE: ProtocolVersion = ProtocolVersion::new(1234, 5679);
        /// <https://github.com/postgres/postgres/blob/ca481d3c9ab7bf69ff0c8d71ad3951d407f6a33c/src/include/libpq/pqcomm.h#L167>
        const NEGOTIATE_GSS_CODE: ProtocolVersion = ProtocolVersion::new(1234, 5680);

        // <https://github.com/postgres/postgres/blob/04bcf9e19a4261fe9c7df37c777592c2e10c32a7/src/backend/tcop/backend_startup.c#L378-L382>
        // First byte indicates standard SSL handshake message
        // (It can't be a Postgres startup length because in network byte order
        // that would be a startup packet hundreds of megabytes long)
        if buf.first() == Some(&0x16) {
            return Ok(Some(FeStartupPacket::SslRequest { direct: true }));
        }

        // need at least 4 bytes with packet len
        if buf.len() < 4 {
            let to_read = 4 - buf.len();
            buf.reserve(to_read);
            return Ok(None);
        }

        // We shouldn't advance `buf` as probably full message is not there yet,
        // so can't directly use Bytes::get_u32 etc.
        let len = (&buf[0..4]).read_u32::<BigEndian>().unwrap() as usize;
        // The proposed replacement is `!(8..=MAX_STARTUP_PACKET_LENGTH).contains(&len)`
        // which is less readable
        #[allow(clippy::manual_range_contains)]
        if len < 8 || len > MAX_STARTUP_PACKET_LENGTH {
            return Err(ProtocolError::Protocol(format!(
                "invalid startup packet message length {len}"
            )));
        }

        if buf.len() < len {
            // Don't have full message yet.
            let to_read = len - buf.len();
            buf.reserve(to_read);
            return Ok(None);
        }

        // got the message, advance buffer
        let mut msg = buf.split_to(len).freeze();
        msg.advance(4); // consume len

        let request_code = ProtocolVersion(msg.get_u32());

        // StartupMessage, CancelRequest, SSLRequest etc are differentiated by request code.
        let message = match request_code {
            CANCEL_REQUEST_CODE => {
                // Body must be exactly backend PID (i32) + cancel key (i32).
                if msg.remaining() != 8 {
                    return Err(ProtocolError::BadMessage(
                        "CancelRequest message is malformed, backend PID / secret key missing"
                            .to_owned(),
                    ));
                }
                FeStartupPacket::CancelRequest(CancelKeyData {
                    backend_pid: msg.get_i32(),
                    cancel_key: msg.get_i32(),
                })
            }
            NEGOTIATE_SSL_CODE => {
                // Requested upgrade to SSL (aka TLS)
                FeStartupPacket::SslRequest { direct: false }
            }
            NEGOTIATE_GSS_CODE => {
                // Requested upgrade to GSSAPI
                FeStartupPacket::GssEncRequest
            }
            version if version.major() == RESERVED_INVALID_MAJOR_VERSION => {
                return Err(ProtocolError::Protocol(format!(
                    "Unrecognized request code {}",
                    version.minor()
                )));
            }
            // TODO bail if protocol major_version is not 3?
            version => {
                // StartupMessage
                let s = str::from_utf8(&msg).map_err(|_e| {
                    ProtocolError::BadMessage("StartupMessage params: invalid utf-8".to_owned())
                })?;
                let s = s.strip_suffix('\0').ok_or_else(|| {
                    ProtocolError::Protocol(
                        "StartupMessage params: missing null terminator".to_string(),
                    )
                })?;

                FeStartupPacket::StartupMessage {
                    version,
                    params: StartupMessageParams {
                        // `slice_ref` keeps the params sharing storage with
                        // `msg` (no copy) while dropping the trailing NUL.
                        params: msg.slice_ref(s.as_bytes()),
                    },
                }
            }
        };
        Ok(Some(message))
    }
}
impl FeParseMessage {
    /// Parse the body of a Parse ('P') message.
    fn parse(mut body: Bytes) -> Result<FeMessage, ProtocolError> {
        // FIXME: the rust-postgres driver uses a named prepared statement
        // for copy_out(). We're not prepared to handle that correctly. For
        // now, just ignore the statement name, assuming that the client never
        // uses more than one prepared statement at a time.
        let _pstmt_name = read_cstr(&mut body)?;
        let query_string = read_cstr(&mut body)?;

        // An i16 parameter count follows the query string.
        if body.remaining() < 2 {
            return Err(ProtocolError::BadMessage(
                "Parse message is malformed, nparams missing".to_string(),
            ));
        }

        match body.get_i16() {
            0 => Ok(FeMessage::Parse(FeParseMessage { query_string })),
            _ => Err(ProtocolError::BadMessage(
                "query params not implemented".to_string(),
            )),
        }
    }
}
impl FeDescribeMessage {
    /// Parse the body of a Describe ('D') message.
    ///
    /// Only describing a prepared statement (kind `'S'`) is supported;
    /// portals (`'P'`) are rejected.
    fn parse(mut buf: Bytes) -> Result<FeMessage, ProtocolError> {
        // `get_u8` panics on an empty buffer, so check first instead of
        // crashing on a truncated message from the client (the sibling
        // parsers guard their fixed-size reads the same way).
        if buf.remaining() < 1 {
            return Err(ProtocolError::BadMessage(
                "Describe message is malformed, kind missing".to_string(),
            ));
        }
        let kind = buf.get_u8();
        let _pstmt_name = read_cstr(&mut buf)?;

        // FIXME: see FeParseMessage::parse
        if kind != b'S' {
            // (typo fix: this message previously read "statemement")
            return Err(ProtocolError::BadMessage(
                "only prepared statement Describe is implemented".to_string(),
            ));
        }

        Ok(FeMessage::Describe(FeDescribeMessage { kind }))
    }
}
impl FeExecuteMessage {
    /// Parse the body of an Execute ('E') message.
    fn parse(mut body: Bytes) -> Result<FeMessage, ProtocolError> {
        let portal_name = read_cstr(&mut body)?;

        // An i32 row limit follows the portal name.
        if body.remaining() < 4 {
            return Err(ProtocolError::BadMessage(
                "FeExecuteMessage message is malformed, maxrows missing".to_string(),
            ));
        }
        let maxrows = body.get_i32();

        // Only the unnamed portal with no row limit is implemented; the
        // portal-name check deliberately takes precedence.
        match (portal_name.is_empty(), maxrows) {
            (false, _) => Err(ProtocolError::BadMessage(
                "named portals not implemented".to_string(),
            )),
            (true, 0) => Ok(FeMessage::Execute(FeExecuteMessage { maxrows })),
            (true, _) => Err(ProtocolError::BadMessage(
                "row limit in Execute message not implemented".to_string(),
            )),
        }
    }
}
impl FeBindMessage {
    /// Parse the body of a Bind ('B') message.
    fn parse(mut body: Bytes) -> Result<FeMessage, ProtocolError> {
        let portal_name = read_cstr(&mut body)?;
        let _pstmt_name = read_cstr(&mut body)?;

        // FIXME: see FeParseMessage::parse
        if portal_name.is_empty() {
            Ok(FeMessage::Bind(FeBindMessage))
        } else {
            Err(ProtocolError::BadMessage(
                "named portals not implemented".to_string(),
            ))
        }
    }
}
impl FeCloseMessage {
fn parse(mut buf: Bytes) -> Result<FeMessage, ProtocolError> {
let _kind = buf.get_u8();
let _pstmt_or_portal_name = read_cstr(&mut buf)?;
// FIXME: we do nothing with Close
Ok(FeMessage::Close(FeCloseMessage))
}
}
// Backend
/// Backend (server -> client) protocol messages, serialized by
/// `BeMessage::write`.
#[derive(Debug)]
pub enum BeMessage<'a> {
    AuthenticationOk,
    AuthenticationMD5Password([u8; 4]),
    AuthenticationSasl(BeAuthenticationSaslMessage<'a>),
    AuthenticationCleartextPassword,
    BackendKeyData(CancelKeyData),
    BindComplete,
    CommandComplete(&'a [u8]),
    CopyData(&'a [u8]),
    CopyDone,
    CopyFail,
    CopyInResponse,
    CopyOutResponse,
    CopyBothResponse,
    CloseComplete,
    // None means column is NULL
    DataRow(&'a [Option<&'a [u8]>]),
    // None errcode means internal_error will be sent.
    ErrorResponse(&'a str, Option<&'a [u8; 5]>),
    /// Single byte - used in response to SSLRequest/GSSENCRequest.
    EncryptionResponse(bool),
    NoData,
    ParameterDescription,
    ParameterStatus {
        name: &'a [u8],
        value: &'a [u8],
    },
    ParseComplete,
    ReadyForQuery,
    RowDescription(&'a [RowDescriptor<'a>]),
    XLogData(XLogDataBody<'a>),
    NoticeResponse(&'a str),
    NegotiateProtocolVersion {
        version: ProtocolVersion,
        options: &'a [&'a str],
    },
    KeepAlive(WalSndKeepAlive),
    /// Batch of interpreted, shard filtered WAL records,
    /// ready for the pageserver to ingest
    InterpretedWalRecords(InterpretedWalRecordsBody<'a>),
    /// Escape hatch: raw tag byte plus pre-serialized body.
    Raw(u8, &'a [u8]),
}
/// Common shorthands.
impl<'a> BeMessage<'a> {
    /// A [`BeMessage::ParameterStatus`] holding the client encoding, i.e. UTF-8.
    /// This is a sensible default, given that:
    ///  * rust strings only support this encoding out of the box.
    ///  * tokio-postgres, postgres-jdbc (and probably more) mandate it.
    ///
    /// TODO: do we need to report `server_encoding` as well?
    pub const CLIENT_ENCODING: Self = Self::ParameterStatus {
        name: b"client_encoding",
        value: b"UTF8",
    };

    /// A [`BeMessage::ParameterStatus`] reporting `integer_datetimes = on`.
    pub const INTEGER_DATETIMES: Self = Self::ParameterStatus {
        name: b"integer_datetimes",
        value: b"on",
    };

    /// Build a [`BeMessage::ParameterStatus`] holding the server version.
    pub fn server_version(version: &'a str) -> Self {
        Self::ParameterStatus {
            name: b"server_version",
            value: version.as_bytes(),
        }
    }
}
/// Payload of an AuthenticationSASL family message.
#[derive(Debug)]
pub enum BeAuthenticationSaslMessage<'a> {
    /// Advertise the supported SASL mechanisms.
    Methods(&'a [&'a str]),
    /// SASL challenge continuation data.
    Continue(&'a [u8]),
    /// Final SASL message data.
    Final(&'a [u8]),
}

/// Payload of a ParameterStatus message.
#[derive(Debug)]
pub enum BeParameterStatusMessage<'a> {
    Encoding(&'a str),
    ServerVersion(&'a str),
}
// One row description in RowDescription packet.
/// Describes one column of a `RowDescription` message.
#[derive(Debug)]
pub struct RowDescriptor<'a> {
    /// Column name bytes.
    pub name: &'a [u8],
    /// OID of the source table (0 in the defaults used here).
    pub tableoid: Oid,
    /// Attribute number within the source table.
    pub attnum: i16,
    /// OID of the column's data type, e.g. [`TEXT_OID`].
    pub typoid: Oid,
    /// Type length; -1 is used for variable-length types (see `text_col`).
    pub typlen: i16,
    /// Type modifier.
    pub typmod: i32,
    /// Format code (0 in the defaults used here).
    pub formatcode: i16,
}

impl Default for RowDescriptor<'_> {
    fn default() -> RowDescriptor<'static> {
        RowDescriptor {
            name: b"",
            tableoid: 0,
            attnum: 0,
            typoid: 0,
            typlen: 0,
            typmod: 0,
            formatcode: 0,
        }
    }
}
impl RowDescriptor<'_> {
    /// Convenience function to create a RowDescriptor message for an int8 column
    pub const fn int8_col(name: &[u8]) -> RowDescriptor {
        RowDescriptor {
            name,
            tableoid: 0,
            attnum: 0,
            typoid: INT8_OID,
            typlen: 8,
            typmod: 0,
            formatcode: 0,
        }
    }

    /// Convenience function to create a RowDescriptor for a text column.
    pub const fn text_col(name: &[u8]) -> RowDescriptor {
        RowDescriptor {
            name,
            tableoid: 0,
            attnum: 0,
            typoid: TEXT_OID,
            // text is variable-length, hence typlen -1
            typlen: -1,
            typmod: 0,
            formatcode: 0,
        }
    }
}
/// Payload of an XLogData replication message.
#[derive(Debug)]
pub struct XLogDataBody<'a> {
    /// WAL position where [`Self::data`] starts.
    pub wal_start: u64,
    pub wal_end: u64, // current end of WAL on the server
    pub timestamp: i64,
    pub data: &'a [u8],
}

/// Payload of a walsender keepalive message.
#[derive(Debug)]
pub struct WalSndKeepAlive {
    pub wal_end: u64, // current end of WAL on the server
    pub timestamp: i64,
    /// Whether the receiver should reply immediately.
    pub request_reply: bool,
}

/// Batch of interpreted WAL records used in the interpreted
/// safekeeper to pageserver protocol.
///
/// Note that the pageserver uses the RawInterpretedWalRecordsBody
/// counterpart of this from the neondatabase/rust-postgres repo.
/// If you're changing this struct, you likely need to change its
/// twin as well.
#[derive(Debug)]
pub struct InterpretedWalRecordsBody<'a> {
    /// End of raw WAL in [`Self::data`]
    pub streaming_lsn: u64,
    /// Current end of WAL on the server
    pub commit_lsn: u64,
    pub data: &'a [u8],
}
/// A canned DataRow containing the single value `hello world`.
pub static HELLO_WORLD_ROW: BeMessage = BeMessage::DataRow(&[Some(b"hello world")]);

// single text column
/// Row description for a single text column named `data`.
pub static SINGLE_COL_ROWDESC: BeMessage = BeMessage::RowDescription(&[RowDescriptor {
    name: b"data",
    tableoid: 0,
    attnum: 0,
    typoid: TEXT_OID,
    typlen: -1,
    typmod: 0,
    formatcode: 0,
}]);
/// Call f() to write body of the message and prepend it with 4-byte len as
/// prescribed by the protocol.
fn write_body<R>(buf: &mut BytesMut, f: impl FnOnce(&mut BytesMut) -> R) -> R {
    // Remember where the length word goes and write a placeholder for it.
    let len_pos = buf.len();
    buf.put_u32(0);

    let res = f(buf);

    // Back-patch the length: it covers itself plus everything `f` appended.
    let size = i32::try_from(buf.len() - len_pos).expect("message too big to transmit");
    buf[len_pos..len_pos + 4].copy_from_slice(&size.to_be_bytes());
    res
}
/// Safe write of s into buf as cstring (String in the protocol).
fn write_cstr(s: impl AsRef<[u8]>, buf: &mut BytesMut) -> Result<(), ProtocolError> {
    let bytes = s.as_ref();
    // An embedded NUL would prematurely terminate the string on the wire.
    if bytes.iter().any(|&b| b == 0) {
        return Err(ProtocolError::BadMessage(
            "string contains embedded null".to_owned(),
        ));
    }
    buf.extend_from_slice(bytes);
    buf.put_u8(0);
    Ok(())
}
/// Read cstring from buf, advancing it.
pub fn read_cstr(buf: &mut Bytes) -> Result<Bytes, ProtocolError> {
    match buf.iter().position(|&b| b == 0) {
        Some(terminator) => {
            // Split off the string bytes, then skip the NUL itself.
            let result = buf.split_to(terminator);
            buf.advance(1);
            Ok(result)
        }
        None => Err(ProtocolError::BadMessage(
            "missing cstring terminator".to_owned(),
        )),
    }
}
/// SQLSTATE "internal error".
pub const SQLSTATE_INTERNAL_ERROR: &[u8; 5] = b"XX000";
/// SQLSTATE "admin shutdown".
pub const SQLSTATE_ADMIN_SHUTDOWN: &[u8; 5] = b"57P01";
/// SQLSTATE "successful completion".
pub const SQLSTATE_SUCCESSFUL_COMPLETION: &[u8; 5] = b"00000";
impl BeMessage<'_> {
/// Serialize `message` to the given `buf`.
/// Apart from smart memory management, BytesMut is good here as msg len
/// precedes its body and it is handy to write it down first and then fill
/// the length. With Write we would have to either calc it manually or have
/// one more buffer.
pub fn write(buf: &mut BytesMut, message: &BeMessage) -> Result<(), ProtocolError> {
match message {
BeMessage::Raw(code, data) => {
buf.put_u8(*code);
write_body(buf, |b| b.put_slice(data))
}
BeMessage::AuthenticationOk => {
buf.put_u8(b'R');
write_body(buf, |buf| {
buf.put_i32(0); // Specifies that the authentication was successful.
});
}
BeMessage::AuthenticationCleartextPassword => {
buf.put_u8(b'R');
write_body(buf, |buf| {
buf.put_i32(3); // Specifies that clear text password is required.
});
}
BeMessage::AuthenticationMD5Password(salt) => {
buf.put_u8(b'R');
write_body(buf, |buf| {
buf.put_i32(5); // Specifies that an MD5-encrypted password is required.
buf.put_slice(&salt[..]);
});
}
BeMessage::AuthenticationSasl(msg) => {
buf.put_u8(b'R');
write_body(buf, |buf| {
use BeAuthenticationSaslMessage::*;
match msg {
Methods(methods) => {
buf.put_i32(10); // Specifies that SASL auth method is used.
for method in methods.iter() {
write_cstr(method, buf)?;
}
buf.put_u8(0); // zero terminator for the list
}
Continue(extra) => {
buf.put_i32(11); // Continue SASL auth.
buf.put_slice(extra);
}
Final(extra) => {
buf.put_i32(12); // Send final SASL message.
buf.put_slice(extra);
}
}
Ok(())
})?;
}
BeMessage::BackendKeyData(key_data) => {
buf.put_u8(b'K');
write_body(buf, |buf| {
buf.put_i32(key_data.backend_pid);
buf.put_i32(key_data.cancel_key);
});
}
BeMessage::BindComplete => {
buf.put_u8(b'2');
write_body(buf, |_| {});
}
BeMessage::CloseComplete => {
buf.put_u8(b'3');
write_body(buf, |_| {});
}
BeMessage::CommandComplete(cmd) => {
buf.put_u8(b'C');
write_body(buf, |buf| write_cstr(cmd, buf))?;
}
BeMessage::CopyData(data) => {
buf.put_u8(b'd');
write_body(buf, |buf| {
buf.put_slice(data);
});
}
BeMessage::CopyDone => {
buf.put_u8(b'c');
write_body(buf, |_| {});
}
BeMessage::CopyFail => {
buf.put_u8(b'f');
write_body(buf, |_| {});
}
BeMessage::CopyInResponse => {
buf.put_u8(b'G');
write_body(buf, |buf| {
buf.put_u8(1); // copy_is_binary
buf.put_i16(0); // numAttributes
});
}
BeMessage::CopyOutResponse => {
buf.put_u8(b'H');
write_body(buf, |buf| {
buf.put_u8(0); // copy_is_binary
buf.put_i16(0); // numAttributes
});
}
BeMessage::CopyBothResponse => {
buf.put_u8(b'W');
write_body(buf, |buf| {
// doesn't matter, used only for replication
buf.put_u8(0); // copy_is_binary
buf.put_i16(0); // numAttributes
});
}
BeMessage::DataRow(vals) => {
buf.put_u8(b'D');
write_body(buf, |buf| {
buf.put_u16(vals.len() as u16); // num of cols
for val_opt in vals.iter() {
if let Some(val) = val_opt {
buf.put_u32(val.len() as u32);
buf.put_slice(val);
} else {
buf.put_i32(-1);
}
}
});
}
// ErrorResponse is a zero-terminated array of zero-terminated fields.
// First byte of each field represents type of this field. Set just enough fields
// to satisfy rust-postgres client: 'S' -- severity, 'C' -- error, 'M' -- error
// message text.
BeMessage::ErrorResponse(error_msg, pg_error_code) => {
// 'E' signalizes ErrorResponse messages
buf.put_u8(b'E');
write_body(buf, |buf| {
buf.put_u8(b'S'); // severity
buf.put_slice(b"ERROR\0");
buf.put_u8(b'C'); // SQLSTATE error code
buf.put_slice(&terminate_code(
pg_error_code.unwrap_or(SQLSTATE_INTERNAL_ERROR),
));
buf.put_u8(b'M'); // the message
write_cstr(error_msg, buf)?;
buf.put_u8(0); // terminator
Ok(())
})?;
}
// NoticeResponse has the same format as ErrorResponse. From doc: "The frontend should display the
// message but continue listening for ReadyForQuery or ErrorResponse"
BeMessage::NoticeResponse(error_msg) => {
// For all the errors set Severity to Error and error code to
// 'internal error'.
// 'N' signalizes NoticeResponse messages
buf.put_u8(b'N');
write_body(buf, |buf| {
buf.put_u8(b'S'); // severity
buf.put_slice(b"NOTICE\0");
buf.put_u8(b'C'); // SQLSTATE error code
buf.put_slice(&terminate_code(SQLSTATE_INTERNAL_ERROR));
buf.put_u8(b'M'); // the message
write_cstr(error_msg.as_bytes(), buf)?;
buf.put_u8(0); // terminator
Ok(())
})?;
}
BeMessage::NoData => {
buf.put_u8(b'n');
write_body(buf, |_| {});
}
BeMessage::EncryptionResponse(should_negotiate) => {
let response = if *should_negotiate { b'S' } else { b'N' };
buf.put_u8(response);
}
BeMessage::ParameterStatus { name, value } => {
buf.put_u8(b'S');
write_body(buf, |buf| {
write_cstr(name, buf)?;
write_cstr(value, buf)
})?;
}
BeMessage::ParameterDescription => {
buf.put_u8(b't');
write_body(buf, |buf| {
// we don't support params, so always 0
buf.put_i16(0);
});
}
BeMessage::ParseComplete => {
buf.put_u8(b'1');
write_body(buf, |_| {});
}
BeMessage::ReadyForQuery => {
buf.put_u8(b'Z');
write_body(buf, |buf| {
buf.put_u8(b'I');
});
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | true |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/pq_proto/src/framed.rs | libs/pq_proto/src/framed.rs | //! Provides `Framed` -- writing/flushing and reading Postgres messages to/from
//! the async stream based on (and buffered with) BytesMut. All functions are
//! cancellation safe.
//!
//! It is similar to what tokio_util::codec::Framed with appropriate codec
//! provides, but `FramedReader` and `FramedWriter` read/write parts can be used
//! separately without using split from futures::stream::StreamExt (which
//! allocates a [Box] in polling internally). tokio::io::split is used for splitting
//! instead. Plus we customize error messages more than a single type for all io
//! calls.
//!
//! [Box]: https://docs.rs/futures-util/0.3.26/src/futures_util/lock/bilock.rs.html#107
use std::future::Future;
use std::io::{self, ErrorKind};
use bytes::{Buf, BytesMut};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadHalf, WriteHalf};
use crate::{BeMessage, FeMessage, FeStartupPacket, ProtocolError};
const INITIAL_CAPACITY: usize = 8 * 1024;
/// Error on postgres connection: either IO (physical transport error) or
/// protocol violation.
#[derive(thiserror::Error, Debug)]
pub enum ConnectionError {
    /// Transport-level failure reported by the underlying stream.
    #[error(transparent)]
    Io(#[from] io::Error),
    /// Malformed or unexpected Postgres wire-protocol data.
    #[error(transparent)]
    Protocol(#[from] ProtocolError),
}
impl ConnectionError {
/// Proxy stream.rs uses only io::Error; provide it.
pub fn into_io_error(self) -> io::Error {
match self {
ConnectionError::Io(io) => io,
ConnectionError::Protocol(pe) => io::Error::other(pe.to_string()),
}
}
}
/// Wraps async io `stream`, providing messages to write/flush + read Postgres
/// messages.
pub struct Framed<S> {
    /// Underlying async transport (e.g. TCP or TLS stream).
    pub stream: S,
    /// Accumulates incoming bytes until a full message can be parsed.
    pub read_buf: BytesMut,
    /// Accumulates outgoing messages until an explicit flush.
    pub write_buf: BytesMut,
}
impl<S> Framed<S> {
    /// Wrap `stream` with freshly allocated read/write buffers.
    pub fn new(stream: S) -> Self {
        let read_buf = BytesMut::with_capacity(INITIAL_CAPACITY);
        let write_buf = BytesMut::with_capacity(INITIAL_CAPACITY);
        Self {
            stream,
            read_buf,
            write_buf,
        }
    }

    /// Get a shared reference to the underlying stream.
    pub fn get_ref(&self) -> &S {
        &self.stream
    }

    /// Deconstruct into the underlying stream and read buffer.
    pub fn into_inner(self) -> (S, BytesMut) {
        (self.stream, self.read_buf)
    }

    /// Return new Framed with stream type transformed by async f, for TLS
    /// upgrade. Buffers are carried over unchanged.
    pub async fn map_stream<S2, E, F, Fut>(self, f: F) -> Result<Framed<S2>, E>
    where
        F: FnOnce(S) -> Fut,
        Fut: Future<Output = Result<S2, E>>,
    {
        let Self {
            stream,
            read_buf,
            write_buf,
        } = self;
        Ok(Framed {
            stream: f(stream).await?,
            read_buf,
            write_buf,
        })
    }
}
impl<S: AsyncRead + Unpin> Framed<S> {
    /// Read the initial startup packet, which has a different wire format
    /// than ordinary frontend messages.
    pub async fn read_startup_message(
        &mut self,
    ) -> Result<Option<FeStartupPacket>, ConnectionError> {
        let Self {
            stream, read_buf, ..
        } = self;
        read_message(stream, read_buf, FeStartupPacket::parse).await
    }

    /// Read the next regular frontend message; `Ok(None)` means clean EOF.
    pub async fn read_message(&mut self) -> Result<Option<FeMessage>, ConnectionError> {
        let Self {
            stream, read_buf, ..
        } = self;
        read_message(stream, read_buf, FeMessage::parse).await
    }
}
impl<S: AsyncWrite + Unpin> Framed<S> {
    /// Write next message to the output buffer; doesn't flush.
    pub fn write_message(&mut self, msg: &BeMessage<'_>) -> Result<(), ProtocolError> {
        BeMessage::write(&mut self.write_buf, msg)
    }

    /// Flush out the buffer. This function is cancellation safe: it can be
    /// interrupted and flushing will be continued in the next call.
    pub async fn flush(&mut self) -> Result<(), io::Error> {
        let Self {
            stream, write_buf, ..
        } = self;
        flush(stream, write_buf).await
    }

    /// Flush out the buffer and shutdown the stream.
    pub async fn shutdown(&mut self) -> Result<(), io::Error> {
        let Self {
            stream, write_buf, ..
        } = self;
        shutdown(stream, write_buf).await
    }
}
impl<S: AsyncRead + AsyncWrite + Unpin> Framed<S> {
    /// Split into owned read and write parts. Beware of potential issues with
    /// using halves in different tasks on TLS stream:
    /// <https://github.com/tokio-rs/tls/issues/40>
    pub fn split(self) -> (FramedReader<S>, FramedWriter<S>) {
        let (rd, wr) = tokio::io::split(self.stream);
        (
            FramedReader {
                stream: rd,
                read_buf: self.read_buf,
            },
            FramedWriter {
                stream: wr,
                write_buf: self.write_buf,
            },
        )
    }

    /// Join read and write parts back, reuniting both the stream halves and
    /// their buffers.
    pub fn unsplit(reader: FramedReader<S>, writer: FramedWriter<S>) -> Self {
        let FramedReader { stream, read_buf } = reader;
        let FramedWriter {
            stream: write_half,
            write_buf,
        } = writer;
        Self {
            stream: stream.unsplit(write_half),
            read_buf,
            write_buf,
        }
    }
}
/// Read-only version of `Framed`.
pub struct FramedReader<S> {
    // Read half produced by `tokio::io::split`.
    stream: ReadHalf<S>,
    // Buffer of incoming bytes not yet parsed into a message.
    read_buf: BytesMut,
}
impl<S: AsyncRead + Unpin> FramedReader<S> {
    /// Read the next frontend message; `Ok(None)` means clean EOF.
    pub async fn read_message(&mut self) -> Result<Option<FeMessage>, ConnectionError> {
        let Self { stream, read_buf } = self;
        read_message(stream, read_buf, FeMessage::parse).await
    }
}
/// Write-only version of `Framed`.
pub struct FramedWriter<S> {
    // Write half produced by `tokio::io::split`.
    stream: WriteHalf<S>,
    // Buffer of serialized messages not yet flushed to the stream.
    write_buf: BytesMut,
}
impl<S: AsyncWrite + Unpin> FramedWriter<S> {
    /// Write next message to the output buffer; doesn't flush.
    pub fn write_message_noflush(&mut self, msg: &BeMessage<'_>) -> Result<(), ProtocolError> {
        BeMessage::write(&mut self.write_buf, msg)
    }

    /// Flush out the buffer. This function is cancellation safe: it can be
    /// interrupted and flushing will be continued in the next call.
    pub async fn flush(&mut self) -> Result<(), io::Error> {
        let Self { stream, write_buf } = self;
        flush(stream, write_buf).await
    }

    /// Flush out the buffer and shutdown the stream.
    pub async fn shutdown(&mut self) -> Result<(), io::Error> {
        let Self { stream, write_buf } = self;
        shutdown(stream, write_buf).await
    }
}
/// Read next message from the stream. Returns Ok(None), if EOF happened and we
/// don't have remaining data in the buffer. This function is cancellation safe:
/// you can drop future which is not yet complete and finalize reading message
/// with the next call.
///
/// Parametrized to allow reading startup or usual message, having different
/// format.
async fn read_message<S: AsyncRead + Unpin, M, P>(
    stream: &mut S,
    read_buf: &mut BytesMut,
    parse: P,
) -> Result<Option<M>, ConnectionError>
where
    P: Fn(&mut BytesMut) -> Result<Option<M>, ProtocolError>,
{
    loop {
        match parse(read_buf)? {
            Some(msg) => break Ok(Some(msg)),
            None => {
                // Not enough buffered data for a full frame; pull more bytes
                // and retry. Reserve at least one byte so a zero-length read
                // can only mean true EOF, never a full buffer.
                read_buf.reserve(1);
                if stream.read_buf(read_buf).await? == 0 {
                    break if read_buf.has_remaining() {
                        // Connection died mid-message.
                        Err(io::Error::new(
                            ErrorKind::UnexpectedEof,
                            "EOF with unprocessed data in the buffer",
                        )
                        .into())
                    } else {
                        Ok(None) // clean EOF
                    };
                }
            }
        }
    }
}
/// Drain `write_buf` into `stream`, then flush the stream itself.
///
/// Cancellation safe as long as the AsyncWrite is cancellation safe: `write_buf`
/// tracks what has already been written, so a dropped future resumes cleanly.
async fn flush<S: AsyncWrite + Unpin>(
    stream: &mut S,
    write_buf: &mut BytesMut,
) -> Result<(), io::Error> {
    loop {
        if !write_buf.has_remaining() {
            break;
        }
        // A zero-byte write would loop forever; surface it as an error.
        if stream.write_buf(write_buf).await? == 0 {
            return Err(io::Error::new(
                ErrorKind::WriteZero,
                "failed to write message",
            ));
        }
    }
    stream.flush().await
}
/// Flush any buffered output, then shut down the write side of the stream.
///
/// Cancellation safe as long as the AsyncWrite is cancellation safe.
async fn shutdown<S: AsyncWrite + Unpin>(
    stream: &mut S,
    write_buf: &mut BytesMut,
) -> Result<(), io::Error> {
    // Flush first so no buffered messages are lost on shutdown.
    flush(stream, write_buf).await?;
    stream.shutdown().await
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/http-utils/src/failpoints.rs | libs/http-utils/src/failpoints.rs | use hyper::{Body, Request, Response, StatusCode};
use serde::{Deserialize, Serialize};
use tokio_util::sync::CancellationToken;
use utils::failpoint_support::apply_failpoint;
use crate::error::ApiError;
use crate::json::{json_request, json_response};
// Request body for the failpoints endpoint: a list of failpoint configs
// applied in order.
pub type ConfigureFailpointsRequest = Vec<FailpointConfig>;

/// Information for configuring a single fail point
#[derive(Debug, Serialize, Deserialize)]
pub struct FailpointConfig {
    /// Name of the fail point
    pub name: String,
    /// List of actions to take, using the format described in `fail::cfg`
    ///
    /// We also support `actions = "exit"` to cause the fail point to immediately exit.
    pub actions: String,
}
/// Configure failpoints through http.
pub async fn failpoints_handler(
mut request: Request<Body>,
_cancel: CancellationToken,
) -> Result<Response<Body>, ApiError> {
if !fail::has_failpoints() {
return Err(ApiError::BadRequest(anyhow::anyhow!(
"Cannot manage failpoints because neon was compiled without failpoints support"
)));
}
let failpoints: ConfigureFailpointsRequest = json_request(&mut request).await?;
for fp in failpoints {
tracing::info!("cfg failpoint: {} {}", fp.name, fp.actions);
// We recognize one extra "action" that's not natively recognized
// by the failpoints crate: exit, to immediately kill the process
let cfg_result = apply_failpoint(&fp.name, &fp.actions);
if let Err(err_msg) = cfg_result {
return Err(ApiError::BadRequest(anyhow::anyhow!(
"Failed to configure failpoints: {err_msg}"
)));
}
}
json_response(StatusCode::OK, ())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/libs/http-utils/src/lib.rs | libs/http-utils/src/lib.rs | pub mod endpoint;
pub mod error;
pub mod failpoints;
pub mod json;
pub mod request;
pub mod server;
pub mod tls_certs;
extern crate hyper0 as hyper;
/// Current fast way to apply simple http routing in various Neon binaries.
/// Re-exported for sake of uniform approach, that could be later replaced with better alternatives, if needed.
pub use routerify::{RequestServiceBuilder, RouterBuilder, RouterService, ext::RequestExt};
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.