repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
atuinsh/atuin | https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-dotfiles/src/shell/fish.rs | crates/atuin-dotfiles/src/shell/fish.rs | // Configuration for fish
use std::path::PathBuf;
use crate::store::{AliasStore, var::VarStore};
/// Read the cached fish alias definitions from `path`.
///
/// On read failure, fall back to regenerating the aliases from `store`; if
/// that also fails, return an `echo` command so the error surfaces in the
/// user's shell instead of breaking startup.
async fn cached_aliases(path: PathBuf, store: &AliasStore) -> String {
    match tokio::fs::read_to_string(path).await {
        Ok(aliases) => aliases,
        Err(r) => {
            // we failed to read the file for some reason, but the file does exist
            // fallback to generating new aliases on the fly
            store.posix().await.unwrap_or_else(|e| {
                format!("echo 'Atuin: failed to read and generate aliases: \n{r}\n{e}'",)
            })
        }
    }
}
/// Read the cached fish variable definitions from `path`.
///
/// On read failure, fall back to regenerating the vars from `store`; if that
/// also fails, return an `echo` command that reports the errors at shell
/// startup without breaking the shell.
async fn cached_vars(path: PathBuf, store: &VarStore) -> String {
    match tokio::fs::read_to_string(path).await {
        Ok(vars) => vars,
        Err(r) => {
            // we failed to read the file for some reason, but the file does exist
            // fallback to generating new vars on the fly
            store.posix().await.unwrap_or_else(|e| {
                format!("echo 'Atuin: failed to read and generate vars: \n{r}\n{e}'",)
            })
        }
    }
}
/// Return fish dotfile config
///
/// Do not return an error. We should not prevent the shell from starting.
///
/// In the worst case, Atuin should not function but the shell should start correctly.
///
/// While currently this only returns aliases, it will be extended to also return other synced dotfiles
pub async fn alias_config(store: &AliasStore) -> String {
    // First try to read the cached config
    let aliases = atuin_common::utils::dotfiles_cache_dir().join("aliases.fish");

    if aliases.exists() {
        return cached_aliases(aliases, store).await;
    }

    // No cache yet: build it, then read it back.
    if let Err(e) = store.build().await {
        // Report via echo rather than returning an error; shell startup must not break.
        return format!("echo 'Atuin: failed to generate aliases: {e}'");
    }

    cached_aliases(aliases, store).await
}
/// Return the fish config for synced shell variables.
///
/// Mirrors [`alias_config`]: read the cache if present, otherwise build it
/// first. Never returns an error — failures become `echo` commands.
pub async fn var_config(store: &VarStore) -> String {
    // First try to read the cached config
    let vars = atuin_common::utils::dotfiles_cache_dir().join("vars.fish");

    if vars.exists() {
        return cached_vars(vars, store).await;
    }

    if let Err(e) = store.build().await {
        return format!("echo 'Atuin: failed to generate vars: {e}'");
    }

    cached_vars(vars, store).await
}
| rust | MIT | 8a010fed33ce19a9ddc589196c73c07ba7ba88e7 | 2026-01-04T15:36:14.139439Z | false |
atuinsh/atuin | https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-dotfiles/src/shell/powershell.rs | crates/atuin-dotfiles/src/shell/powershell.rs | use crate::shell::{Alias, Var};
use crate::store::{AliasStore, var::VarStore};
use std::path::PathBuf;
/// Read the cached PowerShell alias definitions from `path`.
///
/// On read failure, fall back to regenerating from `store`; if that also
/// fails, return an `echo` command reporting both errors.
async fn cached_aliases(path: PathBuf, store: &AliasStore) -> String {
    match tokio::fs::read_to_string(path).await {
        Ok(aliases) => aliases,
        Err(r) => {
            // we failed to read the file for some reason, but the file does exist
            // fallback to generating new aliases on the fly
            store.powershell().await.unwrap_or_else(|e| {
                format!("echo 'Atuin: failed to read and generate aliases: \n{r}\n{e}'",)
            })
        }
    }
}
/// Read the cached PowerShell variable definitions from `path`.
///
/// On read failure, fall back to regenerating from `store`; if that also
/// fails, return an `echo` command reporting both errors.
async fn cached_vars(path: PathBuf, store: &VarStore) -> String {
    match tokio::fs::read_to_string(path).await {
        Ok(vars) => vars,
        Err(r) => {
            // we failed to read the file for some reason, but the file does exist
            // fallback to generating new vars on the fly
            store.powershell().await.unwrap_or_else(|e| {
                format!("echo 'Atuin: failed to read and generate vars: \n{r}\n{e}'",)
            })
        }
    }
}
/// Return powershell dotfile config
///
/// Do not return an error. We should not prevent the shell from starting.
///
/// In the worst case, Atuin should not function but the shell should start correctly.
///
/// While currently this only returns aliases, it will be extended to also return other synced dotfiles
pub async fn alias_config(store: &AliasStore) -> String {
    // First try to read the cached config
    let aliases = atuin_common::utils::dotfiles_cache_dir().join("aliases.ps1");

    if aliases.exists() {
        return cached_aliases(aliases, store).await;
    }

    // No cache yet: build it, then read it back.
    if let Err(e) = store.build().await {
        // Report via echo rather than returning an error; shell startup must not break.
        return format!("echo 'Atuin: failed to generate aliases: {e}'");
    }

    cached_aliases(aliases, store).await
}
/// Return the PowerShell config for synced shell variables.
///
/// Mirrors [`alias_config`]: read the cache if present, otherwise build it
/// first. Never returns an error — failures become `echo` commands.
pub async fn var_config(store: &VarStore) -> String {
    // First try to read the cached config
    let vars = atuin_common::utils::dotfiles_cache_dir().join("vars.ps1");

    if vars.exists() {
        return cached_vars(vars, store).await;
    }

    if let Err(e) = store.build().await {
        return format!("echo 'Atuin: failed to generate vars: {e}'");
    }

    cached_vars(vars, store).await
}
/// Render one [`Alias`] as a PowerShell function definition, wrapped by
/// [`secure_command`] so a malformed alias cannot abort the profile script.
pub fn format_alias(alias: &Alias) -> String {
    // Set-Alias doesn't support adding implicit arguments, so use a function.
    // See https://github.com/PowerShell/PowerShell/issues/12962
    let mut result = secure_command(&format!(
        "function {} {{\n {}{} @args\n}}",
        alias.name,
        // A value starting with a quote is a quoted command path and must be
        // invoked via the call operator `&`.
        if alias.value.starts_with(['"', '\'']) {
            "& "
        } else {
            ""
        },
        alias.value
    ));
    // This makes the file layout prettier
    result.insert(0, '\n');
    result
}
/// Render one [`Var`] as a PowerShell assignment.
///
/// Exported vars are assigned through `$env:`; others become plain variables.
/// The value is single-quoted with embedded single quotes doubled.
pub fn format_var(var: &Var) -> String {
    secure_command(&format!(
        "${}{} = '{}'",
        if var.export { "env:" } else { "" },
        var.name,
        var.value.replace("'", "''")
    ))
}
/// Wrap `command` in an `Invoke-Expression` call so that a syntax error in
/// the inner command cannot halt the outer script.
fn secure_command(command: &str) -> String {
    // PowerShell escapes a single quote inside a single-quoted string by
    // doubling it.
    let escaped = command.replace("'", "''");
    format!("Invoke-Expression -ErrorAction Continue -Command '{}'\n", escaped)
}
#[cfg(test)]
mod tests {
    use super::*;

    // Expected values are built via secure_command() so these tests pin the
    // alias/var formatting, not the Invoke-Expression wrapping itself.
    #[test]
    fn aliases() {
        assert_eq!(
            format_alias(&Alias {
                name: "gp".to_string(),
                value: "git push".to_string(),
            }),
            "\n".to_string()
                + &secure_command(
                    "function gp {
git push @args
}"
                )
        );
        // Quoted command path: must be prefixed with the call operator `&`.
        assert_eq!(
            format_alias(&Alias {
                name: "spc".to_string(),
                value: "\"path with spaces\" arg".to_string(),
            }),
            "\n".to_string()
                + &secure_command(
                    "function spc {
& \"path with spaces\" arg @args
}"
                )
        );
    }

    #[test]
    fn vars() {
        // Exported var goes through $env:, single quotes doubled.
        assert_eq!(
            format_var(&Var {
                name: "FOO".to_owned(),
                value: "bar 'baz'".to_owned(),
                export: true,
            }),
            secure_command("$env:FOO = 'bar ''baz'''")
        );
        // Non-exported var is a plain assignment.
        assert_eq!(
            format_var(&Var {
                name: "TEST".to_owned(),
                value: "1".to_owned(),
                export: false,
            }),
            secure_command("$TEST = '1'")
        );
    }

    // Pins the exact Invoke-Expression wrapping and quote escaping.
    #[test]
    fn invoke_expression() {
        assert_eq!(
            secure_command("echo 'foo'"),
            "Invoke-Expression -ErrorAction Continue -Command 'echo ''foo'''\n"
        )
    }
}
| rust | MIT | 8a010fed33ce19a9ddc589196c73c07ba7ba88e7 | 2026-01-04T15:36:14.139439Z | false |
atuinsh/atuin | https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-kv/src/lib.rs | crates/atuin-kv/src/lib.rs | pub mod database;
pub mod store;
| rust | MIT | 8a010fed33ce19a9ddc589196c73c07ba7ba88e7 | 2026-01-04T15:36:14.139439Z | false |
atuinsh/atuin | https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-kv/src/store.rs | crates/atuin-kv/src/store.rs | use std::collections::HashSet;
use eyre::{Result, bail};
use atuin_client::record::sqlite_store::SqliteStore;
use atuin_client::record::{encryption::PASETO_V4, store::Store};
use atuin_common::record::{Host, HostId, Record, RecordId, RecordIdx};
use entry::KvEntry;
use record::{KV_TAG, KV_VERSION, KvRecord};
use crate::database::Database;
pub mod entry;
pub mod record;
/// Key-value store backed by the synced record log, with a local sqlite
/// cache for fast reads.
#[derive(Debug, Clone)]
pub struct KvStore {
    // Append-only, encrypted record log that is synced between hosts.
    pub record_store: SqliteStore,
    // Local sqlite cache holding the current value of every key.
    pub kv_db: Database,
    // Id of this host; used when appending records.
    pub host_id: HostId,
    // Key used to encrypt/decrypt records (PASETO v4).
    pub encryption_key: [u8; 32],
}
impl KvStore {
    /// Create a store from its parts.
    pub fn new(
        record_store: SqliteStore,
        kv_db: Database,
        host_id: HostId,
        encryption_key: [u8; 32],
    ) -> Self {
        KvStore {
            record_store,
            kv_db,
            host_id,
            encryption_key,
        }
    }

    /// Set `namespace.key` to `value`.
    ///
    /// Appends a record to the synced log first, then updates the local
    /// cache so reads see the new value immediately.
    pub async fn set(&self, namespace: &str, key: &str, value: &str) -> Result<()> {
        let kv_record = KvRecord::builder()
            .namespace(namespace.to_string())
            .key(key.to_string())
            .value(Some(value.to_string()))
            .build();

        self.push_record(kv_record).await?;

        let kv = KvEntry::builder()
            .namespace(namespace.to_string())
            .key(key.to_string())
            .value(value.to_string())
            .build();

        self.kv_db.save(&kv).await?;

        Ok(())
    }

    /// Read the current value of `namespace.key` from the local cache.
    pub async fn get(&self, namespace: &str, key: &str) -> Result<Option<String>> {
        let kv = self.kv_db.load(namespace, key).await?;
        Ok(kv.map(|kv| kv.value))
    }

    /// Delete the given keys from `namespace`.
    ///
    /// Each deletion is recorded as a record with `value: None` (a
    /// tombstone), then removed from the local cache.
    pub async fn delete(&self, namespace: &str, keys: &[String]) -> Result<()> {
        for key in keys {
            let record = KvRecord::builder()
                .namespace(namespace.to_string())
                .key(key.to_string())
                .value(None)
                .build();

            self.push_record(record).await?;
            self.kv_db.delete(namespace, key).await?;
        }

        Ok(())
    }

    /// List cached entries, optionally restricted to one namespace.
    pub async fn list(&self, namespace: Option<&str>) -> Result<Vec<KvEntry>> {
        let entries = self.kv_db.list(namespace).await?;
        Ok(entries)
    }

    /// Serialize, encrypt and append `record` to the record store.
    ///
    /// Returns the new record's id and its per-host index.
    async fn push_record(&self, record: KvRecord) -> Result<(RecordId, RecordIdx)> {
        let bytes = record.serialize()?;
        // Next index: one past the last KV record this host wrote.
        let idx = self
            .record_store
            .last(self.host_id, KV_TAG)
            .await?
            .map_or(0, |p| p.idx + 1);

        let record = Record::builder()
            .host(Host::new(self.host_id))
            .version(KV_VERSION.to_string())
            .tag(KV_TAG.to_string())
            .idx(idx)
            .data(bytes)
            .build();

        let id = record.id;

        self.record_store
            .push(&record.encrypt::<PASETO_V4>(&self.encryption_key))
            .await?;

        Ok((id, idx))
    }

    /// Rebuild the local sqlite cache from the full record log.
    pub async fn build(&self) -> Result<()> {
        let mut tagged = self.record_store.all_tagged(KV_TAG).await?;
        // Reverse so we see the most recent write for each key first.
        tagged.reverse();
        let cached = self.kv_db.list(None).await?;
        let mut visited = HashSet::new();

        // Iterate through all KV records from newest to oldest;
        // only visit each KV once, inserting or deleting based on the first time we see it
        for record in tagged {
            let decrypted = match record.version.as_str() {
                "v0" | KV_VERSION => record.decrypt::<PASETO_V4>(&self.encryption_key)?,
                version => bail!("unknown version {version:?}"),
            };

            let kv = KvRecord::deserialize(&decrypted.data, &decrypted.version)?;
            let uniq_id = format!("{}.{}", kv.namespace, kv.key);

            if visited.insert(uniq_id) {
                match kv.value {
                    Some(value) => {
                        self.kv_db
                            .save(
                                &KvEntry::builder()
                                    .namespace(kv.namespace.clone())
                                    .key(kv.key.clone())
                                    .value(value)
                                    .build(),
                            )
                            .await?;
                    }
                    None => {
                        // Tombstone: the most recent operation was a delete.
                        self.kv_db
                            .delete(kv.namespace.as_str(), kv.key.as_str())
                            .await?;
                    }
                }
            }
        }

        // Any KVs that were in the cache but not in the tagged list should be deleted;
        // this should never happen in practice since the cache is always built from the tagged list,
        // but just in case because ** S O F T W A R E **
        for kv in cached {
            if !visited.contains(&format!("{}.{}", kv.namespace, kv.key)) {
                self.kv_db
                    .delete(kv.namespace.as_str(), kv.key.as_str())
                    .await?;
            }
        }

        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // In-memory record store + cache db, zeroed encryption key.
    async fn setup() -> Result<KvStore> {
        let record_store = SqliteStore::new("sqlite::memory:", 1.0).await.unwrap();
        let kv_db = Database::new("sqlite::memory:", 1.0).await.unwrap();
        let host_id = atuin_common::record::HostId(atuin_common::utils::uuid_v7());
        let encryption_key = [0; 32];

        Ok(KvStore::new(record_store, kv_db, host_id, encryption_key))
    }

    // End-to-end set/get/list/delete; the record log should grow by one
    // record per mutation (one set + one tombstone delete = 2).
    #[tokio::test]
    async fn test_kv_store() -> Result<()> {
        let store = setup().await?;

        store.set("test", "key", "value").await.unwrap();
        let value = store.get("test", "key").await.unwrap();
        assert_eq!(value, Some("value".to_string()));

        let records = store.record_store.all_tagged(KV_TAG).await?;
        assert_eq!(records.len(), 1);

        let list = store.list(Some("test")).await.unwrap();
        let expected = vec![
            KvEntry::builder()
                .namespace("test".to_string())
                .key("key".to_string())
                .value("value".to_string())
                .build(),
        ];
        assert_eq!(list, expected);

        // Listing without a namespace filter returns the same single entry.
        let ns_list = store.list(None).await.unwrap();
        assert_eq!(ns_list, expected);

        store.delete("test", &["key".to_string()]).await.unwrap();
        let value = store.get("test", "key").await.unwrap();
        assert_eq!(value, None);

        let records = store.record_store.all_tagged(KV_TAG).await?;
        assert_eq!(records.len(), 2);

        Ok(())
    }
}
| rust | MIT | 8a010fed33ce19a9ddc589196c73c07ba7ba88e7 | 2026-01-04T15:36:14.139439Z | false |
atuinsh/atuin | https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-kv/src/database.rs | crates/atuin-kv/src/database.rs | use std::{path::Path, str::FromStr, time::Duration};
use atuin_common::utils;
use sqlx::{
Result, Row,
sqlite::{
SqliteConnectOptions, SqliteJournalMode, SqlitePool, SqlitePoolOptions, SqliteRow,
SqliteSynchronous,
},
};
use tokio::fs;
use tracing::debug;
use crate::store::entry::KvEntry;
/// Local sqlite cache for KV entries (one row per namespace/key pair).
#[derive(Debug, Clone)]
pub struct Database {
    // sqlx connection pool; cheap to clone.
    pub pool: SqlitePool,
}
impl Database {
    /// Open (creating if necessary) the KV sqlite database at `path`.
    ///
    /// `timeout` is the pool's connection-acquire timeout in seconds.
    /// Exits the process if `path` is a broken symlink.
    pub async fn new(path: impl AsRef<Path>, timeout: f64) -> Result<Self> {
        let path = path.as_ref();
        debug!("opening KV sqlite database at {:?}", path);

        if utils::broken_symlink(path) {
            eprintln!(
                "Atuin: KV sqlite db path ({path:?}) is a broken symlink. Unable to read or create replacement."
            );
            std::process::exit(1);
        }

        // Create the parent directory when the db file does not exist yet.
        if !path.exists()
            && let Some(dir) = path.parent()
        {
            fs::create_dir_all(dir).await?;
        }

        let opts = SqliteConnectOptions::from_str(path.as_os_str().to_str().unwrap())?
            .journal_mode(SqliteJournalMode::Wal)
            .optimize_on_close(true, None)
            .synchronous(SqliteSynchronous::Normal)
            .with_regexp()
            .foreign_keys(true)
            .create_if_missing(true);

        let pool = SqlitePoolOptions::new()
            .acquire_timeout(Duration::from_secs_f64(timeout))
            .connect_with(opts)
            .await?;

        Self::setup_db(&pool).await?;

        Ok(Self { pool })
    }

    /// Report the sqlite library version (diagnostics).
    pub async fn sqlite_version(&self) -> Result<String> {
        sqlx::query_scalar("SELECT sqlite_version()")
            .fetch_one(&self.pool)
            .await
    }

    /// Run pending schema migrations.
    async fn setup_db(pool: &SqlitePool) -> Result<()> {
        debug!("running sqlite database setup");

        sqlx::migrate!("./migrations").run(pool).await?;

        Ok(())
    }

    /// Upsert `e` within an open transaction.
    async fn save_raw(tx: &mut sqlx::Transaction<'_, sqlx::Sqlite>, e: &KvEntry) -> Result<()> {
        sqlx::query(
            "insert into kv(namespace, key, value)
values(?1, ?2, ?3)
on conflict(namespace, key) do update set
namespace = excluded.namespace,
key = excluded.key,
value = excluded.value",
        )
        .bind(e.namespace.as_str())
        .bind(e.key.as_str())
        .bind(e.value.as_str())
        .execute(&mut **tx)
        .await?;

        Ok(())
    }

    /// Delete one entry within an open transaction.
    async fn delete_raw(
        tx: &mut sqlx::Transaction<'_, sqlx::Sqlite>,
        namespace: &str,
        key: &str,
    ) -> Result<()> {
        sqlx::query("delete from kv where namespace = ?1 and key = ?2")
            .bind(namespace)
            .bind(key)
            .execute(&mut **tx)
            .await?;

        Ok(())
    }

    /// Insert or update a cached entry (transactional).
    pub async fn save(&self, e: &KvEntry) -> Result<()> {
        debug!("saving kv entry to sqlite");

        let mut tx = self.pool.begin().await?;
        Self::save_raw(&mut tx, e).await?;
        tx.commit().await?;

        Ok(())
    }

    /// Remove a cached entry; succeeds even if the entry is absent.
    pub async fn delete(&self, namespace: &str, key: &str) -> Result<()> {
        debug!("deleting kv entry {namespace}/{key}");

        let mut tx = self.pool.begin().await?;
        Self::delete_raw(&mut tx, namespace, key).await?;
        tx.commit().await?;

        Ok(())
    }

    /// Map a sqlite row onto a [`KvEntry`].
    fn query_kv_entry(row: SqliteRow) -> KvEntry {
        let namespace = row.get("namespace");
        let key = row.get("key");
        let value = row.get("value");

        KvEntry::builder()
            .namespace(namespace)
            .key(key)
            .value(value)
            .build()
    }

    /// Load one entry, or `None` if it does not exist.
    pub async fn load(&self, namespace: &str, key: &str) -> Result<Option<KvEntry>> {
        debug!("loading kv entry {namespace}.{key}");

        let res = sqlx::query("select * from kv where namespace = ?1 and key = ?2")
            .bind(namespace)
            .bind(key)
            .map(Self::query_kv_entry)
            .fetch_optional(&self.pool)
            .await?;

        Ok(res)
    }

    /// List entries sorted by key, optionally restricted to one namespace.
    pub async fn list(&self, namespace: Option<&str>) -> Result<Vec<KvEntry>> {
        debug!("listing kv entries");

        let res = if let Some(namespace) = namespace {
            sqlx::query("select * from kv where namespace = ?1 order by key asc")
                .bind(namespace)
                .map(Self::query_kv_entry)
                .fetch_all(&self.pool)
                .await?
        } else {
            sqlx::query("select * from kv order by namespace, key asc")
                .map(Self::query_kv_entry)
                .fetch_all(&self.pool)
                .await?
        };

        Ok(res)
    }
}
#[cfg(test)]
mod test {
    use super::*;

    // Empty db lists nothing; one saved entry round-trips through list().
    #[tokio::test]
    async fn test_list() {
        let db = Database::new("sqlite::memory:", 1.0).await.unwrap();

        let scripts = db.list(None).await.unwrap();
        assert_eq!(scripts.len(), 0);

        let entry = KvEntry::builder()
            .namespace("test".to_string())
            .key("test".to_string())
            .value("test".to_string())
            .build();

        db.save(&entry).await.unwrap();

        let entries = db.list(None).await.unwrap();
        assert_eq!(entries.len(), 1);
        assert_eq!(entries[0].namespace, "test");
        assert_eq!(entries[0].key, "test");
        assert_eq!(entries[0].value, "test");
    }

    // save() then load() returns an identical entry.
    #[tokio::test]
    async fn test_save_load() {
        let db = Database::new("sqlite::memory:", 1.0).await.unwrap();

        let entry = KvEntry::builder()
            .namespace("test".to_string())
            .key("test".to_string())
            .value("test".to_string())
            .build();

        db.save(&entry).await.unwrap();

        let loaded = db
            .load(&entry.namespace, &entry.key)
            .await
            .unwrap()
            .unwrap();

        assert_eq!(loaded, entry);
    }

    // delete() removes the saved entry from subsequent listings.
    #[tokio::test]
    async fn test_delete() {
        let db = Database::new("sqlite::memory:", 1.0).await.unwrap();

        let entry = KvEntry::builder()
            .namespace("test".to_string())
            .key("test".to_string())
            .value("test".to_string())
            .build();

        db.save(&entry).await.unwrap();
        assert_eq!(db.list(None).await.unwrap().len(), 1);

        db.delete(&entry.namespace, &entry.key).await.unwrap();

        let loaded = db.list(None).await.unwrap();
        assert_eq!(loaded.len(), 0);
    }
}
| rust | MIT | 8a010fed33ce19a9ddc589196c73c07ba7ba88e7 | 2026-01-04T15:36:14.139439Z | false |
atuinsh/atuin | https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-kv/src/store/record.rs | crates/atuin-kv/src/store/record.rs | use atuin_common::record::DecryptedData;
use eyre::{Result, bail, ensure, eyre};
use typed_builder::TypedBuilder;
/// Current wire-format version written by `KvRecord::serialize`.
pub const KV_VERSION: &str = "v1";
/// Record-store tag identifying KV records.
pub const KV_TAG: &str = "kv";
/// Maximum value length in bytes (100 KiB).
pub const KV_VAL_MAX_LEN: usize = 100 * 1024;
/// One KV operation in the synced record log.
///
/// `value: None` is a tombstone marking the key as deleted.
#[derive(Debug, Clone, PartialEq, Eq, TypedBuilder)]
pub struct KvRecord {
    pub namespace: String,
    pub key: String,
    pub value: Option<String>,
}
impl KvRecord {
    /// Serialize to the v1 msgpack wire format:
    /// `[namespace, key, has_value, value?]`.
    pub fn serialize(&self) -> Result<DecryptedData> {
        use rmp::encode;

        let mut output = vec![];

        // INFO: ensure this is updated when adding new fields
        encode::write_array_len(&mut output, 4)?;

        encode::write_str(&mut output, &self.namespace)?;
        encode::write_str(&mut output, &self.key)?;
        // Tombstones have no value; encode presence explicitly as a bool.
        encode::write_bool(&mut output, self.value.is_some())?;
        if let Some(value) = &self.value {
            encode::write_str(&mut output, value)?;
        }

        Ok(DecryptedData(output))
    }

    /// Deserialize a v0 (`[namespace, key, value]`) or v1
    /// (`[namespace, key, has_value, value?]`) record.
    pub fn deserialize(data: &DecryptedData, version: &str) -> Result<Self> {
        use rmp::decode;

        // Convert rmp's non-std errors into an eyre Report.
        fn error_report<E: std::fmt::Debug>(err: E) -> eyre::Report {
            eyre!("{err:?}")
        }

        match version {
            "v0" => {
                // v0: value is always present.
                let mut bytes = decode::Bytes::new(&data.0);

                let nfields = decode::read_array_len(&mut bytes).map_err(error_report)?;
                ensure!(nfields == 3, "too many entries in v0 kv record");

                let bytes = bytes.remaining_slice();

                let (namespace, bytes) =
                    decode::read_str_from_slice(bytes).map_err(error_report)?;
                let (key, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?;
                let (value, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?;

                if !bytes.is_empty() {
                    bail!("trailing bytes in encoded kvrecord. malformed")
                }

                Ok(KvRecord {
                    namespace: namespace.to_owned(),
                    key: key.to_owned(),
                    value: Some(value.to_owned()),
                })
            }
            KV_VERSION => {
                // v1: a bool flags whether a value string follows.
                let mut bytes = decode::Bytes::new(&data.0);

                let nfields = decode::read_array_len(&mut bytes).map_err(error_report)?;
                ensure!(nfields == 4, "too many entries in v1 kv record");

                let bytes = bytes.remaining_slice();

                let (namespace, bytes) =
                    decode::read_str_from_slice(bytes).map_err(error_report)?;
                let (key, mut bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?;

                let has_value = decode::read_bool(&mut bytes).map_err(error_report)?;
                let (value, bytes) = if has_value {
                    let (value, bytes) =
                        decode::read_str_from_slice(bytes).map_err(error_report)?;
                    (Some(value.to_owned()), bytes)
                } else {
                    (None, bytes)
                };

                if !bytes.is_empty() {
                    bail!("trailing bytes in encoded kvrecord. malformed")
                }

                Ok(KvRecord {
                    namespace: namespace.to_owned(),
                    key: key.to_owned(),
                    value,
                })
            }
            _ => {
                bail!("unknown version {version:?}")
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::{DecryptedData, KV_VERSION, KvRecord};

    // Round-trip a record with a value and pin the exact msgpack bytes.
    // 0x94 = array(4), 0xa3 = fixstr(3), 0xc3 = true.
    #[test]
    fn encode_decode_some() {
        let kv = KvRecord {
            namespace: "foo".to_owned(),
            key: "bar".to_owned(),
            value: Some("baz".to_owned()),
        };
        let snapshot = [
            0x94, 0xa3, b'f', b'o', b'o', 0xa3, b'b', b'a', b'r', 0xc3, 0xa3, b'b', b'a', b'z',
        ];

        let encoded = kv.serialize().unwrap();
        let decoded = KvRecord::deserialize(&encoded, KV_VERSION).unwrap();

        assert_eq!(encoded.0, &snapshot);
        assert_eq!(decoded, kv);
    }

    // Tombstone: 0xc2 = false, no value bytes follow.
    #[test]
    fn encode_decode_none() {
        let kv = KvRecord {
            namespace: "foo".to_owned(),
            key: "bar".to_owned(),
            value: None,
        };
        let snapshot = [0x94, 0xa3, b'f', b'o', b'o', 0xa3, b'b', b'a', b'r', 0xc2];

        let encoded = kv.serialize().unwrap();
        let decoded = KvRecord::deserialize(&encoded, KV_VERSION).unwrap();

        assert_eq!(encoded.0, &snapshot);
        assert_eq!(decoded, kv);
    }

    // Legacy v0 layout: 3-element array, value always present.
    #[test]
    fn decode_v0() {
        let kv = KvRecord {
            namespace: "foo".to_owned(),
            key: "bar".to_owned(),
            value: Some("baz".to_owned()),
        };
        let snapshot = vec![
            0x93, 0xa3, b'f', b'o', b'o', 0xa3, b'b', b'a', b'r', 0xa3, b'b', b'a', b'z',
        ];

        let decoded = KvRecord::deserialize(&DecryptedData(snapshot), "v0").unwrap();

        assert_eq!(decoded, kv);
    }
}
| rust | MIT | 8a010fed33ce19a9ddc589196c73c07ba7ba88e7 | 2026-01-04T15:36:14.139439Z | false |
atuinsh/atuin | https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-kv/src/store/entry.rs | crates/atuin-kv/src/store/entry.rs | use typed_builder::TypedBuilder;
/// A single key-value pair as held in the local sqlite cache.
#[derive(Debug, Clone, PartialEq, Eq, TypedBuilder)]
pub struct KvEntry {
    // Namespace the key lives in.
    pub namespace: String,
    // Key name, unique within its namespace.
    pub key: String,
    // Current value of the key.
    pub value: String,
}
| rust | MIT | 8a010fed33ce19a9ddc589196c73c07ba7ba88e7 | 2026-01-04T15:36:14.139439Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-timeout/src/lib.rs | pingora-timeout/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![warn(clippy::all)]
//! A drop-in replacement of [tokio::time::timeout] which is much more efficient.
//!
//! Similar to [tokio::time::timeout] but more efficient on busy concurrent IOs where timeouts are
//! created and canceled very frequently.
//!
//! This crate provides the following optimizations
//! - The timeouts lazily initializes their timer when the Future is pending for the first time.
//! - There is no global lock for creating and cancelling timeouts.
//! - Timeout timers are rounded to the next 10ms tick and timers are shared across all timeouts with the same deadline.
//!
//! Benchmark:
//!
//! 438.302µs total, 4ns avg per iteration
//!
//! v.s. Tokio timeout():
//!
//! 10.716192ms total, 107ns avg per iteration
//!
pub mod fast_timeout;
pub mod timer;
pub use fast_timeout::fast_sleep as sleep;
pub use fast_timeout::fast_timeout as timeout;
use pin_project_lite::pin_project;
use std::future::Future;
use std::pin::Pin;
use std::task::{self, Poll};
use tokio::time::{sleep as tokio_sleep, Duration};
/// The interface to start a timeout
///
/// Users don't need to interact with this trait
pub trait ToTimeout {
    /// Return a boxed future that resolves when the timeout fires.
    fn timeout(&self) -> Pin<Box<dyn Future<Output = ()> + Send + Sync>>;
    /// Build a timeout value for duration `d` (no timer is started yet).
    fn create(d: Duration) -> Self;
}
/// The timeout generated by [tokio_timeout()].
///
/// Users don't need to interact with this object.
pub struct TokioTimeout(Duration);

impl ToTimeout for TokioTimeout {
    fn timeout(&self) -> Pin<Box<dyn Future<Output = ()> + Send + Sync>> {
        // The tokio timer is created here, lazily, not at construction time.
        Box::pin(tokio_sleep(self.0))
    }

    fn create(d: Duration) -> Self {
        TokioTimeout(d)
    }
}
/// The error type returned when the timeout is reached.
///
/// Returned in the `Err` variant of [`Timeout`]'s output.
#[derive(Debug)]
pub struct Elapsed;

impl std::fmt::Display for Elapsed {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Timeout Elapsed")
    }
}

impl std::error::Error for Elapsed {}
/// The [tokio::time::timeout] with just lazy timer initialization.
///
/// The timer is created the first time the `future` is pending. This avoids unnecessary timer
/// creation and cancellation on busy IOs with a good chance to be already ready (e.g., reading
/// data from TCP where the recv buffer already has a lot of data to read right away).
pub fn tokio_timeout<T>(duration: Duration, future: T) -> Timeout<T, TokioTimeout>
where
    T: Future,
{
    // TokioTimeout only records the duration here; no timer exists yet.
    Timeout::<T, TokioTimeout>::new_with_delay(future, duration)
}
pin_project! {
    /// The timeout future returned by the timeout functions
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct Timeout<T, F> {
        #[pin]
        value: T,
        // Lazily-created timer: stays None until the first poll finds `value` pending.
        #[pin]
        delay: Option<Pin<Box<dyn Future<Output = ()> + Send + Sync>>>,
        callback: F, // callback to create the timer
    }
}
impl<T, F> Timeout<T, F>
where
    F: ToTimeout,
{
    /// Wrap `value` with a timeout of `d`; the timer itself is not created
    /// until the first poll finds `value` pending.
    pub(crate) fn new_with_delay(value: T, d: Duration) -> Timeout<T, F> {
        Timeout {
            value,
            delay: None, // lazily initialized on first pending poll
            callback: F::create(d),
        }
    }
}
impl<T, F> Future for Timeout<T, F>
where
    T: Future,
    F: ToTimeout,
{
    type Output = Result<T::Output, Elapsed>;

    fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
        let mut me = self.project();

        // First, try polling the future
        if let Poll::Ready(v) = me.value.poll(cx) {
            return Poll::Ready(Ok(v));
        }

        // The inner future is pending: create the timer on first use, then
        // poll it. `ToTimeout::timeout()` already returns a pinned boxed
        // future, so store it directly — wrapping it in another `Box::pin`
        // would allocate a second box and add an indirection per poll.
        let delay = me.delay.get_or_insert_with(|| me.callback.timeout());

        match delay.as_mut().poll(cx) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(()) => Poll::Ready(Err(Elapsed {})),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Inner future outlives the deadline: expect Err(Elapsed).
    #[tokio::test]
    async fn test_timeout() {
        let fut = tokio_sleep(Duration::from_secs(1000));
        let to = timeout(Duration::from_secs(1), fut);
        assert!(to.await.is_err())
    }

    // Future ready on first poll: completes without ever creating a timer.
    #[tokio::test]
    async fn test_instantly_return() {
        let fut = async { 1 };
        let to = timeout(Duration::from_secs(1), fut);
        assert_eq!(to.await.unwrap(), 1)
    }

    // Future completes well before the deadline: expect Ok.
    #[tokio::test]
    async fn test_delayed_return() {
        let fut = async {
            tokio_sleep(Duration::from_secs(1)).await;
            1
        };
        let to = timeout(Duration::from_secs(1000), fut);
        assert_eq!(to.await.unwrap(), 1)
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-timeout/src/timer.rs | pingora-timeout/src/timer.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Lightweight timer for systems with high rate of operations with timeout
//! associated with them
//!
//! Users don't need to interact with this module.
//!
//! The idea is to bucket timers into finite time slots so that operations that
//! start and end quickly don't have to create their own timers all the time
//!
//! Benchmark:
//! - create 7.809622ms total, 78ns avg per iteration
//! - drop: 1.348552ms total, 13ns avg per iteration
//!
//! tokio timer:
//! - create 34.317439ms total, 343ns avg per iteration
//! - drop: 10.694154ms total, 106ns avg per iteration
use parking_lot::RwLock;
use std::collections::BTreeMap;
use std::sync::atomic::{AtomicBool, AtomicI64, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use thread_local::ThreadLocal;
use tokio::sync::Notify;
// Timer wheel tick size: deadlines are rounded up to the next 10ms bucket.
const RESOLUTION_MS: u64 = 10;
const RESOLUTION_DURATION: Duration = Duration::from_millis(RESOLUTION_MS);
// Round `raw` UP to the next multiple of `resolution` (values already on a
// tick are unchanged). `raw == 0` maps to 0: the unguarded `raw - 1`
// expression underflows u128 (panics in debug builds) for sub-millisecond
// deadlines that truncate to 0ms.
#[inline]
fn round_to(raw: u128, resolution: u128) -> u128 {
    if raw == 0 {
        return 0;
    }
    raw - 1 + resolution - (raw - 1) % resolution
}
// Timer deadline in ms since the manager's zero point, rounded up to the
// wheel resolution; millisecond resolution at most.
#[derive(PartialEq, PartialOrd, Eq, Ord, Clone, Copy, Debug)]
struct Time(u128);

impl From<u128> for Time {
    fn from(raw_ms: u128) -> Self {
        Time(round_to(raw_ms, RESOLUTION_MS as u128))
    }
}

impl From<Duration> for Time {
    fn from(d: Duration) -> Self {
        Time(round_to(d.as_millis(), RESOLUTION_MS as u128))
    }
}

impl Time {
    /// True if this tick is due at or before `ts` (ms since zero).
    pub fn not_after(&self, ts: u128) -> bool {
        self.0 <= ts
    }
}
/// the stub for waiting for a timer to be expired.
pub struct TimerStub(Arc<Notify>, Arc<AtomicBool>);
impl TimerStub {
/// Wait for the timer to expire.
pub async fn poll(self) {
if self.1.load(Ordering::SeqCst) {
return;
}
self.0.notified().await;
}
}
// A shared timer bucket: one Notify + fired flag per rounded deadline.
struct Timer(Arc<Notify>, Arc<AtomicBool>);

impl Timer {
    pub fn new() -> Self {
        Timer(Arc::new(Notify::new()), Arc::new(AtomicBool::new(false)))
    }

    /// Mark as fired and wake all subscribed [`TimerStub`]s.
    pub fn fire(&self) {
        // Set the flag before notifying so stubs that check the flag first
        // observe the fired state.
        self.1.store(true, Ordering::SeqCst);
        self.0.notify_waiters();
    }

    /// Create a stub sharing this timer's notifier and fired flag.
    pub fn subscribe(&self) -> TimerStub {
        TimerStub(self.0.clone(), self.1.clone())
    }
}
/// The object that holds all the timers registered to it.
pub struct TimerManager {
    // each thread inserts into its local timer tree to avoid lock contention
    timers: ThreadLocal<RwLock<BTreeMap<Time, Timer>>>,
    zero: Instant, // the reference zero point of Timestamp
    // Start a new clock thread if this is -1 or staled. The clock thread should keep updating this
    clock_watchdog: AtomicI64,
    // When true (around fork()), no locks are taken and new timers fire immediately.
    paused: AtomicBool,
}
// Consider the clock thread is dead after it fails to update the thread in DELAYS_SEC
const DELAYS_SEC: i64 = 2; // TODO: make sure this value is larger than RESOLUTION_DURATION
impl Default for TimerManager {
    fn default() -> Self {
        TimerManager {
            timers: ThreadLocal::new(),
            zero: Instant::now(),
            // Start "staled" so the first should_i_start_clock() call reports
            // that a clock thread needs to be started.
            clock_watchdog: AtomicI64::new(-DELAYS_SEC),
            paused: AtomicBool::new(false),
        }
    }
}
impl TimerManager {
/// Create a new [TimerManager]
pub fn new() -> Self {
Self::default()
}
// This thread sleeps for a resolution time and then fires all the timers that are due to fire
pub(crate) fn clock_thread(&self) {
loop {
std::thread::sleep(RESOLUTION_DURATION);
let now = Instant::now() - self.zero;
self.clock_watchdog
.store(now.as_secs() as i64, Ordering::Relaxed);
if self.is_paused_for_fork() {
// just stop acquiring the locks, waiting for fork to happen
continue;
}
let now = now.as_millis();
// iterate through the timer tree for all threads
for thread_timer in self.timers.iter() {
let mut timers = thread_timer.write();
// Fire all timers until now
loop {
let key_to_remove = timers.iter().next().and_then(|(k, _)| {
if k.not_after(now) {
Some(*k)
} else {
None
}
});
if let Some(k) = key_to_remove {
let timer = timers.remove(&k);
// safe to unwrap, the key is from iter().next()
timer.unwrap().fire();
} else {
break;
}
}
// write lock drops here
}
}
}
// False if the clock is already started
// If true, the caller must start the clock thread next
pub(crate) fn should_i_start_clock(&self) -> bool {
let Err(prev) = self.is_clock_running() else {
return false;
};
let now = Instant::now().duration_since(self.zero).as_secs() as i64;
let res =
self.clock_watchdog
.compare_exchange(prev, now, Ordering::SeqCst, Ordering::SeqCst);
res.is_ok()
}
// Ok(()) if clock is running (watch dog is within DELAYS_SEC of now)
// Err(time) if watch do stopped at `time`
pub(crate) fn is_clock_running(&self) -> Result<(), i64> {
let now = Instant::now().duration_since(self.zero).as_secs() as i64;
let prev = self.clock_watchdog.load(Ordering::SeqCst);
if now < prev + DELAYS_SEC {
Ok(())
} else {
Err(prev)
}
}
/// Register a timer.
///
/// When the timer expires, the [TimerStub] will be notified.
///
/// Timers with the same (rounded) deadline on the same thread share one
/// underlying [Timer]; subscribing to an existing one is the fast path
/// and only takes the read lock.
pub fn register_timer(&self, duration: Duration) -> TimerStub {
    if self.is_paused_for_fork() {
        // Return a dummy TimerStub that will trigger right away.
        // This is fine assuming pause_for_fork() is called right before fork().
        // The only possible register_timer() is from another thread which will
        // be entirely lost after fork()
        // TODO: buffer these register calls instead (without a lock)
        let timer = Timer::new();
        timer.fire();
        return timer.subscribe();
    }
    // Deadline relative to self.zero; the Time conversion rounds it to a tick.
    let now: Time = (Instant::now() + duration - self.zero).into();
    {
        // Fast path: another timer with the same deadline already exists.
        let timers = self.timers.get_or(|| RwLock::new(BTreeMap::new())).read();
        if let Some(t) = timers.get(&now) {
            return t.subscribe();
        }
    } // drop read lock
    let timer = Timer::new();
    let mut timers = self.timers.get_or(|| RwLock::new(BTreeMap::new())).write();
    // Usually we check if another thread has insert the same node before we get the write lock,
    // but because only this thread will insert anything to its local timers tree, there
    // is no possible race that can happen. The only other thread is the clock thread who
    // only removes timer from the tree
    let stub = timer.subscribe();
    timers.insert(now, timer);
    stub
}
// Whether pause_for_fork() is currently in effect (no locks may be taken).
fn is_paused_for_fork(&self) -> bool {
    self.paused.load(Ordering::SeqCst)
}
/// Pause the timer for fork()
///
/// Because RwLock across fork() is undefined behavior, this function makes sure that no one
/// holds any locks.
///
/// This function should be called right before fork().
pub fn pause_for_fork(&self) {
    self.paused.store(true, Ordering::SeqCst);
    // wait for everything to get out of their locks
    // 2x the clock resolution gives the clock thread time to observe the
    // pause flag and finish any in-flight lock critical section
    std::thread::sleep(RESOLUTION_DURATION * 2);
}
/// Unpause the timer after fork()
///
/// This function should be called right after fork().
pub fn unpause(&self) {
    // Re-enable lock acquisition for the clock thread and new registrations.
    self.paused.store(false, Ordering::SeqCst)
}
}
#[cfg(test)]
mod tests {
    use super::*;

    // round_to() rounds up to the next multiple of the resolution.
    #[test]
    fn test_round() {
        assert_eq!(round_to(30, 10), 30);
        assert_eq!(round_to(31, 10), 40);
        assert_eq!(round_to(29, 10), 30);
    }

    // Time conversion rounds the deadline; not_after() compares against it.
    #[test]
    fn test_time() {
        let t: Time = 128.into(); // t will round to 130
        assert_eq!(t, Duration::from_millis(130).into());
        assert!(!t.not_after(128));
        assert!(!t.not_after(129));
        assert!(t.not_after(130));
        assert!(t.not_after(131));
    }

    // Two timers with the same deadline share one underlying Timer and
    // therefore fire together.
    #[tokio::test]
    async fn test_timer_manager() {
        let tm_a = Arc::new(TimerManager::new());
        let tm = tm_a.clone();
        std::thread::spawn(move || tm_a.clock_thread());
        let now = Instant::now();
        let t1 = tm.register_timer(Duration::from_secs(1));
        let t2 = tm.register_timer(Duration::from_secs(1));
        t1.poll().await;
        assert_eq!(now.elapsed().as_secs(), 1);
        let now = Instant::now();
        t2.poll().await;
        // t2 fired along t1 so no extra wait time
        assert_eq!(now.elapsed().as_secs(), 0);
    }

    // Only the first caller is told to start the clock thread.
    #[test]
    fn test_timer_manager_start_check() {
        let tm = Arc::new(TimerManager::new());
        assert!(tm.should_i_start_clock());
        assert!(!tm.should_i_start_clock());
        assert!(tm.is_clock_running().is_ok());
    }

    // A stale watchdog lets a new caller claim the clock again.
    #[test]
    fn test_timer_manager_watchdog() {
        let tm = Arc::new(TimerManager::new());
        assert!(tm.should_i_start_clock());
        assert!(!tm.should_i_start_clock());
        // we don't actually start the clock thread, sleep for the watchdog to expire
        std::thread::sleep(Duration::from_secs(DELAYS_SEC as u64 + 1));
        assert!(tm.is_clock_running().is_err());
        assert!(tm.should_i_start_clock());
    }

    // While paused, new registrations fire immediately; pre-existing timers
    // resume firing after unpause().
    #[tokio::test]
    async fn test_timer_manager_pause() {
        let tm_a = Arc::new(TimerManager::new());
        let tm = tm_a.clone();
        std::thread::spawn(move || tm_a.clock_thread());
        let now = Instant::now();
        let t1 = tm.register_timer(Duration::from_secs(2));
        tm.pause_for_fork();
        // no actual fork happen, we just test that pause and unpause work
        // any timer in this critical section is timed out right away
        let t2 = tm.register_timer(Duration::from_secs(2));
        t2.poll().await;
        assert_eq!(now.elapsed().as_secs(), 0);
        std::thread::sleep(Duration::from_secs(1));
        tm.unpause();
        t1.poll().await;
        assert_eq!(now.elapsed().as_secs(), 2);
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-timeout/src/fast_timeout.rs | pingora-timeout/src/fast_timeout.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The fast and more complicated version of pingora-timeout
//!
//! The following optimizations are applied:
//! - The timeouts lazily initialize their timer when the Future is pending for the first time.
//! - There is no global lock for creating and cancelling timeouts.
//! - Timeout timers are rounded to the next 10ms tick and timers are shared across all timeouts with the same deadline.
//!
//! In order for this to work, a standalone thread is created to arm the timers, which has some
//! overhead. As a general rule, the benefits of this don't outweigh the overhead unless
//! there are more than about 100 timeout() calls/sec in the system. Use regular tokio timeout or
//! [super::tokio_timeout] in the low usage case.
use super::timer::*;
use super::*;
use once_cell::sync::Lazy;
use std::sync::Arc;
// Process-wide timer manager shared by all fast timeouts/sleeps.
// Created lazily; the clock thread is started on first access.
static TIMER_MANAGER: Lazy<Arc<TimerManager>> = Lazy::new(|| {
    let tm = Arc::new(TimerManager::new());
    check_clock_thread(&tm);
    tm
});
// Spawn the global clock thread if no live one exists.
// should_i_start_clock() guarantees only one caller wins the spawn,
// and also re-arms it if a previous clock thread died (e.g. across fork).
fn check_clock_thread(tm: &Arc<TimerManager>) {
    if tm.should_i_start_clock() {
        std::thread::Builder::new()
            .name("Timer thread".into())
            .spawn(|| TIMER_MANAGER.clock_thread())
            .unwrap();
    }
}
/// The timeout generated by [fast_timeout()].
///
/// Users don't need to interact with this object.
// Holds only the duration; the actual timer is lazily registered with the
// global TIMER_MANAGER when the Timeout future first needs it.
pub struct FastTimeout(Duration);

impl ToTimeout for FastTimeout {
    fn timeout(&self) -> Pin<Box<dyn Future<Output = ()> + Send + Sync>> {
        Box::pin(TIMER_MANAGER.register_timer(self.0).poll())
    }

    fn create(d: Duration) -> Self {
        FastTimeout(d)
    }
}
/// Similar to [tokio::time::timeout] but more efficient.
pub fn fast_timeout<T>(duration: Duration, future: T) -> Timeout<T, FastTimeout>
where
    T: Future,
{
    // Make sure a clock thread is alive (it may have died across fork()).
    check_clock_thread(&TIMER_MANAGER);
    Timeout::new_with_delay(future, duration)
}
/// Similar to [tokio::time::sleep] but more efficient.
pub async fn fast_sleep(duration: Duration) {
    // Make sure a clock thread is alive before registering the timer.
    check_clock_thread(&TIMER_MANAGER);
    TIMER_MANAGER.register_timer(duration).poll().await
}
/// Pause the timer for fork()
///
/// Because RwLock across fork() is undefined behavior, this function makes sure that no one
/// holds any locks.
///
/// This function should be called right before fork().
// Thin wrapper over the global TimerManager's pause.
pub fn pause_for_fork() {
    TIMER_MANAGER.pause_for_fork();
}

/// Unpause the timer after fork()
///
/// This function should be called right after fork().
// Thin wrapper over the global TimerManager's unpause.
pub fn unpause() {
    TIMER_MANAGER.unpause();
}
#[cfg(test)]
mod tests {
    use super::*;

    // A future that outlives the timeout should error out.
    #[tokio::test]
    async fn test_timeout() {
        let fut = tokio_sleep(Duration::from_secs(1000));
        let to = fast_timeout(Duration::from_secs(1), fut);
        assert!(to.await.is_err())
    }

    // An already-ready future resolves without arming a timer.
    #[tokio::test]
    async fn test_instantly_return() {
        let fut = async { 1 };
        let to = fast_timeout(Duration::from_secs(1), fut);
        assert_eq!(to.await.unwrap(), 1)
    }

    // A future that completes before the timeout returns its value.
    #[tokio::test]
    async fn test_delayed_return() {
        let fut = async {
            tokio_sleep(Duration::from_secs(1)).await;
            1
        };
        let to = fast_timeout(Duration::from_secs(1000), fut);
        assert_eq!(to.await.unwrap(), 1)
    }

    // fast_sleep() works inside a fast_timeout()ed future.
    #[tokio::test]
    async fn test_sleep() {
        let fut = async {
            fast_sleep(Duration::from_secs(1)).await;
            1
        };
        let to = fast_timeout(Duration::from_secs(1000), fut);
        assert_eq!(to.await.unwrap(), 1)
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-timeout/benches/benchmark.rs | pingora-timeout/benches/benchmark.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use pingora_timeout::*;
use std::time::{Duration, Instant};
use tokio::time::sleep;
use tokio::time::timeout as tokio_timeout;
const LOOP_SIZE: u32 = 100000;

// Measure pingora's default timeout() over LOOP_SIZE instantly-ready futures.
async fn bench_timeout() -> u32 {
    let mut total = 0;
    for _ in 0..LOOP_SIZE {
        total += timeout(Duration::from_secs(1), async { 1 }).await.unwrap();
    }
    total
}
// Measure tokio's timeout() over LOOP_SIZE instantly-ready futures.
async fn bench_tokio_timeout() -> u32 {
    let mut total = 0;
    for _ in 0..LOOP_SIZE {
        total += tokio_timeout(Duration::from_secs(1), async { 1 })
            .await
            .unwrap();
    }
    total
}
// Measure pingora's fast_timeout() over LOOP_SIZE instantly-ready futures.
async fn bench_fast_timeout() -> u32 {
    let mut total = 0;
    for _ in 0..LOOP_SIZE {
        total += fast_timeout::fast_timeout(Duration::from_secs(1), async { 1 })
            .await
            .unwrap();
    }
    total
}
// Measure the create and drop cost of raw tokio sleep timers.
fn bench_tokio_timer() {
    let mut timers = Vec::with_capacity(LOOP_SIZE as usize);
    let start = Instant::now();
    for _ in 0..LOOP_SIZE {
        timers.push(sleep(Duration::from_secs(1)));
    }
    let create_time = start.elapsed();
    println!(
        "tokio timer create {:?} total, {:?} avg per iteration",
        create_time,
        create_time / LOOP_SIZE
    );
    let start = Instant::now();
    drop(timers);
    let drop_time = start.elapsed();
    println!(
        "tokio timer drop {:?} total, {:?} avg per iteration",
        drop_time,
        drop_time / LOOP_SIZE
    );
}
// Run bench_tokio_timer on `threads` concurrent tokio tasks.
async fn bench_multi_thread_tokio_timer(threads: usize) {
    let handlers: Vec<_> = (0..threads)
        .map(|_| {
            tokio::spawn(async {
                bench_tokio_timer();
            })
        })
        .collect();
    for thread in handlers {
        thread.await.unwrap();
    }
}
use std::sync::Arc;
// Run bench_timer on `threads` concurrent tokio tasks sharing one TimerManager.
async fn bench_multi_thread_timer(threads: usize, tm: Arc<TimerManager>) {
    let handlers: Vec<_> = (0..threads)
        .map(|_| {
            let tm_ref = tm.clone();
            tokio::spawn(async move {
                bench_timer(&tm_ref);
            })
        })
        .collect();
    for thread in handlers {
        thread.await.unwrap();
    }
}
use pingora_timeout::timer::TimerManager;
// Measure the create and drop cost of pingora timers on one TimerManager.
fn bench_timer(tm: &TimerManager) {
    let mut timers = Vec::with_capacity(LOOP_SIZE as usize);
    let start = Instant::now();
    for _ in 0..LOOP_SIZE {
        timers.push(tm.register_timer(Duration::from_secs(1)));
    }
    let create_time = start.elapsed();
    println!(
        "pingora timer create {:?} total, {:?} avg per iteration",
        create_time,
        create_time / LOOP_SIZE
    );
    let start = Instant::now();
    drop(timers);
    let drop_time = start.elapsed();
    println!(
        "pingora timer drop {:?} total, {:?} avg per iteration",
        drop_time,
        drop_time / LOOP_SIZE
    );
}
// Benchmark driver: times each timeout flavor, then raw timer create/drop,
// single-threaded and on 4 concurrent tasks.
#[tokio::main(worker_threads = 4)]
async fn main() {
    let before = Instant::now();
    bench_timeout().await;
    let elapsed = before.elapsed();
    println!(
        "pingora timeout {:?} total, {:?} avg per iteration",
        elapsed,
        elapsed / LOOP_SIZE
    );

    let before = Instant::now();
    bench_fast_timeout().await;
    let elapsed = before.elapsed();
    println!(
        "pingora fast timeout {:?} total, {:?} avg per iteration",
        elapsed,
        elapsed / LOOP_SIZE
    );

    let before = Instant::now();
    bench_tokio_timeout().await;
    let elapsed = before.elapsed();
    println!(
        "tokio timeout {:?} total, {:?} avg per iteration",
        elapsed,
        elapsed / LOOP_SIZE
    );

    println!("===========================");
    // Raw timer benchmarks, single-threaded first, then 4-way concurrent.
    let tm = pingora_timeout::timer::TimerManager::new();
    bench_timer(&tm);
    bench_tokio_timer();
    println!("===========================");
    let tm = Arc::new(tm);
    bench_multi_thread_timer(4, tm).await;
    bench_multi_thread_tokio_timer(4).await;
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-boringssl/src/lib.rs | pingora-boringssl/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The BoringSSL API compatibility layer.
//!
//! This crate aims at making [boring] APIs exchangeable with [openssl-rs](https://docs.rs/openssl/latest/openssl/).
//! In other words, this crate and [`pingora-openssl`](https://docs.rs/pingora-openssl) expose identical rust APIs.
#![warn(clippy::all)]
use boring as ssl_lib;
pub use boring_sys as ssl_sys;
pub mod boring_tokio;
pub use boring_tokio as tokio_ssl;
pub mod ext;
// export commonly used libs
pub use ssl_lib::error;
pub use ssl_lib::hash;
pub use ssl_lib::nid;
pub use ssl_lib::pkey;
pub use ssl_lib::ssl;
pub use ssl_lib::x509;
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-boringssl/src/ext.rs | pingora-boringssl/src/ext.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! the extended functionalities that are yet exposed via the [`boring`] APIs
use boring::error::ErrorStack;
use boring::pkey::{HasPrivate, PKeyRef};
use boring::ssl::{Ssl, SslAcceptor, SslRef};
use boring::x509::store::X509StoreRef;
use boring::x509::verify::X509VerifyParamRef;
use boring::x509::X509Ref;
use foreign_types_shared::ForeignTypeRef;
use libc::*;
use std::ffi::CString;
// Map BoringSSL's C return convention (1 == success) into a Result,
// capturing the error stack on failure.
fn cvt(r: c_int) -> Result<c_int, ErrorStack> {
    match r {
        1 => Ok(r),
        _ => Err(ErrorStack::get()),
    }
}
/// Add name as an additional reference identifier that can match the peer's certificate
///
/// See [X509_VERIFY_PARAM_set1_host](https://www.openssl.org/docs/man3.1/man3/X509_VERIFY_PARAM_set1_host.html).
pub fn add_host(verify_param: &mut X509VerifyParamRef, host: &str) -> Result<(), ErrorStack> {
    // An empty name is a no-op rather than an FFI error.
    if host.is_empty() {
        return Ok(());
    }
    // SAFETY: verify_param is a live X509_VERIFY_PARAM and the host
    // pointer/length pair refers to a valid &str for the call's duration.
    unsafe {
        cvt(boring_sys::X509_VERIFY_PARAM_add1_host(
            verify_param.as_ptr(),
            host.as_ptr() as *const _,
            host.len(),
        ))
        .map(|_| ())
    }
}
/// Set the verify cert store of `ssl`
///
/// See [SSL_set1_verify_cert_store](https://www.openssl.org/docs/man1.1.1/man3/SSL_set1_verify_cert_store.html).
pub fn ssl_set_verify_cert_store(
    ssl: &mut SslRef,
    cert_store: &X509StoreRef,
) -> Result<(), ErrorStack> {
    // SAFETY: both pointers come from live wrapper refs; the "set1" variant
    // takes its own reference on the store, so no ownership is transferred.
    unsafe {
        cvt(boring_sys::SSL_set1_verify_cert_store(
            ssl.as_ptr(),
            cert_store.as_ptr(),
        ))?;
    }
    Ok(())
}
/// Load the certificate into `ssl`
///
/// See [SSL_use_certificate](https://www.openssl.org/docs/man1.1.1/man3/SSL_use_certificate.html).
pub fn ssl_use_certificate(ssl: &mut SslRef, cert: &X509Ref) -> Result<(), ErrorStack> {
    // SAFETY: both pointers come from live wrapper refs.
    unsafe {
        cvt(boring_sys::SSL_use_certificate(ssl.as_ptr(), cert.as_ptr()))?;
    }
    Ok(())
}

/// Load the private key into `ssl`
///
/// See [SSL_use_certificate](https://www.openssl.org/docs/man1.1.1/man3/SSL_use_PrivateKey.html).
pub fn ssl_use_private_key<T>(ssl: &mut SslRef, key: &PKeyRef<T>) -> Result<(), ErrorStack>
where
    T: HasPrivate,
{
    // SAFETY: both pointers come from live wrapper refs; the HasPrivate
    // bound guarantees the key actually carries private material.
    unsafe {
        cvt(boring_sys::SSL_use_PrivateKey(ssl.as_ptr(), key.as_ptr()))?;
    }
    Ok(())
}

/// Add the certificate into the cert chain of `ssl`
///
/// See [SSL_add1_chain_cert](https://www.openssl.org/docs/man1.1.1/man3/SSL_add1_chain_cert.html)
pub fn ssl_add_chain_cert(ssl: &mut SslRef, cert: &X509Ref) -> Result<(), ErrorStack> {
    // SAFETY: both pointers come from live wrapper refs; the "add1" variant
    // takes its own reference on the certificate.
    unsafe {
        cvt(boring_sys::SSL_add1_chain_cert(ssl.as_ptr(), cert.as_ptr()))?;
    }
    Ok(())
}
/// Set renegotiation
///
/// This function is specific to BoringSSL
/// See <https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#SSL_set_renegotiate_mode>
pub fn ssl_set_renegotiate_mode_freely(ssl: &mut SslRef) {
    // SAFETY: ssl is a live SSL object; the call only mutates its mode flag.
    unsafe {
        boring_sys::SSL_set_renegotiate_mode(
            ssl.as_ptr(),
            boring_sys::ssl_renegotiate_mode_t::ssl_renegotiate_freely,
        );
    }
}
/// Set the curves/groups of `ssl`
///
/// See [set_groups_list](https://www.openssl.org/docs/manmaster/man3/SSL_CTX_set1_curves.html).
pub fn ssl_set_groups_list(ssl: &mut SslRef, groups: &str) -> Result<(), ErrorStack> {
    // NOTE(review): this unwrap panics if `groups` contains an interior NUL
    // byte — presumably callers only pass colon-separated curve names; confirm.
    let groups = CString::new(groups).unwrap();
    unsafe {
        // somehow SSL_set1_groups_list doesn't exist but SSL_set1_curves_list means the same anyways
        cvt(boring_sys::SSL_set1_curves_list(
            ssl.as_ptr(),
            groups.as_ptr(),
        ))?;
    }
    Ok(())
}
/// Set's whether a second keyshare to be sent in client hello when PQ is used.
///
/// Default is true. When `true`, the first PQ (if any) and none-PQ keyshares are sent.
/// When `false`, only the first configured keyshares are sent.
#[cfg(feature = "pq_use_second_keyshare")]
pub fn ssl_use_second_key_share(ssl: &mut SslRef, enabled: bool) {
    // SAFETY: ssl is a live SSL object; only sets a boolean option on it.
    unsafe { boring_sys::SSL_use_second_keyshare(ssl.as_ptr(), enabled as _) }
}

// No-op stub when the patched BoringSSL feature is not enabled, so callers
// can use the same API unconditionally.
#[cfg(not(feature = "pq_use_second_keyshare"))]
pub fn ssl_use_second_key_share(_ssl: &mut SslRef, _enabled: bool) {}
/// Clear the error stack
///
/// SSL calls should check and clear the BoringSSL error stack. But some calls fail to do so.
/// This causes the next unrelated SSL call to fail due to the leftover errors. This function allows
/// the caller to clear the error stack before performing SSL calls to avoid this issue.
pub fn clear_error_stack() {
    // ErrorStack::get() drains the thread's error queue; we discard the result.
    let _ = ErrorStack::get();
}

/// Create a new [Ssl] from &[SslAcceptor]
///
/// This function is needed because [Ssl::new()] doesn't take `&SslContextRef` like openssl-rs
pub fn ssl_from_acceptor(acceptor: &SslAcceptor) -> Result<Ssl, ErrorStack> {
    Ssl::new_from_ref(acceptor.context())
}
/// Suspend the TLS handshake when a certificate is needed.
///
/// This function will cause tls handshake to pause and return the error: SSL_ERROR_WANT_X509_LOOKUP.
/// The caller should set the certificate and then call [unblock_ssl_cert()] before continue the
/// handshake on the tls connection.
pub fn suspend_when_need_ssl_cert(ssl: &mut SslRef) {
    // SAFETY: ssl is a live SSL object; raw_cert_block has the required
    // extern "C" cert_cb signature and ignores its null argument.
    unsafe {
        boring_sys::SSL_set_cert_cb(ssl.as_ptr(), Some(raw_cert_block), std::ptr::null_mut());
    }
}

/// Unblock a TLS handshake after the certificate is set.
///
/// The user should continue to call tls handshake after this function is called.
pub fn unblock_ssl_cert(ssl: &mut SslRef) {
    // SAFETY: ssl is a live SSL object; passing None removes the callback.
    unsafe {
        boring_sys::SSL_set_cert_cb(ssl.as_ptr(), None, std::ptr::null_mut());
    }
}

// Just block the handshake
// (cert_cb returning a negative value suspends the handshake with
// SSL_ERROR_WANT_X509_LOOKUP)
extern "C" fn raw_cert_block(_ssl: *mut boring_sys::SSL, _arg: *mut c_void) -> c_int {
    -1
}

/// Whether the TLS error is SSL_ERROR_WANT_X509_LOOKUP
pub fn is_suspended_for_cert(error: &boring::ssl::Error) -> bool {
    error.code().as_raw() == boring_sys::SSL_ERROR_WANT_X509_LOOKUP
}
#[allow(clippy::mut_from_ref)]
/// Get a mutable SslRef ouf of SslRef. which is a missing functionality for certain SslStream
/// # Safety
/// the caller needs to make sure that they hold a &mut SslRef
pub unsafe fn ssl_mut(ssl: &SslRef) -> &mut SslRef {
    // SAFETY: per the function's contract, the caller has exclusive access,
    // so reconstructing a &mut from the raw pointer does not alias.
    unsafe { SslRef::from_ptr_mut(ssl.as_ptr()) }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-boringssl/src/boring_tokio.rs | pingora-boringssl/src/boring_tokio.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! This file reimplements tokio-boring with the [overhauled](https://github.com/sfackler/tokio-openssl/commit/56f6618ab619f3e431fa8feec2d20913bf1473aa)
//! tokio-openssl interface while the tokio APIs from official [boring] crate is not yet caught up to it.
use boring::error::ErrorStack;
use boring::ssl::{self, ErrorCode, ShutdownResult, Ssl, SslRef, SslStream as SslStreamCore};
use futures_util::future;
use std::fmt;
use std::io::{self, Read, Write};
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
// Adapts an async stream to the blocking Read/Write interface BoringSSL
// expects. `context` smuggles the current task Context as a usize (0 when
// not inside a poll call).
struct StreamWrapper<S> {
    stream: S,
    // Raw pointer to the active `Context`, stored as usize; only non-zero
    // while `SslStream::with_context` is on the stack.
    context: usize,
}

impl<S> fmt::Debug for StreamWrapper<S>
where
    S: fmt::Debug,
{
    // Delegate Debug to the wrapped stream; `context` is transient state.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.stream, fmt)
    }
}

impl<S> StreamWrapper<S> {
    /// # Safety
    ///
    /// Must be called with `context` set to a valid pointer to a live `Context` object, and the
    /// wrapper must be pinned in memory.
    unsafe fn parts(&mut self) -> (Pin<&mut S>, &mut Context<'_>) {
        debug_assert_ne!(self.context, 0);
        // SAFETY (per the fn contract): the wrapper is pinned, so the stream
        // can be re-pinned, and `context` points to a live Context.
        let stream = Pin::new_unchecked(&mut self.stream);
        let context = &mut *(self.context as *mut _);
        (stream, context)
    }
}
// Blocking Read facade over the async stream: Poll::Pending is translated
// into a WouldBlock error, which the SSL layer surfaces as WANT_READ.
impl<S> Read for StreamWrapper<S>
where
    S: AsyncRead,
{
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // SAFETY: read() is only reached from within with_context(), which
        // sets `context` to a live Context before calling into BoringSSL.
        let (stream, cx) = unsafe { self.parts() };
        let mut buf = ReadBuf::new(buf);
        match stream.poll_read(cx, &mut buf)? {
            Poll::Ready(()) => Ok(buf.filled().len()),
            Poll::Pending => Err(io::Error::from(io::ErrorKind::WouldBlock)),
        }
    }
}

// Blocking Write facade; same Pending -> WouldBlock translation.
impl<S> Write for StreamWrapper<S>
where
    S: AsyncWrite,
{
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // SAFETY: see Read::read above — only called under with_context().
        let (stream, cx) = unsafe { self.parts() };
        match stream.poll_write(cx, buf) {
            Poll::Ready(r) => r,
            Poll::Pending => Err(io::Error::from(io::ErrorKind::WouldBlock)),
        }
    }

    fn flush(&mut self) -> io::Result<()> {
        // SAFETY: see Read::read above — only called under with_context().
        let (stream, cx) = unsafe { self.parts() };
        match stream.poll_flush(cx) {
            Poll::Ready(r) => r,
            Poll::Pending => Err(io::Error::from(io::ErrorKind::WouldBlock)),
        }
    }
}
// Lift a synchronous io::Result back into a Poll: the WouldBlock sentinel
// (produced by StreamWrapper when the underlying poll returned Pending)
// maps to Poll::Pending; anything else is ready.
fn cvt<T>(r: io::Result<T>) -> Poll<io::Result<T>> {
    match r {
        Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => Poll::Pending,
        other => Poll::Ready(other),
    }
}
// Lift an SSL-layer result into a Poll: WANT_READ/WANT_WRITE mean the
// underlying stream returned WouldBlock, i.e. the operation is Pending.
fn cvt_ossl<T>(r: Result<T, ssl::Error>) -> Poll<Result<T, ssl::Error>> {
    match r {
        Ok(v) => Poll::Ready(Ok(v)),
        Err(e) => match e.code() {
            ErrorCode::WANT_READ | ErrorCode::WANT_WRITE => Poll::Pending,
            _ => Poll::Ready(Err(e)),
        },
    }
}
/// An asynchronous version of [`boring::ssl::SslStream`].
// Newtype over the sync SslStream; all async-ness comes from StreamWrapper
// translating between poll results and blocking-style WouldBlock errors.
#[derive(Debug)]
pub struct SslStream<S>(SslStreamCore<StreamWrapper<S>>);
// Handshake entry points. Each poll_* method runs the corresponding sync
// SSL call under with_context() and converts WANT_READ/WANT_WRITE into
// Poll::Pending via cvt_ossl.
impl<S: AsyncRead + AsyncWrite> SslStream<S> {
    /// Like [`SslStream::new`](ssl::SslStream::new).
    pub fn new(ssl: Ssl, stream: S) -> Result<Self, ErrorStack> {
        // context starts at 0 (= not inside a poll call)
        SslStreamCore::new(ssl, StreamWrapper { stream, context: 0 }).map(SslStream)
    }

    /// Like [`SslStream::connect`](ssl::SslStream::connect).
    pub fn poll_connect(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), ssl::Error>> {
        self.with_context(cx, |s| cvt_ossl(s.connect()))
    }

    /// A convenience method wrapping [`poll_connect`](Self::poll_connect).
    pub async fn connect(mut self: Pin<&mut Self>) -> Result<(), ssl::Error> {
        future::poll_fn(|cx| self.as_mut().poll_connect(cx)).await
    }

    /// Like [`SslStream::accept`](ssl::SslStream::accept).
    pub fn poll_accept(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), ssl::Error>> {
        self.with_context(cx, |s| cvt_ossl(s.accept()))
    }

    /// A convenience method wrapping [`poll_accept`](Self::poll_accept).
    pub async fn accept(mut self: Pin<&mut Self>) -> Result<(), ssl::Error> {
        future::poll_fn(|cx| self.as_mut().poll_accept(cx)).await
    }

    /// Like [`SslStream::do_handshake`](ssl::SslStream::do_handshake).
    pub fn poll_do_handshake(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), ssl::Error>> {
        self.with_context(cx, |s| cvt_ossl(s.do_handshake()))
    }

    /// A convenience method wrapping [`poll_do_handshake`](Self::poll_do_handshake).
    pub async fn do_handshake(mut self: Pin<&mut Self>) -> Result<(), ssl::Error> {
        future::poll_fn(|cx| self.as_mut().poll_do_handshake(cx)).await
    }

    // TODO: early data
}
// Accessors plus the core with_context() trick that makes the sync SSL
// stream usable from async poll methods.
impl<S> SslStream<S> {
    /// Returns a shared reference to the `Ssl` object associated with this stream.
    pub fn ssl(&self) -> &SslRef {
        self.0.ssl()
    }

    /// Returns a shared reference to the underlying stream.
    pub fn get_ref(&self) -> &S {
        &self.0.get_ref().stream
    }

    /// Returns a mutable reference to the underlying stream.
    pub fn get_mut(&mut self) -> &mut S {
        &mut self.0.get_mut().stream
    }

    /// Returns a pinned mutable reference to the underlying stream.
    pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut S> {
        // SAFETY: structural pinning — the stream is never moved out of the
        // pinned wrapper, only re-projected.
        unsafe { Pin::new_unchecked(&mut self.get_unchecked_mut().0.get_mut().stream) }
    }

    // Stash the task Context pointer in the wrapper for the duration of `f`
    // so that StreamWrapper's Read/Write impls can poll the inner stream,
    // then always reset it to 0 before returning.
    fn with_context<F, R>(self: Pin<&mut Self>, ctx: &mut Context<'_>, f: F) -> R
    where
        F: FnOnce(&mut SslStreamCore<StreamWrapper<S>>) -> R,
    {
        // SAFETY: we do not move the pinned contents; only field access.
        let this = unsafe { self.get_unchecked_mut() };
        this.0.get_mut().context = ctx as *mut _ as usize;
        let r = f(&mut this.0);
        this.0.get_mut().context = 0;
        r
    }
}
// AsyncRead via SSL_read into the uninitialized tail of the ReadBuf
// (avoids zero-filling the buffer first).
#[cfg(feature = "read_uninit")]
impl<S> AsyncRead for SslStream<S>
where
    S: AsyncRead + AsyncWrite,
{
    fn poll_read(
        self: Pin<&mut Self>,
        ctx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        self.with_context(ctx, |s| {
            // SAFETY: read_uninit does not de-initialize the buffer.
            match cvt(s.read_uninit(unsafe { buf.unfilled_mut() }))? {
                Poll::Ready(nread) => {
                    // SAFETY: exactly `nread` bytes were written by read_uninit.
                    unsafe {
                        buf.assume_init(nread);
                    }
                    buf.advance(nread);
                    Poll::Ready(Ok(()))
                }
                Poll::Pending => Poll::Pending,
            }
        })
    }
}
// Fallback AsyncRead: hand the (possibly uninitialized) tail of the ReadBuf
// to the regular read() as a &mut [u8].
#[cfg(not(feature = "read_uninit"))]
impl<S> AsyncRead for SslStream<S>
where
    S: AsyncRead + AsyncWrite,
{
    fn poll_read(
        self: Pin<&mut Self>,
        ctx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        self.with_context(ctx, |s| {
            // This isn't really "proper", but rust-openssl doesn't currently expose a suitable interface even though
            // OpenSSL itself doesn't require the buffer to be initialized. So this is good enough for now.
            let slice = unsafe {
                let buf = buf.unfilled_mut();
                std::slice::from_raw_parts_mut(buf.as_mut_ptr().cast::<u8>(), buf.len())
            };
            match cvt(s.read(slice))? {
                Poll::Ready(nread) => {
                    // SAFETY: exactly `nread` bytes were written by read().
                    unsafe {
                        buf.assume_init(nread);
                    }
                    buf.advance(nread);
                    Poll::Ready(Ok(()))
                }
                Poll::Pending => Poll::Pending,
            }
        })
    }
}
// AsyncWrite over the SSL stream. Shutdown first sends the TLS close_notify,
// then shuts down the transport underneath.
impl<S> AsyncWrite for SslStream<S>
where
    S: AsyncRead + AsyncWrite,
{
    fn poll_write(self: Pin<&mut Self>, ctx: &mut Context, buf: &[u8]) -> Poll<io::Result<usize>> {
        self.with_context(ctx, |s| cvt(s.write(buf)))
    }

    fn poll_flush(self: Pin<&mut Self>, ctx: &mut Context) -> Poll<io::Result<()>> {
        self.with_context(ctx, |s| cvt(s.flush()))
    }

    fn poll_shutdown(mut self: Pin<&mut Self>, ctx: &mut Context) -> Poll<io::Result<()>> {
        match self.as_mut().with_context(ctx, |s| s.shutdown()) {
            // close_notify sent or already received: proceed to transport shutdown
            Ok(ShutdownResult::Sent) | Ok(ShutdownResult::Received) => {}
            // peer already closed the TLS session
            Err(ref e) if e.code() == ErrorCode::ZERO_RETURN => {}
            Err(ref e) if e.code() == ErrorCode::WANT_READ || e.code() == ErrorCode::WANT_WRITE => {
                return Poll::Pending;
            }
            Err(e) => {
                return Poll::Ready(Err(e.into_io_error().unwrap_or_else(io::Error::other)));
            }
        }
        self.get_pin_mut().poll_shutdown(ctx)
    }
}
// Smoke test: full TLS handshake + HTTP/1.0 round trip against a public
// endpoint. Requires network access.
#[tokio::test]
async fn test_google() {
    use boring::ssl;
    use std::net::ToSocketAddrs;
    use std::pin::Pin;
    use tokio::io::{AsyncReadExt, AsyncWriteExt};
    use tokio::net::TcpStream;

    let addr = "8.8.8.8:443".to_socket_addrs().unwrap().next().unwrap();
    let stream = TcpStream::connect(&addr).await.unwrap();

    let ssl_context = ssl::SslContext::builder(ssl::SslMethod::tls())
        .unwrap()
        .build();
    let ssl = ssl::Ssl::new(&ssl_context).unwrap();
    let mut stream = crate::tokio_ssl::SslStream::new(ssl, stream).unwrap();
    Pin::new(&mut stream).connect().await.unwrap();

    stream.write_all(b"GET / HTTP/1.0\r\n\r\n").await.unwrap();
    let mut buf = vec![];
    stream.read_to_end(&mut buf).await.unwrap();
    let response = String::from_utf8_lossy(&buf);
    let response = response.trim_end();
    // any response code is fine
    assert!(response.starts_with("HTTP/1.0 "));
    assert!(response.ends_with("</html>") || response.ends_with("</HTML>"));
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-header-serde/src/lib.rs | pingora-header-serde/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! HTTP Response header serialization with compression
//!
//! This crate is able to serialize http response header to about 1/3 of its original size (HTTP/1.1 wire format)
//! with trained dictionary.
#![warn(clippy::all)]
#![allow(clippy::new_without_default)]
#![allow(clippy::type_complexity)]
pub mod dict;
mod thread_zstd;
use bytes::BufMut;
use http::Version;
use pingora_error::{Error, ErrorType, ImmutStr, Result};
use pingora_http::ResponseHeader;
use std::cell::RefCell;
use std::ops::DerefMut;
use thread_local::ThreadLocal;
/// HTTP Response header serialization
///
/// This struct provides the APIs to convert HTTP response header into compressed wired format for
/// storage.
pub struct HeaderSerde {
    compression: ZstdCompression,
    // internal buffer for uncompressed data to be compressed and vice versa
    // (thread-local so concurrent callers don't contend on one buffer)
    buf: ThreadLocal<RefCell<Vec<u8>>>,
}

// Initial capacity of each thread-local scratch buffer.
const MAX_HEADER_BUF_SIZE: usize = 128 * 1024; // 128KB
// zstd compression level used for both the dict and no-dict paths.
const COMPRESS_LEVEL: i32 = 3;
impl HeaderSerde {
    /// Create a new [HeaderSerde]
    ///
    /// An optional zstd compression dictionary can be provided to improve the compression ratio
    /// and speed. See [dict] for more details.
    pub fn new(dict: Option<Vec<u8>>) -> Self {
        if let Some(dict) = dict {
            HeaderSerde {
                compression: ZstdCompression::WithDict(thread_zstd::CompressionWithDict::new(
                    &dict,
                    COMPRESS_LEVEL,
                )),
                buf: ThreadLocal::new(),
            }
        } else {
            HeaderSerde {
                compression: ZstdCompression::Default(
                    thread_zstd::Compression::new(),
                    COMPRESS_LEVEL,
                ),
                buf: ThreadLocal::new(),
            }
        }
    }

    /// Serialize the given response header
    ///
    /// Renders the header in HTTP/1.1 wire format into the thread-local
    /// scratch buffer, then zstd-compresses it.
    pub fn serialize(&self, header: &ResponseHeader) -> Result<Vec<u8>> {
        // for now we use HTTP 1.1 wire format for that
        // TODO: should convert to h1 if the incoming header is for h2
        let mut buf = self
            .buf
            .get_or(|| RefCell::new(Vec::with_capacity(MAX_HEADER_BUF_SIZE)))
            .borrow_mut();
        buf.clear(); // reset the buf
        resp_header_to_buf(header, &mut buf);
        self.compression.compress(&buf)
    }

    /// Deserialize the given response header
    ///
    /// Decompresses into the thread-local scratch buffer, then parses the
    /// HTTP/1.1 wire format back into a [ResponseHeader].
    pub fn deserialize(&self, data: &[u8]) -> Result<ResponseHeader> {
        let mut buf = self
            .buf
            .get_or(|| RefCell::new(Vec::with_capacity(MAX_HEADER_BUF_SIZE)))
            .borrow_mut();
        buf.clear(); // reset the buf
        self.compression
            .decompress_to_buffer(data, buf.deref_mut())?;
        buf_to_http_header(&buf)
    }
}
// Wrapper type to unify compressing with and without a dictionary,
// since the two structs have different inputs for their APIs.
enum ZstdCompression {
    // No dictionary; the i32 is the compression level passed per call.
    Default(thread_zstd::Compression, i32),
    // Dictionary-based; level is baked into the CompressionWithDict.
    WithDict(thread_zstd::CompressionWithDict),
}
// Wrap a static zstd error string plus a context message into a pingora Error.
#[inline]
fn into_error<S: Into<ImmutStr>>(e: &'static str, context: S) -> Box<Error> {
    Error::because(ErrorType::InternalError, context, e)
}
impl ZstdCompression {
    // Compress `data`, dispatching to the dict or no-dict implementation.
    fn compress(&self, data: &[u8]) -> Result<Vec<u8>> {
        match &self {
            ZstdCompression::Default(c, level) => c
                .compress(data, *level)
                .map_err(|e| into_error(e, "compress header")),
            ZstdCompression::WithDict(c) => c
                .compress(data)
                .map_err(|e| into_error(e, "compress header")),
        }
    }

    // Decompress `source` into `destination`, returning the decompressed size.
    // On failure the error context includes the frame's declared content size
    // to help diagnose truncated/corrupt input.
    fn decompress_to_buffer(&self, source: &[u8], destination: &mut Vec<u8>) -> Result<usize> {
        match &self {
            ZstdCompression::Default(c, _) => {
                c.decompress_to_buffer(source, destination).map_err(|e| {
                    into_error(
                        e,
                        format!(
                            "decompress header, frame_content_size: {}",
                            get_frame_content_size(source)
                        ),
                    )
                })
            }
            ZstdCompression::WithDict(c) => {
                c.decompress_to_buffer(source, destination).map_err(|e| {
                    into_error(
                        e,
                        format!(
                            "decompress header, frame_content_size: {}",
                            get_frame_content_size(source)
                        ),
                    )
                })
            }
        }
    }
}
// Describe the zstd frame content size of `source` for diagnostics; never fails.
#[inline]
fn get_frame_content_size(source: &[u8]) -> ImmutStr {
    let size = match zstd_safe::get_frame_content_size(source) {
        Err(_e) => return ImmutStr::from("failed"),
        Ok(None) => return ImmutStr::from("none"),
        Ok(Some(size)) => size,
    };
    if size == zstd_safe::CONTENTSIZE_ERROR {
        ImmutStr::from("invalid")
    } else if size == zstd_safe::CONTENTSIZE_UNKNOWN {
        ImmutStr::from("unknown")
    } else {
        ImmutStr::from(size.to_string())
    }
}
const CRLF: &[u8; 2] = b"\r\n";
// Borrowed from pingora http1
// Write `resp` into `buf` in HTTP/1.1 wire format and return the total length.
#[inline]
fn resp_header_to_buf(resp: &ResponseHeader, buf: &mut Vec<u8>) -> usize {
    // Status-Line: "<version> <code> <reason>\r\n"
    let version = if resp.version == Version::HTTP_10 {
        "HTTP/1.0 "
    } else {
        // store everything else (including h2) in http 1.1 format
        "HTTP/1.1 "
    };
    buf.put_slice(version.as_bytes());
    let status = resp.status;
    buf.put_slice(status.as_str().as_bytes());
    buf.put_u8(b' ');
    if let Some(reason_buf) = status.canonical_reason() {
        buf.put_slice(reason_buf.as_bytes());
    }
    buf.put_slice(CRLF);
    // headers in h1 wire format, then the terminating empty line
    resp.header_to_h1_wire(buf);
    buf.put_slice(CRLF);
    buf.len()
}
// Should match pingora http1 setting
const MAX_HEADERS: usize = 256;
// Parse an uncompressed h1 wire-format buffer back into a ResponseHeader.
#[inline]
fn buf_to_http_header(buf: &[u8]) -> Result<ResponseHeader> {
    let mut headers = vec![httparse::EMPTY_HEADER; MAX_HEADERS];
    let mut resp = httparse::Response::new(&mut headers);
    match resp.parse(buf) {
        Ok(s) => match s {
            httparse::Status::Complete(_size) => parsed_to_header(&resp),
            // we always feed the buf that contains the entire header to parse,
            // so a Partial status here means the stored data was corrupted
            _ => Error::e_explain(ErrorType::InternalError, "incomplete uncompressed header"),
        },
        Err(e) => Error::e_because(
            ErrorType::InternalError,
            format!(
                "parsing failed on uncompressed header, len={}, content={:?}",
                buf.len(),
                String::from_utf8_lossy(buf)
            ),
            e,
        ),
    }
}
// Convert a successfully parsed httparse response into a ResponseHeader.
#[inline]
fn parsed_to_header(parsed: &httparse::Response) -> Result<ResponseHeader> {
    // code should always be there
    // TODO: allow reading the parsed http version?
    let status = parsed.code.unwrap();
    let mut resp = ResponseHeader::build(status, Some(parsed.headers.len()))?;
    for h in parsed.headers.iter() {
        resp.append_header(h.name.to_string(), h.value)?;
    }
    Ok(resp)
}
#[cfg(test)]
mod tests {
    use super::*;
    // Compressed output should be smaller than the raw h1 wire format.
    #[test]
    fn test_ser_wo_dict() {
        let serde = HeaderSerde::new(None);
        let mut header = ResponseHeader::build(200, None).unwrap();
        header.append_header("foo", "bar").unwrap();
        header.append_header("foo", "barbar").unwrap();
        header.append_header("foo", "barbarbar").unwrap();
        header.append_header("Server", "Pingora").unwrap();
        let compressed = serde.serialize(&header).unwrap();
        let mut buf = vec![];
        let uncompressed = resp_header_to_buf(&header, &mut buf);
        assert!(compressed.len() < uncompressed);
    }
    // Round trip: deserialize(serialize(h)) preserves status and all headers.
    #[test]
    fn test_ser_de_no_dict() {
        let serde = HeaderSerde::new(None);
        let mut header = ResponseHeader::build(200, None).unwrap();
        header.append_header("foo1", "bar1").unwrap();
        header.append_header("foo2", "barbar2").unwrap();
        header.append_header("foo3", "barbarbar3").unwrap();
        header.append_header("Server", "Pingora").unwrap();
        let compressed = serde.serialize(&header).unwrap();
        let header2 = serde.deserialize(&compressed).unwrap();
        assert_eq!(header.status, header2.status);
        assert_eq!(header.headers, header2.headers);
    }
    // Edge case: a response with no headers at all still round-trips.
    #[test]
    fn test_no_headers() {
        let serde = HeaderSerde::new(None);
        let header = ResponseHeader::build(200, None).unwrap(); // No headers added
        // Serialize and deserialize
        let compressed = serde.serialize(&header).unwrap();
        let header2 = serde.deserialize(&compressed).unwrap();
        assert_eq!(header.status, header2.status);
        assert_eq!(header.headers.len(), 0);
        assert_eq!(header2.headers.len(), 0);
    }
    // Pin the exact wire bytes of a header-less response and check httparse accepts it.
    #[test]
    fn test_empty_header_wire_format() {
        let header = ResponseHeader::build(200, None).unwrap();
        let mut buf = vec![];
        resp_header_to_buf(&header, &mut buf);
        // Should be: "HTTP/1.1 200 OK\r\n\r\n", total 19 bytes
        assert_eq!(buf.len(), 19);
        assert_eq!(buf, b"HTTP/1.1 200 OK\r\n\r\n");
        // Test that httparse can handle this
        let parsed = buf_to_http_header(&buf).unwrap();
        assert_eq!(parsed.status.as_u16(), 200);
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-header-serde/src/thread_zstd.rs | pingora-header-serde/src/thread_zstd.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cell::{RefCell, RefMut};
use thread_local::ThreadLocal;
use zstd_safe::{CCtx, CDict, DCtx, DDict};
/// Each thread will own its compression and decompression CTXes, and they share a single dict
/// https://facebook.github.io/zstd/zstd_manual.html recommends to reuse ctx per thread
// Both `Compression` and `CompressionWithDict` are just wrappers around the inner compression and
// decompression contexts, but have different APIs to access it.
// Dictionary-less variant: the compression level is passed per call.
#[derive(Default)]
pub struct Compression(CompressionInner);
// these codes are inspired by zstd crate
impl Compression {
    /// Create a new dictionary-less [Compression].
    pub fn new() -> Self {
        Self(CompressionInner::new())
    }
    /// Compress `source` into `destination` at the given zstd `level`.
    pub fn compress_to_buffer<C: zstd_safe::WriteBuf + ?Sized>(
        &self,
        source: &[u8],
        destination: &mut C,
        level: i32,
    ) -> Result<usize, &'static str> {
        self.0.compress_to_buffer(source, destination, level)
    }
    /// Compress `data` at the given zstd `level`, returning a new buffer.
    pub fn compress(&self, data: &[u8], level: i32) -> Result<Vec<u8>, &'static str> {
        let mut out = make_compressed_data_buffer(data.len());
        self.compress_to_buffer(data, &mut out, level)?;
        Ok(out)
    }
    /// Decompress `source` into `destination`.
    pub fn decompress_to_buffer<C: zstd_safe::WriteBuf + ?Sized>(
        &self,
        source: &[u8],
        destination: &mut C,
    ) -> Result<usize, &'static str> {
        self.0.decompress_to_buffer(source, destination)
    }
}
// Zstd compression/decompression backed by preloaded shared dictionaries.
pub struct CompressionWithDict {
    inner: CompressionInner,
    // these dictionaries are owned by this struct, hence the static lifetime
    com_dict: CDict<'static>,
    de_dict: DDict<'static>,
}
impl CompressionWithDict {
    /// Build from a raw dictionary and the compression level to bake into it.
    pub fn new(dict: &[u8], compression_level: i32) -> Self {
        // compression dictionary needs to be loaded ahead of time
        // with the compression level
        let com_dict = CDict::create(dict, compression_level);
        let de_dict = DDict::create(dict);
        CompressionWithDict {
            inner: CompressionInner::new(),
            com_dict,
            de_dict,
        }
    }
    /// Compress `source` into `destination` using the preloaded dictionary.
    pub fn compress_to_buffer<C: zstd_safe::WriteBuf + ?Sized>(
        &self,
        source: &[u8],
        destination: &mut C,
    ) -> Result<usize, &'static str> {
        self.inner
            .compress_to_buffer_using_dict(source, destination, &self.com_dict)
    }
    /// Compress `data` using the preloaded dictionary, returning a new buffer.
    pub fn compress(&self, data: &[u8]) -> Result<Vec<u8>, &'static str> {
        let mut out = make_compressed_data_buffer(data.len());
        self.compress_to_buffer(data, &mut out)?;
        Ok(out)
    }
    /// Decompress `source` into `destination` using the preloaded dictionary.
    pub fn decompress_to_buffer<C: zstd_safe::WriteBuf + ?Sized>(
        &self,
        source: &[u8],
        destination: &mut C,
    ) -> Result<usize, &'static str> {
        self.inner
            .decompress_to_buffer_using_dict(source, destination, &self.de_dict)
    }
}
// Per-thread zstd contexts: each thread lazily creates and then reuses its own
// compression/decompression context (ctx reuse per thread is what the zstd
// manual recommends — see the note above `Compression`).
#[derive(Default)]
struct CompressionInner {
    com_context: ThreadLocal<RefCell<zstd_safe::CCtx<'static>>>,
    de_context: ThreadLocal<RefCell<zstd_safe::DCtx<'static>>>,
}
impl CompressionInner {
    fn new() -> Self {
        CompressionInner {
            com_context: ThreadLocal::new(),
            de_context: ThreadLocal::new(),
        }
    }
    // Lazily create and return this thread's compression context.
    #[inline]
    fn get_com_context(&self) -> RefMut<'_, CCtx<'static>> {
        self.com_context
            .get_or(|| RefCell::new(CCtx::create()))
            .borrow_mut()
    }
    // Lazily create and return this thread's decompression context.
    #[inline]
    fn get_de_context(&self) -> RefMut<'_, DCtx<'static>> {
        self.de_context
            .get_or(|| RefCell::new(DCtx::create()))
            .borrow_mut()
    }
    // Compress without a dictionary at the given level.
    fn compress_to_buffer<C: zstd_safe::WriteBuf + ?Sized>(
        &self,
        source: &[u8],
        destination: &mut C,
        level: i32,
    ) -> Result<usize, &'static str> {
        self.get_com_context()
            .compress(destination, source, level)
            .map_err(zstd_safe::get_error_name)
    }
    // Decompress without a dictionary.
    fn decompress_to_buffer<C: zstd_safe::WriteBuf + ?Sized>(
        &self,
        source: &[u8],
        destination: &mut C,
    ) -> Result<usize, &'static str> {
        self.get_de_context()
            .decompress(destination, source)
            .map_err(zstd_safe::get_error_name)
    }
    // Compress using a preloaded compression dictionary.
    fn compress_to_buffer_using_dict<C: zstd_safe::WriteBuf + ?Sized>(
        &self,
        source: &[u8],
        destination: &mut C,
        dict: &CDict,
    ) -> Result<usize, &'static str> {
        self.get_com_context()
            .compress_using_cdict(destination, source, dict)
            .map_err(zstd_safe::get_error_name)
    }
    // Decompress using a preloaded decompression dictionary.
    // NOTE: this was declared `pub`, which had no effect because the struct
    // itself is private; dropped for consistency with the sibling methods.
    fn decompress_to_buffer_using_dict<C: zstd_safe::WriteBuf + ?Sized>(
        &self,
        source: &[u8],
        destination: &mut C,
        dict: &DDict,
    ) -> Result<usize, &'static str> {
        self.get_de_context()
            .decompress_using_ddict(destination, source, dict)
            .map_err(zstd_safe::get_error_name)
    }
}
// Helper to create a buffer for the compressed data, preallocating enough
// for the compressed size (given the size of the uncompressed data).
#[inline]
fn make_compressed_data_buffer(uncompressed_len: usize) -> Vec<u8> {
    Vec::with_capacity(zstd_safe::compress_bound(uncompressed_len))
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-header-serde/src/trainer.rs | pingora-header-serde/src/trainer.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use pingora_header_serde::dict::train;
use std::env;
use std::io::{self, Write};
/// CLI entry point: train a zstd dictionary from the files under the directory
/// given as the first argument and write the raw dictionary bytes to stdout.
pub fn main() {
    let args: Vec<String> = env::args().collect();
    // previously `&args[1]` panicked with an opaque index-out-of-bounds when no
    // argument was supplied; print a usage message instead
    let dir = match args.get(1) {
        Some(dir) => dir,
        None => {
            eprintln!(
                "Usage: {} <sample_dir>",
                args.first().map(String::as_str).unwrap_or("trainer")
            );
            std::process::exit(1);
        }
    };
    let dict = train(dir);
    io::stdout()
        .write_all(&dict)
        .expect("failed to write dictionary to stdout");
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-header-serde/src/dict.rs | pingora-header-serde/src/dict.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Training to generate the zstd dictionary.
use std::fs;
use zstd::dict;
/// Train the zstd dictionary from all the files under the given `dir_path`
///
/// Non-file entries (e.g. subdirectories) are skipped, since the dictionary
/// trainer can only read regular files.
///
/// The output will be the trained dictionary
///
/// # Panics
/// Panics if `dir_path` cannot be read or if training fails.
pub fn train<P: AsRef<std::path::Path>>(dir_path: P) -> Vec<u8> {
    let files = fs::read_dir(dir_path)
        .expect("failed to read sample directory")
        .filter_map(|entry| entry.ok().map(|f| f.path()))
        // resolves the old TODO: read_dir can yield subdirectories, which
        // the trainer cannot read as samples
        .filter(|p| p.is_file());
    dict::from_files(files, 64 * 1024 * 1024).expect("zstd dictionary training failed")
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::resp_header_to_buf;
    use pingora_http::ResponseHeader;
    // Train a small dictionary from the sample headers bundled with the crate.
    fn gen_test_dict() -> Vec<u8> {
        let mut path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
        path.push("samples/test");
        train(path)
    }
    // A representative response header resembling the training samples.
    fn gen_test_header() -> ResponseHeader {
        let mut header = ResponseHeader::build(200, None).unwrap();
        header
            .append_header("Date", "Thu, 23 Dec 2021 11:23:29 GMT")
            .unwrap();
        header
            .append_header("Last-Modified", "Sat, 09 Oct 2021 22:41:34 GMT")
            .unwrap();
        header.append_header("Connection", "keep-alive").unwrap();
        header.append_header("Vary", "Accept-encoding").unwrap();
        header.append_header("Content-Encoding", "gzip").unwrap();
        header
            .append_header("Access-Control-Allow-Origin", "*")
            .unwrap();
        header
    }
    // Dict-based compression should beat both the raw wire format and
    // dict-less compression on headers similar to the training set.
    #[test]
    fn test_ser_with_dict() {
        let dict = gen_test_dict();
        let serde = crate::HeaderSerde::new(Some(dict));
        let serde_no_dict = crate::HeaderSerde::new(None);
        let header = gen_test_header();
        let compressed = serde.serialize(&header).unwrap();
        let compressed_no_dict = serde_no_dict.serialize(&header).unwrap();
        let mut buf = vec![];
        let uncompressed = resp_header_to_buf(&header, &mut buf);
        assert!(compressed.len() < uncompressed);
        assert!(compressed.len() < compressed_no_dict.len());
    }
    // Deserializing dict and no-dict payloads must yield identical headers.
    #[test]
    fn test_deserialize_with_dict() {
        let dict = gen_test_dict();
        let serde = crate::HeaderSerde::new(Some(dict));
        let serde_no_dict = crate::HeaderSerde::new(None);
        let header = gen_test_header();
        let compressed = serde.serialize(&header).unwrap();
        let compressed_no_dict = serde_no_dict.serialize(&header).unwrap();
        let from_dict_header = serde.deserialize(&compressed).unwrap();
        let from_no_dict_header = serde_no_dict.deserialize(&compressed_no_dict).unwrap();
        assert_eq!(from_dict_header.status, from_no_dict_header.status);
        assert_eq!(from_dict_header.headers, from_no_dict_header.headers);
    }
    // Full round trip through the dictionary-backed serde.
    #[test]
    fn test_ser_de_with_dict() {
        let dict = gen_test_dict();
        let serde = crate::HeaderSerde::new(Some(dict));
        let header = gen_test_header();
        let compressed = serde.serialize(&header).unwrap();
        let header2 = serde.deserialize(&compressed).unwrap();
        assert_eq!(header.status, header2.status);
        assert_eq!(header.headers, header2.headers);
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-pool/src/lib.rs | pingora-pool/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generic connection pooling
//!
//! The pool is optimized for high concurrency, high RPS use cases. Each connection group has a
//! lock free hot pool to reduce the lock contention when some connections are reused and released
//! very frequently.
#![warn(clippy::all)]
#![allow(clippy::new_without_default)]
#![allow(clippy::type_complexity)]
mod connection;
mod lru;
pub use connection::{ConnectionMeta, ConnectionPool, PoolNode};
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-pool/src/lru.rs | pingora-pool/src/lru.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::hash::Hash;
use lru::LruCache;
use parking_lot::RwLock;
use std::cell::RefCell;
use std::sync::atomic::{AtomicBool, Ordering::Relaxed};
use std::sync::Arc;
use thread_local::ThreadLocal;
use tokio::sync::Notify;
// An entry tracked by the LRU: the pooled item's metadata plus a notifier used
// to tell its owner that the entry was evicted or drained.
pub struct Node<T> {
    pub close_notifier: Arc<Notify>,
    pub meta: T,
}
impl<T> Node<T> {
    /// Wrap `meta` in a new [Node] with a fresh close notifier.
    pub fn new(meta: T) -> Self {
        let close_notifier = Arc::new(Notify::new());
        Node {
            close_notifier,
            meta,
        }
    }
    /// Signal whoever is waiting on this node that it is being closed/evicted.
    pub fn notify_close(&self) {
        self.close_notifier.notify_one();
    }
}
// A sharded-per-thread LRU. Each thread keeps its own LruCache, so `size`
// bounds each thread's cache (see put()), not the global total. The outer
// RwLock is taken for write only by drain(), which must visit every
// thread's cache; normal operations take it for read.
pub struct Lru<K, T>
where
    K: Send,
    T: Send,
{
    lru: RwLock<ThreadLocal<RefCell<LruCache<K, Node<T>>>>>,
    size: usize,
    // once set, the Lru accepts no new entries (see put())
    drain: AtomicBool,
}
impl<K, T> Lru<K, T>
where
    K: Hash + Eq + Send,
    T: Send,
{
    /// Create a new Lru where each thread-local cache holds at most `size` entries.
    pub fn new(size: usize) -> Self {
        Lru {
            lru: RwLock::new(ThreadLocal::new()),
            size,
            drain: AtomicBool::new(false),
        }
    }
    // put a node in and return the meta of the replaced node
    pub fn put(&self, key: K, value: Node<T>) -> Option<T> {
        // once draining, nothing is stored: pretend the node was evicted immediately
        if self.drain.load(Relaxed) {
            value.notify_close(); // sort of hack to simulate being evicted right away
            return None;
        }
        // the read lock only excludes a concurrent drain(); mutation happens
        // through this thread's own RefCell, so a shared lock is sufficient
        let lru = self.lru.read(); /* read lock */
        let lru_cache = &mut *(lru
            .get_or(|| RefCell::new(LruCache::unbounded()))
            .borrow_mut());
        lru_cache.put(key, value);
        // enforce the per-thread bound: evict and notify the least recently used node
        if lru_cache.len() > self.size {
            match lru_cache.pop_lru() {
                Some((_, v)) => {
                    // TODO: drop the lock here?
                    v.notify_close();
                    return Some(v.meta);
                }
                None => return None,
            }
        }
        None
        /* read lock dropped */
    }
    /// Insert `meta` under `key`. Returns the eviction notifier of the new node
    /// plus the meta of the node it displaced, if any.
    pub fn add(&self, key: K, meta: T) -> (Arc<Notify>, Option<T>) {
        let node = Node::new(meta);
        let notifier = node.close_notifier.clone();
        // TODO: check if the key is already in it
        (notifier, self.put(key, node))
    }
    /// Remove and return the node under `key` from this thread's cache, if present.
    pub fn pop(&self, key: &K) -> Option<Node<T>> {
        let lru = self.lru.read(); /* read lock */
        let lru_cache = &mut *(lru
            .get_or(|| RefCell::new(LruCache::unbounded()))
            .borrow_mut());
        lru_cache.pop(key)
        /* read lock dropped */
    }
    // Notify and clear every node in every thread's cache; subsequent put()s
    // are rejected (their nodes are notified immediately).
    #[allow(dead_code)]
    pub fn drain(&self) {
        self.drain.store(true, Relaxed);
        /* drain need to go through all the local lru cache objects
         * acquire an exclusive write lock to make it safe */
        let mut lru = self.lru.write(); /* write lock */
        let lru_cache_iter = lru.iter_mut();
        for lru_cache_rc in lru_cache_iter {
            let mut lru_cache = lru_cache_rc.borrow_mut();
            for (_, item) in lru_cache.iter() {
                item.notify_close();
            }
            lru_cache.clear();
        }
        /* write lock dropped */
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use log::debug;
    // Adding a third node to a size-2 Lru evicts (and notifies) the oldest one.
    #[tokio::test]
    async fn test_evict_close() {
        let pool: Lru<i32, ()> = Lru::new(2);
        let (notifier1, _) = pool.add(1, ());
        let (notifier2, _) = pool.add(2, ());
        let (notifier3, _) = pool.add(3, ());
        let closed_item = tokio::select! {
            _ = notifier1.notified() => {debug!("notifier1"); 1},
            _ = notifier2.notified() => {debug!("notifier2"); 2},
            _ = notifier3.notified() => {debug!("notifier3"); 3},
        };
        assert_eq!(closed_item, 1);
    }
    // pop() removes a node from LRU accounting, so the next eviction skips it.
    #[tokio::test]
    async fn test_evict_close_with_pop() {
        let pool: Lru<i32, ()> = Lru::new(2);
        let (notifier1, _) = pool.add(1, ());
        let (notifier2, _) = pool.add(2, ());
        pool.pop(&1);
        let (notifier3, _) = pool.add(3, ());
        let (notifier4, _) = pool.add(4, ());
        let closed_item = tokio::select! {
            _ = notifier1.notified() => {debug!("notifier1"); 1},
            _ = notifier2.notified() => {debug!("notifier2"); 2},
            _ = notifier3.notified() => {debug!("notifier3"); 3},
            _ = notifier4.notified() => {debug!("notifier4"); 4},
        };
        assert_eq!(closed_item, 2);
    }
    // drain() notifies everything already stored AND everything added afterwards.
    #[tokio::test]
    async fn test_drain() {
        let pool: Lru<i32, ()> = Lru::new(4);
        let (notifier1, _) = pool.add(1, ());
        let (notifier2, _) = pool.add(2, ());
        let (notifier3, _) = pool.add(3, ());
        pool.drain();
        let (notifier4, _) = pool.add(4, ());
        tokio::join!(
            notifier1.notified(),
            notifier2.notified(),
            notifier3.notified(),
            notifier4.notified()
        );
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-pool/src/connection.rs | pingora-pool/src/connection.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generic connection pooling
use log::{debug, warn};
use parking_lot::{Mutex, RwLock};
use pingora_timeout::{sleep, timeout};
use std::collections::HashMap;
use std::io;
use std::sync::Arc;
use std::time::Duration;
use tokio::io::{AsyncRead, AsyncReadExt};
use tokio::sync::{oneshot, watch, Notify, OwnedMutexGuard};
use super::lru::Lru;
type GroupKey = u64;
#[cfg(unix)]
type ID = i32;
#[cfg(windows)]
type ID = usize;
/// the metadata of a connection
#[derive(Clone, Debug)]
pub struct ConnectionMeta {
    /// The group key. All connections under the same key are considered the same for connection reuse.
    pub key: GroupKey,
    /// The unique ID of a connection.
    // NOTE(review): judging by the "evict fd" debug logs below, on unix this is
    // typically the raw fd of the connection — confirm with callers.
    pub id: ID,
}
impl ConnectionMeta {
/// Create a new [ConnectionMeta]
pub fn new(key: GroupKey, id: ID) -> Self {
ConnectionMeta { key, id }
}
}
// An idle connection stored in the pool, paired with the sender half used to
// tell its idle watcher task that it has been picked up again.
struct PoolConnection<S> {
    pub notify_use: oneshot::Sender<bool>,
    pub connection: S,
}
impl<S> PoolConnection<S> {
    /// Bundle `connection` with the channel used to signal when it is reused.
    pub fn new(notify_use: oneshot::Sender<bool>, connection: S) -> Self {
        PoolConnection {
            connection,
            notify_use,
        }
    }
    /// Take the connection out for reuse, telling the idle watcher to stand down.
    pub fn release(self) -> S {
        let PoolConnection {
            notify_use,
            connection,
        } = self;
        // notify the idle watcher to release the connection; ignore a gone receiver
        let _ = notify_use.send(true);
        connection
    }
}
use crossbeam_queue::ArrayQueue;
/// A pool of exchangeable items
pub struct PoolNode<T> {
    // the "cold" storage, guarded by a mutex; overflow from the hot queue lands here
    connections: Mutex<HashMap<ID, T>>,
    // a small lock free queue to avoid lock contention
    hot_queue: ArrayQueue<(ID, T)>,
    // to avoid race between 2 evictions on the queue
    hot_queue_remove_lock: Mutex<()>,
    // TODO: store the GroupKey to avoid hash collision?
}
// Keep the queue size small because eviction is O(n) in the queue
const HOT_QUEUE_SIZE: usize = 16;
impl<T> PoolNode<T> {
    /// Create a new [PoolNode]
    pub fn new() -> Self {
        PoolNode {
            connections: Mutex::new(HashMap::new()),
            hot_queue: ArrayQueue::new(HOT_QUEUE_SIZE),
            hot_queue_remove_lock: Mutex::new(()),
        }
    }
    /// Get any item from the pool
    pub fn get_any(&self) -> Option<(ID, T)> {
        // prefer the lock-free hot queue before touching the mutex
        let hot_conn = self.hot_queue.pop();
        if hot_conn.is_some() {
            return hot_conn;
        }
        let mut connections = self.connections.lock();
        // find one connection, any connection will do
        let id = match connections.iter().next() {
            Some((k, _)) => *k, // OK to copy i32
            None => return None,
        };
        // unwrap is safe since we just found it
        let connection = connections.remove(&id).unwrap();
        /* NOTE: we don't resize or drop empty connections hashmap
         * We may want to do it if they consume too much memory
         * maybe we should use trees to save memory */
        Some((id, connection))
        // connections.lock released here
    }
    /// Insert an item with the given unique ID into the pool
    pub fn insert(&self, id: ID, conn: T) {
        // try the lock-free queue first; spill into the locked map when full
        if let Err(node) = self.hot_queue.push((id, conn)) {
            // hot queue is full
            let mut connections = self.connections.lock();
            connections.insert(node.0, node.1); // TODO: check dup
        }
    }
    // This function acquires 2 locks and iterates over the entire hot queue.
    // But it should be fine because remove() rarely happens on a busy PoolNode.
    /// Remove the item associated with the id from the pool. The item is returned
    /// if it is found and removed.
    pub fn remove(&self, id: ID) -> Option<T> {
        // check the table first as least recent used ones are likely there
        let removed = self.connections.lock().remove(&id);
        if removed.is_some() {
            return removed;
        } // lock drops here
        // serialize removals so two concurrent evictions don't race on the queue
        let _queue_lock = self.hot_queue_remove_lock.lock();
        // check the hot queue, note that the queue can be accessed in parallel by insert and get
        let max_len = self.hot_queue.len();
        for _ in 0..max_len {
            if let Some((conn_id, conn)) = self.hot_queue.pop() {
                if conn_id == id {
                    // this is the item, it is already popped
                    return Some(conn);
                } else {
                    // not this item, put back to hot queue, but it could also be full
                    self.insert(conn_id, conn);
                }
            } else {
                // other threads grab all the connections
                return None;
            }
        }
        None
        // _queue_lock drops here
    }
}
/// Connection pool
///
/// [ConnectionPool] holds reusable connections. A reusable connection is released to this pool to
/// be picked up by another user/request.
pub struct ConnectionPool<S> {
    // TODO: n-way pools to reduce lock contention
    // group key -> node holding all idle connections of that group
    pool: RwLock<HashMap<GroupKey, Arc<PoolNode<PoolConnection<S>>>>>,
    // tracks usage recency of connections by ID and enforces the size limit
    lru: Lru<ID, ConnectionMeta>,
}
impl<S> ConnectionPool<S> {
    /// Create a new [ConnectionPool] with a size limit.
    ///
    /// When a connection is released to this pool, the least recently used connection will be dropped.
    pub fn new(size: usize) -> Self {
        ConnectionPool {
            pool: RwLock::new(HashMap::with_capacity(size)), // this is oversized since some connections will have the same key
            lru: Lru::new(size),
        }
    }
    /* get or create and insert a pool node for the hash key */
    fn get_pool_node(&self, key: GroupKey) -> Arc<PoolNode<PoolConnection<S>>> {
        {
            // fast path: node already exists, shared lock only
            let pool = self.pool.read();
            if let Some(v) = pool.get(&key) {
                return (*v).clone();
            }
        } // read lock released here
        {
            // write lock section
            let mut pool = self.pool.write();
            // check again since another task might have already added it
            if let Some(v) = pool.get(&key) {
                return (*v).clone();
            }
            let node = Arc::new(PoolNode::new());
            let node_ret = node.clone();
            pool.insert(key, node); // TODO: check dup
            node_ret
        }
    }
    // only remove from the pool because lru already removed it
    fn pop_evicted(&self, meta: &ConnectionMeta) {
        let pool_node = {
            let pool = self.pool.read();
            match pool.get(&meta.key) {
                Some(v) => (*v).clone(),
                None => {
                    warn!("Fail to get pool node for {:?}", meta);
                    return;
                } // nothing to pop, should return error?
            }
        }; // read lock released here
        pool_node.remove(meta.id);
        debug!("evict fd: {} from key {}", meta.id, meta.key);
    }
    /// Remove a connection that was closed (by peer or timer) from both the
    /// pool node and the LRU tracker.
    pub fn pop_closed(&self, meta: &ConnectionMeta) {
        // NOTE: which of these should be done first?
        self.pop_evicted(meta);
        self.lru.pop(&meta.id);
    }
    /// Get a connection from this pool under the same group key
    pub fn get(&self, key: &GroupKey) -> Option<S> {
        let pool_node = {
            let pool = self.pool.read();
            match pool.get(key) {
                Some(v) => (*v).clone(),
                None => return None,
            }
        }; // read lock released here
        if let Some((id, connection)) = pool_node.get_any() {
            self.lru.pop(&id); // the notified is not needed
            Some(connection.release())
        } else {
            None
        }
    }
    /// Release a connection to this pool for reuse
    ///
    /// - The returned [`Arc<Notify>`] will notify any listen when the connection is evicted from the pool.
    /// - The returned [`oneshot::Receiver<bool>`] will notify when the connection is being picked up by [Self::get()].
    pub fn put(
        &self,
        meta: &ConnectionMeta,
        connection: S,
    ) -> (Arc<Notify>, oneshot::Receiver<bool>) {
        // record recency; a displaced LRU entry means that connection must be evicted
        let (notify_close, replaced) = self.lru.add(meta.id, meta.clone());
        if let Some(meta) = replaced {
            self.pop_evicted(&meta);
        };
        let pool_node = self.get_pool_node(meta.key);
        let (notify_use, watch_use) = oneshot::channel();
        let connection = PoolConnection::new(notify_use, connection);
        pool_node.insert(meta.id, connection);
        (notify_close, watch_use)
    }
    /// Actively monitor the health of a connection that is already released to this pool
    ///
    /// When the connection breaks, or the optional `timeout` is reached this function will
    /// remove it from the pool and drop the connection.
    ///
    /// If the connection is reused via [Self::get()] or being evicted, this function will just exit.
    pub async fn idle_poll<Stream>(
        &self,
        connection: OwnedMutexGuard<Stream>,
        meta: &ConnectionMeta,
        timeout: Option<Duration>,
        notify_evicted: Arc<Notify>,
        watch_use: oneshot::Receiver<bool>,
    ) where
        Stream: AsyncRead + Unpin + Send,
    {
        // `biased` makes pick-up/eviction win over the read when ready simultaneously
        let read_result = tokio::select! {
            biased;
            _ = watch_use => {
                debug!("idle connection is being picked up");
                return
            },
            _ = notify_evicted.notified() => {
                debug!("idle connection is being evicted");
                // TODO: gracefully close the connection?
                return
            }
            read_result = read_with_timeout(connection , timeout) => read_result
        };
        match read_result {
            Ok(n) => {
                if n > 0 {
                    // an idle connection should be silent; data means protocol confusion
                    warn!("Data received on idle client connection, close it")
                } else {
                    debug!("Peer closed the idle connection or timeout")
                }
            }
            Err(e) => {
                debug!("error with the idle connection, close it {:?}", e);
            }
        }
        // connection terminated from either peer or timer
        self.pop_closed(meta);
    }
    /// Passively wait to close the connection after the timeout
    ///
    /// If this connection is not being picked up or evicted before the timeout is reach, this
    /// function will remove it from the pool and close the connection.
    pub async fn idle_timeout(
        &self,
        meta: &ConnectionMeta,
        timeout: Duration,
        notify_evicted: Arc<Notify>,
        mut notify_closed: watch::Receiver<bool>,
        watch_use: oneshot::Receiver<bool>,
    ) {
        tokio::select! {
            biased;
            _ = watch_use => {
                debug!("idle connection is being picked up");
            },
            _ = notify_evicted.notified() => {
                debug!("idle connection is being evicted");
                // TODO: gracefully close the connection?
            }
            _ = notify_closed.changed() => {
                // assume always changed from false to true
                debug!("idle connection is being closed");
                self.pop_closed(meta);
            }
            _ = sleep(timeout) => {
                debug!("idle connection is being evicted");
                self.pop_closed(meta);
            }
        };
    }
}
// Attempt a 1-byte read on an idle connection, optionally bounded by a timeout.
// An idle peer should produce nothing: Ok(0) means closed (or, with a timeout,
// that the timer fired); Ok(n > 0) means unexpected data arrived.
async fn read_with_timeout<S>(
    mut connection: OwnedMutexGuard<S>,
    timeout_duration: Option<Duration>,
) -> io::Result<usize>
where
    S: AsyncRead + Unpin + Send,
{
    let mut buf = [0; 1];
    let read_event = connection.read(&mut buf[..]);
    if let Some(d) = timeout_duration {
        match timeout(d, read_event).await {
            Ok(res) => res,
            Err(e) => {
                debug!("keepalive timeout {:?} reached, {:?}", d, e);
                Ok(0)
            }
        }
    } else {
        read_event.await
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use log::debug;
    use tokio::sync::Mutex as AsyncMutex;
    use tokio_test::io::{Builder, Mock};

    // Convention across these tests: meta1 and meta3 share pool key 101 (so
    // that key holds two connections), while meta2 (key 102) is a singleton.
    //
    // NOTE(review): two consecutive get() calls on the same key below return
    // distinct values, which suggests get() removes the returned connection
    // from the pool — confirm against ConnectionPool::get.
    #[tokio::test]
    async fn test_lookup() {
        let meta1 = ConnectionMeta::new(101, 1);
        let value1 = "v1".to_string();
        let meta2 = ConnectionMeta::new(102, 2);
        let value2 = "v2".to_string();
        let meta3 = ConnectionMeta::new(101, 3);
        let value3 = "v3".to_string();
        let cp: ConnectionPool<String> = ConnectionPool::new(3); //#CP3
        cp.put(&meta1, value1.clone());
        cp.put(&meta2, value2.clone());
        cp.put(&meta3, value3.clone());
        let found_b = cp.get(&meta2.key).unwrap();
        assert_eq!(found_b, value2);
        // Both values stored under key 101 must come back, in either order.
        let found_a1 = cp.get(&meta1.key).unwrap();
        let found_a2 = cp.get(&meta1.key).unwrap();
        assert!(
            found_a1 == value1 && found_a2 == value3 || found_a2 == value1 && found_a1 == value3
        );
    }

    // pop_closed removes the specific connection identified by the meta;
    // other connections under the same key stay retrievable.
    #[tokio::test]
    async fn test_pop() {
        let meta1 = ConnectionMeta::new(101, 1);
        let value1 = "v1".to_string();
        let meta2 = ConnectionMeta::new(102, 2);
        let value2 = "v2".to_string();
        let meta3 = ConnectionMeta::new(101, 3);
        let value3 = "v3".to_string();
        let cp: ConnectionPool<String> = ConnectionPool::new(3); //#CP3
        cp.put(&meta1, value1);
        cp.put(&meta2, value2);
        cp.put(&meta3, value3.clone());
        // Remove connection id 1; key 101 should still yield value3.
        cp.pop_closed(&meta1);
        let found_a1 = cp.get(&meta1.key).unwrap();
        assert_eq!(found_a1, value3);
        cp.pop_closed(&meta1);
        assert!(cp.get(&meta1.key).is_none())
    }

    // Inserting a third connection into a capacity-2 pool must evict the
    // oldest (meta1) and fire exactly its close notifier.
    #[tokio::test]
    async fn test_eviction() {
        let meta1 = ConnectionMeta::new(101, 1);
        let value1 = "v1".to_string();
        let meta2 = ConnectionMeta::new(102, 2);
        let value2 = "v2".to_string();
        let meta3 = ConnectionMeta::new(101, 3);
        let value3 = "v3".to_string();
        let cp: ConnectionPool<String> = ConnectionPool::new(2);
        let (notify_close1, _) = cp.put(&meta1, value1.clone());
        let (notify_close2, _) = cp.put(&meta2, value2.clone());
        let (notify_close3, _) = cp.put(&meta3, value3.clone()); // meta 1 should be evicted
        let closed_item = tokio::select! {
            _ = notify_close1.notified() => {debug!("notifier1"); 1},
            _ = notify_close2.notified() => {debug!("notifier2"); 2},
            _ = notify_close3.notified() => {debug!("notifier3"); 3},
        };
        assert_eq!(closed_item, 1);
        // After eviction, key 101 only holds value3.
        let found_a1 = cp.get(&meta1.key).unwrap();
        assert_eq!(found_a1, value3);
        assert_eq!(cp.get(&meta1.key), None)
    }

    // idle_poll on a connection with buffered data should close it first.
    // NOTE(review): the expected panic message looks like tokio_test's Mock
    // drop-check for unread data ("There is still data left to read.") —
    // confirm that is the intended failure mode.
    // NOTE(review): io2/io3 are polled with &meta1 rather than their own
    // metas; only meta1's slot is asserted, but confirm this is intentional.
    #[tokio::test]
    #[should_panic(expected = "There is still data left to read.")]
    async fn test_read_close() {
        let meta1 = ConnectionMeta::new(101, 1);
        let mock_io1 = Arc::new(AsyncMutex::new(Builder::new().read(b"garbage").build()));
        let meta2 = ConnectionMeta::new(102, 2);
        let mock_io2 = Arc::new(AsyncMutex::new(
            Builder::new().wait(Duration::from_secs(99)).build(),
        ));
        let meta3 = ConnectionMeta::new(101, 3);
        let mock_io3 = Arc::new(AsyncMutex::new(
            Builder::new().wait(Duration::from_secs(99)).build(),
        ));
        let cp: ConnectionPool<Arc<AsyncMutex<Mock>>> = ConnectionPool::new(3);
        let (c1, u1) = cp.put(&meta1, mock_io1.clone());
        let (c2, u2) = cp.put(&meta2, mock_io2.clone());
        let (c3, u3) = cp.put(&meta3, mock_io3.clone());
        let closed_item = tokio::select! {
            _ = cp.idle_poll(mock_io1.try_lock_owned().unwrap(), &meta1, None, c1, u1) => {debug!("notifier1"); 1},
            _ = cp.idle_poll(mock_io2.try_lock_owned().unwrap(), &meta1, None, c2, u2) => {debug!("notifier2"); 2},
            _ = cp.idle_poll(mock_io3.try_lock_owned().unwrap(), &meta1, None, c3, u3) => {debug!("notifier3"); 3},
        };
        assert_eq!(closed_item, 1);
        let _ = cp.get(&meta1.key).unwrap(); // mock_io3 should be selected
        assert!(cp.get(&meta1.key).is_none()) // mock_io1 should already be removed by idle_poll
    }

    // The shortest keepalive timeout (1s on io1) should fire first and evict
    // that connection from the pool.
    #[tokio::test]
    async fn test_read_timeout() {
        let meta1 = ConnectionMeta::new(101, 1);
        let mock_io1 = Arc::new(AsyncMutex::new(
            Builder::new().wait(Duration::from_secs(99)).build(),
        ));
        let meta2 = ConnectionMeta::new(102, 2);
        let mock_io2 = Arc::new(AsyncMutex::new(
            Builder::new().wait(Duration::from_secs(99)).build(),
        ));
        let meta3 = ConnectionMeta::new(101, 3);
        let mock_io3 = Arc::new(AsyncMutex::new(
            Builder::new().wait(Duration::from_secs(99)).build(),
        ));
        let cp: ConnectionPool<Arc<AsyncMutex<Mock>>> = ConnectionPool::new(3);
        let (c1, u1) = cp.put(&meta1, mock_io1.clone());
        let (c2, u2) = cp.put(&meta2, mock_io2.clone());
        let (c3, u3) = cp.put(&meta3, mock_io3.clone());
        let closed_item = tokio::select! {
            _ = cp.idle_poll(mock_io1.try_lock_owned().unwrap(), &meta1, Some(Duration::from_secs(1)), c1, u1) => {debug!("notifier1"); 1},
            _ = cp.idle_poll(mock_io2.try_lock_owned().unwrap(), &meta1, Some(Duration::from_secs(2)), c2, u2) => {debug!("notifier2"); 2},
            _ = cp.idle_poll(mock_io3.try_lock_owned().unwrap(), &meta1, Some(Duration::from_secs(3)), c3, u3) => {debug!("notifier3"); 3},
        };
        assert_eq!(closed_item, 1);
        let _ = cp.get(&meta1.key).unwrap(); // mock_io3 should be selected
        assert!(cp.get(&meta1.key).is_none()) // mock_io1 should already be removed by idle_poll
    }

    // Capacity-2 pool: putting the third connection evicts the first, which
    // should make its idle_poll future complete first.
    #[tokio::test]
    async fn test_evict_poll() {
        let meta1 = ConnectionMeta::new(101, 1);
        let mock_io1 = Arc::new(AsyncMutex::new(
            Builder::new().wait(Duration::from_secs(99)).build(),
        ));
        let meta2 = ConnectionMeta::new(102, 2);
        let mock_io2 = Arc::new(AsyncMutex::new(
            Builder::new().wait(Duration::from_secs(99)).build(),
        ));
        let meta3 = ConnectionMeta::new(101, 3);
        let mock_io3 = Arc::new(AsyncMutex::new(
            Builder::new().wait(Duration::from_secs(99)).build(),
        ));
        let cp: ConnectionPool<Arc<AsyncMutex<Mock>>> = ConnectionPool::new(2);
        let (c1, u1) = cp.put(&meta1, mock_io1.clone());
        let (c2, u2) = cp.put(&meta2, mock_io2.clone());
        let (c3, u3) = cp.put(&meta3, mock_io3.clone()); // 1 should be evicted at this point
        let closed_item = tokio::select! {
            _ = cp.idle_poll(mock_io1.try_lock_owned().unwrap(), &meta1, None, c1, u1) => {debug!("notifier1"); 1},
            _ = cp.idle_poll(mock_io2.try_lock_owned().unwrap(), &meta1, None, c2, u2) => {debug!("notifier2"); 2},
            _ = cp.idle_poll(mock_io3.try_lock_owned().unwrap(), &meta1, None, c3, u3) => {debug!("notifier3"); 3},
        };
        assert_eq!(closed_item, 1);
        let _ = cp.get(&meta1.key).unwrap(); // mock_io3 should be selected
        assert!(cp.get(&meta1.key).is_none()) // mock_io1 should already be removed by idle_poll
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-ketama/src/lib.rs | pingora-ketama/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # pingora-ketama
//! A Rust port of the nginx consistent hashing algorithm.
//!
//! This crate provides a consistent hashing algorithm which is identical in
//! behavior to [nginx consistent hashing](https://www.nginx.com/resources/wiki/modules/consistent_hash/).
//!
//! Using a consistent hash strategy like this is useful when one wants to
//! minimize the amount of requests that need to be rehashed to different nodes
//! when a node is added or removed.
//!
//! Here's a simple example of how one might use it:
//!
//! ```
//! use pingora_ketama::{Bucket, Continuum};
//!
//! # #[allow(clippy::needless_doctest_main)]
//! fn main() {
//! // Set up a continuum with a few nodes of various weight.
//! let mut buckets = vec![];
//! buckets.push(Bucket::new("127.0.0.1:12345".parse().unwrap(), 1));
//! buckets.push(Bucket::new("127.0.0.2:12345".parse().unwrap(), 2));
//! buckets.push(Bucket::new("127.0.0.3:12345".parse().unwrap(), 3));
//! let ring = Continuum::new(&buckets);
//!
//! // Let's see what the result is for a few keys:
//! for key in &["some_key", "another_key", "last_key"] {
//! let node = ring.node(key.as_bytes()).unwrap();
//! println!("{}: {}:{}", key, node.ip(), node.port());
//! }
//! }
//! ```
//!
//! ```bash
//! # Output:
//! some_key: 127.0.0.3:12345
//! another_key: 127.0.0.3:12345
//! last_key: 127.0.0.2:12345
//! ```
//!
//! We've provided a health-aware example in
//! `pingora-ketama/examples/health_aware_selector.rs`.
//!
//! For a carefully crafted real-world example, see the [`pingora-load-balancing`](https://docs.rs/pingora-load-balancing)
//! crate.
use std::cmp::Ordering;
use std::io::Write;
use std::net::SocketAddr;
use crc32fast::Hasher;
#[cfg(feature = "v2")]
use i_key_sort::sort::one_key_cmp::OneKeyAndCmpSort;
/// This constant is copied from nginx. It will create 160 points per weight
/// unit. For example, a weight of 2 will create 320 points on the ring.
///
/// This is the per-weight point count used by [Version::V1] rings.
pub const DEFAULT_POINT_MULTIPLE: u32 = 160;
/// A [Bucket] represents a server for consistent hashing
///
/// A [Bucket] contains a [SocketAddr] to the server and a weight associated with it.
///
/// The derived ordering sorts by `node` first, then `weight`; the
/// backwards-compat tests rely on feeding a sorted, de-duplicated bucket list
/// to get reproducible rings.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct Bucket {
    // The node name.
    // TODO: UDS
    node: SocketAddr,
    // The weight associated with a node. A higher weight indicates that this node should
    // receive more requests.
    weight: u32,
}
impl Bucket {
/// Return a new bucket with the given node and weight.
///
/// The chance that a [Bucket] is selected is proportional to the relative weight of all [Bucket]s.
///
/// # Panics
///
/// This will panic if the weight is zero.
pub fn new(node: SocketAddr, weight: u32) -> Self {
assert!(weight != 0, "weight must be at least one");
Bucket { node, weight }
}
}
// A point on the continuum.
#[derive(Clone, Debug, Eq, PartialEq)]
struct PointV1 {
    // the index to the actual address
    node: u32,
    // position of this point on the 32-bit hash ring
    hash: u32,
}
// We only want to compare the hash when sorting, so we implement these traits by hand.
impl Ord for PointV1 {
    /// Order points by ring position (`hash`) only; `node` is ignored.
    fn cmp(&self, other: &Self) -> Ordering {
        Ord::cmp(&self.hash, &other.hash)
    }
}
impl PartialOrd for PointV1 {
    /// Delegates to [`Ord::cmp`]; the ordering is total, so this is always `Some`.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(Ord::cmp(self, other))
    }
}
impl PointV1 {
fn new(node: u32, hash: u32) -> Self {
PointV1 { node, hash }
}
}
/// A point on the continuum.
///
/// We are trying to save memory here, so this struct is equivalent to a struct
/// with this definition, but doesn't require using the "untrustworthy" compact
/// repr. This does mean we have to do the memory layout manually though, but
/// the benchmarks show there is no performance hit for it.
///
/// #[repr(Rust, packed)]
/// struct Point {
///     node: u16,
///     hash: u32,
/// }
#[cfg(feature = "v2")]
#[derive(Copy, Clone, Eq, PartialEq)]
#[repr(transparent)]
struct PointV2([u8; 6]);
#[cfg(feature = "v2")]
impl PointV2 {
    /// Pack `hash` into bytes 0..4 and `node` into bytes 4..6, native endian.
    /// (Layout is in-memory only, so native endianness is fine.)
    fn new(node: u16, hash: u32) -> Self {
        let mut this = [0; 6];
        this[0..4].copy_from_slice(&hash.to_ne_bytes());
        this[4..6].copy_from_slice(&node.to_ne_bytes());
        Self(this)
    }
    /// Return the hash of the point which is stored in the first 4 bytes (native endian).
    fn hash(&self) -> u32 {
        u32::from_ne_bytes(self.0[0..4].try_into().expect("There are exactly 4 bytes"))
    }
    /// Return the node of the point which is stored in the last 2 bytes (native endian).
    fn node(&self) -> u16 {
        u16::from_ne_bytes(self.0[4..6].try_into().expect("There are exactly 2 bytes"))
    }
}
/// Selects the ring layout / construction algorithm for a [Continuum].
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
pub enum Version {
    /// Original layout: 8-byte [PointV1] entries, a fixed
    /// [DEFAULT_POINT_MULTIPLE] points per unit of weight.
    #[default]
    V1,
    /// Compact 6-byte [PointV2] entries with a configurable number of points
    /// per unit of weight.
    #[cfg(feature = "v2")]
    V2 { point_multiple: u32 },
}
impl Version {
    /// Number of ring points created per unit of bucket weight.
    fn point_multiple(&self) -> u32 {
        match self {
            Version::V1 => DEFAULT_POINT_MULTIPLE,
            #[cfg(feature = "v2")]
            Version::V2 { point_multiple } => *point_multiple,
        }
    }
}
// Growable point storage used while constructing a ring; converted into the
// immutable [VersionedRing] once sorted and de-duplicated.
enum RingBuilder {
    V1(Vec<PointV1>),
    #[cfg(feature = "v2")]
    V2(Vec<PointV2>),
}
impl RingBuilder {
    /// Allocate storage for every point the ring will hold
    /// (`total_weight * point multiple` entries).
    fn new(version: Version, total_weight: u32) -> Self {
        match version {
            Version::V1 => RingBuilder::V1(Vec::with_capacity(
                (total_weight * DEFAULT_POINT_MULTIPLE) as usize,
            )),
            #[cfg(feature = "v2")]
            Version::V2 { point_multiple } => {
                RingBuilder::V2(Vec::with_capacity((total_weight * point_multiple) as usize))
            }
        }
    }
    /// Append one point; `node` is an index into the continuum's address table.
    // NOTE(review): the index is u16 even for V1 (merely widened to u32 on
    // store), capping a ring at 65536 distinct addresses — confirm this bound
    // is acceptable; the pre-refactor code carried a full u32 index.
    fn push(&mut self, node: u16, hash: u32) {
        match self {
            RingBuilder::V1(ring) => {
                ring.push(PointV1::new(node as u32, hash));
            }
            #[cfg(feature = "v2")]
            RingBuilder::V2(ring) => {
                ring.push(PointV2::new(node, hash));
            }
        }
    }
    /// Sort points by hash and drop points with duplicate hashes.
    #[allow(unused)]
    fn sort(&mut self, addresses: &[SocketAddr]) {
        match self {
            RingBuilder::V1(ring) => {
                // Sort and remove any duplicates.
                ring.sort_unstable();
                ring.dedup_by(|a, b| a.hash == b.hash);
            }
            #[cfg(feature = "v2")]
            RingBuilder::V2(ring) => {
                // Ties on hash are broken by socket address, so the survivor
                // of a hash collision is deterministic regardless of input
                // order.
                ring.sort_by_one_key_then_by(
                    true,
                    |p| p.hash(),
                    |p1, p2| addresses[p1.node() as usize].cmp(&addresses[p2.node() as usize]),
                );
                //secondary_radix_sort(ring, |p| p.hash(), |p| addresses[p.node() as usize]);
                // Bytes 0..4 are the hash (see PointV2), so this dedups by hash.
                ring.dedup_by(|a, b| a.0[0..4] == b.0[0..4]);
            }
        }
    }
}
impl From<RingBuilder> for VersionedRing {
    /// Freeze a fully built (sorted, de-duplicated) builder into an
    /// immutable ring, dropping the spare `Vec` capacity.
    fn from(ring: RingBuilder) -> Self {
        match ring {
            RingBuilder::V1(ring) => VersionedRing::V1(ring.into_boxed_slice()),
            #[cfg(feature = "v2")]
            RingBuilder::V2(ring) => VersionedRing::V2(ring.into_boxed_slice()),
        }
    }
}
// Immutable, hash-sorted point storage backing a [Continuum].
enum VersionedRing {
    V1(Box<[PointV1]>),
    #[cfg(feature = "v2")]
    V2(Box<[PointV2]>),
}
impl VersionedRing {
    /// Find the index of the point that owns `hash`.
    ///
    /// Binary-searches the hash-sorted points; on a miss the insertion
    /// position (the next point clockwise) is used, wrapping to index 0 when
    /// the hash falls past the last point.
    pub fn node_idx(&self, hash: u32) -> usize {
        let search_result = match self {
            VersionedRing::V1(ring) => ring.binary_search_by(|p| p.hash.cmp(&hash)),
            #[cfg(feature = "v2")]
            VersionedRing::V2(ring) => ring.binary_search_by(|p| p.hash().cmp(&hash)),
        };
        // Err(i) is where the hash would be inserted; wrap at the end.
        search_result.unwrap_or_else(|i| if i == self.len() { 0 } else { i })
    }

    /// Address-table index stored in the point at `index`, if it exists.
    pub fn get(&self, index: usize) -> Option<usize> {
        let node = match self {
            VersionedRing::V1(ring) => ring.get(index).map(|p| p.node as usize),
            #[cfg(feature = "v2")]
            VersionedRing::V2(ring) => ring.get(index).map(|p| p.node() as usize),
        };
        node
    }

    /// Total number of points on the ring.
    pub fn len(&self) -> usize {
        match self {
            VersionedRing::V1(ring) => ring.len(),
            #[cfg(feature = "v2")]
            VersionedRing::V2(ring) => ring.len(),
        }
    }
}
/// The consistent hashing ring
///
/// A [Continuum] represents a ring of buckets where a node is associated with various points on
/// the ring.
pub struct Continuum {
    // Hash-sorted points; each point stores an index into `addrs`.
    ring: VersionedRing,
    // Address table; points refer to entries here by index.
    addrs: Box<[SocketAddr]>,
}
impl Continuum {
    /// Create a new [Continuum] using the default ring layout ([Version::V1]).
    pub fn new(buckets: &[Bucket]) -> Self {
        Self::new_with_version(buckets, Version::default())
    }
    /// Create a new [Continuum] with the given list of buckets.
    pub fn new_with_version(buckets: &[Bucket], version: Version) -> Self {
        if buckets.is_empty() {
            return Continuum {
                ring: VersionedRing::V1(Box::new([])),
                addrs: Box::new([]),
            };
        }
        // The total weight is multiplied by the factor of points to create many points per node.
        let total_weight: u32 = buckets.iter().fold(0, |sum, b| sum + b.weight);
        let mut ring = RingBuilder::new(version, total_weight);
        let mut addrs = Vec::with_capacity(buckets.len());
        for bucket in buckets {
            let mut hasher = Hasher::new();
            // We only do the following for backwards compatibility with nginx/memcache:
            // - Convert SocketAddr to string
            // - The hash input is as follows "HOST EMPTY PORT PREVIOUS_HASH". Spaces are only added
            //   for readability.
            // TODO: remove this logic and hash the literal SocketAddr once we no longer
            // need backwards compatibility
            // with_capacity = max_len(ipv6)(39) + len(null)(1) + max_len(port)(5)
            let mut hash_bytes = Vec::with_capacity(39 + 1 + 5);
            write!(&mut hash_bytes, "{}", bucket.node.ip()).unwrap();
            write!(&mut hash_bytes, "\0").unwrap();
            write!(&mut hash_bytes, "{}", bucket.node.port()).unwrap();
            hasher.update(hash_bytes.as_ref());
            // A higher weight will add more points for this node.
            let num_points = bucket.weight * version.point_multiple();
            // This is appended to the crc32 hash for each point.
            let mut prev_hash: u32 = 0;
            addrs.push(bucket.node);
            let node = addrs.len() - 1;
            for _ in 0..num_points {
                // Each point's hash chains off the previous one so a single
                // bucket spreads over many distinct ring positions.
                let mut hasher = hasher.clone();
                hasher.update(&prev_hash.to_le_bytes());
                let hash = hasher.finalize();
                // NOTE(review): `node` is truncated to u16 here; more than
                // 65536 buckets would silently alias — confirm acceptable.
                ring.push(node as u16, hash);
                prev_hash = hash;
            }
        }
        let addrs = addrs.into_boxed_slice();
        // Sort and remove any duplicates.
        ring.sort(&addrs);
        Continuum {
            ring: ring.into(),
            addrs,
        }
    }
    /// Find the associated index for the given input.
    pub fn node_idx(&self, input: &[u8]) -> usize {
        let hash = crc32fast::hash(input);
        self.ring.node_idx(hash)
    }
    /// Hash the given `hash_key` to the server address.
    pub fn node(&self, hash_key: &[u8]) -> Option<SocketAddr> {
        self.ring
            .get(self.node_idx(hash_key)) // should we unwrap here?
            .map(|n| self.addrs[n])
    }
    /// Get an iterator of nodes starting at the original hashed node of the `hash_key`.
    ///
    /// This function is useful to find failover servers if the original ones are offline, which is
    /// cheaper than rebuilding the entire hash ring.
    pub fn node_iter(&self, hash_key: &[u8]) -> NodeIterator<'_> {
        NodeIterator {
            idx: self.node_idx(hash_key),
            continuum: self,
        }
    }
    /// Return the address at `*idx` and advance `*idx` one step clockwise,
    /// wrapping at the end of the ring. Returns `None` (leaving `idx`
    /// untouched) when there is no point at `*idx`, e.g. for an empty ring.
    pub fn get_addr(&self, idx: &mut usize) -> Option<&SocketAddr> {
        let point = self.ring.get(*idx);
        if point.is_some() {
            // only update idx for non-empty ring otherwise we will panic on modulo 0
            *idx = (*idx + 1) % self.ring.len();
        }
        point.map(|n| &self.addrs[n])
    }
}
/// Iterator over a Continuum
///
/// Walks the ring clockwise from a starting point, wrapping at the end; it
/// never runs out of items while the ring is non-empty.
pub struct NodeIterator<'a> {
    // Current ring position; advanced (mod ring length) by each `next`.
    idx: usize,
    continuum: &'a Continuum,
}
impl<'a> Iterator for NodeIterator<'a> {
type Item = &'a SocketAddr;
fn next(&mut self) -> Option<Self::Item> {
self.continuum.get_addr(&mut self.idx)
}
}
#[cfg(test)]
mod tests {
    use std::net::SocketAddr;
    use std::path::Path;

    use super::{Bucket, Continuum};

    // Shorthand: parse a literal "ip:port" string, panicking on bad input.
    fn get_sockaddr(ip: &str) -> SocketAddr {
        ip.parse().unwrap()
    }

    // Appending a new host must not reshuffle keys mapped to existing hosts.
    #[test]
    fn consistency_after_adding_host() {
        fn assert_hosts(c: &Continuum) {
            assert_eq!(c.node(b"a"), Some(get_sockaddr("127.0.0.10:6443")));
            assert_eq!(c.node(b"b"), Some(get_sockaddr("127.0.0.5:6443")));
        }
        let buckets: Vec<_> = (1..11)
            .map(|u| Bucket::new(get_sockaddr(&format!("127.0.0.{u}:6443")), 1))
            .collect();
        let c = Continuum::new(&buckets);
        assert_hosts(&c);
        // Now add a new host and ensure that the hosts don't get shuffled.
        let buckets: Vec<_> = (1..12)
            .map(|u| Bucket::new(get_sockaddr(&format!("127.0.0.{u}:6443")), 1))
            .collect();
        let c = Continuum::new(&buckets);
        assert_hosts(&c);
    }

    // Hard-coded key→upstream expectations matching nginx's consistent-hash
    // behavior for a two-host ring.
    #[test]
    fn matches_nginx_sample() {
        let upstream_hosts = ["127.0.0.1:7777", "127.0.0.1:7778"];
        let upstream_hosts = upstream_hosts.iter().map(|i| get_sockaddr(i));
        let mut buckets = Vec::new();
        for upstream in upstream_hosts {
            buckets.push(Bucket::new(upstream, 1));
        }
        let c = Continuum::new(&buckets);
        assert_eq!(c.node(b"/some/path"), Some(get_sockaddr("127.0.0.1:7778")));
        assert_eq!(
            c.node(b"/some/longer/path"),
            Some(get_sockaddr("127.0.0.1:7777"))
        );
        assert_eq!(
            c.node(b"/sad/zaidoon"),
            Some(get_sockaddr("127.0.0.1:7778"))
        );
        assert_eq!(c.node(b"/g"), Some(get_sockaddr("127.0.0.1:7777")));
        assert_eq!(
            c.node(b"/pingora/team/is/cool/and/this/is/a/long/uri"),
            Some(get_sockaddr("127.0.0.1:7778"))
        );
        assert_eq!(
            c.node(b"/i/am/not/confident/in/this/code"),
            Some(get_sockaddr("127.0.0.1:7777"))
        );
    }

    // Replays a CSV of (uri, upstream) pairs captured from an nginx setup
    // with nine weighted upstreams; every row must match.
    #[test]
    fn matches_nginx_sample_data() {
        let upstream_hosts = [
            "10.0.0.1:443",
            "10.0.0.2:443",
            "10.0.0.3:443",
            "10.0.0.4:443",
            "10.0.0.5:443",
            "10.0.0.6:443",
            "10.0.0.7:443",
            "10.0.0.8:443",
            "10.0.0.9:443",
        ];
        let upstream_hosts = upstream_hosts.iter().map(|i| get_sockaddr(i));
        let mut buckets = Vec::new();
        for upstream in upstream_hosts {
            buckets.push(Bucket::new(upstream, 100));
        }
        let c = Continuum::new(&buckets);
        let path = Path::new(env!("CARGO_MANIFEST_DIR"))
            .join("test-data")
            .join("sample-nginx-upstream.csv");
        let mut rdr = csv::ReaderBuilder::new()
            .has_headers(false)
            .from_path(path)
            .unwrap();
        for pair in rdr.records() {
            let pair = pair.unwrap();
            let uri = pair.get(0).unwrap();
            let upstream = pair.get(1).unwrap();
            let got = c.node(uri.as_bytes()).unwrap();
            assert_eq!(got, get_sockaddr(upstream));
        }
    }

    // node_iter yields failover candidates in ring order and never ends on a
    // non-empty ring (wraps back to the start).
    #[test]
    fn node_iter() {
        let upstream_hosts = ["127.0.0.1:7777", "127.0.0.1:7778", "127.0.0.1:7779"];
        let upstream_hosts = upstream_hosts.iter().map(|i| get_sockaddr(i));
        let mut buckets = Vec::new();
        for upstream in upstream_hosts {
            buckets.push(Bucket::new(upstream, 1));
        }
        let c = Continuum::new(&buckets);
        let mut iter = c.node_iter(b"doghash");
        assert_eq!(iter.next(), Some(&get_sockaddr("127.0.0.1:7778")));
        assert_eq!(iter.next(), Some(&get_sockaddr("127.0.0.1:7779")));
        assert_eq!(iter.next(), Some(&get_sockaddr("127.0.0.1:7779")));
        assert_eq!(iter.next(), Some(&get_sockaddr("127.0.0.1:7777")));
        assert_eq!(iter.next(), Some(&get_sockaddr("127.0.0.1:7777")));
        assert_eq!(iter.next(), Some(&get_sockaddr("127.0.0.1:7778")));
        assert_eq!(iter.next(), Some(&get_sockaddr("127.0.0.1:7778")));
        assert_eq!(iter.next(), Some(&get_sockaddr("127.0.0.1:7779")));
        // drop 127.0.0.1:7777
        let upstream_hosts = ["127.0.0.1:7777", "127.0.0.1:7779"];
        let upstream_hosts = upstream_hosts.iter().map(|i| get_sockaddr(i));
        let mut buckets = Vec::new();
        for upstream in upstream_hosts {
            buckets.push(Bucket::new(upstream, 1));
        }
        let c = Continuum::new(&buckets);
        let mut iter = c.node_iter(b"doghash");
        // 127.0.0.1:7778 nodes are gone now
        // assert_eq!(iter.next(), Some("127.0.0.1:7778"));
        assert_eq!(iter.next(), Some(&get_sockaddr("127.0.0.1:7779")));
        assert_eq!(iter.next(), Some(&get_sockaddr("127.0.0.1:7779")));
        assert_eq!(iter.next(), Some(&get_sockaddr("127.0.0.1:7777")));
        assert_eq!(iter.next(), Some(&get_sockaddr("127.0.0.1:7777")));
        // assert_eq!(iter.next(), Some("127.0.0.1:7778"));
        // assert_eq!(iter.next(), Some("127.0.0.1:7778"));
        assert_eq!(iter.next(), Some(&get_sockaddr("127.0.0.1:7779")));
        // assert infinite cycle
        let c = Continuum::new(&[Bucket::new(get_sockaddr("127.0.0.1:7777"), 1)]);
        let mut iter = c.node_iter(b"doghash");
        let start_idx = iter.idx;
        for _ in 0..c.ring.len() {
            assert!(iter.next().is_some());
        }
        // assert wrap around
        assert_eq!(start_idx, iter.idx);
    }

    // An empty continuum yields no node and a permanently empty iterator.
    #[test]
    fn test_empty() {
        let c = Continuum::new(&[]);
        assert!(c.node(b"doghash").is_none());
        let mut iter = c.node_iter(b"doghash");
        assert!(iter.next().is_none());
        assert!(iter.next().is_none());
        assert!(iter.next().is_none());
    }

    // IPv6 addresses hash through the same stringified host/port path as IPv4.
    #[test]
    fn test_ipv6_ring() {
        let upstream_hosts = ["[::1]:7777", "[::1]:7778", "[::1]:7779"];
        let upstream_hosts = upstream_hosts.iter().map(|i| get_sockaddr(i));
        let mut buckets = Vec::new();
        for upstream in upstream_hosts {
            buckets.push(Bucket::new(upstream, 1));
        }
        let c = Continuum::new(&buckets);
        let mut iter = c.node_iter(b"doghash");
        assert_eq!(iter.next(), Some(&get_sockaddr("[::1]:7777")));
        assert_eq!(iter.next(), Some(&get_sockaddr("[::1]:7778")));
        assert_eq!(iter.next(), Some(&get_sockaddr("[::1]:7777")));
        assert_eq!(iter.next(), Some(&get_sockaddr("[::1]:7778")));
        assert_eq!(iter.next(), Some(&get_sockaddr("[::1]:7778")));
        assert_eq!(iter.next(), Some(&get_sockaddr("[::1]:7777")));
        assert_eq!(iter.next(), Some(&get_sockaddr("[::1]:7779")));
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-ketama/tests/backwards_compat.rs | pingora-ketama/tests/backwards_compat.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use old_version::{Bucket as OldBucket, Continuum as OldContinuum};
#[allow(unused_imports)]
use pingora_ketama::{Bucket, Continuum, Version, DEFAULT_POINT_MULTIPLE};
use rand::{random, random_range, rng, seq::IteratorRandom};
use std::collections::BTreeSet;
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
mod old_version;
/// Generate a random socket address: a coin flip picks the family, then the
/// IP bits and port are drawn uniformly (IPv6 flowinfo/scope fixed at 0).
fn random_socket_addr() -> SocketAddr {
    let pick_v4: bool = random();
    if pick_v4 {
        SocketAddrV4::new(Ipv4Addr::from_bits(random()), random()).into()
    } else {
        SocketAddrV6::new(Ipv6Addr::from_bits(random()), random(), 0, 0).into()
    }
}
/// Random alphanumeric string of exactly `len` characters.
fn random_string(len: usize) -> String {
    const CHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
    let mut rng = rng();
    let mut out = String::with_capacity(len);
    for _ in 0..len {
        out.push(CHARS.chars().choose(&mut rng).unwrap());
    }
    out
}
/// The old version of pingora-ketama should _always_ return the same result as
/// v1 of the new version as long as the original input is sorted by socket
/// address (and has no duplicates). this test generates a large number of
/// random socket addresses with varying weights and compares the output of
/// both
#[test]
fn test_v1_to_old_version() {
    // BTreeSet provides exactly the sorted, de-duplicated input both sides need.
    let (old_buckets, new_buckets): (BTreeSet<_>, BTreeSet<_>) = (0..2000)
        .map(|_| (random_socket_addr(), random_range(1..10)))
        .map(|(addr, weight)| (OldBucket::new(addr, weight), Bucket::new(addr, weight)))
        .unzip();
    let old_continuum = OldContinuum::new(&Vec::from_iter(old_buckets));
    let new_continuum = Continuum::new(&Vec::from_iter(new_buckets));
    // Every random key must land on the same node in both implementations.
    for _ in 0..20_000 {
        let key = random_string(20);
        let old_node = old_continuum.node(key.as_bytes()).unwrap();
        let new_node = new_continuum.node(key.as_bytes()).unwrap();
        assert_eq!(old_node, new_node);
    }
}
/// The new version of pingora-ketama (v2) should return _almost_ exactly what
/// the old version does. The difference will be in collision handling
#[test]
#[cfg(feature = "v2")]
fn test_v2_to_old_version() {
    let (old_buckets, new_buckets): (BTreeSet<_>, BTreeSet<_>) = (0..2000)
        .map(|_| (random_socket_addr(), random_range(1..10)))
        .map(|(addr, weight)| (OldBucket::new(addr, weight), Bucket::new(addr, weight)))
        .unzip();
    let old_continuum = OldContinuum::new(&Vec::from_iter(old_buckets));
    let new_continuum = Continuum::new_with_version(
        &Vec::from_iter(new_buckets),
        Version::V2 {
            point_multiple: DEFAULT_POINT_MULTIPLE,
        },
    );
    let test_count = 20_000;
    let mut mismatches = 0;
    for _ in 0..test_count {
        let key = random_string(20);
        let old_node = old_continuum.node(key.as_bytes()).unwrap();
        let new_node = new_continuum.node(key.as_bytes()).unwrap();
        if old_node != new_node {
            mismatches += 1;
        }
    }
    // Fewer than 0.1% of keys may differ (hash-collision ties are resolved
    // differently between the two layouts).
    assert!((mismatches as f64 / test_count as f64) < 0.001);
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-ketama/tests/old_version/mod.rs | pingora-ketama/tests/old_version/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! This mod is a direct copy of the old version of pingora-ketama. It is here
//! to ensure that the new version's compatible mode produces identical
//! results to the old version.
use std::cmp::Ordering;
use std::io::Write;
use std::net::SocketAddr;
use crc32fast::Hasher;
/// A [Bucket] represents a server for consistent hashing
///
/// A [Bucket] contains a [SocketAddr] to the server and a weight associated with it.
// NOTE: frozen copy of the legacy implementation (see module docs); keep in
// sync with the old release rather than improving it.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct Bucket {
    // The node name.
    // TODO: UDS
    node: SocketAddr,
    // The weight associated with a node. A higher weight indicates that this node should
    // receive more requests.
    weight: u32,
}
impl Bucket {
    /// Return a new bucket with the given node and weight.
    ///
    /// The chance that a [Bucket] is selected is proportional to the relative weight of all [Bucket]s.
    ///
    /// # Panics
    ///
    /// This will panic if the weight is zero.
    // NOTE: frozen copy of the legacy implementation; keep as-is.
    pub fn new(node: SocketAddr, weight: u32) -> Self {
        assert!(weight != 0, "weight must be at least one");
        Bucket { node, weight }
    }
}
// A point on the continuum. (Frozen legacy copy; keep as-is.)
#[derive(Clone, Debug, Eq, PartialEq)]
struct Point {
    // the index to the actual address
    node: u32,
    // position of this point on the 32-bit hash ring
    hash: u32,
}
// We only want to compare the hash when sorting, so we implement these traits by hand.
// (Frozen legacy copy; keep as-is.)
impl Ord for Point {
    fn cmp(&self, other: &Self) -> Ordering {
        self.hash.cmp(&other.hash)
    }
}
// Total order delegated to `Ord`. (Frozen legacy copy; keep as-is.)
impl PartialOrd for Point {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Point {
    // Constructor. (Frozen legacy copy; keep as-is.)
    fn new(node: u32, hash: u32) -> Self {
        Point { node, hash }
    }
}
/// The consistent hashing ring
///
/// A [Continuum] represents a ring of buckets where a node is associated with various points on
/// the ring.
// NOTE: frozen copy of the legacy implementation; keep as-is.
pub struct Continuum {
    // Hash-sorted points; each point stores an index into `addrs`.
    ring: Box<[Point]>,
    addrs: Box<[SocketAddr]>,
}
impl Continuum {
    /// Create a new [Continuum] with the given list of buckets.
    // NOTE: frozen copy of the legacy implementation (see module docs). The
    // backwards-compat tests compare against this byte-for-byte behavior —
    // do not "fix" or restyle it.
    pub fn new(buckets: &[Bucket]) -> Self {
        // This constant is copied from nginx. It will create 160 points per weight unit. For
        // example, a weight of 2 will create 320 points on the ring.
        const POINT_MULTIPLE: u32 = 160;
        if buckets.is_empty() {
            return Continuum {
                ring: Box::new([]),
                addrs: Box::new([]),
            };
        }
        // The total weight is multiplied by the factor of points to create many points per node.
        let total_weight: u32 = buckets.iter().fold(0, |sum, b| sum + b.weight);
        let mut ring = Vec::with_capacity((total_weight * POINT_MULTIPLE) as usize);
        let mut addrs = Vec::with_capacity(buckets.len());
        for bucket in buckets {
            let mut hasher = Hasher::new();
            // We only do the following for backwards compatibility with nginx/memcache:
            // - Convert SocketAddr to string
            // - The hash input is as follows "HOST EMPTY PORT PREVIOUS_HASH". Spaces are only added
            //   for readability.
            // TODO: remove this logic and hash the literal SocketAddr once we no longer
            // need backwards compatibility
            // with_capacity = max_len(ipv6)(39) + len(null)(1) + max_len(port)(5)
            let mut hash_bytes = Vec::with_capacity(39 + 1 + 5);
            write!(&mut hash_bytes, "{}", bucket.node.ip()).unwrap();
            write!(&mut hash_bytes, "\0").unwrap();
            write!(&mut hash_bytes, "{}", bucket.node.port()).unwrap();
            hasher.update(hash_bytes.as_ref());
            // A higher weight will add more points for this node.
            let num_points = bucket.weight * POINT_MULTIPLE;
            // This is appended to the crc32 hash for each point.
            let mut prev_hash: u32 = 0;
            addrs.push(bucket.node);
            let node = addrs.len() - 1;
            for _ in 0..num_points {
                let mut hasher = hasher.clone();
                hasher.update(&prev_hash.to_le_bytes());
                let hash = hasher.finalize();
                ring.push(Point::new(node as u32, hash));
                prev_hash = hash;
            }
        }
        // Sort and remove any duplicates.
        ring.sort_unstable();
        ring.dedup_by(|a, b| a.hash == b.hash);
        Continuum {
            ring: ring.into_boxed_slice(),
            addrs: addrs.into_boxed_slice(),
        }
    }
    /// Find the associated index for the given input.
    pub fn node_idx(&self, input: &[u8]) -> usize {
        let hash = crc32fast::hash(input);
        // The `Result` returned here is either a match or the error variant returns where the
        // value would be inserted.
        match self.ring.binary_search_by(|p| p.hash.cmp(&hash)) {
            Ok(i) => i,
            Err(i) => {
                // We wrap around to the front if this value would be inserted at the end.
                if i == self.ring.len() {
                    0
                } else {
                    i
                }
            }
        }
    }
    /// Hash the given `hash_key` to the server address.
    pub fn node(&self, hash_key: &[u8]) -> Option<SocketAddr> {
        self.ring
            .get(self.node_idx(hash_key)) // should we unwrap here?
            .map(|p| self.addrs[p.node as usize])
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-ketama/benches/memory.rs | pingora-ketama/benches/memory.rs | use pingora_ketama::{Bucket, Continuum};
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;
/// 253 hosts (127.0.0.1 through 127.0.0.253), each with weight 10.
fn buckets() -> Vec<Bucket> {
    (1..254)
        .map(|i| Bucket::new(format!("127.0.0.{i}:6443").parse().unwrap(), 10))
        .collect()
}
/// Build one ring under the dhat heap profiler. The profiler guard lives for
/// the whole of `main`; dhat emits its report when the guard is dropped
/// (per dhat's documented behavior — confirm against the dhat docs).
pub fn main() {
    let _profiler = dhat::Profiler::new_heap();
    let _c = Continuum::new(&buckets());
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-ketama/benches/simple.rs | pingora-ketama/benches/simple.rs | use pingora_ketama::{Bucket, Continuum};
use criterion::{criterion_group, criterion_main, Criterion};
use rand::{
distr::{Alphanumeric, SampleString},
rng,
};
#[cfg(feature = "heap-prof")]
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;
/// 100 equally weighted hosts: 127.0.0.1 through 127.0.0.100, port 6443.
fn buckets() -> Vec<Bucket> {
    (1..101)
        .map(|i| Bucket::new(format!("127.0.0.{i}:6443").parse().unwrap(), 1))
        .collect()
}
/// 30 random alphanumeric characters from the thread-local RNG.
fn random_string() -> String {
    Alphanumeric.sample_string(&mut rng(), 30)
}
/// Benchmark ring construction and key lookup over 100 equal-weight hosts.
pub fn criterion_benchmark(c: &mut Criterion) {
    // Optional heap profiling of the whole benchmark run.
    #[cfg(feature = "heap-prof")]
    let _profiler = dhat::Profiler::new_heap();
    c.bench_function("create_continuum", |b| {
        b.iter(|| Continuum::new(&buckets()))
    });
    c.bench_function("continuum_hash", |b| {
        let continuum = Continuum::new(&buckets());
        // NOTE(review): random_string() runs inside the timed closure, so RNG
        // and allocation cost is included in every sample — confirm intended.
        b.iter(|| continuum.node(random_string().as_bytes()))
    });
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-ketama/examples/health_aware_selector.rs | pingora-ketama/examples/health_aware_selector.rs | use log::info;
use pingora_ketama::{Bucket, Continuum};
use std::collections::HashMap;
use std::net::SocketAddr;
// A repository for node healthiness, emulating a health checker.
struct NodeHealthRepository {
    // Last known health state per node address; absent means unknown.
    nodes: HashMap<SocketAddr, bool>,
}
impl NodeHealthRepository {
    /// Create an empty repository; unknown nodes are treated as unhealthy.
    fn new() -> Self {
        NodeHealthRepository {
            nodes: HashMap::new(),
        }
    }
    /// Record the health state of `node`, overwriting any previous state.
    fn set_node_health(&mut self, node: SocketAddr, is_healthy: bool) {
        self.nodes.insert(node, is_healthy);
    }
    /// Whether `node` is known to be healthy; unknown nodes default to `false`.
    fn node_is_healthy(&self, node: &SocketAddr) -> bool {
        // `bool` is `Copy`, so `copied()` (not `cloned()`) is the idiomatic
        // way to turn the `Option<&bool>` lookup into an `Option<bool>`.
        self.nodes.get(node).copied().unwrap_or(false)
    }
}
// A health-aware node selector, which relies on the above health repository.
struct HealthAwareNodeSelector<'a> {
    ring: Continuum,
    max_tries: usize,
    node_health_repo: &'a NodeHealthRepository,
}
impl HealthAwareNodeSelector<'_> {
    fn new(r: Continuum, tries: usize, nhr: &NodeHealthRepository) -> HealthAwareNodeSelector<'_> {
        HealthAwareNodeSelector {
            ring: r,
            max_tries: tries,
            node_health_repo: nhr,
        }
    }
    // Walk the ring from the key's position and return the first healthy node
    // among the first `max_tries` candidates, if any.
    fn try_select(&self, key: &str) -> Option<SocketAddr> {
        self.ring
            .node_iter(key.as_bytes())
            .copied()
            .take(self.max_tries)
            .find(|candidate| self.node_health_repo.node_is_healthy(candidate))
    }
}
// RUST_LOG=INFO cargo run --example health_aware_selector
fn main() {
    env_logger::init();
    // Set up some nodes.
    let mut buckets = Vec::with_capacity(10);
    for i in 1..=10 {
        buckets.push(Bucket::new(format!("127.0.0.{i}:6443").parse().unwrap(), 1));
    }
    // Mark the 1-5th nodes healthy, the 6-10th nodes unhealthy.
    let mut health_repo = NodeHealthRepository::new();
    for i in 1..=10 {
        let addr = format!("127.0.0.{i}:6443").parse().unwrap();
        health_repo.set_node_health(addr, i < 6);
    }
    // Create a health-aware selector with up to 3 tries.
    let health_aware_selector =
        HealthAwareNodeSelector::new(Continuum::new(&buckets), 3, &health_repo);
    // Let's try the selector on a few keys.
    for i in 0..5 {
        let key = format!("key_{i}");
        if let Some(node) = health_aware_selector.try_select(&key) {
            info!("{key}: {}:{}", node.ip(), node.port());
        } else {
            info!("{key}: no healthy node found!");
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-http/src/lib.rs | pingora-http/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! HTTP header objects that preserve http header cases
//!
//! Although HTTP header names are supposed to be case-insensitive for compatibility, proxies
//! ideally shouldn't alter the HTTP traffic, especially the headers they don't need to read.
//!
//! This crate provide structs and methods to preserve the headers in order to build a transparent
//! proxy.
#![allow(clippy::new_without_default)]
use bytes::BufMut;
use http::header::{AsHeaderName, HeaderName, HeaderValue};
use http::request::Builder as ReqBuilder;
use http::request::Parts as ReqParts;
use http::response::Builder as RespBuilder;
use http::response::Parts as RespParts;
use http::uri::Uri;
use pingora_error::{ErrorType::*, OrErr, Result};
use std::ops::{Deref, DerefMut};
pub use http::method::Method;
pub use http::status::StatusCode;
pub use http::version::Version;
pub use http::HeaderMap as HMap;
mod case_header_name;
use case_header_name::CaseHeaderName;
pub use case_header_name::IntoCaseHeaderName;
pub mod prelude {
pub use crate::RequestHeader;
pub use crate::ResponseHeader;
}
/* an ordered header map to store the original case of each header name
HMap({
"foo": ["Foo", "foO", "FoO"]
})
The order how HeaderMap iter over its items is "arbitrary, but consistent".
Hopefully this property makes sure this map of header names always iterates in the
same order of the map of header values.
This idea is inspaired by hyper @nox
*/
// lowercase name -> original-case spellings, kept in the same iteration order
// as the value map (see the invariant described above)
type CaseMap = HMap<CaseHeaderName>;
/// A header name as yielded during iteration: either the original
/// case-preserved bytes, or a titled-case fallback when no case map is kept.
pub enum HeaderNameVariant<'a> {
    /// The original, case-preserved header name.
    Case(&'a CaseHeaderName),
    /// A titled-case (or lowercase) name used when case is not preserved.
    Titled(&'a str),
}
/// The HTTP request header type.
///
/// This type is similar to [http::request::Parts] but preserves header name case.
/// It also preserves request path even if it is not UTF-8.
///
/// [RequestHeader] implements [Deref] for [http::request::Parts] so it can be used as it in most
/// places.
#[derive(Debug)]
pub struct RequestHeader {
    // the underlying `http` crate request parts (method, uri, version, headers, ...)
    base: ReqParts,
    // ordered map preserving the original case of each header name;
    // `None` when case preservation is disabled (see [Self::build_no_case()])
    header_name_map: Option<CaseMap>,
    // store the raw path bytes only if it is invalid utf-8
    raw_path_fallback: Vec<u8>, // can also be Box<[u8]>
    // whether we send END_STREAM with HEADERS for h2 requests
    send_end_stream: bool,
}
// Allow borrowing the inner `http::request::Parts` directly.
impl AsRef<ReqParts> for RequestHeader {
    fn as_ref(&self) -> &ReqParts {
        &self.base
    }
}
// Deref to the inner parts so `RequestHeader` can be used wherever
// `http::request::Parts` is expected.
impl Deref for RequestHeader {
    type Target = ReqParts;
    fn deref(&self) -> &Self::Target {
        &self.base
    }
}
impl DerefMut for RequestHeader {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.base
    }
}
impl RequestHeader {
    // Internal constructor: empty request parts with reserved header capacity
    // and no case map; shared by [Self::build()] and [Self::build_no_case()].
    fn new_no_case(size_hint: Option<usize>) -> Self {
        let mut base = ReqBuilder::new().body(()).unwrap().into_parts().0;
        base.headers.reserve(http_header_map_upper_bound(size_hint));
        RequestHeader {
            base,
            header_name_map: None,
            raw_path_fallback: vec![],
            send_end_stream: true,
        }
    }
    /// Create a new [RequestHeader] with the given method and path.
    ///
    /// The `path` can be non UTF-8.
    pub fn build(
        method: impl TryInto<Method>,
        path: &[u8],
        size_hint: Option<usize>,
    ) -> Result<Self> {
        let mut req = Self::build_no_case(method, path, size_hint)?;
        req.header_name_map = Some(CaseMap::with_capacity(http_header_map_upper_bound(
            size_hint,
        )));
        Ok(req)
    }
    /// Create a new [RequestHeader] with the given method and path without preserving header case.
    ///
    /// A [RequestHeader] created from this type is more space efficient than those from [Self::build()].
    ///
    /// Use this method if reading from or writing to HTTP/2 sessions where header case doesn't matter anyway.
    pub fn build_no_case(
        method: impl TryInto<Method>,
        path: &[u8],
        size_hint: Option<usize>,
    ) -> Result<Self> {
        let mut req = Self::new_no_case(size_hint);
        req.base.method = method
            .try_into()
            .explain_err(InvalidHTTPHeader, |_| "invalid method")?;
        req.set_raw_path(path)?;
        Ok(req)
    }
    /// Append the header name and value to `self`.
    ///
    /// If there are already some headers under the same name, a new value will be added without
    /// any others being removed.
    pub fn append_header(
        &mut self,
        name: impl IntoCaseHeaderName,
        value: impl TryInto<HeaderValue>,
    ) -> Result<bool> {
        let header_value = value
            .try_into()
            .explain_err(InvalidHTTPHeader, |_| "invalid value while append")?;
        append_header_value(
            self.header_name_map.as_mut(),
            &mut self.base.headers,
            name,
            header_value,
        )
    }
    /// Insert the header name and value to `self`.
    ///
    /// Different from [Self::append_header()], this method will replace all other existing headers
    /// under the same name (case-insensitive).
    pub fn insert_header(
        &mut self,
        name: impl IntoCaseHeaderName,
        value: impl TryInto<HeaderValue>,
    ) -> Result<()> {
        let header_value = value
            .try_into()
            .explain_err(InvalidHTTPHeader, |_| "invalid value while insert")?;
        insert_header_value(
            self.header_name_map.as_mut(),
            &mut self.base.headers,
            name,
            header_value,
        )
    }
    /// Remove all headers under the name
    pub fn remove_header<'a, N: ?Sized>(&mut self, name: &'a N) -> Option<HeaderValue>
    where
        &'a N: 'a + AsHeaderName,
    {
        remove_header(self.header_name_map.as_mut(), &mut self.base.headers, name)
    }
    /// Write the header to the `buf` in HTTP/1.1 wire format.
    ///
    /// The header case will be preserved.
    pub fn header_to_h1_wire(&self, buf: &mut impl BufMut) {
        header_to_h1_wire(self.header_name_map.as_ref(), &self.base.headers, buf)
    }
    /// If case sensitivity is enabled, returns an iterator to iterate over case-sensitive header names and values.
    /// Otherwise returns an empty iterator.
    ///
    /// Headers of the same name are visited in insertion order.
    pub fn case_header_iter(&self) -> impl Iterator<Item = (&CaseHeaderName, &HeaderValue)> + '_ {
        case_header_iter(self.header_name_map.as_ref(), &self.base.headers)
    }
    /// Returns true if the request has case-sensitive headers.
    pub fn has_case(&self) -> bool {
        self.header_name_map.is_some()
    }
    /// Visit every header as a (name, value) pair, in map order.
    ///
    /// The name is passed as [HeaderNameVariant::Case] when a case map is
    /// present, otherwise as a titled-case [HeaderNameVariant::Titled] fallback.
    pub fn map<F: FnMut(HeaderNameVariant, &HeaderValue) -> Result<()>>(
        &self,
        mut f: F,
    ) -> Result<()> {
        let key_map = self.header_name_map.as_ref();
        let value_map = &self.base.headers;
        if let Some(key_map) = key_map {
            let iter = key_map.iter().zip(value_map.iter());
            for ((header, case_header), (header2, val)) in iter {
                if header != header2 {
                    // in case the header iteration order changes in future versions of HMap
                    panic!("header iter mismatch {}, {}", header, header2)
                }
                f(HeaderNameVariant::Case(case_header), val)?;
            }
        } else {
            for (header, value) in value_map {
                let titled_header =
                    case_header_name::titled_header_name_str(header).unwrap_or(header.as_str());
                f(HeaderNameVariant::Titled(titled_header), value)?;
            }
        }
        Ok(())
    }
    /// Set the request method
    pub fn set_method(&mut self, method: Method) {
        self.base.method = method;
    }
    /// Set the request URI
    pub fn set_uri(&mut self, uri: http::Uri) {
        self.base.uri = uri;
        // Clear out raw_path_fallback, or it will be used when serializing
        self.raw_path_fallback = vec![];
    }
    /// Set the request URI directly via raw bytes.
    ///
    /// Generally prefer [Self::set_uri()] to modify the header's URI if able.
    ///
    /// This API is to allow supporting non UTF-8 cases.
    pub fn set_raw_path(&mut self, path: &[u8]) -> Result<()> {
        if let Ok(p) = std::str::from_utf8(path) {
            let uri = Uri::builder()
                .path_and_query(p)
                .build()
                .explain_err(InvalidHTTPHeader, |_| format!("invalid uri {}", p))?;
            self.base.uri = uri;
            // keep raw_path empty, no need to store twice
        } else {
            // put a valid utf-8 path into base for read only access
            let lossy_str = String::from_utf8_lossy(path);
            let uri = Uri::builder()
                .path_and_query(lossy_str.as_ref())
                .build()
                .explain_err(InvalidHTTPHeader, |_| format!("invalid uri {}", lossy_str))?;
            self.base.uri = uri;
            self.raw_path_fallback = path.to_vec();
        }
        Ok(())
    }
    /// Set whether we send an END_STREAM on H2 request HEADERS if body is empty.
    pub fn set_send_end_stream(&mut self, send_end_stream: bool) {
        self.send_end_stream = send_end_stream;
    }
    /// Returns if we support sending an END_STREAM on H2 request HEADERS if body is empty,
    /// returns None if not H2.
    pub fn send_end_stream(&self) -> Option<bool> {
        if self.base.version != Version::HTTP_2 {
            return None;
        }
        Some(self.send_end_stream)
    }
    /// Return the request path in its raw format
    ///
    /// Non-UTF8 is supported.
    pub fn raw_path(&self) -> &[u8] {
        if !self.raw_path_fallback.is_empty() {
            &self.raw_path_fallback
        } else {
            // Url should always be set
            self.base
                .uri
                .path_and_query()
                .as_ref()
                .unwrap()
                .as_str()
                .as_bytes()
        }
    }
    /// Return the file extension of the path
    pub fn uri_file_extension(&self) -> Option<&str> {
        // get everything after the last '.' in path
        // NOTE(review): a '.' inside a directory segment (e.g. "/a.b/c") also
        // matches here, yielding "b/c" — confirm whether that is intended.
        let (_, ext) = self
            .uri
            .path_and_query()
            .and_then(|pq| pq.path().rsplit_once('.'))?;
        Some(ext)
    }
    /// Set http version
    pub fn set_version(&mut self, version: Version) {
        self.base.version = version;
    }
    /// Clone `self` into [http::request::Parts].
    pub fn as_owned_parts(&self) -> ReqParts {
        clone_req_parts(&self.base)
    }
}
impl Clone for RequestHeader {
    fn clone(&self) -> Self {
        Self {
            // the inner parts are rebuilt field-by-field via clone_req_parts()
            base: self.as_owned_parts(),
            header_name_map: self.header_name_map.clone(),
            raw_path_fallback: self.raw_path_fallback.clone(),
            send_end_stream: self.send_end_stream,
        }
    }
}
// The `RequestHeader` will be the no case variant, because `ReqParts` keeps no header case
impl From<ReqParts> for RequestHeader {
    fn from(parts: ReqParts) -> RequestHeader {
        Self {
            base: parts,
            header_name_map: None,
            // no illegal path
            raw_path_fallback: vec![],
            // default to allowing END_STREAM on h2 HEADERS
            send_end_stream: true,
        }
    }
}
// Unwrap into the inner `http::request::Parts`, discarding case information.
impl From<RequestHeader> for ReqParts {
    // renamed the binding from `resp` to `req`: this is a *request* header
    fn from(req: RequestHeader) -> ReqParts {
        req.base
    }
}
/// The HTTP response header type.
///
/// This type is similar to [http::response::Parts] but preserves header name case.
/// [ResponseHeader] implements [Deref] for [http::response::Parts] so it can be used as it in most
/// places.
#[derive(Debug)]
pub struct ResponseHeader {
    // the underlying `http` crate response parts (status, version, headers, ...)
    base: RespParts,
    // an ordered header map to store the original case of each header name
    header_name_map: Option<CaseMap>,
    // the reason phrase of the response, if unset, a default one will be used
    reason_phrase: Option<String>,
}
// Allow borrowing the inner `http::response::Parts` directly.
impl AsRef<RespParts> for ResponseHeader {
    fn as_ref(&self) -> &RespParts {
        &self.base
    }
}
// Deref to the inner parts so `ResponseHeader` can be used wherever
// `http::response::Parts` is expected.
impl Deref for ResponseHeader {
    type Target = RespParts;
    fn deref(&self) -> &Self::Target {
        &self.base
    }
}
impl DerefMut for ResponseHeader {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.base
    }
}
impl Clone for ResponseHeader {
    fn clone(&self) -> Self {
        Self {
            // the inner parts are rebuilt field-by-field via clone_resp_parts()
            base: self.as_owned_parts(),
            header_name_map: self.header_name_map.clone(),
            reason_phrase: self.reason_phrase.clone(),
        }
    }
}
// The `ResponseHeader` will be the no case variant, because `RespParts` keeps no header case
impl From<RespParts> for ResponseHeader {
    fn from(parts: RespParts) -> ResponseHeader {
        Self {
            base: parts,
            header_name_map: None,
            // the default reason phrase for the status code will be used
            reason_phrase: None,
        }
    }
}
// Unwrap into the inner parts, discarding case and reason-phrase information.
impl From<ResponseHeader> for RespParts {
    fn from(resp: ResponseHeader) -> RespParts {
        resp.base
    }
}
impl From<Box<ResponseHeader>> for Box<RespParts> {
    fn from(resp: Box<ResponseHeader>) -> Box<RespParts> {
        Box::new(resp.base)
    }
}
impl ResponseHeader {
    // Case-preserving constructor used by [Self::build()].
    fn new(size_hint: Option<usize>) -> Self {
        let mut resp_header = Self::new_no_case(size_hint);
        resp_header.header_name_map = Some(CaseMap::with_capacity(http_header_map_upper_bound(
            size_hint,
        )));
        resp_header
    }
    // Internal constructor: empty response parts with reserved header capacity
    // and no case map.
    fn new_no_case(size_hint: Option<usize>) -> Self {
        let mut base = RespBuilder::new().body(()).unwrap().into_parts().0;
        base.headers.reserve(http_header_map_upper_bound(size_hint));
        ResponseHeader {
            base,
            header_name_map: None,
            reason_phrase: None,
        }
    }
    /// Create a new [ResponseHeader] with the given status code.
    pub fn build(code: impl TryInto<StatusCode>, size_hint: Option<usize>) -> Result<Self> {
        let mut resp = Self::new(size_hint);
        resp.base.status = code
            .try_into()
            .explain_err(InvalidHTTPHeader, |_| "invalid status")?;
        Ok(resp)
    }
    /// Create a new [ResponseHeader] with the given status code without preserving header case.
    ///
    /// A [ResponseHeader] created from this type is more space efficient than those from [Self::build()].
    ///
    /// Use this method if reading from or writing to HTTP/2 sessions where header case doesn't matter anyway.
    pub fn build_no_case(code: impl TryInto<StatusCode>, size_hint: Option<usize>) -> Result<Self> {
        let mut resp = Self::new_no_case(size_hint);
        resp.base.status = code
            .try_into()
            .explain_err(InvalidHTTPHeader, |_| "invalid status")?;
        Ok(resp)
    }
    /// Append the header name and value to `self`.
    ///
    /// If there are already some headers under the same name, a new value will be added without
    /// any others being removed.
    pub fn append_header(
        &mut self,
        name: impl IntoCaseHeaderName,
        value: impl TryInto<HeaderValue>,
    ) -> Result<bool> {
        let header_value = value
            .try_into()
            .explain_err(InvalidHTTPHeader, |_| "invalid value while append")?;
        append_header_value(
            self.header_name_map.as_mut(),
            &mut self.base.headers,
            name,
            header_value,
        )
    }
    /// Insert the header name and value to `self`.
    ///
    /// Different from [Self::append_header()], this method will replace all other existing headers
    /// under the same name (case insensitive).
    pub fn insert_header(
        &mut self,
        name: impl IntoCaseHeaderName,
        value: impl TryInto<HeaderValue>,
    ) -> Result<()> {
        let header_value = value
            .try_into()
            .explain_err(InvalidHTTPHeader, |_| "invalid value while insert")?;
        insert_header_value(
            self.header_name_map.as_mut(),
            &mut self.base.headers,
            name,
            header_value,
        )
    }
    /// Remove all headers under the name
    pub fn remove_header<'a, N: ?Sized>(&mut self, name: &'a N) -> Option<HeaderValue>
    where
        &'a N: 'a + AsHeaderName,
    {
        remove_header(self.header_name_map.as_mut(), &mut self.base.headers, name)
    }
    /// Write the header to the `buf` in HTTP/1.1 wire format.
    ///
    /// The header case will be preserved.
    pub fn header_to_h1_wire(&self, buf: &mut impl BufMut) {
        header_to_h1_wire(self.header_name_map.as_ref(), &self.base.headers, buf)
    }
    /// If case sensitivity is enabled, returns an iterator to iterate over case-sensitive header names and values.
    /// Otherwise returns an empty iterator.
    ///
    /// Headers of the same name are visited in insertion order.
    pub fn case_header_iter(&self) -> impl Iterator<Item = (&CaseHeaderName, &HeaderValue)> + '_ {
        case_header_iter(self.header_name_map.as_ref(), &self.base.headers)
    }
    /// Returns true if the response has case-sensitive headers.
    pub fn has_case(&self) -> bool {
        self.header_name_map.is_some()
    }
    /// Visit every header as a (name, value) pair, in map order.
    ///
    /// The name is passed as [HeaderNameVariant::Case] when a case map is
    /// present, otherwise as a titled-case [HeaderNameVariant::Titled] fallback.
    pub fn map<F: FnMut(HeaderNameVariant, &HeaderValue) -> Result<()>>(
        &self,
        mut f: F,
    ) -> Result<()> {
        let key_map = self.header_name_map.as_ref();
        let value_map = &self.base.headers;
        if let Some(key_map) = key_map {
            let iter = key_map.iter().zip(value_map.iter());
            for ((header, case_header), (header2, val)) in iter {
                if header != header2 {
                    // in case the header iteration order changes in future versions of HMap
                    panic!("header iter mismatch {}, {}", header, header2)
                }
                f(HeaderNameVariant::Case(case_header), val)?;
            }
        } else {
            for (header, value) in value_map {
                let titled_header =
                    case_header_name::titled_header_name_str(header).unwrap_or(header.as_str());
                f(HeaderNameVariant::Titled(titled_header), value)?;
            }
        }
        Ok(())
    }
    /// Set the status code
    pub fn set_status(&mut self, status: impl TryInto<StatusCode>) -> Result<()> {
        self.base.status = status
            .try_into()
            .explain_err(InvalidHTTPHeader, |_| "invalid status")?;
        Ok(())
    }
    /// Set the HTTP version
    pub fn set_version(&mut self, version: Version) {
        self.base.version = version
    }
    /// Set the HTTP reason phase. If `None`, a default reason phase will be used
    pub fn set_reason_phrase(&mut self, reason_phrase: Option<&str>) -> Result<()> {
        // No need to allocate memory to store the phrase if it is the default one.
        if reason_phrase == self.base.status.canonical_reason() {
            self.reason_phrase = None;
            return Ok(());
        }
        // TODO: validate it "*( HTAB / SP / VCHAR / obs-text )"
        self.reason_phrase = reason_phrase.map(str::to_string);
        Ok(())
    }
    /// Get the HTTP reason phase. If [Self::set_reason_phrase()] is never called
    /// or set to `None`, a default reason phase will be used
    pub fn get_reason_phrase(&self) -> Option<&str> {
        self.reason_phrase
            .as_deref()
            .or_else(|| self.base.status.canonical_reason())
    }
    /// Clone `self` into [http::response::Parts].
    pub fn as_owned_parts(&self) -> RespParts {
        clone_resp_parts(&self.base)
    }
    /// Helper function to set the HTTP content length on the response header.
    pub fn set_content_length(&mut self, len: usize) -> Result<()> {
        self.insert_header(http::header::CONTENT_LENGTH, len)
    }
}
// Deep-copy `ReqParts` (method, uri, version, headers, extensions) by
// rebuilding through the builder, since the parts cannot be cloned directly.
fn clone_req_parts(me: &ReqParts) -> ReqParts {
    let mut parts = ReqBuilder::new()
        .method(me.method.clone())
        .uri(me.uri.clone())
        .version(me.version)
        .body(())
        .unwrap()
        .into_parts()
        .0;
    parts.headers = me.headers.clone();
    parts.extensions = me.extensions.clone();
    parts
}
// Deep-copy `RespParts` (status, version, headers, extensions) by rebuilding
// through the builder, since the parts cannot be cloned directly.
fn clone_resp_parts(me: &RespParts) -> RespParts {
    let mut parts = RespBuilder::new()
        .status(me.status)
        .version(me.version)
        .body(())
        .unwrap()
        .into_parts()
        .0;
    parts.headers = me.headers.clone();
    parts.extensions = me.extensions.clone();
    parts
}
// This function returns an upper bound on the size of the header map used inside the http crate.
// As of version 0.2, there is a limit of 1 << 15 (32,768) items inside the map. There is an
// assertion against this size inside the crate, so we want to avoid panicking by not exceeding this
// upper bound.
fn http_header_map_upper_bound(size_hint: Option<usize>) -> usize {
    // Even though the crate has 1 << 15 as the max size, calls to `with_capacity` invoke a
    // function that returns the size + size / 3.
    //
    // See https://github.com/hyperium/http/blob/34a9d6bdab027948d6dea3b36d994f9cbaf96f75/src/header/map.rs#L3220
    //
    // Therefore we set our max size to be even lower, so we guarantee ourselves we won't hit that
    // upper bound in the crate. Any way you cut it, 4,096 headers is insane.
    const PINGORA_MAX_HEADER_COUNT: usize = 4096;
    const INIT_HEADER_SIZE: usize = 8;
    // Fall back to a small default, then clamp well below the crate's limit so
    // the header map always has room to grow.
    size_hint
        .unwrap_or(INIT_HEADER_SIZE)
        .min(PINGORA_MAX_HEADER_COUNT)
}
// Append `value` under `name` in `value_map`, recording the original name case
// in `name_map` when one is provided. Returns whether the name already existed.
#[inline]
fn append_header_value<T>(
    name_map: Option<&mut CaseMap>,
    value_map: &mut HMap<T>,
    name: impl IntoCaseHeaderName,
    value: T,
) -> Result<bool> {
    let case_header_name = name.into_case_header_name();
    // validate the name against the `http` crate's header-name rules
    let header_name: HeaderName = case_header_name
        .as_slice()
        .try_into()
        .or_err(InvalidHTTPHeader, "invalid header name")?;
    // store the original case in the map
    if let Some(name_map) = name_map {
        name_map.append(header_name.clone(), case_header_name);
    }
    Ok(value_map.append(header_name, value))
}
// Insert `value` under `name` in `value_map`, replacing any existing values,
// and record the original name case in `name_map` when one is provided.
#[inline]
fn insert_header_value<T>(
    name_map: Option<&mut CaseMap>,
    value_map: &mut HMap<T>,
    name: impl IntoCaseHeaderName,
    value: T,
) -> Result<()> {
    let case_header_name = name.into_case_header_name();
    // validate the name against the `http` crate's header-name rules
    let header_name: HeaderName = case_header_name
        .as_slice()
        .try_into()
        .or_err(InvalidHTTPHeader, "invalid header name")?;
    if let Some(name_map) = name_map {
        // store the original case in the map
        name_map.insert(header_name.clone(), case_header_name);
    }
    value_map.insert(header_name, value);
    Ok(())
}
// the &N here is to avoid clone(). None Copy type like String can impl AsHeaderName
// Remove all values under `name`; the case map is only touched when a value
// was actually removed, keeping both maps in sync.
#[inline]
fn remove_header<'a, T, N: ?Sized>(
    name_map: Option<&mut CaseMap>,
    value_map: &mut HMap<T>,
    name: &'a N,
) -> Option<T>
where
    &'a N: 'a + AsHeaderName,
{
    let removed = value_map.remove(name);
    if let (true, Some(name_map)) = (removed.is_some(), name_map) {
        name_map.remove(name);
    }
    removed
}
// Serialize all headers to `buf` as `Name: value\r\n` lines, preferring the
// preserved-case names when a case map is available.
#[inline]
fn header_to_h1_wire(key_map: Option<&CaseMap>, value_map: &HMap, buf: &mut impl BufMut) {
    const CRLF: &[u8; 2] = b"\r\n";
    const HEADER_KV_DELIMITER: &[u8; 2] = b": ";
    if let Some(key_map) = key_map {
        // case-preserving path: write the original-case names
        case_header_iter(key_map.into(), value_map).for_each(|(case_header, val)| {
            buf.put_slice(case_header.as_slice());
            buf.put_slice(HEADER_KV_DELIMITER);
            buf.put_slice(val.as_ref());
            buf.put_slice(CRLF);
        });
    } else {
        // no case map: fall back to titled-case (or lowercase) names
        for (header, value) in value_map {
            let titled_header =
                case_header_name::titled_header_name_str(header).unwrap_or(header.as_str());
            buf.put_slice(titled_header.as_bytes());
            buf.put_slice(HEADER_KV_DELIMITER);
            buf.put_slice(value.as_ref());
            buf.put_slice(CRLF);
        }
    }
}
// Zip the case map with the value map, yielding (original-case name, value)
// pairs; yields nothing when no case map is stored. Relies on both maps
// iterating in the same order (asserted below).
#[inline]
fn case_header_iter<'a>(
    name_map: Option<&'a CaseMap>,
    value_map: &'a HMap,
) -> impl Iterator<Item = (&'a CaseHeaderName, &'a HeaderValue)> + 'a {
    name_map.into_iter().flat_map(|name_map| {
        name_map
            .iter()
            .zip(value_map.iter())
            .map(|((h1, name), (h2, value))| {
                // in case the header iteration order changes in future versions of HMap
                assert_eq!(h1, h2, "header iter mismatch {}, {}", h1, h2);
                (name, value)
            })
    })
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn header_map_upper_bound() {
assert_eq!(8, http_header_map_upper_bound(None));
assert_eq!(16, http_header_map_upper_bound(Some(16)));
assert_eq!(4096, http_header_map_upper_bound(Some(7777)));
}
#[test]
fn test_single_header() {
let mut req = RequestHeader::build("GET", b"\\", None).unwrap();
req.insert_header("foo", "bar").unwrap();
req.insert_header("FoO", "Bar").unwrap();
let mut buf: Vec<u8> = vec![];
req.header_to_h1_wire(&mut buf);
assert_eq!(buf, b"FoO: Bar\r\n");
req.case_header_iter().enumerate().for_each(|(i, (k, v))| {
let name = String::from_utf8_lossy(k.as_slice()).into_owned();
let value = String::from_utf8_lossy(v.as_ref()).into_owned();
match i + 1 {
1 => {
assert_eq!(name, "FoO");
assert_eq!(value, "Bar");
}
_ => panic!("too many headers"),
}
});
let mut resp = ResponseHeader::new(None);
resp.insert_header("foo", "bar").unwrap();
resp.insert_header("FoO", "Bar").unwrap();
let mut buf: Vec<u8> = vec![];
resp.header_to_h1_wire(&mut buf);
assert_eq!(buf, b"FoO: Bar\r\n");
resp.case_header_iter().enumerate().for_each(|(i, (k, v))| {
let name = String::from_utf8_lossy(k.as_slice()).into_owned();
let value = String::from_utf8_lossy(v.as_ref()).into_owned();
match i + 1 {
1 => {
assert_eq!(name, "FoO");
assert_eq!(value, "Bar");
}
_ => panic!("too many headers"),
}
});
}
#[test]
fn test_single_header_no_case() {
let mut req = RequestHeader::new_no_case(None);
req.insert_header("foo", "bar").unwrap();
req.insert_header("FoO", "Bar").unwrap();
let mut buf: Vec<u8> = vec![];
req.header_to_h1_wire(&mut buf);
assert_eq!(buf, b"foo: Bar\r\n");
req.case_header_iter().for_each(|(_, _)| {
unreachable!("request has no case");
});
let mut resp = ResponseHeader::new_no_case(None);
resp.insert_header("foo", "bar").unwrap();
resp.insert_header("FoO", "Bar").unwrap();
let mut buf: Vec<u8> = vec![];
resp.header_to_h1_wire(&mut buf);
assert_eq!(buf, b"foo: Bar\r\n");
resp.case_header_iter().for_each(|(_, _)| {
unreachable!("response has no case");
});
}
#[test]
fn test_multiple_header() {
let mut req = RequestHeader::build("GET", b"\\", None).unwrap();
req.append_header("FoO", "Bar").unwrap();
req.append_header("fOO", "bar").unwrap();
req.append_header("BAZ", "baR").unwrap();
req.append_header(http::header::CONTENT_LENGTH, "0")
.unwrap();
req.append_header("a", "b").unwrap();
req.remove_header("a");
let mut buf: Vec<u8> = vec![];
req.header_to_h1_wire(&mut buf);
assert_eq!(
buf,
b"FoO: Bar\r\nfOO: bar\r\nBAZ: baR\r\nContent-Length: 0\r\n"
);
req.case_header_iter().enumerate().for_each(|(i, (k, v))| {
let name = String::from_utf8_lossy(k.as_slice()).into_owned();
let value = String::from_utf8_lossy(v.as_ref()).into_owned();
match i + 1 {
1 => {
assert_eq!(name, "FoO");
assert_eq!(value, "Bar");
}
2 => {
assert_eq!(name, "fOO");
assert_eq!(value, "bar");
}
3 => {
assert_eq!(name, "BAZ");
assert_eq!(value, "baR");
}
4 => {
assert_eq!(name, "Content-Length");
assert_eq!(value, "0");
}
_ => panic!("too many headers"),
}
});
let mut resp = ResponseHeader::new(None);
resp.append_header("FoO", "Bar").unwrap();
resp.append_header("fOO", "bar").unwrap();
resp.append_header("BAZ", "baR").unwrap();
resp.append_header(http::header::CONTENT_LENGTH, "0")
.unwrap();
resp.append_header("a", "b").unwrap();
resp.remove_header("a");
let mut buf: Vec<u8> = vec![];
resp.header_to_h1_wire(&mut buf);
assert_eq!(
buf,
b"FoO: Bar\r\nfOO: bar\r\nBAZ: baR\r\nContent-Length: 0\r\n"
);
resp.case_header_iter().enumerate().for_each(|(i, (k, v))| {
let name = String::from_utf8_lossy(k.as_slice()).into_owned();
let value = String::from_utf8_lossy(v.as_ref()).into_owned();
match i + 1 {
1 => {
assert_eq!(name, "FoO");
assert_eq!(value, "Bar");
}
2 => {
assert_eq!(name, "fOO");
assert_eq!(value, "bar");
}
3 => {
assert_eq!(name, "BAZ");
assert_eq!(value, "baR");
}
4 => {
assert_eq!(name, "Content-Length");
assert_eq!(value, "0");
}
_ => panic!("too many headers"),
}
});
}
#[cfg(feature = "patched_http1")]
#[test]
fn test_invalid_path() {
let raw_path = b"Hello\xF0\x90\x80World";
let req = RequestHeader::build("GET", &raw_path[..], None).unwrap();
assert_eq!("Hello�World", req.uri.path_and_query().unwrap());
assert_eq!(raw_path, req.raw_path());
}
#[cfg(feature = "patched_http1")]
#[test]
fn test_override_invalid_path() {
let raw_path = b"Hello\xF0\x90\x80World";
let mut req = RequestHeader::build("GET", &raw_path[..], None).unwrap();
assert_eq!("Hello�World", req.uri.path_and_query().unwrap());
assert_eq!(raw_path, req.raw_path());
let new_path = "/HelloWorld";
req.set_uri(Uri::builder().path_and_query(new_path).build().unwrap());
assert_eq!(new_path, req.uri.path_and_query().unwrap());
assert_eq!(new_path.as_bytes(), req.raw_path());
}
#[test]
fn test_reason_phrase() {
let mut resp = ResponseHeader::new(None);
let reason = resp.get_reason_phrase().unwrap();
assert_eq!(reason, "OK");
resp.set_reason_phrase(Some("FooBar")).unwrap();
let reason = resp.get_reason_phrase().unwrap();
assert_eq!(reason, "FooBar");
resp.set_reason_phrase(Some("OK")).unwrap();
let reason = resp.get_reason_phrase().unwrap();
assert_eq!(reason, "OK");
resp.set_reason_phrase(None).unwrap();
let reason = resp.get_reason_phrase().unwrap();
assert_eq!(reason, "OK");
}
#[test]
fn set_test_send_end_stream() {
let mut req = RequestHeader::build("GET", b"/", None).unwrap();
req.set_send_end_stream(true);
// None for requests that are not h2
assert!(req.send_end_stream().is_none());
let mut req = RequestHeader::build("GET", b"/", None).unwrap();
req.set_version(Version::HTTP_2);
// Some(true) by default for h2
assert!(req.send_end_stream().unwrap());
req.set_send_end_stream(false);
// Some(false)
assert!(!req.send_end_stream().unwrap());
}
#[test]
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | true |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-http/src/case_header_name.rs | pingora-http/src/case_header_name.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::*;
use bytes::Bytes;
use http::header;
/// A header name that preserves the exact byte case it was created with.
#[derive(Debug, Clone)]
pub struct CaseHeaderName(Bytes);
// consolidated the two redundant inherent `impl` blocks into one
impl CaseHeaderName {
    /// Create a case-preserving header name from an owned `String`.
    pub fn new(name: String) -> Self {
        CaseHeaderName(name.into())
    }
    /// The raw bytes of the header name, in their original case.
    pub fn as_slice(&self) -> &[u8] {
        &self.0
    }
    /// Create a case-preserving header name by copying `buf`.
    pub fn from_slice(buf: &[u8]) -> Self {
        CaseHeaderName(Bytes::copy_from_slice(buf))
    }
}
/// A trait that converts into case-sensitive header names.
pub trait IntoCaseHeaderName {
    /// Consume `self` and produce the case-preserving header name.
    fn into_case_header_name(self) -> CaseHeaderName;
}
impl IntoCaseHeaderName for CaseHeaderName {
    fn into_case_header_name(self) -> CaseHeaderName {
        self
    }
}
// Strings and byte buffers are taken as-is: whatever case the caller wrote is kept.
impl IntoCaseHeaderName for String {
    fn into_case_header_name(self) -> CaseHeaderName {
        CaseHeaderName(self.into())
    }
}
impl IntoCaseHeaderName for &'static str {
    fn into_case_header_name(self) -> CaseHeaderName {
        CaseHeaderName(self.into())
    }
}
// `HeaderName` values go through titled_header_name() so that well-known
// constants are restored to their titled-case spelling.
impl IntoCaseHeaderName for HeaderName {
    fn into_case_header_name(self) -> CaseHeaderName {
        CaseHeaderName(titled_header_name(&self))
    }
}
impl IntoCaseHeaderName for &HeaderName {
    fn into_case_header_name(self) -> CaseHeaderName {
        CaseHeaderName(titled_header_name(self))
    }
}
impl IntoCaseHeaderName for Bytes {
    fn into_case_header_name(self) -> CaseHeaderName {
        CaseHeaderName(self)
    }
}
fn titled_header_name(header_name: &HeaderName) -> Bytes {
titled_header_name_str(header_name).map_or_else(
|| Bytes::copy_from_slice(header_name.as_str().as_bytes()),
|s| Bytes::from_static(s.as_bytes()),
)
}
pub(crate) fn titled_header_name_str(header_name: &HeaderName) -> Option<&'static str> {
Some(match *header_name {
header::ACCEPT_RANGES => "Accept-Ranges",
header::AGE => "Age",
header::CACHE_CONTROL => "Cache-Control",
header::CONNECTION => "Connection",
header::CONTENT_TYPE => "Content-Type",
header::CONTENT_ENCODING => "Content-Encoding",
header::CONTENT_LENGTH => "Content-Length",
header::DATE => "Date",
header::TRANSFER_ENCODING => "Transfer-Encoding",
header::HOST => "Host",
header::SERVER => "Server",
header::SET_COOKIE => "Set-Cookie",
// TODO: add more const header here to map to their titled case
// TODO: automatically upper case the first letter?
_ => {
return None;
}
})
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_case_header_name() {
assert_eq!("FoO".into_case_header_name().as_slice(), b"FoO");
assert_eq!("FoO".to_string().into_case_header_name().as_slice(), b"FoO");
assert_eq!(header::SERVER.into_case_header_name().as_slice(), b"Server");
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-error/src/immut_str.rs | pingora-error/src/immut_str.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
/// A data struct that holds either immutable string or reference to static str.
/// Compared to String or `Box<str>`, it avoids memory allocation on static str.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum ImmutStr {
Static(&'static str),
Owned(Box<str>),
}
impl ImmutStr {
#[inline]
pub fn as_str(&self) -> &str {
match self {
ImmutStr::Static(s) => s,
ImmutStr::Owned(s) => s.as_ref(),
}
}
pub fn is_owned(&self) -> bool {
match self {
ImmutStr::Static(_) => false,
ImmutStr::Owned(_) => true,
}
}
}
impl fmt::Display for ImmutStr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl From<&'static str> for ImmutStr {
fn from(s: &'static str) -> Self {
ImmutStr::Static(s)
}
}
impl From<String> for ImmutStr {
fn from(s: String) -> Self {
ImmutStr::Owned(s.into_boxed_str())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_static_vs_owned() {
let s: ImmutStr = "test".into();
assert!(!s.is_owned());
let s: ImmutStr = "test".to_string().into();
assert!(s.is_owned());
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-error/src/lib.rs | pingora-error/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![warn(clippy::all)]
//! The library to provide the struct to represent errors in pingora.
pub use std::error::Error as ErrorTrait;
use std::fmt;
use std::fmt::Debug;
use std::result::Result as StdResult;
mod immut_str;
pub use immut_str::ImmutStr;
/// The boxed [Error], the desired way to pass [Error]
pub type BError = Box<Error>;
/// Syntax sugar for `std::Result<T, BError>`
pub type Result<T, E = BError> = StdResult<T, E>;
/// The struct that represents an error
#[derive(Debug)]
pub struct Error {
/// the type of error
pub etype: ErrorType,
/// the source of error: from upstream, downstream or internal
pub esource: ErrorSource,
/// if the error is retry-able
pub retry: RetryType,
/// chain to the cause of this error
pub cause: Option<Box<dyn ErrorTrait + Send + Sync>>,
/// an arbitrary string that explains the context when the error happens
pub context: Option<ImmutStr>,
}
/// The source of the error
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum ErrorSource {
/// The error is caused by the remote server
Upstream,
/// The error is caused by the remote client
Downstream,
/// The error is caused by the internal logic
Internal,
/// Error source unknown or to be set
Unset,
}
/// Whether the request can be retried after encountering this error
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum RetryType {
Decided(bool),
ReusedOnly, // only retry when the error is from a reused connection
}
impl RetryType {
pub fn decide_reuse(&mut self, reused: bool) {
if matches!(self, RetryType::ReusedOnly) {
*self = RetryType::Decided(reused);
}
}
pub fn retry(&self) -> bool {
match self {
RetryType::Decided(b) => *b,
RetryType::ReusedOnly => {
panic!("Retry is not decided")
}
}
}
}
impl From<bool> for RetryType {
fn from(b: bool) -> Self {
RetryType::Decided(b)
}
}
impl ErrorSource {
/// for displaying the error source
pub fn as_str(&self) -> &'static str {
match self {
Self::Upstream => "Upstream",
Self::Downstream => "Downstream",
Self::Internal => "Internal",
Self::Unset => "",
}
}
}
/// Predefined type of errors
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum ErrorType {
// connect errors
ConnectTimedout,
ConnectRefused,
ConnectNoRoute,
TLSWantX509Lookup,
TLSHandshakeFailure,
TLSHandshakeTimedout,
InvalidCert,
HandshakeError, // other handshake
ConnectError, // catch all
BindError,
AcceptError,
SocketError,
ConnectProxyFailure,
// protocol errors
InvalidHTTPHeader,
H1Error, // catch all
H2Error, // catch all
H2Downgrade, // Peer over h2 requests to downgrade to h1
InvalidH2, // Peer sends invalid h2 frames to us
// IO error on established connections
ReadError,
WriteError,
ReadTimedout,
WriteTimedout,
ConnectionClosed,
// application error, will return HTTP status code
HTTPStatus(u16),
// file related
FileOpenError,
FileCreateError,
FileReadError,
FileWriteError,
// other errors
InternalError,
// catch all
UnknownError,
/// Custom error with static string.
/// this field is to allow users to extend the types of errors. If runtime generated string
/// is needed, it is more likely to be treated as "context" rather than "type".
Custom(&'static str),
/// Custom error with static string and code.
/// this field allows users to extend error further with error codes.
CustomCode(&'static str, u16),
}
impl ErrorType {
/// create a new type of error. Users should try to make `name` unique.
pub const fn new(name: &'static str) -> Self {
ErrorType::Custom(name)
}
/// create a new type of error. Users should try to make `name` unique.
pub const fn new_code(name: &'static str, code: u16) -> Self {
ErrorType::CustomCode(name, code)
}
/// for displaying the error type
pub fn as_str(&self) -> &'static str {
match self {
ErrorType::ConnectTimedout => "ConnectTimedout",
ErrorType::ConnectRefused => "ConnectRefused",
ErrorType::ConnectNoRoute => "ConnectNoRoute",
ErrorType::ConnectProxyFailure => "ConnectProxyFailure",
ErrorType::TLSWantX509Lookup => "TLSWantX509Lookup",
ErrorType::TLSHandshakeFailure => "TLSHandshakeFailure",
ErrorType::TLSHandshakeTimedout => "TLSHandshakeTimedout",
ErrorType::InvalidCert => "InvalidCert",
ErrorType::HandshakeError => "HandshakeError",
ErrorType::ConnectError => "ConnectError",
ErrorType::BindError => "BindError",
ErrorType::AcceptError => "AcceptError",
ErrorType::SocketError => "SocketError",
ErrorType::InvalidHTTPHeader => "InvalidHTTPHeader",
ErrorType::H1Error => "H1Error",
ErrorType::H2Error => "H2Error",
ErrorType::InvalidH2 => "InvalidH2",
ErrorType::H2Downgrade => "H2Downgrade",
ErrorType::ReadError => "ReadError",
ErrorType::WriteError => "WriteError",
ErrorType::ReadTimedout => "ReadTimedout",
ErrorType::WriteTimedout => "WriteTimedout",
ErrorType::ConnectionClosed => "ConnectionClosed",
ErrorType::FileOpenError => "FileOpenError",
ErrorType::FileCreateError => "FileCreateError",
ErrorType::FileReadError => "FileReadError",
ErrorType::FileWriteError => "FileWriteError",
ErrorType::HTTPStatus(_) => "HTTPStatus",
ErrorType::InternalError => "InternalError",
ErrorType::UnknownError => "UnknownError",
ErrorType::Custom(s) => s,
ErrorType::CustomCode(s, _) => s,
}
}
}
impl Error {
/// Simply create the error. See other functions that provide less verbose interfaces.
#[inline]
pub fn create(
etype: ErrorType,
esource: ErrorSource,
context: Option<ImmutStr>,
cause: Option<Box<dyn ErrorTrait + Send + Sync>>,
) -> BError {
let retry = if let Some(c) = cause.as_ref() {
if let Some(e) = c.downcast_ref::<BError>() {
e.retry
} else {
false.into()
}
} else {
false.into()
};
Box::new(Error {
etype,
esource,
retry,
cause,
context,
})
}
#[inline]
fn do_new(e: ErrorType, s: ErrorSource) -> BError {
Self::create(e, s, None, None)
}
/// Create an error with the given type
#[inline]
pub fn new(e: ErrorType) -> BError {
Self::do_new(e, ErrorSource::Unset)
}
/// Create an error with the given type, a context string and the causing error.
/// This method is usually used when there the error is caused by another error.
/// ```
/// use pingora_error::{Error, ErrorType, Result};
///
/// fn b() -> Result<()> {
/// // ...
/// Ok(())
/// }
/// fn do_something() -> Result<()> {
/// // a()?;
/// b().map_err(|e| Error::because(ErrorType::InternalError, "b failed after a", e))
/// }
/// ```
/// Choose carefully between simply surfacing the causing error versus Because() here.
/// Only use Because() when there is extra context that is not capture by
/// the causing error itself.
#[inline]
pub fn because<S: Into<ImmutStr>, E: Into<Box<dyn ErrorTrait + Send + Sync>>>(
e: ErrorType,
context: S,
cause: E,
) -> BError {
Self::create(
e,
ErrorSource::Unset,
Some(context.into()),
Some(cause.into()),
)
}
/// Short for Err(Self::because)
#[inline]
pub fn e_because<T, S: Into<ImmutStr>, E: Into<Box<dyn ErrorTrait + Send + Sync>>>(
e: ErrorType,
context: S,
cause: E,
) -> Result<T> {
Err(Self::because(e, context, cause))
}
/// Create an error with context but no direct causing error
#[inline]
pub fn explain<S: Into<ImmutStr>>(e: ErrorType, context: S) -> BError {
Self::create(e, ErrorSource::Unset, Some(context.into()), None)
}
/// Short for Err(Self::explain)
#[inline]
pub fn e_explain<T, S: Into<ImmutStr>>(e: ErrorType, context: S) -> Result<T> {
Err(Self::explain(e, context))
}
/// The new_{up, down, in} functions are to create new errors with source
/// {upstream, downstream, internal}
#[inline]
pub fn new_up(e: ErrorType) -> BError {
Self::do_new(e, ErrorSource::Upstream)
}
#[inline]
pub fn new_down(e: ErrorType) -> BError {
Self::do_new(e, ErrorSource::Downstream)
}
#[inline]
pub fn new_in(e: ErrorType) -> BError {
Self::do_new(e, ErrorSource::Internal)
}
/// Create a new custom error with the static string
#[inline]
pub fn new_str(s: &'static str) -> BError {
Self::do_new(ErrorType::Custom(s), ErrorSource::Unset)
}
// the err_* functions are the same as new_* but return a Result<T>
#[inline]
pub fn err<T>(e: ErrorType) -> Result<T> {
Err(Self::new(e))
}
#[inline]
pub fn err_up<T>(e: ErrorType) -> Result<T> {
Err(Self::new_up(e))
}
#[inline]
pub fn err_down<T>(e: ErrorType) -> Result<T> {
Err(Self::new_down(e))
}
#[inline]
pub fn err_in<T>(e: ErrorType) -> Result<T> {
Err(Self::new_in(e))
}
pub fn etype(&self) -> &ErrorType {
&self.etype
}
pub fn esource(&self) -> &ErrorSource {
&self.esource
}
pub fn retry(&self) -> bool {
self.retry.retry()
}
pub fn set_retry(&mut self, retry: bool) {
self.retry = retry.into();
}
pub fn reason_str(&self) -> &str {
self.etype.as_str()
}
pub fn source_str(&self) -> &str {
self.esource.as_str()
}
/// The as_{up, down, in} functions are to change the current errors with source
/// {upstream, downstream, internal}
pub fn as_up(&mut self) {
self.esource = ErrorSource::Upstream;
}
pub fn as_down(&mut self) {
self.esource = ErrorSource::Downstream;
}
pub fn as_in(&mut self) {
self.esource = ErrorSource::Internal;
}
/// The into_{up, down, in} are the same as as_* but takes `self` and also return `self`
pub fn into_up(mut self: BError) -> BError {
self.as_up();
self
}
pub fn into_down(mut self: BError) -> BError {
self.as_down();
self
}
pub fn into_in(mut self: BError) -> BError {
self.as_in();
self
}
pub fn into_err<T>(self: BError) -> Result<T> {
Err(self)
}
pub fn set_cause<C: Into<Box<dyn ErrorTrait + Send + Sync>>>(&mut self, cause: C) {
self.cause = Some(cause.into());
}
pub fn set_context<T: Into<ImmutStr>>(&mut self, context: T) {
self.context = Some(context.into());
}
/// Create a new error from self, with the same type and source and put self as the cause
/// ```
/// use pingora_error::Result;
///
/// fn b() -> Result<()> {
/// // ...
/// Ok(())
/// }
///
/// fn do_something() -> Result<()> {
/// // a()?;
/// b().map_err(|e| e.more_context("b failed after a"))
/// }
/// ```
/// This function is less verbose than `Because`. But it only work for [Error] while
/// `Because` works for all types of errors who implement [std::error::Error] trait.
pub fn more_context<T: Into<ImmutStr>>(self: BError, context: T) -> BError {
let esource = self.esource.clone();
let retry = self.retry;
let mut e = Self::because(self.etype.clone(), context, self);
e.esource = esource;
e.retry = retry;
e
}
// Display error but skip the duplicate elements from the error in previous hop
fn chain_display(&self, previous: Option<&Error>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if previous.map(|p| p.esource != self.esource).unwrap_or(true) {
write!(f, "{}", self.esource.as_str())?
}
if previous.map(|p| p.etype != self.etype).unwrap_or(true) {
write!(f, " {}", self.etype.as_str())?
}
if let Some(c) = self.context.as_ref() {
write!(f, " context: {}", c)?;
}
if let Some(c) = self.cause.as_ref() {
if let Some(e) = c.downcast_ref::<BError>() {
write!(f, " cause: ")?;
e.chain_display(Some(self), f)
} else {
write!(f, " cause: {}", c)
}
} else {
Ok(())
}
}
/// Return the ErrorType of the root Error
pub fn root_etype(&self) -> &ErrorType {
self.cause.as_ref().map_or(&self.etype, |c| {
// Stop the recursion if the cause is not Error
c.downcast_ref::<BError>()
.map_or(&self.etype, |e| e.root_etype())
})
}
pub fn root_cause(&self) -> &(dyn ErrorTrait + Send + Sync + 'static) {
self.cause.as_deref().map_or(self, |c| {
c.downcast_ref::<BError>().map_or(c, |e| e.root_cause())
})
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.chain_display(None, f)
}
}
impl ErrorTrait for Error {}
/// Helper trait to add more context to a given error
pub trait Context<T> {
/// Wrap the `Err(E)` in [Result] with more context, the existing E will be the cause.
///
/// This is a shortcut for map_err() + more_context()
fn err_context<C: Into<ImmutStr>, F: FnOnce() -> C>(self, context: F) -> Result<T, BError>;
}
impl<T> Context<T> for Result<T, BError> {
fn err_context<C: Into<ImmutStr>, F: FnOnce() -> C>(self, context: F) -> Result<T, BError> {
self.map_err(|e| e.more_context(context()))
}
}
/// Helper trait to chain errors with context
pub trait OrErr<T, E> {
/// Wrap the E in [Result] with new [ErrorType] and context, the existing E will be the cause.
///
/// This is a shortcut for map_err() + because()
fn or_err(self, et: ErrorType, context: &'static str) -> Result<T, BError>
where
E: Into<Box<dyn ErrorTrait + Send + Sync>>;
/// Similar to or_err(), but takes a closure, which is useful for constructing String.
fn or_err_with<C: Into<ImmutStr>, F: FnOnce() -> C>(
self,
et: ErrorType,
context: F,
) -> Result<T, BError>
where
E: Into<Box<dyn ErrorTrait + Send + Sync>>;
/// Replace the E in [Result] with a new [Error] generated from the current error
///
/// This is useful when the current error cannot move out of scope. This is a shortcut for map_err() + explain().
fn explain_err<C: Into<ImmutStr>, F: FnOnce(E) -> C>(
self,
et: ErrorType,
context: F,
) -> Result<T, BError>;
/// Similar to or_err() but just to surface errors that are not [Error] (where `?` cannot be used directly).
///
/// or_err()/or_err_with() are still preferred because they make the error more readable and traceable.
fn or_fail(self) -> Result<T>
where
E: Into<Box<dyn ErrorTrait + Send + Sync>>;
}
impl<T, E> OrErr<T, E> for Result<T, E> {
fn or_err(self, et: ErrorType, context: &'static str) -> Result<T, BError>
where
E: Into<Box<dyn ErrorTrait + Send + Sync>>,
{
self.map_err(|e| Error::because(et, context, e))
}
fn or_err_with<C: Into<ImmutStr>, F: FnOnce() -> C>(
self,
et: ErrorType,
context: F,
) -> Result<T, BError>
where
E: Into<Box<dyn ErrorTrait + Send + Sync>>,
{
self.map_err(|e| Error::because(et, context(), e))
}
fn explain_err<C: Into<ImmutStr>, F: FnOnce(E) -> C>(
self,
et: ErrorType,
exp: F,
) -> Result<T, BError> {
self.map_err(|e| Error::explain(et, exp(e)))
}
fn or_fail(self) -> Result<T, BError>
where
E: Into<Box<dyn ErrorTrait + Send + Sync>>,
{
self.map_err(|e| Error::because(ErrorType::InternalError, "", e))
}
}
/// Helper trait to convert an [Option] to an [Error] with context.
pub trait OkOrErr<T> {
fn or_err(self, et: ErrorType, context: &'static str) -> Result<T, BError>;
fn or_err_with<C: Into<ImmutStr>, F: FnOnce() -> C>(
self,
et: ErrorType,
context: F,
) -> Result<T, BError>;
}
impl<T> OkOrErr<T> for Option<T> {
/// Convert the [Option] to a new [Error] with [ErrorType] and context if None, Ok otherwise.
///
/// This is a shortcut for .ok_or(Error::explain())
fn or_err(self, et: ErrorType, context: &'static str) -> Result<T, BError> {
self.ok_or(Error::explain(et, context))
}
/// Similar to to_err(), but takes a closure, which is useful for constructing String.
fn or_err_with<C: Into<ImmutStr>, F: FnOnce() -> C>(
self,
et: ErrorType,
context: F,
) -> Result<T, BError> {
self.ok_or_else(|| Error::explain(et, context()))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_chain_of_error() {
let e1 = Error::new(ErrorType::InternalError);
let mut e2 = Error::new(ErrorType::HTTPStatus(400));
e2.set_cause(e1);
assert_eq!(format!("{}", e2), " HTTPStatus cause: InternalError");
assert_eq!(e2.root_etype().as_str(), "InternalError");
let e3 = Error::new(ErrorType::InternalError);
let e4 = Error::because(ErrorType::HTTPStatus(400), "test", e3);
assert_eq!(
format!("{}", e4),
" HTTPStatus context: test cause: InternalError"
);
assert_eq!(e4.root_etype().as_str(), "InternalError");
}
#[test]
fn test_error_context() {
let mut e1 = Error::new(ErrorType::InternalError);
e1.set_context(format!("{} {}", "my", "context"));
assert_eq!(format!("{}", e1), " InternalError context: my context");
}
#[test]
fn test_context_trait() {
let e1: Result<(), BError> = Err(Error::new(ErrorType::InternalError));
let e2 = e1.err_context(|| "another");
assert_eq!(
format!("{}", e2.unwrap_err()),
" InternalError context: another cause: "
);
}
#[test]
fn test_cause_trait() {
let e1: Result<(), BError> = Err(Error::new(ErrorType::InternalError));
let e2 = e1.or_err(ErrorType::HTTPStatus(400), "another");
assert_eq!(
format!("{}", e2.unwrap_err()),
" HTTPStatus context: another cause: InternalError"
);
}
#[test]
fn test_option_some_ok() {
let m = Some(2);
let o = m.or_err(ErrorType::InternalError, "some is not an error!");
assert_eq!(2, o.unwrap());
let o = m.or_err_with(ErrorType::InternalError, || "some is not an error!");
assert_eq!(2, o.unwrap());
}
#[test]
fn test_option_none_err() {
let m: Option<i32> = None;
let e1 = m.or_err(ErrorType::InternalError, "none is an error!");
assert_eq!(
format!("{}", e1.unwrap_err()),
" InternalError context: none is an error!"
);
let e1 = m.or_err_with(ErrorType::InternalError, || "none is an error!");
assert_eq!(
format!("{}", e1.unwrap_err()),
" InternalError context: none is an error!"
);
}
#[test]
fn test_into() {
fn other_error() -> Result<(), &'static str> {
Err("oops")
}
fn surface_err() -> Result<()> {
other_error().or_fail()?; // can return directly but want to showcase ?
Ok(())
}
let e = surface_err().unwrap_err();
assert_eq!(format!("{}", e), " InternalError context: cause: oops");
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora/src/lib.rs | pingora/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![warn(clippy::all)]
#![allow(clippy::new_without_default)]
#![allow(clippy::type_complexity)]
#![allow(clippy::match_wild_err_arm)]
#![allow(clippy::missing_safety_doc)]
#![allow(clippy::upper_case_acronyms)]
// This enables the feature that labels modules that are only available with
// certain pingora features
#![cfg_attr(docsrs, feature(doc_cfg))]
//! # Pingora
//!
//! Pingora is a framework to build fast, reliable and programmable networked systems at Internet scale.
//!
//! # Features
//! - Http 1.x and Http 2
//! - Modern TLS with OpenSSL or BoringSSL (FIPS compatible)
//! - Zero downtime upgrade
//!
//! # Usage
//! This crate provides low level service and protocol implementation and abstraction.
//!
//! If looking to build a (reverse) proxy, see [`pingora-proxy`](https://docs.rs/pingora-proxy) crate.
//!
//! # Feature flags
#![cfg_attr(
feature = "document-features",
cfg_attr(doc, doc = ::document_features::document_features!())
)]
pub use pingora_core::*;
/// HTTP header objects that preserve http header cases
pub mod http {
pub use pingora_http::*;
}
#[cfg(feature = "cache")]
#[cfg_attr(docsrs, doc(cfg(feature = "cache")))]
/// Caching services and tooling
pub mod cache {
pub use pingora_cache::*;
}
#[cfg(feature = "lb")]
#[cfg_attr(docsrs, doc(cfg(feature = "lb")))]
/// Load balancing recipes
pub mod lb {
pub use pingora_load_balancing::*;
}
#[cfg(feature = "proxy")]
#[cfg_attr(docsrs, doc(cfg(feature = "proxy")))]
/// Proxying recipes
pub mod proxy {
pub use pingora_proxy::*;
}
#[cfg(feature = "time")]
#[cfg_attr(docsrs, doc(cfg(feature = "time")))]
/// Timeouts and other useful time utilities
pub mod time {
pub use pingora_timeout::*;
}
/// A useful set of types for getting started
pub mod prelude {
pub use pingora_core::prelude::*;
pub use pingora_http::prelude::*;
pub use pingora_timeout::*;
#[cfg(feature = "cache")]
#[cfg_attr(docsrs, doc(cfg(feature = "cache")))]
pub use pingora_cache::prelude::*;
#[cfg(feature = "lb")]
#[cfg_attr(docsrs, doc(cfg(feature = "lb")))]
pub use pingora_load_balancing::prelude::*;
#[cfg(feature = "proxy")]
#[cfg_attr(docsrs, doc(cfg(feature = "proxy")))]
pub use pingora_proxy::prelude::*;
#[cfg(feature = "time")]
#[cfg_attr(docsrs, doc(cfg(feature = "time")))]
pub use pingora_timeout::*;
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora/examples/client.rs | pingora/examples/client.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use pingora::{connectors::http::Connector, prelude::*};
use regex::Regex;
#[tokio::main]
async fn main() -> Result<()> {
let connector = Connector::new(None);
// create the HTTP session
let peer_addr = "1.1.1.1:443";
let mut peer = HttpPeer::new(peer_addr, true, "one.one.one.one".into());
peer.options.set_http_version(2, 1);
let (mut http, _reused) = connector.get_http_session(&peer).await?;
// perform a GET request
let mut new_request = RequestHeader::build("GET", b"/", None)?;
new_request.insert_header("Host", "one.one.one.one")?;
http.write_request_header(Box::new(new_request)).await?;
// Servers usually don't respond until the full request body is read.
http.finish_request_body().await?;
http.read_response_header().await?;
// display the headers from the response
if let Some(header) = http.response_header() {
println!("{header:#?}");
} else {
return Error::e_explain(ErrorType::InvalidHTTPHeader, "No response header");
};
// collect the response body
let mut response_body = String::new();
while let Some(chunk) = http.read_response_body().await? {
response_body.push_str(&String::from_utf8_lossy(&chunk));
}
// verify that the response body is valid HTML by displaying the page <title>
let re = Regex::new(r"<title>(.*?)</title>")
.or_err(ErrorType::InternalError, "Failed to compile regex")?;
if let Some(title) = re
.captures(&response_body)
.and_then(|caps| caps.get(1).map(|match_| match_.as_str()))
{
println!("Page Title: {title}");
} else {
return Error::e_explain(
ErrorType::new("InvalidHTML"),
"No <title> found in response body",
);
}
// gracefully release the connection
connector
.release_http_session(http, &peer, Some(std::time::Duration::from_secs(5)))
.await;
Ok(())
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora/examples/server.rs | pingora/examples/server.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[global_allocator]
static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc;
use pingora::listeners::tls::TlsSettings;
use pingora::protocols::TcpKeepalive;
use pingora::server::configuration::Opt;
use pingora::server::{Server, ShutdownWatch};
use pingora::services::background::{background_service, BackgroundService};
use pingora::services::{listening::Service as ListeningService, Service};
use async_trait::async_trait;
use clap::Parser;
use tokio::time::interval;
use std::time::Duration;
mod app;
mod service;
pub struct ExampleBackgroundService;
#[async_trait]
impl BackgroundService for ExampleBackgroundService {
async fn start(&self, mut shutdown: ShutdownWatch) {
let mut period = interval(Duration::from_secs(1));
loop {
tokio::select! {
_ = shutdown.changed() => {
// shutdown
break;
}
_ = period.tick() => {
// do some work
// ...
}
}
}
}
}
#[cfg(feature = "openssl_derived")]
mod boringssl_openssl {
use super::*;
use pingora::tls::pkey::{PKey, Private};
use pingora::tls::x509::X509;
pub(super) struct DynamicCert {
cert: X509,
key: PKey<Private>,
}
impl DynamicCert {
pub(super) fn new(cert: &str, key: &str) -> Box<Self> {
let cert_bytes = std::fs::read(cert).unwrap();
let cert = X509::from_pem(&cert_bytes).unwrap();
let key_bytes = std::fs::read(key).unwrap();
let key = PKey::private_key_from_pem(&key_bytes).unwrap();
Box::new(DynamicCert { cert, key })
}
}
#[async_trait]
impl pingora::listeners::TlsAccept for DynamicCert {
async fn certificate_callback(&self, ssl: &mut pingora::tls::ssl::SslRef) {
use pingora::tls::ext;
ext::ssl_use_certificate(ssl, &self.cert).unwrap();
ext::ssl_use_private_key(ssl, &self.key).unwrap();
}
}
}
const USAGE: &str = r#"
Usage
port 6142: TCP echo server
nc 127.0.0.1 6142
port 6143: TLS echo server
openssl s_client -connect 127.0.0.1:6143
port 6145: Http echo server
curl http://127.0.0.1:6145 -v -d 'hello'
port 6148: Https echo server
curl https://127.0.0.1:6148 -vk -d 'hello'
port 6141: TCP proxy
curl http://127.0.0.1:6141 -v -H 'host: 1.1.1.1'
port 6144: TLS proxy
curl https://127.0.0.1:6144 -vk -H 'host: one.one.one.one' -o /dev/null
port 6150: metrics endpoint
curl http://127.0.0.1:6150
"#;
pub fn main() {
env_logger::init();
print!("{USAGE}");
let opt = Some(Opt::parse());
let mut my_server = Server::new(opt).unwrap();
my_server.bootstrap();
let cert_path = format!("{}/tests/keys/server.crt", env!("CARGO_MANIFEST_DIR"));
let key_path = format!("{}/tests/keys/key.pem", env!("CARGO_MANIFEST_DIR"));
let mut echo_service = service::echo::echo_service();
echo_service.add_tcp("127.0.0.1:6142");
echo_service
.add_tls("0.0.0.0:6143", &cert_path, &key_path)
.unwrap();
let mut echo_service_http = service::echo::echo_service_http();
let mut options = pingora::listeners::TcpSocketOptions::default();
options.tcp_fastopen = Some(10);
options.tcp_keepalive = Some(TcpKeepalive {
idle: Duration::from_secs(60),
interval: Duration::from_secs(5),
count: 5,
#[cfg(target_os = "linux")]
user_timeout: Duration::from_secs(85),
});
echo_service_http.add_tcp_with_settings("0.0.0.0:6145", options);
echo_service_http.add_uds("/tmp/echo.sock", None);
let mut tls_settings;
// NOTE: dynamic certificate callback is only supported with BoringSSL/OpenSSL
#[cfg(feature = "openssl_derived")]
{
use std::ops::DerefMut;
let dynamic_cert = boringssl_openssl::DynamicCert::new(&cert_path, &key_path);
tls_settings = TlsSettings::with_callbacks(dynamic_cert).unwrap();
// by default intermediate supports both TLS 1.2 and 1.3. We force to tls 1.2 just for the demo
tls_settings
.deref_mut()
.deref_mut()
.set_max_proto_version(Some(pingora::tls::ssl::SslVersion::TLS1_2))
.unwrap();
}
#[cfg(feature = "rustls")]
{
tls_settings = TlsSettings::intermediate(&cert_path, &key_path).unwrap();
}
#[cfg(feature = "s2n")]
{
tls_settings = TlsSettings::intermediate(&cert_path, &key_path).unwrap();
}
#[cfg(not(feature = "any_tls"))]
{
tls_settings = TlsSettings;
}
tls_settings.enable_h2();
echo_service_http.add_tls_with_settings("0.0.0.0:6148", None, tls_settings);
let proxy_service = service::proxy::proxy_service(
"0.0.0.0:6141", // listen
"1.1.1.1:80", // proxy to
);
let proxy_service_ssl = service::proxy::proxy_service_tls(
"0.0.0.0:6144", // listen
"1.1.1.1:443", // proxy to
"one.one.one.one", // SNI
&cert_path,
&key_path,
);
let mut prometheus_service_http = ListeningService::prometheus_http_service();
prometheus_service_http.add_tcp("127.0.0.1:6150");
let background_service = background_service("example", ExampleBackgroundService {});
let services: Vec<Box<dyn Service>> = vec![
Box::new(echo_service),
Box::new(echo_service_http),
Box::new(proxy_service),
Box::new(proxy_service_ssl),
Box::new(prometheus_service_http),
Box::new(background_service),
];
my_server.add_services(services);
my_server.run_forever();
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora/examples/service/echo.rs | pingora/examples/service/echo.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::app::echo::{EchoApp, HttpEchoApp};
use pingora::apps::http_app::HttpServer;
use pingora::services::listening::Service;
pub fn echo_service() -> Service<EchoApp> {
Service::new("Echo Service".to_string(), EchoApp)
}
pub fn echo_service_http() -> Service<HttpServer<HttpEchoApp>> {
let server = HttpServer::new_app(HttpEchoApp);
Service::new("Echo Service HTTP".to_string(), server)
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora/examples/service/mod.rs | pingora/examples/service/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod echo;
pub mod proxy;
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora/examples/service/proxy.rs | pingora/examples/service/proxy.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::app::proxy::ProxyApp;
use pingora_core::listeners::Listeners;
use pingora_core::services::listening::Service;
use pingora_core::upstreams::peer::BasicPeer;
pub fn proxy_service(addr: &str, proxy_addr: &str) -> Service<ProxyApp> {
let proxy_to = BasicPeer::new(proxy_addr);
Service::with_listeners(
"Proxy Service".to_string(),
Listeners::tcp(addr),
ProxyApp::new(proxy_to),
)
}
pub fn proxy_service_tls(
addr: &str,
proxy_addr: &str,
proxy_sni: &str,
cert_path: &str,
key_path: &str,
) -> Service<ProxyApp> {
let mut proxy_to = BasicPeer::new(proxy_addr);
// set SNI to enable TLS
proxy_to.sni = proxy_sni.into();
Service::with_listeners(
"Proxy Service TLS".to_string(),
Listeners::tls(addr, cert_path, key_path).unwrap(),
ProxyApp::new(proxy_to),
)
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora/examples/app/echo.rs | pingora/examples/app/echo.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use async_trait::async_trait;
use bytes::Bytes;
use http::{Response, StatusCode};
use log::debug;
use once_cell::sync::Lazy;
use pingora_timeout::timeout;
use prometheus::{register_int_counter, IntCounter};
use std::sync::Arc;
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use pingora::apps::http_app::ServeHttp;
use pingora::apps::ServerApp;
use pingora::protocols::http::ServerSession;
use pingora::protocols::Stream;
use pingora::server::ShutdownWatch;
static REQ_COUNTER: Lazy<IntCounter> =
Lazy::new(|| register_int_counter!("reg_counter", "Number of requests").unwrap());
#[derive(Clone)]
pub struct EchoApp;
#[async_trait]
impl ServerApp for EchoApp {
async fn process_new(
self: &Arc<Self>,
mut io: Stream,
_shutdown: &ShutdownWatch,
) -> Option<Stream> {
let mut buf = [0; 1024];
loop {
let n = io.read(&mut buf).await.unwrap();
if n == 0 {
debug!("session closing");
return None;
}
io.write_all(&buf[0..n]).await.unwrap();
io.flush().await.unwrap();
}
}
}
pub struct HttpEchoApp;
#[async_trait]
impl ServeHttp for HttpEchoApp {
async fn response(&self, http_stream: &mut ServerSession) -> Response<Vec<u8>> {
REQ_COUNTER.inc();
// read timeout of 2s
let read_timeout = 2000;
let body = match timeout(
Duration::from_millis(read_timeout),
http_stream.read_request_body(),
)
.await
{
Ok(res) => match res.unwrap() {
Some(bytes) => bytes,
None => Bytes::from("no body!"),
},
Err(_) => {
panic!("Timed out after {:?}ms", read_timeout);
}
};
Response::builder()
.status(StatusCode::OK)
.header(http::header::CONTENT_TYPE, "text/html")
.header(http::header::CONTENT_LENGTH, body.len())
.body(body.to_vec())
.unwrap()
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora/examples/app/mod.rs | pingora/examples/app/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod echo;
pub mod proxy;
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora/examples/app/proxy.rs | pingora/examples/app/proxy.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use async_trait::async_trait;
use log::debug;
use std::sync::Arc;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::select;
use pingora::apps::ServerApp;
use pingora::connectors::TransportConnector;
use pingora::protocols::Stream;
use pingora::server::ShutdownWatch;
use pingora::upstreams::peer::BasicPeer;
pub struct ProxyApp {
client_connector: TransportConnector,
proxy_to: BasicPeer,
}
enum DuplexEvent {
DownstreamRead(usize),
UpstreamRead(usize),
}
impl ProxyApp {
pub fn new(proxy_to: BasicPeer) -> Self {
ProxyApp {
client_connector: TransportConnector::new(None),
proxy_to,
}
}
async fn duplex(&self, mut server_session: Stream, mut client_session: Stream) {
let mut upstream_buf = [0; 1024];
let mut downstream_buf = [0; 1024];
loop {
let downstream_read = server_session.read(&mut upstream_buf);
let upstream_read = client_session.read(&mut downstream_buf);
let event: DuplexEvent;
select! {
n = downstream_read => event
= DuplexEvent::DownstreamRead(n.unwrap()),
n = upstream_read => event
= DuplexEvent::UpstreamRead(n.unwrap()),
}
match event {
DuplexEvent::DownstreamRead(0) => {
debug!("downstream session closing");
return;
}
DuplexEvent::UpstreamRead(0) => {
debug!("upstream session closing");
return;
}
DuplexEvent::DownstreamRead(n) => {
client_session.write_all(&upstream_buf[0..n]).await.unwrap();
client_session.flush().await.unwrap();
}
DuplexEvent::UpstreamRead(n) => {
server_session
.write_all(&downstream_buf[0..n])
.await
.unwrap();
server_session.flush().await.unwrap();
}
}
}
}
}
#[async_trait]
impl ServerApp for ProxyApp {
async fn process_new(
self: &Arc<Self>,
io: Stream,
_shutdown: &ShutdownWatch,
) -> Option<Stream> {
let client_session = self.client_connector.new_stream(&self.proxy_to).await;
match client_session {
Ok(client_session) => {
self.duplex(io, client_session).await;
None
}
Err(e) => {
debug!("Failed to create client session: {}", e);
None
}
}
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-rustls/src/lib.rs | pingora-rustls/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! This module contains all the rustls specific pingora integration for things
//! like loading certificates and private keys
#![warn(clippy::all)]
use std::fs::File;
use std::io::BufReader;
use std::path::Path;
use log::warn;
pub use no_debug::{Ellipses, NoDebug, WithTypeInfo};
use pingora_error::{Error, ErrorType, OrErr, Result};
pub use rustls::{
client::WebPkiServerVerifier, version, CertificateError, ClientConfig, DigitallySignedStruct,
Error as RusTlsError, RootCertStore, ServerConfig, SignatureScheme, Stream,
};
pub use rustls_native_certs::load_native_certs;
use rustls_pemfile::Item;
pub use rustls_pki_types::{CertificateDer, PrivateKeyDer, ServerName, UnixTime};
pub use tokio_rustls::client::TlsStream as ClientTlsStream;
pub use tokio_rustls::server::TlsStream as ServerTlsStream;
pub use tokio_rustls::{Accept, Connect, TlsAcceptor, TlsConnector, TlsStream};
// This allows to skip certificate verification. Be highly cautious.
pub use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier};
/// Load the given file from disk as a buffered reader and use the pingora Error
/// type instead of the std::io version
fn load_file<P>(path: P) -> Result<BufReader<File>>
where
P: AsRef<Path>,
{
File::open(path)
.or_err(ErrorType::FileReadError, "Failed to load file")
.map(BufReader::new)
}
/// Read the pem file at the given path from disk
fn load_pem_file<P>(path: P) -> Result<Vec<Item>>
where
P: AsRef<Path>,
{
rustls_pemfile::read_all(&mut load_file(path)?)
.map(|item_res| {
item_res.or_err(
ErrorType::InvalidCert,
"Certificate in pem file could not be read",
)
})
.collect()
}
/// Load the certificates from the given pem file path into the given
/// certificate store
pub fn load_ca_file_into_store<P>(path: P, cert_store: &mut RootCertStore) -> Result<()>
where
P: AsRef<Path>,
{
for pem_item in load_pem_file(path)? {
// only loading certificates, handling a CA file
let Item::X509Certificate(content) = pem_item else {
return Error::e_explain(
ErrorType::InvalidCert,
"Pem file contains un-loadable certificate type",
);
};
cert_store.add(content).or_err(
ErrorType::InvalidCert,
"Failed to load X509 certificate into root store",
)?;
}
Ok(())
}
/// Attempt to load the native cas into the given root-certificate store
pub fn load_platform_certs_incl_env_into_store(ca_certs: &mut RootCertStore) -> Result<()> {
// this includes handling of ENV vars SSL_CERT_FILE & SSL_CERT_DIR
for cert in load_native_certs()
.or_err(ErrorType::InvalidCert, "Failed to load native certificates")?
.into_iter()
{
ca_certs.add(cert).or_err(
ErrorType::InvalidCert,
"Failed to load native certificate into root store",
)?;
}
Ok(())
}
/// Load the certificates and private key files
pub fn load_certs_and_key_files<'a>(
cert: &str,
key: &str,
) -> Result<Option<(Vec<CertificateDer<'a>>, PrivateKeyDer<'a>)>> {
let certs_file = load_pem_file(cert)?;
let key_file = load_pem_file(key)?;
let certs = certs_file
.into_iter()
.filter_map(|item| {
if let Item::X509Certificate(cert) = item {
Some(cert)
} else {
None
}
})
.collect::<Vec<_>>();
// These are the currently supported pk types -
// [https://doc.servo.org/rustls/key/struct.PrivateKey.html]
let private_key_opt = key_file
.into_iter()
.filter_map(|key_item| match key_item {
Item::Pkcs1Key(key) => Some(PrivateKeyDer::from(key)),
Item::Pkcs8Key(key) => Some(PrivateKeyDer::from(key)),
Item::Sec1Key(key) => Some(PrivateKeyDer::from(key)),
_ => None,
})
.next();
if let (Some(private_key), false) = (private_key_opt, certs.is_empty()) {
Ok(Some((certs, private_key)))
} else {
Ok(None)
}
}
/// Load the certificate
pub fn load_pem_file_ca(path: &String) -> Result<Vec<u8>> {
let mut reader = load_file(path)?;
let cas_file_items = rustls_pemfile::certs(&mut reader)
.map(|item_res| {
item_res.or_err(
ErrorType::InvalidCert,
"Failed to load certificate from file",
)
})
.collect::<Result<Vec<_>>>()?;
Ok(cas_file_items
.first()
.map(|ca| ca.to_vec())
.unwrap_or_default())
}
pub fn load_pem_file_private_key(path: &String) -> Result<Vec<u8>> {
Ok(rustls_pemfile::private_key(&mut load_file(path)?)
.or_err(
ErrorType::InvalidCert,
"Failed to load private key from file",
)?
.map(|key| key.secret_der().to_vec())
.unwrap_or_default())
}
pub fn hash_certificate(cert: &CertificateDer) -> Vec<u8> {
let hash = ring::digest::digest(&ring::digest::SHA256, cert.as_ref());
hash.as_ref().to_vec()
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-limits/src/estimator.rs | pingora-limits/src/estimator.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The estimator module contains a Count-Min Sketch type to help estimate the frequency of an item.
use crate::hash;
use crate::RandomState;
use std::hash::Hash;
use std::sync::atomic::{AtomicIsize, Ordering};
/// An implementation of a lock-free count–min sketch estimator. See the [wikipedia] page for more
/// information.
///
/// [wikipedia]: https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch
pub struct Estimator {
estimator: Box<[(Box<[AtomicIsize]>, RandomState)]>,
}
impl Estimator {
/// Create a new `Estimator` with the given amount of hashes and columns (slots).
pub fn new(hashes: usize, slots: usize) -> Self {
Self {
estimator: (0..hashes)
.map(|_| (0..slots).map(|_| AtomicIsize::new(0)).collect::<Vec<_>>())
.map(|slot| (slot.into_boxed_slice(), RandomState::new()))
.collect::<Vec<_>>()
.into_boxed_slice(),
}
}
/// Increment `key` by the value given. Return the new estimated value as a result.
/// Note: overflow can happen. When some of the internal counters overflow, a negative number
/// will be returned. It is up to the caller to catch and handle this case.
pub fn incr<T: Hash>(&self, key: T, value: isize) -> isize {
self.estimator
.iter()
.fold(isize::MAX, |min, (slot, hasher)| {
let hash = hash(&key, hasher) as usize;
let counter = &slot[hash % slot.len()];
// Overflow is allowed for simplicity
let current = counter.fetch_add(value, Ordering::Relaxed);
std::cmp::min(min, current + value)
})
}
/// Decrement `key` by the value given.
pub fn decr<T: Hash>(&self, key: T, value: isize) {
for (slot, hasher) in self.estimator.iter() {
let hash = hash(&key, hasher) as usize;
let counter = &slot[hash % slot.len()];
counter.fetch_sub(value, Ordering::Relaxed);
}
}
/// Get the estimated frequency of `key`.
pub fn get<T: Hash>(&self, key: T) -> isize {
self.estimator
.iter()
.fold(isize::MAX, |min, (slot, hasher)| {
let hash = hash(&key, hasher) as usize;
let counter = &slot[hash % slot.len()];
let current = counter.load(Ordering::Relaxed);
std::cmp::min(min, current)
})
}
/// Reset all values inside this `Estimator`.
pub fn reset(&self) {
self.estimator.iter().for_each(|(slot, _)| {
slot.iter()
.for_each(|counter| counter.store(0, Ordering::Relaxed))
});
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn incr() {
let est = Estimator::new(8, 8);
let v = est.incr("a", 1);
assert_eq!(v, 1);
let v = est.incr("b", 1);
assert_eq!(v, 1);
let v = est.incr("a", 2);
assert_eq!(v, 3);
let v = est.incr("b", 2);
assert_eq!(v, 3);
}
#[test]
fn desc() {
let est = Estimator::new(8, 8);
est.incr("a", 3);
est.incr("b", 3);
est.decr("a", 1);
est.decr("b", 1);
assert_eq!(est.get("a"), 2);
assert_eq!(est.get("b"), 2);
}
#[test]
fn get() {
let est = Estimator::new(8, 8);
est.incr("a", 1);
est.incr("a", 2);
est.incr("b", 1);
est.incr("b", 2);
assert_eq!(est.get("a"), 3);
assert_eq!(est.get("b"), 3);
}
#[test]
fn reset() {
let est = Estimator::new(8, 8);
est.incr("a", 1);
est.incr("a", 2);
est.incr("b", 1);
est.incr("b", 2);
est.decr("b", 1);
est.reset();
assert_eq!(est.get("a"), 0);
assert_eq!(est.get("b"), 0);
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-limits/src/lib.rs | pingora-limits/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The pingora_limits crate contains modules that can help introduce things like rate limiting or
//! thread-safe event count estimation.
#![warn(clippy::all)]
#![allow(clippy::new_without_default)]
#![allow(clippy::type_complexity)]
pub mod estimator;
pub mod inflight;
pub mod rate;
use ahash::RandomState;
use std::hash::Hash;
#[inline]
fn hash<T: Hash>(key: T, hasher: &RandomState) -> u64 {
hasher.hash_one(key)
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-limits/src/inflight.rs | pingora-limits/src/inflight.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The inflight module defines the [Inflight] type which estimates the count of events occurring
//! at any point in time.
use crate::estimator::Estimator;
use crate::{hash, RandomState};
use std::hash::Hash;
use std::sync::Arc;
/// An `Inflight` type tracks the frequency of actions that are actively occurring. When the value
/// is dropped from scope, the count will automatically decrease.
pub struct Inflight {
estimator: Arc<Estimator>,
hasher: RandomState,
}
// fixed parameters for simplicity: hashes: h, slots: n
// Time complexity for a lookup operation is O(h). Space complexity is O(h*n)
// False positive ratio is 1/(n^h)
// We choose a small h and a large n to keep lookup cheap and FP ratio low
const HASHES: usize = 4;
const SLOTS: usize = 8192;
impl Inflight {
/// Create a new `Inflight`.
pub fn new() -> Self {
Inflight {
estimator: Arc::new(Estimator::new(HASHES, SLOTS)),
hasher: RandomState::new(),
}
}
/// Increment `key` by the value given. The return value is a tuple of a [Guard] and the
/// estimated count.
pub fn incr<T: Hash>(&self, key: T, value: isize) -> (Guard, isize) {
let guard = Guard {
estimator: self.estimator.clone(),
id: hash(key, &self.hasher),
value,
};
let estimation = guard.incr();
(guard, estimation)
}
}
/// A `Guard` is returned when an `Inflight` key is incremented via [Inflight::incr].
pub struct Guard {
estimator: Arc<Estimator>,
// store the hash instead of the actual key to save space
id: u64,
value: isize,
}
impl Guard {
/// Increment the key's value that the `Guard` was created from.
pub fn incr(&self) -> isize {
self.estimator.incr(self.id, self.value)
}
/// Get the estimated count of the key that the `Guard` was created from.
pub fn get(&self) -> isize {
self.estimator.get(self.id)
}
}
impl Drop for Guard {
fn drop(&mut self) {
self.estimator.decr(self.id, self.value)
}
}
impl std::fmt::Debug for Guard {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Guard")
.field("id", &self.id)
.field("value", &self.value)
// no need to dump shared estimator
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn inflight_count() {
let inflight = Inflight::new();
let (g1, v) = inflight.incr("a", 1);
assert_eq!(v, 1);
let (g2, v) = inflight.incr("a", 2);
assert_eq!(v, 3);
drop(g1);
assert_eq!(g2.get(), 2);
drop(g2);
let (_, v) = inflight.incr("a", 1);
assert_eq!(v, 1);
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-limits/src/rate.rs | pingora-limits/src/rate.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The rate module defines the [Rate] type that helps estimate the occurrence of events over a
//! period of time.
use crate::estimator::Estimator;
use std::hash::Hash;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::time::{Duration, Instant};
/// Input struct to custom functions for calculating rate. Includes the counts
/// from the current interval, previous interval, the configured duration of an
/// interval, and the fraction into the current interval that the sample was
/// taken.
///
/// Ex. If the interval to the Rate instance is `10s`, and the rate calculation
/// is taken at 2 seconds after the start of the current interval, then the
/// fraction of the current interval returned in this struct will be `0.2`
/// meaning 20% of the current interval has elapsed
#[non_exhaustive]
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
pub struct RateComponents {
pub prev_samples: isize,
pub curr_samples: isize,
pub interval: Duration,
pub current_interval_fraction: f64,
}
/// A rate calculation function which uses a good estimate of the rate of events over the past
/// `interval` time.
///
/// Specifically, it linearly interpolates between the event counts of the previous and current
/// periods based on how far into the current period we are, as described in this post:
/// <https://blog.cloudflare.com/counting-things-a-lot-of-different-things/>
#[allow(dead_code)]
pub static PROPORTIONAL_RATE_ESTIMATE_CALC_FN: fn(RateComponents) -> f64 =
|rate_info: RateComponents| {
let prev = rate_info.prev_samples as f64;
let curr = rate_info.curr_samples as f64;
let interval_secs = rate_info.interval.as_secs_f64();
let interval_fraction = rate_info.current_interval_fraction;
let weighted_count = prev * (1. - interval_fraction) + curr;
weighted_count / interval_secs
};
/// A stable rate estimator that reports the rate of events per period of `interval` time.
///
/// It counts events for periods of `interval` and returns the average rate of the latest completed
/// period while counting events for the current (partial) period.
pub struct Rate {
// 2 slots so that we use one to collect the current events and the other to report rate
red_slot: Estimator,
blue_slot: Estimator,
red_or_blue: AtomicBool, // true: the current slot is red, otherwise blue
start: Instant,
// Use u64 below instead of Instant because we want atomic operation
reset_interval_ms: u64, // the time interval to reset `current` and move it to `previous`
last_reset_time: AtomicU64, // the timestamp in ms since `start`
interval: Duration,
}
// see inflight module for the meaning for these numbers
const HASHES: usize = 4;
const SLOTS: usize = 1024; // This value can be lower if interval is short (key cardinality is low)
impl Rate {
/// Create a new `Rate` with the given interval.
pub fn new(interval: std::time::Duration) -> Self {
Rate::new_with_estimator_config(interval, HASHES, SLOTS)
}
/// Create a new `Rate` with the given interval and Estimator config with the given amount of hashes and columns (slots).
#[inline]
pub fn new_with_estimator_config(
interval: std::time::Duration,
hashes: usize,
slots: usize,
) -> Self {
Rate {
red_slot: Estimator::new(hashes, slots),
blue_slot: Estimator::new(hashes, slots),
red_or_blue: AtomicBool::new(true),
start: Instant::now(),
reset_interval_ms: interval.as_millis() as u64, // should be small not to overflow
last_reset_time: AtomicU64::new(0),
interval,
}
}
fn current(&self, red_or_blue: bool) -> &Estimator {
if red_or_blue {
&self.red_slot
} else {
&self.blue_slot
}
}
fn previous(&self, red_or_blue: bool) -> &Estimator {
if red_or_blue {
&self.blue_slot
} else {
&self.red_slot
}
}
fn red_or_blue(&self) -> bool {
self.red_or_blue.load(Ordering::SeqCst)
}
/// Return the per second rate estimation.
///
/// This is the average rate of the latest completed period of length `interval`.
pub fn rate<T: Hash>(&self, key: &T) -> f64 {
let past_ms = self.maybe_reset();
if past_ms >= self.reset_interval_ms * 2 {
// already missed 2 intervals, no data, just report 0 as a short cut
return 0f64;
}
self.previous(self.red_or_blue()).get(key) as f64 * 1000.0 / self.reset_interval_ms as f64
}
/// Report new events and return number of events seen so far in the current interval.
pub fn observe<T: Hash>(&self, key: &T, events: isize) -> isize {
self.maybe_reset();
self.current(self.red_or_blue()).incr(key, events)
}
// reset if needed, return the time since last reset for other fn to use
fn maybe_reset(&self) -> u64 {
// should be short enough not to overflow
let now = Instant::now().duration_since(self.start).as_millis() as u64;
let last_reset = self.last_reset_time.load(Ordering::SeqCst);
let past_ms = now - last_reset;
if past_ms < self.reset_interval_ms {
// no need to reset
return past_ms;
}
let red_or_blue = self.red_or_blue();
match self.last_reset_time.compare_exchange(
last_reset,
now,
Ordering::SeqCst,
Ordering::Acquire,
) {
Ok(_) => {
// first clear the previous slot
self.previous(red_or_blue).reset();
// then flip the flag to tell others to use the reset slot
self.red_or_blue.store(!red_or_blue, Ordering::SeqCst);
// if current time is beyond 2 intervals, the data stored in the previous slot
// is also stale, we should clear that too
if now - last_reset >= self.reset_interval_ms * 2 {
// Note that this is the previous one now because we just flipped self.red_or_blue
self.current(red_or_blue).reset();
}
}
Err(new) => {
// another thread beats us to it
assert!(new >= now - 1000); // double check that the new timestamp looks right
}
}
past_ms
}
/// Get the current rate as calculated with the given closure. This closure
/// will take an argument containing all the accessible information about
/// the rate from this object and allow the caller to make their own
/// estimation of rate based on:
///
/// 1. The accumulated samples in the current interval (in progress)
/// 2. The accumulated samples in the previous interval (completed)
/// 3. The size of the interval
/// 4. Elapsed fraction of current interval for this sample (0..1)
///
pub fn rate_with<F, T, K>(&self, key: &K, mut rate_calc_fn: F) -> T
where
F: FnMut(RateComponents) -> T,
K: Hash,
{
let past_ms = self.maybe_reset();
let (prev_samples, curr_samples) = if past_ms >= self.reset_interval_ms * 2 {
// already missed 2 intervals, no data, just report 0 as a short cut
(0, 0)
} else if past_ms >= self.reset_interval_ms {
(self.previous(self.red_or_blue()).get(key), 0)
} else {
let (prev_est, curr_est) = if self.red_or_blue() {
(&self.blue_slot, &self.red_slot)
} else {
(&self.red_slot, &self.blue_slot)
};
(prev_est.get(key), curr_est.get(key))
};
rate_calc_fn(RateComponents {
interval: self.interval,
prev_samples,
curr_samples,
current_interval_fraction: (past_ms % self.reset_interval_ms) as f64
/ self.reset_interval_ms as f64,
})
}
}
#[cfg(test)]
mod tests {
    use float_cmp::assert_approx_eq;
    use super::*;
    use std::thread::sleep;
    use std::time::Duration;
    // NOTE: these tests sleep for real wall-clock time (several seconds each),
    // so the measured rates are inherently approximate.
    #[test]
    fn test_observe_rate() {
        let r = Rate::new(Duration::from_secs(1));
        let key = 1;
        // second: 0
        let observed = r.observe(&key, 3);
        assert_eq!(observed, 3);
        let observed = r.observe(&key, 2);
        assert_eq!(observed, 5);
        assert_eq!(r.rate(&key), 0f64); // no estimation yet because the interval has not passed
        // second: 1
        sleep(Duration::from_secs(1));
        let observed = r.observe(&key, 4);
        assert_eq!(observed, 4);
        assert_eq!(r.rate(&key), 5f64); // 5 rps
        // second: 2
        sleep(Duration::from_secs(1));
        assert_eq!(r.rate(&key), 4f64);
        // second: 3
        sleep(Duration::from_secs(1));
        assert_eq!(r.rate(&key), 0f64); // no event observed in the past 2 seconds
    }
    /// Assertion that 2 numbers are close within a generous margin. These
    /// tests are doing a lot of literal sleeping, so the measured results
    /// can't be accurate or consistent. This function does an assert with a
    /// generous tolerance
    fn assert_eq_ish(left: f64, right: f64) {
        assert_approx_eq!(f64, left, right, epsilon = 0.15)
    }
    #[test]
    fn test_observe_rate_custom_90_10() {
        let r = Rate::new(Duration::from_secs(1));
        let key = 1;
        // custom estimator: weight the in-progress interval 90%, the
        // completed one 10%
        let rate_90_10_fn = |rate_info: RateComponents| {
            let prev = rate_info.prev_samples as f64;
            let curr = rate_info.curr_samples as f64;
            (prev * 0.1 + curr * 0.9) / rate_info.interval.as_secs_f64()
        };
        // second: 0
        let observed = r.observe(&key, 3);
        assert_eq!(observed, 3);
        let observed = r.observe(&key, 2);
        assert_eq!(observed, 5);
        assert_eq!(r.rate_with(&key, rate_90_10_fn), 5. * 0.9);
        // second: 1
        sleep(Duration::from_secs(1));
        let observed = r.observe(&key, 4);
        assert_eq!(observed, 4);
        assert_eq!(r.rate_with(&key, rate_90_10_fn), 5. * 0.1 + 4. * 0.9);
        // second: 2
        sleep(Duration::from_secs(1));
        assert_eq!(r.rate_with(&key, rate_90_10_fn), 4. * 0.1);
        // second: 3
        sleep(Duration::from_secs(1));
        assert_eq!(r.rate_with(&key, rate_90_10_fn), 0f64);
    }
    #[test]
    fn test_observe_rate_custom_proportional() {
        let r = Rate::new(Duration::from_secs(1));
        let key = 1;
        // second: 0
        let observed = r.observe(&key, 3);
        assert_eq!(observed, 3);
        let observed = r.observe(&key, 2);
        assert_eq!(observed, 5);
        assert_eq_ish(r.rate_with(&key, PROPORTIONAL_RATE_ESTIMATE_CALC_FN), 5.);
        // second 0.5
        sleep(Duration::from_secs_f64(0.5));
        assert_eq_ish(r.rate_with(&key, PROPORTIONAL_RATE_ESTIMATE_CALC_FN), 5.);
        // rate() just looks at the previous interval, ignores current interval
        assert_eq_ish(r.rate(&key), 0.);
        // second: 1
        sleep(Duration::from_secs_f64(0.5));
        let observed = r.observe(&key, 4);
        assert_eq!(observed, 4);
        assert_eq_ish(r.rate_with(&key, PROPORTIONAL_RATE_ESTIMATE_CALC_FN), 9.);
        // second 1.75
        sleep(Duration::from_secs_f64(0.75));
        assert_eq_ish(
            r.rate_with(&key, PROPORTIONAL_RATE_ESTIMATE_CALC_FN),
            5. * 0.25 + 4.,
        );
        // second: 2
        sleep(Duration::from_secs_f64(0.25));
        assert_eq_ish(r.rate_with(&key, PROPORTIONAL_RATE_ESTIMATE_CALC_FN), 4.);
        assert_eq_ish(r.rate(&key), 4.);
        // second: 2.5
        sleep(Duration::from_secs_f64(0.5));
        assert_eq_ish(
            r.rate_with(&key, PROPORTIONAL_RATE_ESTIMATE_CALC_FN),
            4. / 2.,
        );
        assert_eq_ish(r.rate(&key), 4.);
        // second: 3
        sleep(Duration::from_secs(1));
        assert_eq!(r.rate_with(&key, PROPORTIONAL_RATE_ESTIMATE_CALC_FN), 0f64);
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-limits/benches/benchmark.rs | pingora-limits/benches/benchmark.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(feature = "dhat-heap")]
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;
use ahash::RandomState;
use dashmap::DashMap;
use pingora_limits::estimator::Estimator;
use rand::distributions::Uniform;
use rand::{thread_rng, Rng};
use std::collections::HashMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::sync::Mutex;
use std::thread;
use std::time::Instant;
/// Minimal counting interface shared by every benchmarked implementation.
trait Counter {
    /// Add `value` to the tally for `key`.
    fn incr(&self, key: u32, value: usize);
    /// Human-readable label used in the benchmark output.
    fn name() -> &'static str;
}
/// Baseline implementation: one global `Mutex` around a `HashMap`.
/// The coarse lock is the "naive" part being benchmarked.
#[derive(Default)]
struct NaiveCounter(Mutex<HashMap<u32, usize>>);
impl Counter for NaiveCounter {
    fn incr(&self, key: u32, value: usize) {
        // Entry API probes the map once instead of the original
        // get_mut-then-insert double lookup (clippy::map_entry);
        // the resulting counts are identical.
        *self.0.lock().unwrap().entry(key).or_insert(0) += value;
    }
    fn name() -> &'static str {
        "Naive Counter"
    }
}
/// Sharded-lock implementation: a `DashMap` of atomic counters. Existing keys
/// take only a shard read lock plus a relaxed atomic add.
#[derive(Default)]
struct OptimizedCounter(DashMap<u32, AtomicUsize, RandomState>);
impl Counter for OptimizedCounter {
    fn incr(&self, key: u32, value: usize) {
        // Fast path: key exists, increment in place under the shard read lock.
        if let Some(v) = self.0.get(&key) {
            v.fetch_add(value, Ordering::Relaxed);
            return;
        }
        // Slow path (first sighting of `key`). The original
        // `self.0.insert(key, AtomicUsize::new(value))` could overwrite a
        // counter inserted by a racing thread between our `get` and `insert`,
        // silently dropping its increments. `entry()` serializes racing
        // initializers on the shard write lock so no update is lost.
        self.0
            .entry(key)
            .or_insert_with(|| AtomicUsize::new(0))
            .fetch_add(value, Ordering::Relaxed);
    }
    fn name() -> &'static str {
        "Optimized Counter"
    }
}
/// Adapter so pingora's [Estimator] can be driven through the benchmark's
/// [Counter] trait.
impl Counter for Estimator {
    fn incr(&self, key: u32, value: usize) {
        // Estimator takes a signed delta; a `value` above isize::MAX would
        // wrap negative here, but this benchmark only ever increments by 1.
        self.incr(key, value as isize);
    }
    fn name() -> &'static str {
        "Pingora Estimator"
    }
}
/// Drive `counter` with `samples` increments of 1 on keys drawn from
/// `distribution`, then print total and per-operation timing, tagged with the
/// implementation name and `test_name`.
fn run_bench<T: Counter>(
    counter: &T,
    samples: usize,
    distribution: &Uniform<u32>,
    test_name: &str,
) {
    let mut rng = thread_rng();
    let start = Instant::now();
    (0..samples).for_each(|_| {
        counter.incr(rng.sample(distribution), 1);
    });
    let total = start.elapsed();
    println!(
        "{} {test_name} {total:?} total, {:?} avg per operation",
        T::name(),
        total / samples as u32
    );
}
/// Run [run_bench] concurrently on `threads` OS threads, each performing
/// `samples` increments against the shared `counter`, and wait for all of
/// them to finish.
fn run_threaded_bench<T: Counter + Send + Sync + 'static>(
    threads: usize,
    counter: Arc<T>,
    samples: usize,
    distribution: &Uniform<u32>,
) {
    let handles: Vec<_> = (0..threads)
        .map(|i| {
            let shared = Arc::clone(&counter);
            // Uniform is Copy, so each thread gets its own distribution.
            let dist = *distribution;
            thread::spawn(move || {
                run_bench(shared.as_ref(), samples, &dist, &format!("thread#{i}"));
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
}
/*
Pingora Estimator single thread 1.042849543s total, 10ns avg per operation
Naive Counter single thread 5.12641496s total, 51ns avg per operation
Optimized Counter single thread 4.302553352s total, 43ns avg per operation
Pingora Estimator thread#7 2.654667606s total, 212ns avg per operation
Pingora Estimator thread#2 2.65651993s total, 212ns avg per operation
Pingora Estimator thread#4 2.658225266s total, 212ns avg per operation
Pingora Estimator thread#0 2.660603361s total, 212ns avg per operation
Pingora Estimator thread#1 2.66139014s total, 212ns avg per operation
Pingora Estimator thread#6 2.663498849s total, 213ns avg per operation
Pingora Estimator thread#5 2.663344276s total, 213ns avg per operation
Pingora Estimator thread#3 2.664652951s total, 213ns avg per operation
Naive Counter thread#7 18.795881242s total, 1.503µs avg per operation
Naive Counter thread#1 18.805652672s total, 1.504µs avg per operation
Naive Counter thread#6 18.818084416s total, 1.505µs avg per operation
Naive Counter thread#4 18.832778982s total, 1.506µs avg per operation
Naive Counter thread#3 18.833952715s total, 1.506µs avg per operation
Naive Counter thread#2 18.837975133s total, 1.507µs avg per operation
Naive Counter thread#0 18.8397464s total, 1.507µs avg per operation
Naive Counter thread#5 18.842616299s total, 1.507µs avg per operation
Optimized Counter thread#4 2.650860314s total, 212ns avg per operation
Optimized Counter thread#0 2.651867013s total, 212ns avg per operation
Optimized Counter thread#2 2.656473381s total, 212ns avg per operation
Optimized Counter thread#5 2.657715876s total, 212ns avg per operation
Optimized Counter thread#1 2.658275111s total, 212ns avg per operation
Optimized Counter thread#7 2.658770751s total, 212ns avg per operation
Optimized Counter thread#6 2.659831251s total, 212ns avg per operation
Optimized Counter thread#3 2.664375398s total, 213ns avg per operation
*/
/* cargo bench --features dhat-heap for memory info
Pingora Estimator single thread 1.066846098s total, 10ns avg per operation
dhat: Total: 26,184 bytes in 9 blocks
dhat: At t-gmax: 26,184 bytes in 9 blocks
dhat: At t-end: 1,464 bytes in 5 blocks
dhat: The data has been saved to dhat-heap.json, and is viewable with dhat/dh_view.html
Naive Counter single thread 5.429089242s total, 54ns avg per operation
dhat: Total: 71,303,260 bytes in 20 blocks
dhat: At t-gmax: 53,477,392 bytes in 2 blocks
dhat: At t-end: 0 bytes in 0 blocks
dhat: The data has been saved to dhat-heap.json, and is viewable with dhat/dh_view.html
Optimized Counter single thread 4.361720355s total, 43ns avg per operation
dhat: Total: 71,307,722 bytes in 491 blocks
dhat: At t-gmax: 36,211,208 bytes in 34 blocks
dhat: At t-end: 0 bytes in 0 blocks
dhat: The data has been saved to dhat-heap.json, and is viewable with dhat/dh_view.html
*/
fn main() {
const SAMPLES: usize = 100_000_000;
const THREADS: usize = 8;
const ITEMS: u32 = 1_000_000;
const SAMPLES_PER_THREAD: usize = SAMPLES / THREADS;
let distribution = Uniform::new(0, ITEMS);
// single thread
{
#[cfg(feature = "dhat-heap")]
let _profiler = dhat::Profiler::new_heap();
let pingora_est = Estimator::new(3, 1024);
run_bench(&pingora_est, SAMPLES, &distribution, "single thread");
}
{
#[cfg(feature = "dhat-heap")]
let _profiler = dhat::Profiler::new_heap();
let naive: NaiveCounter = Default::default();
run_bench(&naive, SAMPLES, &distribution, "single thread");
}
{
#[cfg(feature = "dhat-heap")]
let _profiler = dhat::Profiler::new_heap();
let optimized: OptimizedCounter = Default::default();
run_bench(&optimized, SAMPLES, &distribution, "single thread");
}
// multithread
let pingora_est = Arc::new(Estimator::new(3, 1024));
run_threaded_bench(THREADS, pingora_est, SAMPLES_PER_THREAD, &distribution);
let naive: Arc<NaiveCounter> = Default::default();
run_threaded_bench(THREADS, naive, SAMPLES_PER_THREAD, &distribution);
let optimized: Arc<OptimizedCounter> = Default::default();
run_threaded_bench(THREADS, optimized, SAMPLES_PER_THREAD, &distribution);
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-memory-cache/src/lib.rs | pingora-memory-cache/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use ahash::RandomState;
use std::borrow::Borrow;
use std::hash::Hash;
use std::marker::PhantomData;
use std::time::{Duration, Instant};
use tinyufo::TinyUfo;
mod read_through;
pub use read_through::{Lookup, MultiLookup, RTCache};
#[derive(Debug, PartialEq, Eq)]
/// [CacheStatus] indicates the response type for a query.
pub enum CacheStatus {
    /// The key was found in the cache
    Hit,
    /// The key was not found.
    Miss,
    /// The key was found but it was expired.
    Expired,
    /// The key was not initially found but was found after awaiting a lock.
    LockHit,
    /// The returned value was expired but still returned. The [Duration] is
    /// how long it has been since its expiration time.
    Stale(Duration),
}
impl CacheStatus {
    /// Return the string representation for [CacheStatus].
    pub fn as_str(&self) -> &str {
        match self {
            Self::Hit => "hit",
            Self::Miss => "miss",
            Self::Expired => "expired",
            Self::LockHit => "lock_hit",
            Self::Stale(_) => "stale",
        }
    }
    /// Returns whether this status represents a cache hit.
    pub fn is_hit(&self) -> bool {
        // Only Miss and Expired count as misses; every other status carries a
        // usable value back to the caller.
        !matches!(self, Self::Miss | Self::Expired)
    }
    /// Returns the stale duration if any
    pub fn stale(&self) -> Option<Duration> {
        if let Self::Stale(elapsed) = self {
            Some(*elapsed)
        } else {
            None
        }
    }
}
#[derive(Debug, Clone)]
/// Internal cache entry: a value plus its optional absolute expiry time.
struct Node<T: Clone> {
    pub value: T,
    expire_on: Option<Instant>,
}
impl<T: Clone> Node<T> {
    /// Wrap `value`, stamping an absolute expiry when a TTL is given.
    fn new(value: T, ttl: Option<Duration>) -> Self {
        // checked_add returns None on Instant overflow, which degrades to
        // "never expires" — same as having no TTL at all.
        let expire_on = ttl.and_then(|t| Instant::now().checked_add(t));
        Node { value, expire_on }
    }
    /// Whether this node will already be expired at `time`.
    fn will_expire_at(&self, time: &Instant) -> bool {
        self.stale_duration(time).is_some()
    }
    /// Whether this node is expired right now.
    fn is_expired(&self) -> bool {
        self.will_expire_at(&Instant::now())
    }
    /// How far past its expiry `time` is, or `None` if the node has no TTL or
    /// is not yet expired.
    fn stale_duration(&self, time: &Instant) -> Option<Duration> {
        match self.expire_on {
            Some(expire_time) if expire_time <= *time => {
                Some(time.duration_since(expire_time))
            }
            _ => None,
        }
    }
}
/// A high performant in-memory cache with S3-FIFO + TinyLFU
pub struct MemoryCache<K: Hash, T: Clone> {
    // Entries are stored against the u64 hash of K, never K itself.
    store: TinyUfo<u64, Node<T>>,
    // Marks the logical key type; only its hash is ever used.
    _key_type: PhantomData<K>,
    // Hash state; pub(crate) because read_through::RTCache hashes keys with it
    // to address its per-key lookup locks.
    pub(crate) hasher: RandomState,
}
impl<K: Hash, T: Clone + Send + Sync + 'static> MemoryCache<K, T> {
    /// Create a new [MemoryCache] with the given size.
    pub fn new(size: usize) -> Self {
        MemoryCache {
            store: TinyUfo::new(size, size),
            _key_type: PhantomData,
            hasher: RandomState::new(),
        }
    }
    /// Fetch the key and return its value in addition to a [CacheStatus].
    ///
    /// Expired entries return `(None, CacheStatus::Expired)` but are not
    /// evicted here (see the TODO at the bottom of this impl).
    pub fn get<Q>(&self, key: &Q) -> (Option<T>, CacheStatus)
    where
        K: Borrow<Q>,
        Q: Hash + ?Sized,
    {
        // Lookups are by 64-bit hash only, so two keys colliding on the u64
        // hash would alias the same entry — an accepted trade-off for speed.
        let hashed_key = self.hasher.hash_one(key);
        if let Some(n) = self.store.get(&hashed_key) {
            if !n.is_expired() {
                (Some(n.value), CacheStatus::Hit)
            } else {
                (None, CacheStatus::Expired)
            }
        } else {
            (None, CacheStatus::Miss)
        }
    }
    /// Similar to [Self::get], fetch the key and return its value in addition to a
    /// [CacheStatus] but also return the value even if it is expired. When the
    /// value is expired, the [Duration] of how long it has been stale will
    /// also be returned.
    pub fn get_stale<Q>(&self, key: &Q) -> (Option<T>, CacheStatus)
    where
        K: Borrow<Q>,
        Q: Hash + ?Sized,
    {
        let hashed_key = self.hasher.hash_one(key);
        if let Some(n) = self.store.get(&hashed_key) {
            let stale_duration = n.stale_duration(&Instant::now());
            if let Some(stale_duration) = stale_duration {
                (Some(n.value), CacheStatus::Stale(stale_duration))
            } else {
                (Some(n.value), CacheStatus::Hit)
            }
        } else {
            (None, CacheStatus::Miss)
        }
    }
    /// Insert a key and value pair with an optional TTL into the cache.
    ///
    /// An item with a TTL of zero will not be inserted.
    pub fn put<Q>(&self, key: &Q, value: T, ttl: Option<Duration>)
    where
        K: Borrow<Q>,
        Q: Hash + ?Sized,
    {
        // A zero TTL would be expired on arrival, so skip the insert entirely.
        if let Some(t) = ttl {
            if t.is_zero() {
                return;
            }
        }
        let hashed_key = self.hasher.hash_one(key);
        let node = Node::new(value, ttl);
        // weight is always 1 for now
        self.store.put(hashed_key, node, 1);
    }
    /// Remove a key from the cache if it exists.
    pub fn remove<Q>(&self, key: &Q)
    where
        K: Borrow<Q>,
        Q: Hash + ?Sized,
    {
        let hashed_key = self.hasher.hash_one(key);
        self.store.remove(&hashed_key);
    }
    // Like put(), but uses TinyUfo's force_put — presumably bypassing its
    // admission policy so freshly looked-up values always land in the cache;
    // confirm against the tinyufo docs. Used by RTCache after lookups.
    pub(crate) fn force_put(&self, key: &K, value: T, ttl: Option<Duration>) {
        if let Some(t) = ttl {
            if t.is_zero() {
                return;
            }
        }
        let hashed_key = self.hasher.hash_one(key);
        let node = Node::new(value, ttl);
        // weight is always 1 for now
        self.store.force_put(hashed_key, node, 1);
    }
    /// This is equivalent to [MemoryCache::get] but for an arbitrary amount of keys.
    pub fn multi_get<'a, I, Q>(&self, keys: I) -> Vec<(Option<T>, CacheStatus)>
    where
        I: Iterator<Item = &'a Q>,
        Q: Hash + ?Sized + 'a,
        K: Borrow<Q> + 'a,
    {
        let mut resp = Vec::with_capacity(keys.size_hint().0);
        for key in keys {
            resp.push(self.get(key));
        }
        resp
    }
    /// Same as [MemoryCache::multi_get] but returns the keys that are missing from the cache.
    pub fn multi_get_with_miss<'a, I, Q>(
        &self,
        keys: I,
    ) -> (Vec<(Option<T>, CacheStatus)>, Vec<&'a Q>)
    where
        I: Iterator<Item = &'a Q>,
        Q: Hash + ?Sized + 'a,
        K: Borrow<Q> + 'a,
    {
        let mut resp = Vec::with_capacity(keys.size_hint().0);
        // Heuristic preallocation: guess that about half the keys will miss.
        let mut missed = Vec::with_capacity(keys.size_hint().0 / 2);
        for key in keys {
            let (lookup, cache_status) = self.get(key);
            if lookup.is_none() {
                missed.push(key);
            }
            resp.push((lookup, cache_status));
        }
        (resp, missed)
    }
    // TODO: evict expired first
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::thread::sleep;
    #[test]
    fn test_get() {
        let cache: MemoryCache<i32, ()> = MemoryCache::new(10);
        let (res, hit) = cache.get(&1);
        assert_eq!(res, None);
        assert_eq!(hit, CacheStatus::Miss);
    }
    #[test]
    fn test_put_get() {
        let cache: MemoryCache<i32, i32> = MemoryCache::new(10);
        let (res, hit) = cache.get(&1);
        assert_eq!(res, None);
        assert_eq!(hit, CacheStatus::Miss);
        cache.put(&1, 2, None);
        let (res, hit) = cache.get(&1);
        assert_eq!(res.unwrap(), 2);
        assert_eq!(hit, CacheStatus::Hit);
    }
    #[test]
    fn test_put_get_remove() {
        let cache: MemoryCache<i32, i32> = MemoryCache::new(10);
        let (res, hit) = cache.get(&1);
        assert_eq!(res, None);
        assert_eq!(hit, CacheStatus::Miss);
        cache.put(&1, 2, None);
        cache.put(&3, 4, None);
        cache.put(&5, 6, None);
        let (res, hit) = cache.get(&1);
        assert_eq!(res.unwrap(), 2);
        assert_eq!(hit, CacheStatus::Hit);
        // removed keys should read back as plain misses
        cache.remove(&1);
        cache.remove(&3);
        let (res, hit) = cache.get(&1);
        assert_eq!(res, None);
        assert_eq!(hit, CacheStatus::Miss);
        let (res, hit) = cache.get(&3);
        assert_eq!(res, None);
        assert_eq!(hit, CacheStatus::Miss);
        let (res, hit) = cache.get(&5);
        assert_eq!(res.unwrap(), 6);
        assert_eq!(hit, CacheStatus::Hit);
    }
    // NOTE: the expiry tests below sleep for real wall-clock time.
    #[test]
    fn test_get_expired() {
        let cache: MemoryCache<i32, i32> = MemoryCache::new(10);
        let (res, hit) = cache.get(&1);
        assert_eq!(res, None);
        assert_eq!(hit, CacheStatus::Miss);
        cache.put(&1, 2, Some(Duration::from_secs(1)));
        sleep(Duration::from_millis(1100));
        let (res, hit) = cache.get(&1);
        assert_eq!(res, None);
        assert_eq!(hit, CacheStatus::Expired);
    }
    #[test]
    fn test_get_stale() {
        let cache: MemoryCache<i32, i32> = MemoryCache::new(10);
        let (res, hit) = cache.get(&1);
        assert_eq!(res, None);
        assert_eq!(hit, CacheStatus::Miss);
        cache.put(&1, 2, Some(Duration::from_secs(1)));
        sleep(Duration::from_millis(1100));
        let (res, hit) = cache.get_stale(&1);
        assert_eq!(res.unwrap(), 2);
        // we slept 1100ms and the ttl is 1000ms
        assert!(hit.stale().unwrap() >= Duration::from_millis(100));
    }
    #[test]
    fn test_eviction() {
        // capacity 2: inserting a third key must evict one; here key 1 goes
        let cache: MemoryCache<i32, i32> = MemoryCache::new(2);
        cache.put(&1, 2, None);
        cache.put(&2, 4, None);
        cache.put(&3, 6, None);
        let (res, hit) = cache.get(&1);
        assert_eq!(res, None);
        assert_eq!(hit, CacheStatus::Miss);
        let (res, hit) = cache.get(&2);
        assert_eq!(res.unwrap(), 4);
        assert_eq!(hit, CacheStatus::Hit);
        let (res, hit) = cache.get(&3);
        assert_eq!(res.unwrap(), 6);
        assert_eq!(hit, CacheStatus::Hit);
    }
    #[test]
    fn test_multi_get() {
        let cache: MemoryCache<i32, i32> = MemoryCache::new(10);
        cache.put(&2, -2, None);
        let keys: Vec<i32> = vec![1, 2, 3];
        let resp = cache.multi_get(keys.iter());
        assert_eq!(resp[0].0, None);
        assert_eq!(resp[0].1, CacheStatus::Miss);
        assert_eq!(resp[1].0.unwrap(), -2);
        assert_eq!(resp[1].1, CacheStatus::Hit);
        assert_eq!(resp[2].0, None);
        assert_eq!(resp[2].1, CacheStatus::Miss);
        let (resp, missed) = cache.multi_get_with_miss(keys.iter());
        assert_eq!(resp[0].0, None);
        assert_eq!(resp[0].1, CacheStatus::Miss);
        assert_eq!(resp[1].0.unwrap(), -2);
        assert_eq!(resp[1].1, CacheStatus::Hit);
        assert_eq!(resp[2].0, None);
        assert_eq!(resp[2].1, CacheStatus::Miss);
        assert_eq!(missed[0], &1);
        assert_eq!(missed[1], &3);
    }
    // The two tests below exercise the Borrow-based lookups: a String-keyed
    // cache can be queried with a &str.
    #[test]
    fn test_get_with_mismatched_key() {
        let cache: MemoryCache<String, ()> = MemoryCache::new(10);
        let (res, hit) = cache.get("Hello");
        assert_eq!(res, None);
        assert_eq!(hit, CacheStatus::Miss);
    }
    #[test]
    fn test_put_get_with_mismatched_key() {
        let cache: MemoryCache<String, i32> = MemoryCache::new(10);
        let (res, hit) = cache.get("1");
        assert_eq!(res, None);
        assert_eq!(hit, CacheStatus::Miss);
        cache.put("1", 2, None);
        let (res, hit) = cache.get("1");
        assert_eq!(res.unwrap(), 2);
        assert_eq!(hit, CacheStatus::Hit);
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-memory-cache/src/read_through.rs | pingora-memory-cache/src/read_through.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! An async read through cache where cache misses are populated via the provided
//! async callback.
use super::{CacheStatus, MemoryCache};
use async_trait::async_trait;
use log::warn;
use parking_lot::RwLock;
use pingora_error::{Error, ErrorTrait};
use std::collections::HashMap;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::Semaphore;
/// Guard for an in-flight lookup: waiters block on `lock` (which starts with
/// zero permits) until the owning task adds permits, and `lock_start` lets
/// callers discard a lock that has been held too long.
struct CacheLock {
    pub lock_start: Instant,
    pub lock: Semaphore,
}
impl CacheLock {
    /// Build a shared lock stamped "now" with zero permits, so every reader
    /// must wait until the writer releases it.
    pub fn new_arc() -> Arc<Self> {
        Arc::new(CacheLock {
            lock_start: Instant::now(),
            lock: Semaphore::new(0),
        })
    }
    /// True when an age limit is configured and this lock has outlived it.
    pub fn too_old(&self, age: Option<&Duration>) -> bool {
        age.map_or(false, |limit| self.lock_start.elapsed() > *limit)
    }
}
#[async_trait]
/// [Lookup] defines the caching behavior that the implementor needs. The `extra` field can be used
/// to define any additional metadata that the implementor uses to determine cache eligibility.
///
/// # Examples
///
/// ```ignore
/// use pingora_error::{ErrorTrait, Result};
/// use std::time::Duration;
///
/// struct MyLookup;
///
/// #[async_trait]
/// impl Lookup<usize, usize, ()> for MyLookup {
///     // Note: like the trait itself, implementations take no `self`.
///     async fn lookup(
///         _key: &usize,
///         _extra: Option<&()>,
///     ) -> Result<(usize, Option<Duration>), Box<dyn ErrorTrait + Send + Sync>> {
///         // Define your business logic here.
///         Ok((1, None))
///     }
/// }
/// ```
pub trait Lookup<K, T, S> {
    /// Return a value and an optional TTL for the given key.
    ///
    /// Returning `Err` makes the corresponding [RTCache] query fail and
    /// nothing is cached for that key.
    async fn lookup(
        key: &K,
        extra: Option<&S>,
    ) -> Result<(T, Option<Duration>), Box<dyn ErrorTrait + Send + Sync>>
    where
        K: 'async_trait,
        S: 'async_trait;
}
#[async_trait]
/// [MultiLookup] is similar to [Lookup]. Implement this trait if the system being queried support
/// looking up multiple keys in a single API call.
pub trait MultiLookup<K, T, S> {
    /// Like [Lookup::lookup] but for an arbitrary amount of keys.
    ///
    /// Implementations must return exactly one `(value, ttl)` pair per
    /// requested key, in the same order — [RTCache::multi_get] asserts on a
    /// length mismatch and pairs results with keys positionally.
    async fn multi_lookup(
        keys: &[&K],
        extra: Option<&S>,
    ) -> Result<Vec<(T, Option<Duration>)>, Box<dyn ErrorTrait + Send + Sync>>
    where
        K: 'async_trait,
        S: 'async_trait;
}
// Error message attached to every error propagated out of a lookup callback.
const LOOKUP_ERR_MSG: &str = "RTCache: lookup error";
/// A read-through in-memory cache on top of [MemoryCache]
///
/// Instead of providing a `put` function, [RTCache] requires a type which implements [Lookup] to
/// be automatically called during cache miss to populate the cache. This is useful when trying to
/// cache queries to external system such as DNS or databases.
///
/// Lookup coalescing is provided so that multiple concurrent lookups for the same key results
/// only in one lookup callback.
pub struct RTCache<K, T, CB, S>
where
    K: Hash + Send,
    T: Clone + Send,
{
    // the underlying storage
    inner: MemoryCache<K, T>,
    // marks the lookup callback type; never instantiated (its methods take no self)
    _callback: PhantomData<CB>,
    // in-flight lookup locks for coalescing, keyed by the u64 hash of the key
    lockers: RwLock<HashMap<u64, Arc<CacheLock>>>,
    // how long a coalescing lock is honored before a caller does its own lookup
    lock_age: Option<Duration>,
    // how long a waiter blocks on an in-flight lookup before giving up
    lock_timeout: Option<Duration>,
    // marks the `extra` type forwarded to the callback
    phantom: PhantomData<S>,
}
impl<K, T, CB, S> RTCache<K, T, CB, S>
where
    K: Hash + Send,
    T: Clone + Send + Sync + 'static,
{
    /// Build an [RTCache] holding up to `size` entries.
    ///
    /// `lock_age` bounds how long a coalescing lock stays valid before other
    /// callers stop waiting on it; `lock_timeout` bounds how long a waiter
    /// blocks on an in-flight lookup for the same key.
    pub fn new(size: usize, lock_age: Option<Duration>, lock_timeout: Option<Duration>) -> Self {
        RTCache {
            inner: MemoryCache::new(size),
            _callback: PhantomData,
            phantom: PhantomData,
            lockers: RwLock::new(HashMap::new()),
            lock_age,
            lock_timeout,
        }
    }
}
impl<K, T, CB, S> RTCache<K, T, CB, S>
where
    K: Hash + Send,
    T: Clone + Send + Sync + 'static,
    CB: Lookup<K, T, S>,
{
    /// Query the cache for a given value. If it exists and no TTL is configured initially, it will
    /// use the `ttl` value given.
    ///
    /// On a miss, at most one caller per key runs the [Lookup] callback;
    /// concurrent callers for the same key wait on a [CacheLock] and then
    /// re-read the cache ([CacheStatus::LockHit]).
    pub async fn get(
        &self,
        key: &K,
        ttl: Option<Duration>,
        extra: Option<&S>,
    ) -> (Result<T, Box<Error>>, CacheStatus) {
        let (result, cache_state) = self.inner.get(key);
        if let Some(result) = result {
            /* cache hit */
            return (Ok(result), cache_state);
        }
        let hashed_key = self.inner.hasher.hash_one(key);
        /* Cache miss, try to lock the lookup. Check if there is already a lookup */
        let my_lock = {
            let lockers = self.lockers.read();
            /* clone the Arc */
            lockers.get(&hashed_key).cloned()
        }; // read lock dropped
        /* try insert a cache lock into locker */
        // Outcomes:
        //   (Some(_), None): this task owns the lookup and must release waiters
        //   (None, Some(_)): another task owns it; wait on its lock
        //   (None, None):    existing lock is too old; look up independently
        //                    without writing the result back
        let (my_write, my_read) = match my_lock {
            // TODO: use a union
            Some(lock) => {
                /* There is an ongoing lookup to the same key */
                if lock.too_old(self.lock_age.as_ref()) {
                    (None, None)
                } else {
                    (None, Some(lock))
                }
            }
            None => {
                let mut lockers = self.lockers.write();
                match lockers.get(&hashed_key) {
                    Some(lock) => {
                        /* another lookup to the same key got the write lock to locker first */
                        if lock.too_old(self.lock_age.as_ref()) {
                            (None, None)
                        } else {
                            (None, Some(lock.clone()))
                        }
                    }
                    None => {
                        let new_lock = CacheLock::new_arc();
                        let new_lock2 = new_lock.clone();
                        lockers.insert(hashed_key, new_lock2);
                        (Some(new_lock), None)
                    }
                } // write lock dropped
            }
        };
        if let Some(my_lock) = my_read {
            /* another task will do the lookup */
            /* if available_permits > 0, writer is done */
            if my_lock.lock.available_permits() == 0 {
                /* block here to wait for writer to finish lookup */
                let lock_fut = my_lock.lock.acquire();
                let timed_out = match self.lock_timeout {
                    Some(t) => pingora_timeout::timeout(t, lock_fut).await.is_err(),
                    None => {
                        let _ = lock_fut.await;
                        false
                    }
                };
                // Gave up waiting on the writer: fall back to our own lookup,
                // without caching the result (the writer may still publish).
                if timed_out {
                    let value = CB::lookup(key, extra).await;
                    return match value {
                        Ok((v, _ttl)) => (Ok(v), cache_state),
                        Err(e) => {
                            let mut err = Error::new_str(LOOKUP_ERR_MSG);
                            err.set_cause(e);
                            (Err(err), cache_state)
                        }
                    };
                }
            } // permit returned here
            let (result, cache_state) = self.inner.get(key);
            if let Some(result) = result {
                /* cache lock hit, slow as a miss */
                (Ok(result), CacheStatus::LockHit)
            } else {
                /* probably error happen during the actual lookup */
                warn!(
                    "RTCache: no result after read lock, cache status: {:?}",
                    cache_state
                );
                // The writer failed to populate the cache; retry the lookup
                // ourselves and publish on success.
                match CB::lookup(key, extra).await {
                    Ok((v, new_ttl)) => {
                        self.inner.force_put(key, v.clone(), new_ttl.or(ttl));
                        (Ok(v), cache_state)
                    }
                    Err(e) => {
                        let mut err = Error::new_str(LOOKUP_ERR_MSG);
                        err.set_cause(e);
                        (Err(err), cache_state)
                    }
                }
            }
        } else {
            /* this one will do the look up, either because it gets the write lock or the read
             * lock age is reached */
            let value = CB::lookup(key, extra).await;
            let ret = match value {
                Ok((v, new_ttl)) => {
                    /* Don't put() if lock age too old, to avoid too many concurrent writes */
                    if my_write.is_some() {
                        self.inner.force_put(key, v.clone(), new_ttl.or(ttl));
                    }
                    (Ok(v), cache_state) // the original cache_state: Miss or Expired
                }
                Err(e) => {
                    let mut err = Error::new_str(LOOKUP_ERR_MSG);
                    err.set_cause(e);
                    (Err(err), cache_state)
                }
            };
            if let Some(my_write) = my_write {
                /* add permit so that reader can start. Any number of permits will do,
                 * since readers will return permits right away. */
                my_write.lock.add_permits(10);
                {
                    // remove the lock from locker
                    let mut lockers = self.lockers.write();
                    lockers.remove(&hashed_key);
                } // write lock dropped here
            }
            ret
        }
    }
    /// Similar to [Self::get], query the cache for a given value, but also returns the value even if the
    /// value is expired up to `stale_ttl`. If it is a cache miss or the value is stale more than
    /// the `stale_ttl`, a lookup will be performed to populate the cache.
    pub async fn get_stale(
        &self,
        key: &K,
        ttl: Option<Duration>,
        extra: Option<&S>,
        stale_ttl: Duration,
    ) -> (Result<T, Box<Error>>, CacheStatus) {
        let (result, cache_status) = self.inner.get_stale(key);
        if let Some(result) = result {
            // A fresh hit has no stale duration (None -> ZERO <= stale_ttl),
            // so both fresh and acceptably-stale values return here.
            let stale_duration = cache_status.stale();
            if stale_duration.unwrap_or(Duration::ZERO) <= stale_ttl {
                return (Ok(result), cache_status);
            }
        }
        let (res, status) = self.get(key, ttl, extra).await;
        (res, status)
    }
}
impl<K, T, CB, S> RTCache<K, T, CB, S>
where
    K: Hash + Clone + Send + Sync,
    T: Clone + Send + Sync + 'static,
    S: Clone + Send + Sync,
    CB: Lookup<K, T, S> + Sync + Send,
{
    /// Similar to [Self::get_stale], but when it returns the stale value, it also initiates a lookup
    /// in the background in order to refresh the value.
    ///
    /// Note that this function requires the [RTCache] to be static, which can be done by wrapping
    /// it with something like [once_cell::sync::Lazy].
    ///
    /// [once_cell::sync::Lazy]: https://docs.rs/once_cell/latest/once_cell/sync/struct.Lazy.html
    pub async fn get_stale_while_update(
        &'static self,
        key: &K,
        ttl: Option<Duration>,
        extra: Option<&S>,
        stale_ttl: Duration,
    ) -> (Result<T, Box<Error>>, CacheStatus) {
        let (result, cache_status) = self.get_stale(key, ttl, extra, stale_ttl).await;
        // Clone what the spawned task needs, since it outlives this call.
        let key = key.clone();
        let extra = extra.cloned();
        // Only a stale response triggers the background refresh; fresh hits
        // and misses (already refreshed inside get_stale) do not.
        if cache_status.stale().is_some() {
            // Fire-and-forget: get() publishes the refreshed value to the
            // cache itself, and refresh errors are intentionally ignored.
            tokio::spawn(async move {
                let _ = self.get(&key, ttl, extra.as_ref()).await;
            });
        }
        (result, cache_status)
    }
}
impl<K, T, CB, S> RTCache<K, T, CB, S>
where
    K: Hash + Send,
    T: Clone + Send + Sync + 'static,
    CB: MultiLookup<K, T, S>,
{
    /// Same behavior as [RTCache::get] but for an arbitrary amount of keys.
    ///
    /// If there are keys that are missing from the cache, `multi_lookup` is invoked to populate the
    /// cache before returning the final results. This is useful if your type supports batch
    /// queries.
    ///
    /// To avoid dead lock for the same key across concurrent `multi_get` calls,
    /// this function does not provide lookup coalescing.
    pub async fn multi_get<'a, I>(
        &self,
        keys: I,
        ttl: Option<Duration>,
        extra: Option<&S>,
    ) -> Result<Vec<(T, CacheStatus)>, Box<Error>>
    where
        I: Iterator<Item = &'a K>,
        K: 'a,
    {
        let size = keys.size_hint().0;
        let (hits, misses) = self.inner.multi_get_with_miss(keys);
        let mut final_results = Vec::with_capacity(size);
        let miss_results = if !misses.is_empty() {
            match CB::multi_lookup(&misses, extra).await {
                Ok(miss_results) => {
                    // assert! here to prevent index panic when building results,
                    // final_results has the full list of misses but miss_results might not
                    assert!(
                        miss_results.len() == misses.len(),
                        "multi_lookup() failed to return the matching number of results"
                    );
                    /* put the misses into cache */
                    for item in misses.iter().zip(miss_results.iter()) {
                        self.inner
                            .force_put(item.0, (item.1).0.clone(), (item.1).1.or(ttl));
                    }
                    miss_results
                }
                Err(e) => {
                    /* NOTE: we give up the hits when encounter lookup error */
                    let mut err = Error::new_str(LOOKUP_ERR_MSG);
                    err.set_cause(e);
                    return Err(err);
                }
            }
        } else {
            vec![] // to make the rest code simple, allocating one unused empty vec should be fine
        };
        /* fill in final_result */
        // Interleave: hits keep their cached value; each None consumes the
        // next miss_result in order (positions line up by construction).
        let mut n_miss = 0;
        for item in hits {
            match item.0 {
                Some(v) => final_results.push((v, item.1)),
                None => {
                    final_results // miss_results.len() === #None in result (asserted above)
                        .push((miss_results[n_miss].0.clone(), CacheStatus::Miss));
                    n_miss += 1;
                }
            }
        }
        Ok(final_results)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use atomic::AtomicI32;
use std::sync::atomic;
// Knobs for steering the test callbacks: force errors, force empty batch
// responses, inject latency, and count how many lookups actually ran.
#[derive(Clone, Debug)]
struct ExtraOpt {
    // when true, lookup() returns an error
    error: bool,
    // when true, multi_lookup() returns an empty result vec
    empty: bool,
    // optional artificial delay inside lookup()
    delay_for: Option<Duration>,
    // shared counter incremented on every lookup() invocation
    used: Arc<AtomicI32>,
}
// Test callback; its lookup() returns the number of times it has been called,
// which lets tests verify how many lookups were coalesced away.
struct TestCB();
#[async_trait]
impl Lookup<i32, i32, ExtraOpt> for TestCB {
    async fn lookup(
        _key: &i32,
        extra: Option<&ExtraOpt>,
    ) -> Result<(i32, Option<Duration>), Box<dyn ErrorTrait + Send + Sync>> {
        // this function returns #lookup_times
        let mut used = 0;
        if let Some(e) = extra {
            used = e.used.fetch_add(1, atomic::Ordering::Relaxed) + 1;
            if e.error {
                return Err(Error::new_str("test error"));
            }
            if let Some(delay_for) = e.delay_for {
                tokio::time::sleep(delay_for).await;
            }
        }
        Ok((used, None))
    }
}
#[async_trait]
impl MultiLookup<i32, i32, ExtraOpt> for TestCB {
    async fn multi_lookup(
        keys: &[&i32],
        extra: Option<&ExtraOpt>,
    ) -> Result<Vec<(i32, Option<Duration>)>, Box<dyn ErrorTrait + Send + Sync>> {
        let mut resp = vec![];
        if let Some(extra) = extra {
            if extra.empty {
                // deliberately violate the one-result-per-key contract to
                // exercise the length assert in RTCache::multi_get
                return Ok(resp);
            }
        }
        // echo each key back as its own value, with no TTL
        for key in keys {
            resp.push((**key, None));
        }
        Ok(resp)
    }
}
#[tokio::test]
async fn test_basic_get() {
let cache: RTCache<i32, i32, TestCB, ExtraOpt> = RTCache::new(10, None, None);
let opt = Some(ExtraOpt {
error: false,
empty: false,
delay_for: None,
used: Arc::new(AtomicI32::new(0)),
});
let (res, hit) = cache.get(&1, None, opt.as_ref()).await;
assert_eq!(res.unwrap(), 1);
assert_eq!(hit, CacheStatus::Miss);
let (res, hit) = cache.get(&1, None, opt.as_ref()).await;
assert_eq!(res.unwrap(), 1);
assert_eq!(hit, CacheStatus::Hit);
}
#[tokio::test]
async fn test_basic_get_error() {
let cache: RTCache<i32, i32, TestCB, ExtraOpt> = RTCache::new(10, None, None);
let opt1 = Some(ExtraOpt {
error: true,
empty: false,
delay_for: None,
used: Arc::new(AtomicI32::new(0)),
});
let (res, hit) = cache.get(&-1, None, opt1.as_ref()).await;
assert!(res.is_err());
assert_eq!(hit, CacheStatus::Miss);
}
#[tokio::test]
async fn test_concurrent_get() {
let cache: RTCache<i32, i32, TestCB, ExtraOpt> = RTCache::new(10, None, None);
let cache = Arc::new(cache);
let opt = Some(ExtraOpt {
error: false,
empty: false,
delay_for: None,
used: Arc::new(AtomicI32::new(0)),
});
let cache_c = cache.clone();
let opt1 = opt.clone();
// concurrent gets, only 1 will call the callback
let t1 = tokio::spawn(async move {
let (res, _hit) = cache_c.get(&1, None, opt1.as_ref()).await;
res.unwrap()
});
let cache_c = cache.clone();
let opt2 = opt.clone();
let t2 = tokio::spawn(async move {
let (res, _hit) = cache_c.get(&1, None, opt2.as_ref()).await;
res.unwrap()
});
let opt3 = opt.clone();
let cache_c = cache.clone();
let t3 = tokio::spawn(async move {
let (res, _hit) = cache_c.get(&1, None, opt3.as_ref()).await;
res.unwrap()
});
let (r1, r2, r3) = tokio::join!(t1, t2, t3);
assert_eq!(r1.unwrap(), 1);
assert_eq!(r2.unwrap(), 1);
assert_eq!(r3.unwrap(), 1);
}
#[tokio::test]
async fn test_concurrent_get_error() {
let cache: RTCache<i32, i32, TestCB, ExtraOpt> = RTCache::new(10, None, None);
let cache = Arc::new(cache);
let cache_c = cache.clone();
let opt1 = Some(ExtraOpt {
error: true,
empty: false,
delay_for: None,
used: Arc::new(AtomicI32::new(0)),
});
let opt2 = opt1.clone();
let opt3 = opt1.clone();
// concurrent gets, only 1 will call the callback
let t1 = tokio::spawn(async move {
let (res, _hit) = cache_c.get(&-1, None, opt1.as_ref()).await;
res.is_err()
});
let cache_c = cache.clone();
let t2 = tokio::spawn(async move {
let (res, _hit) = cache_c.get(&-1, None, opt2.as_ref()).await;
res.is_err()
});
let cache_c = cache.clone();
let t3 = tokio::spawn(async move {
let (res, _hit) = cache_c.get(&-1, None, opt3.as_ref()).await;
res.is_err()
});
let (r1, r2, r3) = tokio::join!(t1, t2, t3);
assert!(r1.unwrap());
assert!(r2.unwrap());
assert!(r3.unwrap());
}
#[tokio::test]
async fn test_concurrent_get_different_value() {
let cache: RTCache<i32, i32, TestCB, ExtraOpt> = RTCache::new(10, None, None);
let cache = Arc::new(cache);
let opt1 = Some(ExtraOpt {
error: false,
empty: false,
delay_for: None,
used: Arc::new(AtomicI32::new(0)),
});
let opt2 = opt1.clone();
let opt3 = opt1.clone();
let cache_c = cache.clone();
// concurrent gets to different keys, no locks, all will call the cb
let t1 = tokio::spawn(async move {
let (res, _hit) = cache_c.get(&1, None, opt1.as_ref()).await;
res.unwrap()
});
let cache_c = cache.clone();
let t2 = tokio::spawn(async move {
let (res, _hit) = cache_c.get(&3, None, opt2.as_ref()).await;
res.unwrap()
});
let cache_c = cache.clone();
let t3 = tokio::spawn(async move {
let (res, _hit) = cache_c.get(&5, None, opt3.as_ref()).await;
res.unwrap()
});
let (r1, r2, r3) = tokio::join!(t1, t2, t3);
// 1 lookup + 2 lookups + 3 lookups, order not matter
assert_eq!(r1.unwrap() + r2.unwrap() + r3.unwrap(), 6);
}
#[tokio::test]
async fn test_get_lock_age() {
// 1 sec lock age
let cache: RTCache<i32, i32, TestCB, ExtraOpt> =
RTCache::new(10, Some(Duration::from_secs(1)), None);
let cache = Arc::new(cache);
let counter = Arc::new(AtomicI32::new(0));
let opt1 = Some(ExtraOpt {
error: false,
empty: false,
delay_for: Some(Duration::from_secs(2)),
used: counter.clone(),
});
let opt2 = Some(ExtraOpt {
error: false,
empty: false,
delay_for: None,
used: counter.clone(),
});
let opt3 = opt2.clone();
let cache_c = cache.clone();
// t1 will be delay for 2 sec
let t1 = tokio::spawn(async move {
let (res, _hit) = cache_c.get(&1, None, opt1.as_ref()).await;
res.unwrap()
});
// start t2 and t3 1.5 seconds later, since lock age is 1 sec, there will be no lock
tokio::time::sleep(Duration::from_secs_f32(1.5)).await;
let cache_c = cache.clone();
let t2 = tokio::spawn(async move {
let (res, _hit) = cache_c.get(&1, None, opt2.as_ref()).await;
res.unwrap()
});
let cache_c = cache.clone();
let t3 = tokio::spawn(async move {
let (res, _hit) = cache_c.get(&1, None, opt3.as_ref()).await;
res.unwrap()
});
let (r1, r2, r3) = tokio::join!(t1, t2, t3);
// 1 lookup + 2 lookups + 3 lookups, order not matter
assert_eq!(r1.unwrap() + r2.unwrap() + r3.unwrap(), 6);
}
#[tokio::test]
async fn test_get_lock_timeout() {
// 1 sec lock timeout
let cache: RTCache<i32, i32, TestCB, ExtraOpt> =
RTCache::new(10, None, Some(Duration::from_secs(1)));
let cache = Arc::new(cache);
let counter = Arc::new(AtomicI32::new(0));
let opt1 = Some(ExtraOpt {
error: false,
empty: false,
delay_for: Some(Duration::from_secs(2)),
used: counter.clone(),
});
let opt2 = Some(ExtraOpt {
error: false,
empty: false,
delay_for: None,
used: counter.clone(),
});
let opt3 = opt2.clone();
let cache_c = cache.clone();
// t1 will be delay for 2 sec
let t1 = tokio::spawn(async move {
let (res, _hit) = cache_c.get(&1, None, opt1.as_ref()).await;
res.unwrap()
});
// since lock timeout is 1 sec, t2 and t3 will do their own lookup after 1 sec
let cache_c = cache.clone();
let t2 = tokio::spawn(async move {
let (res, _hit) = cache_c.get(&1, None, opt2.as_ref()).await;
res.unwrap()
});
let cache_c = cache.clone();
let t3 = tokio::spawn(async move {
let (res, _hit) = cache_c.get(&1, None, opt3.as_ref()).await;
res.unwrap()
});
let (r1, r2, r3) = tokio::join!(t1, t2, t3);
// 1 lookup + 2 lookups + 3 lookups, order not matter
assert_eq!(r1.unwrap() + r2.unwrap() + r3.unwrap(), 6);
}
#[tokio::test]
async fn test_multi_get() {
let cache: RTCache<i32, i32, TestCB, ExtraOpt> = RTCache::new(10, None, None);
let counter = Arc::new(AtomicI32::new(0));
let opt1 = Some(ExtraOpt {
error: false,
empty: false,
delay_for: Some(Duration::from_secs(2)),
used: counter.clone(),
});
// make 1 a hit first
let (res, hit) = cache.get(&1, None, opt1.as_ref()).await;
assert_eq!(res.unwrap(), 1);
assert_eq!(hit, CacheStatus::Miss);
let (res, hit) = cache.get(&1, None, opt1.as_ref()).await;
assert_eq!(res.unwrap(), 1);
assert_eq!(hit, CacheStatus::Hit);
// 1 hit 2 miss 3 miss
let resp = cache
.multi_get([1, 2, 3].iter(), None, opt1.as_ref())
.await
.unwrap();
assert_eq!(resp[0].0, 1);
assert_eq!(resp[0].1, CacheStatus::Hit);
assert_eq!(resp[1].0, 2);
assert_eq!(resp[1].1, CacheStatus::Miss);
assert_eq!(resp[2].0, 3);
assert_eq!(resp[2].1, CacheStatus::Miss);
// all hits after a fetch
let resp = cache
.multi_get([1, 2, 3].iter(), None, opt1.as_ref())
.await
.unwrap();
assert_eq!(resp[0].0, 1);
assert_eq!(resp[0].1, CacheStatus::Hit);
assert_eq!(resp[1].0, 2);
assert_eq!(resp[1].1, CacheStatus::Hit);
assert_eq!(resp[2].0, 3);
assert_eq!(resp[2].1, CacheStatus::Hit);
}
#[tokio::test]
#[should_panic(expected = "multi_lookup() failed to return the matching number of results")]
async fn test_inconsistent_miss_results() {
// force an empty result
let opt1 = Some(ExtraOpt {
error: false,
empty: true,
delay_for: None,
used: Arc::new(AtomicI32::new(0)),
});
let cache: RTCache<i32, i32, TestCB, ExtraOpt> = RTCache::new(10, None, None);
cache
.multi_get([4, 5, 6].iter(), None, opt1.as_ref())
.await
.unwrap();
}
#[tokio::test]
async fn test_get_stale() {
let ttl = Some(Duration::from_millis(100));
let cache: RTCache<i32, i32, TestCB, ExtraOpt> = RTCache::new(10, None, None);
let opt = Some(ExtraOpt {
error: false,
empty: false,
delay_for: None,
used: Arc::new(AtomicI32::new(0)),
});
let (res, hit) = cache.get(&1, ttl, opt.as_ref()).await;
assert_eq!(res.unwrap(), 1);
assert_eq!(hit, CacheStatus::Miss);
let (res, hit) = cache.get(&1, ttl, opt.as_ref()).await;
assert_eq!(res.unwrap(), 1);
assert_eq!(hit, CacheStatus::Hit);
tokio::time::sleep(Duration::from_millis(150)).await;
let (res, hit) = cache
.get_stale(&1, ttl, opt.as_ref(), Duration::from_millis(1000))
.await;
assert_eq!(res.unwrap(), 1);
assert!(hit.stale().is_some());
let (res, hit) = cache
.get_stale(&1, ttl, opt.as_ref(), Duration::from_millis(30))
.await;
assert_eq!(res.unwrap(), 2);
assert_eq!(hit, CacheStatus::Expired);
}
#[tokio::test]
async fn test_get_stale_while_update() {
use once_cell::sync::Lazy;
let ttl = Some(Duration::from_millis(100));
static CACHE: Lazy<RTCache<i32, i32, TestCB, ExtraOpt>> =
Lazy::new(|| RTCache::new(10, None, None));
let opt = Some(ExtraOpt {
error: false,
empty: false,
delay_for: None,
used: Arc::new(AtomicI32::new(0)),
});
let (res, hit) = CACHE.get(&1, ttl, opt.as_ref()).await;
assert_eq!(res.unwrap(), 1);
assert_eq!(hit, CacheStatus::Miss);
let (res, hit) = CACHE.get(&1, ttl, opt.as_ref()).await;
assert_eq!(res.unwrap(), 1);
assert_eq!(hit, CacheStatus::Hit);
tokio::time::sleep(Duration::from_millis(150)).await;
let (res, hit) = CACHE
.get_stale_while_update(&1, ttl, opt.as_ref(), Duration::from_millis(1000))
.await;
assert_eq!(res.unwrap(), 1);
assert!(hit.stale().is_some());
// allow the background lookup to finish
tokio::time::sleep(Duration::from_millis(10)).await;
let (res, hit) = CACHE.get(&1, ttl, opt.as_ref()).await;
assert_eq!(res.unwrap(), 2);
assert_eq!(hit, CacheStatus::Hit);
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/predictor.rs | pingora-cache/src/predictor.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Cacheability Predictor
use crate::hashtable::{ConcurrentLruCache, LruShard};
pub type CustomReasonPredicate = fn(&'static str) -> bool;
/// Cacheability Predictor
///
/// Remembers previously uncacheable assets.
/// Allows bypassing cache / cache lock early based on historical precedent.
///
/// NOTE: to simply avoid caching requests with certain characteristics,
/// add checks in request_cache_filter to avoid enabling cache in the first place.
/// The predictor's bypass mechanism handles cases where the request _looks_ cacheable
/// but its previous responses suggest otherwise. The request _could_ be cacheable in the future.
pub struct Predictor<const N_SHARDS: usize> {
uncacheable_keys: ConcurrentLruCache<(), N_SHARDS>,
skip_custom_reasons_fn: Option<CustomReasonPredicate>,
}
use crate::{key::CacheHashKey, CacheKey, NoCacheReason};
use log::debug;
/// The cache predictor trait.
///
/// This trait allows user defined predictor to replace [Predictor].
pub trait CacheablePredictor {
/// Return true if likely cacheable, false if likely not.
fn cacheable_prediction(&self, key: &CacheKey) -> bool;
/// Mark cacheable to allow next request to cache.
/// Returns false if the key was already marked cacheable.
fn mark_cacheable(&self, key: &CacheKey) -> bool;
/// Mark uncacheable to actively bypass cache on the next request.
/// May skip marking on certain NoCacheReasons.
/// Returns None if we skipped marking uncacheable.
/// Returns Some(false) if the key was already marked uncacheable.
fn mark_uncacheable(&self, key: &CacheKey, reason: NoCacheReason) -> Option<bool>;
}
// This particular bit of `where [LruShard...; N]: Default` nonsense arises from
// ConcurrentLruCache needing this trait bound, which in turns arises from the Rust
// compiler not being able to guarantee that all array sizes N implement `Default`.
// See https://github.com/rust-lang/rust/issues/61415
impl<const N_SHARDS: usize> Predictor<N_SHARDS>
where
[LruShard<()>; N_SHARDS]: Default,
{
/// Create a new Predictor with `N_SHARDS * shard_capacity` total capacity for
/// uncacheable cache keys.
///
/// - `shard_capacity`: defines number of keys remembered as uncacheable per LRU shard.
/// - `skip_custom_reasons_fn`: an optional predicate used in `mark_uncacheable`
/// that can customize which `Custom` `NoCacheReason`s ought to be remembered as uncacheable.
/// If the predicate returns true, then the predictor will skip remembering the current
/// cache key as uncacheable (and avoid bypassing cache on the next request).
pub fn new(
shard_capacity: usize,
skip_custom_reasons_fn: Option<CustomReasonPredicate>,
) -> Predictor<N_SHARDS> {
Predictor {
uncacheable_keys: ConcurrentLruCache::<(), N_SHARDS>::new(shard_capacity),
skip_custom_reasons_fn,
}
}
}
impl<const N_SHARDS: usize> CacheablePredictor for Predictor<N_SHARDS>
where
[LruShard<()>; N_SHARDS]: Default,
{
fn cacheable_prediction(&self, key: &CacheKey) -> bool {
// variance key is ignored because this check happens before cache lookup
let hash = key.primary_bin();
let key = u128::from_be_bytes(hash); // Endianness doesn't matter
// Note: LRU updated in mark_* functions only,
// as we assume the caller always updates the cacheability of the response later
!self.uncacheable_keys.read(key).contains(&key)
}
fn mark_cacheable(&self, key: &CacheKey) -> bool {
// variance key is ignored because cacheable_prediction() is called before cache lookup
// where the variance key is unknown
let hash = key.primary_bin();
let key = u128::from_be_bytes(hash);
let cache = self.uncacheable_keys.get(key);
if !cache.read().contains(&key) {
// not in uncacheable list, nothing to do
return true;
}
let mut cache = cache.write();
cache.pop(&key);
debug!("bypassed request became cacheable");
false
}
fn mark_uncacheable(&self, key: &CacheKey, reason: NoCacheReason) -> Option<bool> {
// only mark as uncacheable for the future on certain reasons,
// (e.g. InternalErrors)
use NoCacheReason::*;
match reason {
// CacheLockGiveUp: the writer will set OriginNotCache (if applicable)
// readers don't need to do it
NeverEnabled
| StorageError
| InternalError
| Deferred
| CacheLockGiveUp
| CacheLockTimeout
| DeclinedToUpstream
| UpstreamError
| PredictedResponseTooLarge => {
return None;
}
// Skip certain NoCacheReason::Custom according to user
Custom(reason) if self.skip_custom_reasons_fn.is_some_and(|f| f(reason)) => {
return None;
}
Custom(_) | OriginNotCache | ResponseTooLarge => { /* mark uncacheable for these only */
}
}
// variance key is ignored because cacheable_prediction() is called before cache lookup
// where the variance key is unknown
let hash = key.primary_bin();
let key = u128::from_be_bytes(hash);
let mut cache = self.uncacheable_keys.get(key).write();
// put() returns Some(old_value) if the key existed, else None
let new_key = cache.put(key, ()).is_none();
if new_key {
debug!("request marked uncacheable");
}
Some(new_key)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mark_cacheability() {
let predictor = Predictor::<1>::new(10, None);
let key = CacheKey::new("a", "b", "c");
// cacheable if no history
assert!(predictor.cacheable_prediction(&key));
// don't remember internal / storage errors
predictor.mark_uncacheable(&key, NoCacheReason::InternalError);
assert!(predictor.cacheable_prediction(&key));
predictor.mark_uncacheable(&key, NoCacheReason::StorageError);
assert!(predictor.cacheable_prediction(&key));
// origin explicitly said uncacheable
predictor.mark_uncacheable(&key, NoCacheReason::OriginNotCache);
assert!(!predictor.cacheable_prediction(&key));
// mark cacheable again
predictor.mark_cacheable(&key);
assert!(predictor.cacheable_prediction(&key));
}
#[test]
fn test_custom_skip_predicate() {
let predictor = Predictor::<1>::new(
10,
Some(|custom_reason| matches!(custom_reason, "Skipping")),
);
let key = CacheKey::new("a", "b", "c");
// cacheable if no history
assert!(predictor.cacheable_prediction(&key));
// custom predicate still uses default skip reasons
predictor.mark_uncacheable(&key, NoCacheReason::InternalError);
assert!(predictor.cacheable_prediction(&key));
// other custom reasons can still be marked uncacheable
predictor.mark_uncacheable(&key, NoCacheReason::Custom("DontCacheMe"));
assert!(!predictor.cacheable_prediction(&key));
let key = CacheKey::new("a", "c", "d");
assert!(predictor.cacheable_prediction(&key));
// specific custom reason is skipped
predictor.mark_uncacheable(&key, NoCacheReason::Custom("Skipping"));
assert!(predictor.cacheable_prediction(&key));
}
#[test]
fn test_mark_uncacheable_lru() {
let predictor = Predictor::<1>::new(3, None);
let key1 = CacheKey::new("a", "b", "c");
predictor.mark_uncacheable(&key1, NoCacheReason::OriginNotCache);
assert!(!predictor.cacheable_prediction(&key1));
let key2 = CacheKey::new("a", "bc", "c");
predictor.mark_uncacheable(&key2, NoCacheReason::OriginNotCache);
assert!(!predictor.cacheable_prediction(&key2));
let key3 = CacheKey::new("a", "cd", "c");
predictor.mark_uncacheable(&key3, NoCacheReason::OriginNotCache);
assert!(!predictor.cacheable_prediction(&key3));
// promote / reinsert key1
predictor.mark_uncacheable(&key1, NoCacheReason::OriginNotCache);
let key4 = CacheKey::new("a", "de", "c");
predictor.mark_uncacheable(&key4, NoCacheReason::OriginNotCache);
assert!(!predictor.cacheable_prediction(&key4));
// key 1 was recently used
assert!(!predictor.cacheable_prediction(&key1));
// key 2 was evicted
assert!(predictor.cacheable_prediction(&key2));
assert!(!predictor.cacheable_prediction(&key3));
assert!(!predictor.cacheable_prediction(&key4));
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/hashtable.rs | pingora-cache/src/hashtable.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Concurrent hash tables and LRUs
use lru::LruCache;
use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use std::collections::HashMap;
// There are probably off-the-shelf crates of this, DashMap?
/// A hash table that shards to a constant number of tables to reduce lock contention
#[derive(Debug)]
pub struct ConcurrentHashTable<V, const N: usize> {
tables: [RwLock<HashMap<u128, V>>; N],
}
#[inline]
fn get_shard(key: u128, n_shards: usize) -> usize {
(key % n_shards as u128) as usize
}
impl<V, const N: usize> ConcurrentHashTable<V, N>
where
[RwLock<HashMap<u128, V>>; N]: Default,
{
pub fn new() -> Self {
ConcurrentHashTable {
tables: Default::default(),
}
}
pub fn get(&self, key: u128) -> &RwLock<HashMap<u128, V>> {
&self.tables[get_shard(key, N)]
}
#[allow(dead_code)]
pub fn get_shard_at_idx(&self, idx: usize) -> Option<&RwLock<HashMap<u128, V>>> {
self.tables.get(idx)
}
#[allow(dead_code)]
pub fn read(&self, key: u128) -> RwLockReadGuard<'_, HashMap<u128, V>> {
self.get(key).read()
}
pub fn write(&self, key: u128) -> RwLockWriteGuard<'_, HashMap<u128, V>> {
self.get(key).write()
}
#[allow(dead_code)]
pub fn for_each<F>(&self, mut f: F)
where
F: FnMut(&u128, &V),
{
for shard in &self.tables {
let guard = shard.read();
for (key, value) in guard.iter() {
f(key, value);
}
}
}
// TODO: work out the lifetimes to provide get/set directly
}
impl<V, const N: usize> Default for ConcurrentHashTable<V, N>
where
[RwLock<HashMap<u128, V>>; N]: Default,
{
fn default() -> Self {
Self::new()
}
}
#[doc(hidden)] // not need in public API
pub struct LruShard<V>(RwLock<LruCache<u128, V>>);
impl<V> Default for LruShard<V> {
fn default() -> Self {
// help satisfy default construction of arrays
LruShard(RwLock::new(LruCache::unbounded()))
}
}
/// Sharded concurrent data structure for LruCache
pub struct ConcurrentLruCache<V, const N: usize> {
lrus: [LruShard<V>; N],
}
impl<V, const N: usize> ConcurrentLruCache<V, N>
where
[LruShard<V>; N]: Default,
{
pub fn new(shard_capacity: usize) -> Self {
use std::num::NonZeroUsize;
// safe, 1 != 0
const ONE: NonZeroUsize = NonZeroUsize::new(1).unwrap();
let mut cache = ConcurrentLruCache {
lrus: Default::default(),
};
for lru in &mut cache.lrus {
lru.0
.write()
.resize(shard_capacity.try_into().unwrap_or(ONE));
}
cache
}
pub fn get(&self, key: u128) -> &RwLock<LruCache<u128, V>> {
&self.lrus[get_shard(key, N)].0
}
#[allow(dead_code)]
pub fn read(&self, key: u128) -> RwLockReadGuard<'_, LruCache<u128, V>> {
self.get(key).read()
}
pub fn write(&self, key: u128) -> RwLockWriteGuard<'_, LruCache<u128, V>> {
self.get(key).write()
}
// TODO: work out the lifetimes to provide get/set directly
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/key.rs | pingora-cache/src/key.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Cache key
use super::*;
use blake2::{Blake2b, Digest};
use http::Extensions;
use serde::{Deserialize, Serialize};
use std::fmt::{Display, Formatter, Result as FmtResult};
// 16-byte / 128-bit key: large enough to avoid collision
const KEY_SIZE: usize = 16;
/// An 128 bit hash binary
pub type HashBinary = [u8; KEY_SIZE];
fn hex2str(hex: &[u8]) -> String {
use std::fmt::Write;
let mut s = String::with_capacity(KEY_SIZE * 2);
for c in hex {
write!(s, "{:02x}", c).unwrap(); // safe, just dump hex to string
}
s
}
/// Decode the hex str into [HashBinary].
///
/// Return `None` when the decode fails or the input is not exact 32 (to decode to 16 bytes).
pub fn str2hex(s: &str) -> Option<HashBinary> {
if s.len() != KEY_SIZE * 2 {
return None;
}
let mut output = [0; KEY_SIZE];
// no need to bubble the error, it should be obvious why the decode fails
hex::decode_to_slice(s.as_bytes(), &mut output).ok()?;
Some(output)
}
/// The trait for cache key
pub trait CacheHashKey {
/// Return the hash of the cache key
fn primary_bin(&self) -> HashBinary;
/// Return the variance hash of the cache key.
///
/// `None` if no variance.
fn variance_bin(&self) -> Option<HashBinary>;
/// Return the hash including both primary and variance keys
fn combined_bin(&self) -> HashBinary {
let key = self.primary_bin();
if let Some(v) = self.variance_bin() {
let mut hasher = Blake2b128::new();
hasher.update(key);
hasher.update(v);
hasher.finalize().into()
} else {
// if there is no variance, combined_bin should return the same as primary_bin
key
}
}
/// An extra tag for identifying users
///
/// For example, if the storage backend implements per user quota, this tag can be used.
fn user_tag(&self) -> &str;
/// The hex string of [Self::primary_bin()]
fn primary(&self) -> String {
hex2str(&self.primary_bin())
}
/// The hex string of [Self::variance_bin()]
fn variance(&self) -> Option<String> {
self.variance_bin().as_ref().map(|b| hex2str(&b[..]))
}
/// The hex string of [Self::combined_bin()]
fn combined(&self) -> String {
hex2str(&self.combined_bin())
}
}
/// General purpose cache key
#[derive(Debug, Clone)]
pub struct CacheKey {
// Namespace and primary fields are essentially strings,
// except they allow invalid UTF-8 sequences.
// These fields should be able to be hashed.
namespace: Vec<u8>,
primary: Vec<u8>,
primary_bin_override: Option<HashBinary>,
variance: Option<HashBinary>,
/// An extra tag for identifying users
///
/// For example, if the storage backend implements per user quota, this tag can be used.
pub user_tag: String,
/// Grab-bag for user-defined extensions. These will not be persisted to disk.
pub extensions: Extensions,
}
impl CacheKey {
/// Set the value of the variance hash
pub fn set_variance_key(&mut self, key: HashBinary) {
self.variance = Some(key)
}
/// Get the value of the variance hash
pub fn get_variance_key(&self) -> Option<&HashBinary> {
self.variance.as_ref()
}
/// Removes the variance from this cache key
pub fn remove_variance_key(&mut self) {
self.variance = None
}
/// Override the primary key hash
pub fn set_primary_bin_override(&mut self, key: HashBinary) {
self.primary_bin_override = Some(key)
}
/// Try to get primary key as UTF-8 str, if valid
pub fn primary_key_str(&self) -> Option<&str> {
std::str::from_utf8(&self.primary).ok()
}
/// Try to get namespace key as UTF-8 str, if valid
pub fn namespace_str(&self) -> Option<&str> {
std::str::from_utf8(&self.namespace).ok()
}
}
/// Storage optimized cache key to keep in memory or in storage
// 16 bytes + 8 bytes (+16 * u8) + user_tag.len() + 16 Bytes (Box<str>)
#[derive(Debug, Deserialize, Serialize, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct CompactCacheKey {
pub primary: HashBinary,
// save 8 bytes for non-variance but waste 8 bytes for variance vs, store flat 16 bytes
pub variance: Option<Box<HashBinary>>,
pub user_tag: Box<str>, // the len should be small to keep memory usage bounded
}
impl Display for CompactCacheKey {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
write!(f, "{}", hex2str(&self.primary))?;
if let Some(var) = &self.variance {
write!(f, ", variance: {}", hex2str(var.as_ref()))?;
}
write!(f, ", user_tag: {}", self.user_tag)
}
}
impl CacheHashKey for CompactCacheKey {
fn primary_bin(&self) -> HashBinary {
self.primary
}
fn variance_bin(&self) -> Option<HashBinary> {
self.variance.as_ref().map(|s| *s.as_ref())
}
fn user_tag(&self) -> &str {
&self.user_tag
}
}
/*
* We use blake2 hashing, which is faster and more secure, to replace md5.
* We have not given too much thought on whether non-crypto hash can be safely
* use because hashing performance is not critical.
* Note: we should avoid hashes like ahash which does not have consistent output
* across machines because it is designed purely for in memory hashtable
*/
// hash output: we use 128 bits (16 bytes) hash which will map to 32 bytes hex string
pub(crate) type Blake2b128 = Blake2b<blake2::digest::consts::U16>;
/// helper function: hash str to u8
pub fn hash_u8(key: &str) -> u8 {
let mut hasher = Blake2b128::new();
hasher.update(key);
let raw = hasher.finalize();
raw[0]
}
/// helper function: hash key (String or Bytes) to [HashBinary]
pub fn hash_key<K: AsRef<[u8]>>(key: K) -> HashBinary {
let mut hasher = Blake2b128::new();
hasher.update(key.as_ref());
let raw = hasher.finalize();
raw.into()
}
impl CacheKey {
fn primary_hasher(&self) -> Blake2b128 {
let mut hasher = Blake2b128::new();
hasher.update(&self.namespace);
hasher.update(&self.primary);
hasher
}
/// Create a default [CacheKey] from a request, which just takes its URI as the primary key.
pub fn default(req_header: &ReqHeader) -> Self {
CacheKey {
namespace: Vec::new(),
primary: format!("{}", req_header.uri).into_bytes(),
primary_bin_override: None,
variance: None,
user_tag: "".into(),
extensions: Extensions::new(),
}
}
/// Create a new [CacheKey] from the given namespace, primary, and user_tag input.
///
/// Both `namespace` and `primary` will be used for the primary hash
pub fn new<B1, B2, S>(namespace: B1, primary: B2, user_tag: S) -> Self
where
B1: Into<Vec<u8>>,
B2: Into<Vec<u8>>,
S: Into<String>,
{
CacheKey {
namespace: namespace.into(),
primary: primary.into(),
primary_bin_override: None,
variance: None,
user_tag: user_tag.into(),
extensions: Extensions::new(),
}
}
/// Return the namespace of this key
pub fn namespace(&self) -> &[u8] {
&self.namespace[..]
}
/// Return the primary key of this key
pub fn primary_key(&self) -> &[u8] {
&self.primary[..]
}
/// Convert this key to [CompactCacheKey].
pub fn to_compact(&self) -> CompactCacheKey {
let primary = self.primary_bin();
CompactCacheKey {
primary,
variance: self.variance_bin().map(Box::new),
user_tag: self.user_tag.clone().into_boxed_str(),
}
}
}
impl CacheHashKey for CacheKey {
fn primary_bin(&self) -> HashBinary {
if let Some(primary_bin_override) = self.primary_bin_override {
primary_bin_override
} else {
self.primary_hasher().finalize().into()
}
}
fn variance_bin(&self) -> Option<HashBinary> {
self.variance
}
fn user_tag(&self) -> &str {
&self.user_tag
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_cache_key_hash() {
let key = CacheKey {
namespace: Vec::new(),
primary: b"aa".to_vec(),
primary_bin_override: None,
variance: None,
user_tag: "1".into(),
extensions: Extensions::new(),
};
let hash = key.primary();
assert_eq!(hash, "ac10f2aef117729f8dad056b3059eb7e");
assert!(key.variance().is_none());
assert_eq!(key.combined(), hash);
let compact = key.to_compact();
assert_eq!(compact.primary(), hash);
assert!(compact.variance().is_none());
assert_eq!(compact.combined(), hash);
}
#[test]
fn test_cache_key_hash_override() {
let mut key = CacheKey {
namespace: Vec::new(),
primary: b"aa".to_vec(),
primary_bin_override: str2hex("27c35e6e9373877f29e562464e46497e"),
variance: None,
user_tag: "1".into(),
extensions: Extensions::new(),
};
let hash = key.primary();
assert_eq!(hash, "27c35e6e9373877f29e562464e46497e");
assert!(key.variance().is_none());
assert_eq!(key.combined(), hash);
let compact = key.to_compact();
assert_eq!(compact.primary(), hash);
assert!(compact.variance().is_none());
assert_eq!(compact.combined(), hash);
// make sure set_primary_bin_override overrides the primary key hash correctly
key.set_primary_bin_override(str2hex("004174d3e75a811a5b44c46b3856f3ee").unwrap());
let hash = key.primary();
assert_eq!(hash, "004174d3e75a811a5b44c46b3856f3ee");
assert!(key.variance().is_none());
assert_eq!(key.combined(), hash);
let compact = key.to_compact();
assert_eq!(compact.primary(), hash);
assert!(compact.variance().is_none());
assert_eq!(compact.combined(), hash);
}
#[test]
fn test_cache_key_vary_hash() {
let key = CacheKey {
namespace: Vec::new(),
primary: b"aa".to_vec(),
primary_bin_override: None,
variance: Some([0u8; 16]),
user_tag: "1".into(),
extensions: Extensions::new(),
};
let hash = key.primary();
assert_eq!(hash, "ac10f2aef117729f8dad056b3059eb7e");
assert_eq!(key.variance().unwrap(), "00000000000000000000000000000000");
assert_eq!(key.combined(), "004174d3e75a811a5b44c46b3856f3ee");
let compact = key.to_compact();
assert_eq!(compact.primary(), "ac10f2aef117729f8dad056b3059eb7e");
assert_eq!(
compact.variance().unwrap(),
"00000000000000000000000000000000"
);
assert_eq!(compact.combined(), "004174d3e75a811a5b44c46b3856f3ee");
}
#[test]
fn test_cache_key_vary_hash_override() {
    // Variance handling must compose with a primary-hash override:
    // the override replaces the primary digest, the variance still
    // contributes to combined().
    const PRIMARY: &str = "ac10f2aef117729f8dad056b3059eb7e";
    const VARIANCE: &str = "00000000000000000000000000000000";
    const COMBINED: &str = "004174d3e75a811a5b44c46b3856f3ee";
    let key = CacheKey {
        namespace: Vec::new(),
        primary: b"saaaad".to_vec(),
        primary_bin_override: str2hex(PRIMARY),
        variance: Some([0u8; 16]),
        user_tag: "1".into(),
        extensions: Extensions::new(),
    };
    assert_eq!(key.primary(), PRIMARY);
    assert_eq!(key.variance().unwrap(), VARIANCE);
    assert_eq!(key.combined(), COMBINED);
    // The compact form must agree with the full key on all three digests.
    let compact = key.to_compact();
    assert_eq!(compact.primary(), PRIMARY);
    assert_eq!(compact.variance().unwrap(), VARIANCE);
    assert_eq!(compact.combined(), COMBINED);
}
#[test]
fn test_hex_str() {
    // Round-trip: hex2str followed by str2hex must reproduce the key bytes.
    // key: [0, 1, 2, .., 15]
    let mut key = [0u8; KEY_SIZE];
    for (i, v) in key.iter_mut().enumerate() {
        *v = i as u8;
    }
    let round_tripped = str2hex(&hex2str(&key)).unwrap();
    for (expected, actual) in key.iter().zip(round_tripped.iter()) {
        assert_eq!(expected, actual);
    }
}
#[test]
fn test_primary_key_str_valid_utf8() {
    // A primary key made of valid UTF-8 bytes is exposed as Some(&str).
    let key = CacheKey {
        namespace: Vec::new(),
        primary: "/valid/path?query=1".as_bytes().to_vec(),
        primary_bin_override: None,
        variance: None,
        user_tag: "1".into(),
        extensions: Extensions::new(),
    };
    assert_eq!(key.primary_key_str(), Some("/valid/path?query=1"))
}
#[test]
fn test_primary_key_str_invalid_utf8() {
    // 0xff can never appear in valid UTF-8, so primary_key_str() is None.
    let key = CacheKey {
        namespace: Vec::new(),
        primary: b"foo\xff".to_vec(),
        primary_bin_override: None,
        variance: None,
        user_tag: "1".into(),
        extensions: Extensions::new(),
    };
    assert!(key.primary_key_str().is_none())
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/lib.rs | pingora-cache/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The HTTP caching layer for proxies.
#![allow(clippy::new_without_default)]
use cf_rustracing::tag::Tag;
use http::{method::Method, request::Parts as ReqHeader, response::Parts as RespHeader};
use key::{CacheHashKey, CompactCacheKey, HashBinary};
use lock::WritePermit;
use log::warn;
use pingora_error::Result;
use pingora_http::ResponseHeader;
use pingora_timeout::timeout;
use std::time::{Duration, Instant, SystemTime};
use storage::MissFinishType;
use strum::IntoStaticStr;
use trace::{CacheTraceCTX, Span};
pub mod cache_control;
pub mod eviction;
pub mod filters;
pub mod hashtable;
pub mod key;
pub mod lock;
pub mod max_file_size;
mod memory;
pub mod meta;
pub mod predictor;
pub mod put;
pub mod storage;
pub mod trace;
mod variance;
use crate::max_file_size::MaxFileSizeTracker;
pub use key::CacheKey;
use lock::{CacheKeyLockImpl, LockStatus, Locked};
pub use memory::MemCache;
pub use meta::{set_compression_dict_content, set_compression_dict_path};
pub use meta::{CacheMeta, CacheMetaDefaults};
pub use storage::{HitHandler, MissHandler, PurgeType, Storage};
pub use variance::VarianceBuilder;
pub mod prelude {}
/// The state machine for http caching
///
/// This object is used to handle the state and transitions for HTTP caching through the life of a
/// request.
pub struct HttpCache {
phase: CachePhase,
// Box the rest so that a disabled HttpCache struct is small
inner: Option<Box<HttpCacheInner>>,
digest: HttpCacheDigest,
}
/// This reflects the phase of HttpCache during the lifetime of a request
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum CachePhase {
    /// Cache disabled, with reason (NeverEnabled if never explicitly used)
    Disabled(NoCacheReason),
    /// Cache enabled but nothing is set yet
    Uninit,
    /// Cache was enabled, the request decided not to use it
    // HttpCache.inner_enabled is kept
    Bypass,
    /// Awaiting the cache key to be generated
    CacheKey,
    /// Cache hit
    Hit,
    /// No cached asset is found
    Miss,
    /// A staled (expired) asset is found
    Stale,
    /// A staled (expired) asset was found, but another request is revalidating it
    StaleUpdating,
    /// A staled (expired) asset was found, so a fresh one was fetched
    Expired,
    /// A staled (expired) asset was found, and it was revalidated to be fresh
    Revalidated,
    /// Revalidated, but deemed uncacheable, so we do not freshen it
    RevalidatedNoCache(NoCacheReason),
}

impl CachePhase {
    /// Convert [CachePhase] as `str`, for logging and debugging.
    pub fn as_str(&self) -> &'static str {
        use CachePhase::*;
        match *self {
            Disabled(_) => "disabled",
            Uninit => "uninitialized",
            Bypass => "bypass",
            CacheKey => "key",
            Hit => "hit",
            Miss => "miss",
            Stale => "stale",
            StaleUpdating => "stale-updating",
            Expired => "expired",
            Revalidated => "revalidated",
            RevalidatedNoCache(_) => "revalidated-nocache",
        }
    }
}
/// The possible reasons for not caching
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum NoCacheReason {
    /// Caching is not enabled to begin with
    NeverEnabled,
    /// Origin directives indicated this was not cacheable
    OriginNotCache,
    /// Response size was larger than the cache's configured maximum asset size
    ResponseTooLarge,
    /// Disabling caching due to unknown body size and previously exceeding maximum asset size;
    /// the asset is otherwise cacheable, but cache needs to confirm the final size of the asset
    /// before it can mark it as cacheable again.
    PredictedResponseTooLarge,
    /// Due to internal caching storage error
    StorageError,
    /// Due to other types of internal issues
    InternalError,
    /// will be cacheable but skip cache admission now
    ///
    /// This happens when the cache predictor predicted that this request is not cacheable, but
    /// the response turns out to be OK to cache. However, it might be too large to re-enable caching
    /// for this request
    Deferred,
    /// Due to the proxy upstream filter declining the current request from going upstream
    DeclinedToUpstream,
    /// Due to the upstream being unreachable or otherwise erroring during proxying
    UpstreamError,
    /// The writer of the cache lock sees that the request is not cacheable (Could be OriginNotCache)
    CacheLockGiveUp,
    /// This request waited too long for the writer of the cache lock to finish, so this request will
    /// fetch from the origin without caching
    CacheLockTimeout,
    /// Other custom defined reasons
    Custom(&'static str),
}

impl NoCacheReason {
    /// Convert [NoCacheReason] as `str`, for logging and debugging.
    pub fn as_str(&self) -> &'static str {
        match *self {
            Self::NeverEnabled => "NeverEnabled",
            Self::OriginNotCache => "OriginNotCache",
            Self::ResponseTooLarge => "ResponseTooLarge",
            Self::PredictedResponseTooLarge => "PredictedResponseTooLarge",
            Self::StorageError => "StorageError",
            Self::InternalError => "InternalError",
            Self::Deferred => "Deferred",
            Self::DeclinedToUpstream => "DeclinedToUpstream",
            Self::UpstreamError => "UpstreamError",
            Self::CacheLockGiveUp => "CacheLockGiveUp",
            Self::CacheLockTimeout => "CacheLockTimeout",
            Self::Custom(s) => s,
        }
    }
}
/// Information collected about the caching operation that will not be cleared
#[derive(Debug, Default)]
pub struct HttpCacheDigest {
    pub lock_duration: Option<Duration>,
    // time spent in cache lookup and reading the header
    pub lookup_duration: Option<Duration>,
}

/// Convenience function to add a duration to an optional duration
fn add_duration_to_opt(target_opt: &mut Option<Duration>, to_add: Duration) {
    // None is treated as a zero accumulator.
    *target_opt = Some(match *target_opt {
        Some(existing) => existing + to_add,
        None => to_add,
    });
}

impl HttpCacheDigest {
    // Accumulate time spent on cache lookup.
    fn add_lookup_duration(&mut self, extra_lookup_duration: Duration) {
        add_duration_to_opt(&mut self.lookup_duration, extra_lookup_duration)
    }

    // Accumulate time spent waiting on the cache lock.
    fn add_lock_duration(&mut self, extra_lock_duration: Duration) {
        add_duration_to_opt(&mut self.lock_duration, extra_lock_duration)
    }
}
/// Response cacheable decision
#[derive(Debug)]
pub enum RespCacheable {
    Cacheable(CacheMeta),
    Uncacheable(NoCacheReason),
}

impl RespCacheable {
    /// Whether it is cacheable
    #[inline]
    pub fn is_cacheable(&self) -> bool {
        match self {
            Self::Cacheable(_) => true,
            Self::Uncacheable(_) => false,
        }
    }

    /// Unwrap [RespCacheable] to get the [CacheMeta] stored
    /// # Panic
    /// Panic when this object is not cacheable. Check [Self::is_cacheable()] first.
    pub fn unwrap_meta(self) -> CacheMeta {
        let Self::Cacheable(meta) = self else {
            panic!("expected Cacheable value");
        };
        meta
    }
}
/// Indicators of which level of cache freshness logic to force apply to an asset.
///
/// For example, should an existing fresh asset be revalidated or re-retrieved altogether.
// Copy + Eq so it can be passed and compared cheaply by value.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ForcedFreshness {
    /// Indicates the asset should be considered stale and revalidated
    ForceExpired,
    /// Indicates the asset should be considered absent and treated like a miss
    /// instead of a hit
    ForceMiss,
    /// Indicates the asset should be considered fresh despite possibly being stale
    ForceFresh,
}
/// Freshness state of cache hit asset
#[derive(Debug, Copy, Clone, IntoStaticStr, PartialEq, Eq)]
#[strum(serialize_all = "snake_case")]
pub enum HitStatus {
    /// The asset's freshness directives indicate it has expired
    Expired,
    /// The asset was marked as expired, and should be treated as stale
    ForceExpired,
    /// The asset was marked as absent, and should be treated as a miss
    ForceMiss,
    /// An error occurred while processing the asset, so it should be treated as
    /// a miss
    FailedHitFilter,
    /// The asset is not expired
    Fresh,
    /// Asset exists but is expired, forced to be a hit
    ForceFresh,
}

impl HitStatus {
    /// For displaying cache hit status
    pub fn as_str(&self) -> &'static str {
        // Delegates to the strum-derived IntoStaticStr conversion.
        self.into()
    }

    /// Whether cached asset can be served as fresh
    pub fn is_fresh(&self) -> bool {
        matches!(self, HitStatus::Fresh | HitStatus::ForceFresh)
    }

    /// Check whether the hit status should be treated as a miss. A forced miss
    /// is obviously treated as a miss. A hit-filter failure is treated as a
    /// miss because we can't use the asset as an actual hit. If we treat it as
    /// expired, we still might not be able to use it even if revalidation
    /// succeeds.
    pub fn is_treated_as_miss(self) -> bool {
        match self {
            HitStatus::ForceMiss | HitStatus::FailedHitFilter => true,
            _ => false,
        }
    }
}
// Per-request cache lock context: the lock slot currently held (if any),
// the shared lock table, and an optional reader wait timeout.
pub struct LockCtx {
    // The lock (read or write side) this request currently holds, if any.
    pub lock: Option<Locked>,
    // Shared lock table keyed by cache key.
    pub cache_lock: &'static CacheKeyLockImpl,
    // How long a reader waits on the writer; None means no override here.
    // NOTE(review): the effective default lives in the lock implementation — confirm.
    pub wait_timeout: Option<Duration>,
}
// Fields like storage handlers that are needed only when cache is enabled (or bypassing).
struct HttpCacheInnerEnabled {
    // Cached asset metadata, populated on hit / freshened on revalidation.
    pub meta: Option<CacheMeta>,
    // when set, even if an asset exists, it would only be considered valid after this timestamp
    pub valid_after: Option<SystemTime>,
    // Write side of a cache admission (miss/expired fill).
    pub miss_handler: Option<MissHandler>,
    // Read side for serving a hit (also set for streaming partial writes).
    pub body_reader: Option<HitHandler>,
    pub storage: &'static (dyn storage::Storage + Sync), // static for now
    // Optional eviction manager; without it nothing is evicted from storage.
    pub eviction: Option<&'static (dyn eviction::EvictionManager + Sync)>,
    // Cache lock context, when a cache lock is configured.
    pub lock_ctx: Option<LockCtx>,
    // Distributed tracing spans for cache operations.
    pub traces: trace::CacheTraceCTX,
}
struct HttpCacheInner {
    // Prefer adding fields to InnerEnabled if possible, these fields are released
    // when cache is disabled.
    // If fields are needed after cache disablement, add directly to Inner.
    pub enabled_ctx: Option<Box<HttpCacheInnerEnabled>>,
    pub key: Option<CacheKey>,
    // when set, an asset will be rejected from the cache if it exceeds configured size in bytes
    pub max_file_size_tracker: Option<MaxFileSizeTracker>,
    // Optional cacheability predictor shared across requests.
    pub predictor: Option<&'static (dyn predictor::CacheablePredictor + Sync)>,
}
// Per-request overrides of cache behavior, passed to enable()/set_cache_lock().
// non_exhaustive: more override knobs may be added without a breaking change.
#[derive(Debug, Default)]
#[non_exhaustive]
pub struct CacheOptionOverrides {
    // Overrides how long a cache lock reader waits for the writer.
    pub wait_timeout: Option<Duration>,
}
impl HttpCache {
/// Create a new [HttpCache].
///
/// Caching is not enabled by default.
pub fn new() -> Self {
HttpCache {
phase: CachePhase::Disabled(NoCacheReason::NeverEnabled),
inner: None,
digest: HttpCacheDigest::default(),
}
}
/// Whether the cache is enabled
pub fn enabled(&self) -> bool {
    // Disabled and Bypass both count as "not enabled".
    match self.phase {
        CachePhase::Disabled(_) | CachePhase::Bypass => false,
        _ => true,
    }
}
/// Whether the cache is being bypassed
pub fn bypassing(&self) -> bool {
    self.phase == CachePhase::Bypass
}
/// Return the [CachePhase]
pub fn phase(&self) -> CachePhase {
    // CachePhase is Copy, so this returns the current phase by value.
    self.phase
}
/// Whether anything was fetched from the upstream
///
/// This essentially checks all possible [CachePhase] who need to contact the upstream server
pub fn upstream_used(&self) -> bool {
    use CachePhase::*;
    // Hit/Stale/StaleUpdating are served from cache; Uninit/CacheKey are
    // invalid states for this call and treated as false to keep it simple.
    matches!(
        self.phase,
        Disabled(_) | Bypass | Miss | Expired | Revalidated | RevalidatedNoCache(_)
    )
}
/// Check whether the backend storage is the type `T`.
pub fn storage_type_is<T: 'static>(&self) -> bool {
    // Walk inner -> enabled_ctx -> storage and try a downcast; any missing
    // layer (cache never enabled, or already disabled) yields false.
    self.inner
        .as_ref()
        .and_then(|inner| {
            inner
                .enabled_ctx
                .as_ref()
                .and_then(|ie| ie.storage.as_any().downcast_ref::<T>())
        })
        .is_some()
}
/// Release the cache lock if the current request is a cache writer.
///
/// Generally callers should prefer using `disable` when a cache lock should be released
/// due to an error to clear all cache context. This function is for releasing the cache lock
/// while still keeping the cache around for reading, e.g. when serving stale.
pub fn release_write_lock(&mut self, reason: NoCacheReason) {
    use NoCacheReason::*;
    if let Some(inner) = self.inner.as_mut() {
        if let Some(lock_ctx) = inner
            .enabled_ctx
            .as_mut()
            .and_then(|ie| ie.lock_ctx.as_mut())
        {
            // Take the held lock; only the write side is released here.
            // A taken read lock is simply dropped.
            let lock = lock_ctx.lock.take();
            if let Some(Locked::Write(permit)) = lock {
                // Map the no-cache reason to the status waiting readers observe.
                let lock_status = match reason {
                    // let the next request try to fetch it
                    InternalError | StorageError | Deferred | UpstreamError => {
                        LockStatus::TransientError
                    }
                    // depends on why the proxy upstream filter declined the request,
                    // for now still allow next request try to acquire to avoid thundering herd
                    DeclinedToUpstream => LockStatus::TransientError,
                    // no need for the lock anymore
                    OriginNotCache | ResponseTooLarge | PredictedResponseTooLarge => {
                        LockStatus::GiveUp
                    }
                    // not sure which LockStatus make sense, we treat it as GiveUp for now
                    Custom(_) => LockStatus::GiveUp,
                    // should never happen, NeverEnabled shouldn't hold a lock
                    NeverEnabled => panic!("NeverEnabled holds a write lock"),
                    CacheLockGiveUp | CacheLockTimeout => {
                        panic!("CacheLock* are for cache lock readers only")
                    }
                };
                // key must be set by the time any lock is held.
                lock_ctx
                    .cache_lock
                    .release(inner.key.as_ref().unwrap(), permit, lock_status);
            }
        }
    }
}
/// Disable caching
pub fn disable(&mut self, reason: NoCacheReason) {
    // XXX: compile type enforce?
    // NeverEnabled is reserved for the initial state set by new().
    assert!(
        reason != NoCacheReason::NeverEnabled,
        "NeverEnabled not allowed as a disable reason"
    );
    match self.phase {
        CachePhase::Disabled(old_reason) => {
            // replace reason
            if old_reason == NoCacheReason::NeverEnabled {
                // safeguard, don't allow replacing NeverEnabled as a reason
                // TODO: can be promoted to assertion once confirmed nothing is attempting this
                warn!("Tried to replace cache NeverEnabled with reason: {reason:?}");
                return;
            }
            self.phase = CachePhase::Disabled(reason);
        }
        _ => {
            // Transition to Disabled, release any write lock held,
            // then drop the enabled-only context.
            self.phase = CachePhase::Disabled(reason);
            self.release_write_lock(reason);
            // enabled_ctx will be cleared out
            let mut inner_enabled = self
                .inner_mut()
                .enabled_ctx
                .take()
                .expect("could remove enabled_ctx on disable");
            // log initial disable reason
            inner_enabled
                .traces
                .cache_span
                .set_tag(|| trace::Tag::new("disable_reason", reason.as_str()));
        }
    }
}
/* The following methods panic when they are used in the wrong phase.
* This is better than returning errors as such panics are only caused by coding error, which
* should be fixed right away. Tokio runtime only crashes the current task instead of the whole
* program when these panics happen. */
/// Set the cache to bypass
///
/// # Panic
/// This call is only allowed in [CachePhase::CacheKey] phase (before any cache lookup is performed).
/// Use it in any other phase will lead to panic.
pub fn bypass(&mut self) {
    match self.phase {
        CachePhase::CacheKey => {
            // before cache lookup / found / miss
            // Note: enabled_ctx is kept around in Bypass (see HttpCacheInner docs),
            // so tagging the trace span here is still valid.
            self.phase = CachePhase::Bypass;
            self.inner_enabled_mut()
                .traces
                .cache_span
                .set_tag(|| trace::Tag::new("bypassed", true));
        }
        _ => panic!("wrong phase to bypass HttpCache {:?}", self.phase),
    }
}
/// Enable the cache
///
/// - `storage`: the cache storage backend that implements [storage::Storage]
/// - `eviction`: optionally the eviction manager, without it, nothing will be evicted from the storage
/// - `predictor`: optionally a cache predictor. The cache predictor predicts whether something is likely
///   to be cacheable or not. This is useful because the proxy can apply different types of optimization to
///   cacheable and uncacheable requests.
/// - `cache_lock`: optionally a cache lock which handles concurrent lookups to the same asset. Without it
///   such lookups will all be allowed to fetch the asset independently.
/// - `option_overrides`: optional per-request overrides (e.g. the lock wait timeout).
///
/// # Panic
/// Panics if the cache is already enabled (phase is not Disabled).
pub fn enable(
    &mut self,
    storage: &'static (dyn storage::Storage + Sync),
    eviction: Option<&'static (dyn eviction::EvictionManager + Sync)>,
    predictor: Option<&'static (dyn predictor::CacheablePredictor + Sync)>,
    cache_lock: Option<&'static CacheKeyLockImpl>,
    option_overrides: Option<CacheOptionOverrides>,
) {
    match self.phase {
        CachePhase::Disabled(_) => {
            // Move to Uninit: enabled but no key/lookup yet.
            self.phase = CachePhase::Uninit;
            // Build the lock context only when a cache lock is configured.
            let lock_ctx = cache_lock.map(|cache_lock| LockCtx {
                cache_lock,
                lock: None,
                wait_timeout: option_overrides
                    .as_ref()
                    .and_then(|overrides| overrides.wait_timeout),
            });
            self.inner = Some(Box::new(HttpCacheInner {
                enabled_ctx: Some(Box::new(HttpCacheInnerEnabled {
                    meta: None,
                    valid_after: None,
                    miss_handler: None,
                    body_reader: None,
                    storage,
                    eviction,
                    lock_ctx,
                    traces: CacheTraceCTX::new(),
                })),
                key: None,
                max_file_size_tracker: None,
                predictor,
            }));
        }
        _ => panic!("Cannot enable already enabled HttpCache {:?}", self.phase),
    }
}
/// Set the cache lock implementation.
/// # Panic
/// Must be called before a cache lock is attempted to be acquired,
/// i.e. in the `cache_key_callback` or `cache_hit_filter` phases.
pub fn set_cache_lock(
    &mut self,
    cache_lock: Option<&'static CacheKeyLockImpl>,
    option_overrides: Option<CacheOptionOverrides>,
) {
    match self.phase {
        // Phases in which no lock can be held yet (or a stale/hit where
        // only a later lock acquisition is possible).
        CachePhase::Disabled(_)
        | CachePhase::CacheKey
        | CachePhase::Stale
        | CachePhase::Hit => {
            let inner_enabled = self.inner_enabled_mut();
            // Refuse to replace a lock context that already holds a lock:
            // doing so would leak the held permit.
            if inner_enabled
                .lock_ctx
                .as_ref()
                .is_some_and(|ctx| ctx.lock.is_some())
            {
                panic!("lock already set when resetting cache lock")
            } else {
                // Passing None for cache_lock clears the lock context.
                let lock_ctx = cache_lock.map(|cache_lock| LockCtx {
                    cache_lock,
                    lock: None,
                    wait_timeout: option_overrides.and_then(|overrides| overrides.wait_timeout),
                });
                inner_enabled.lock_ctx = lock_ctx;
            }
        }
        _ => panic!("wrong phase: {:?}", self.phase),
    }
}
// Enable distributed tracing
pub fn enable_tracing(&mut self, parent_span: trace::Span) {
    // No-op when cache was never enabled or is already disabled.
    let enabled = self
        .inner
        .as_mut()
        .and_then(|inner| inner.enabled_ctx.as_mut());
    if let Some(inner_enabled) = enabled {
        inner_enabled.traces.enable(parent_span);
    }
}
// Get the cache parent tracing span
pub fn get_cache_span(&self) -> Option<trace::SpanHandle> {
    // None when cache was never enabled or is disabled.
    self.inner
        .as_deref()
        .and_then(|i| i.enabled_ctx.as_deref())
        .map(|ie| ie.traces.get_cache_span())
}

// Get the cache `miss` tracing span
pub fn get_miss_span(&self) -> Option<trace::SpanHandle> {
    self.inner
        .as_deref()
        .and_then(|i| i.enabled_ctx.as_deref())
        .map(|ie| ie.traces.get_miss_span())
}

// Get the cache `hit` tracing span
pub fn get_hit_span(&self) -> Option<trace::SpanHandle> {
    self.inner
        .as_deref()
        .and_then(|i| i.enabled_ctx.as_deref())
        .map(|ie| ie.traces.get_hit_span())
}
// shortcut to access inner fields, panic if phase is disabled
#[inline]
fn inner_enabled_mut(&mut self) -> &mut HttpCacheInnerEnabled {
    self.inner.as_mut().unwrap().enabled_ctx.as_mut().unwrap()
}
// Immutable variant of inner_enabled_mut; same panic conditions.
#[inline]
fn inner_enabled(&self) -> &HttpCacheInnerEnabled {
    self.inner.as_ref().unwrap().enabled_ctx.as_ref().unwrap()
}
// shortcut to access inner fields, panic if cache was never enabled
#[inline]
fn inner_mut(&mut self) -> &mut HttpCacheInner {
    self.inner.as_mut().unwrap()
}
// Immutable variant of inner_mut; same panic conditions.
#[inline]
fn inner(&self) -> &HttpCacheInner {
    self.inner.as_ref().unwrap()
}
/// Set the cache key
/// # Panic
/// Cache key is only allowed to be set in its own phase. Set it in other phases will cause panic.
pub fn set_cache_key(&mut self, key: CacheKey) {
    match self.phase {
        // Allowed from Uninit (first set) and CacheKey (overwriting the key).
        CachePhase::Uninit | CachePhase::CacheKey => {
            self.phase = CachePhase::CacheKey;
            self.inner_mut().key = Some(key);
        }
        _ => panic!("wrong phase {:?}", self.phase),
    }
}
/// Return the cache key used for asset lookup
/// # Panic
/// Can only be called after the cache key is set and the cache is not disabled. Panic otherwise.
pub fn cache_key(&self) -> &CacheKey {
    match self.phase {
        // Only the never-enabled / pre-key states are invalid; a cache
        // disabled later still retains its key in inner.
        CachePhase::Disabled(NoCacheReason::NeverEnabled) | CachePhase::Uninit => {
            panic!("wrong phase {:?}", self.phase)
        }
        _ => self
            .inner()
            .key
            .as_ref()
            .expect("cache key should be set (set_cache_key not called?)"),
    }
}
/// Return the max size allowed to be cached.
///
/// Returns None when no max file size was configured.
/// # Panic
/// Panics if the cache was never enabled.
pub fn max_file_size_bytes(&self) -> Option<usize> {
    assert!(
        !matches!(
            self.phase,
            CachePhase::Disabled(NoCacheReason::NeverEnabled)
        ),
        "tried to access max file size bytes when cache never enabled"
    );
    self.inner()
        .max_file_size_tracker
        .as_ref()
        .map(|t| t.max_file_size_bytes())
}
/// Set the maximum response _body_ size in bytes that will be admitted to the cache.
///
/// Response header size should not contribute to the max file size.
///
/// To track body bytes, call `track_bytes_for_max_file_size`.
/// # Panic
/// Panics when the cache is disabled.
pub fn set_max_file_size_bytes(&mut self, max_file_size_bytes: usize) {
    match self.phase {
        CachePhase::Disabled(_) => panic!("wrong phase {:?}", self.phase),
        _ => {
            // Replaces any previously-configured tracker (resets the byte count).
            self.inner_mut().max_file_size_tracker =
                Some(MaxFileSizeTracker::new(max_file_size_bytes));
        }
    }
}
/// Record body bytes for the max file size tracker.
///
/// The `bytes_len` input contributes to a cumulative body byte tracker.
///
/// Once the cumulative body bytes exceeds the maximum allowable cache file size (as configured
/// by `set_max_file_size_bytes`), then the return value will be false.
///
/// Else the return value is true as long as the max file size is not exceeded.
/// If max file size was not configured, the return value is always true.
pub fn track_body_bytes_for_max_file_size(&mut self, bytes_len: usize) -> bool {
    // This is intended to be callable when cache has already been disabled,
    // so that we can re-mark an asset as cacheable if the body size is under limits.
    assert!(
        !matches!(
            self.phase,
            CachePhase::Disabled(NoCacheReason::NeverEnabled)
        ),
        "tried to access max file size bytes when cache never enabled"
    );
    // No tracker configured -> always true.
    self.inner_mut()
        .max_file_size_tracker
        .as_mut()
        .is_none_or(|t| t.add_body_bytes(bytes_len))
}
/// Check if the max file size has been exceeded according to max file size tracker.
///
/// Return true if max file size was exceeded.
/// # Panic
/// Panics if the cache was never enabled.
pub fn exceeded_max_file_size(&self) -> bool {
    assert!(
        !matches!(
            self.phase,
            CachePhase::Disabled(NoCacheReason::NeverEnabled)
        ),
        "tried to access max file size bytes when cache never enabled"
    );
    // No tracker configured -> never exceeded.
    self.inner()
        .max_file_size_tracker
        .as_ref()
        .is_some_and(|t| !t.allow_caching())
}
/// Set that cache is found in cache storage.
///
/// This function is called after [Self::cache_lookup()] which returns the [CacheMeta] and
/// [HitHandler].
///
/// The `hit_status` enum allows the caller to force expire assets.
pub fn cache_found(&mut self, meta: CacheMeta, hit_handler: HitHandler, hit_status: HitStatus) {
    // Stale allowed because of cache lock and then retry
    if !matches!(self.phase, CachePhase::CacheKey | CachePhase::Stale) {
        panic!("wrong phase {:?}", self.phase)
    }
    // Map hit status to the next phase; treated-as-miss statuses keep the
    // current phase (the caller is expected to follow up with cache_miss()).
    self.phase = match hit_status {
        HitStatus::Fresh | HitStatus::ForceFresh => CachePhase::Hit,
        HitStatus::Expired | HitStatus::ForceExpired => CachePhase::Stale,
        HitStatus::FailedHitFilter | HitStatus::ForceMiss => self.phase,
    };
    let phase = self.phase;
    let inner = self.inner_mut();
    let key = inner.key.as_ref().expect("key must be set on hit");
    let inner_enabled = inner
        .enabled_ctx
        .as_mut()
        .expect("cache_found must be called while cache enabled");
    // The cache lock might not be set for stale hit or hits treated as
    // misses, so we need to initialize it here
    let stale = phase == CachePhase::Stale;
    if stale || hit_status.is_treated_as_miss() {
        if let Some(lock_ctx) = inner_enabled.lock_ctx.as_mut() {
            lock_ctx.lock = Some(lock_ctx.cache_lock.lock(key, stale));
        }
    }
    if hit_status.is_treated_as_miss() {
        // Clear the body and meta for hits that are treated as misses
        inner_enabled.body_reader = None;
        inner_enabled.meta = None;
    } else {
        // Set the metadata appropriately for legit hits
        inner_enabled.traces.start_hit_span(phase, hit_status);
        inner_enabled.traces.log_meta_in_hit_span(&meta);
        if let Some(eviction) = inner_enabled.eviction {
            // TODO: make access() accept CacheKey
            let cache_key = key.to_compact();
            // Record the access for the eviction manager so the asset's
            // recency/weight is updated.
            if hit_handler.should_count_access() {
                let size = hit_handler.get_eviction_weight();
                eviction.access(&cache_key, size, meta.0.internal.fresh_until);
            }
        }
        inner_enabled.meta = Some(meta);
        inner_enabled.body_reader = Some(hit_handler);
    }
}
/// Mark `self` to be cache miss.
///
/// This function is called after [Self::cache_lookup()] finds nothing or the caller decides
/// not to use the assets found.
/// # Panic
/// Panic in other phases.
pub fn cache_miss(&mut self) {
    match self.phase {
        // from CacheKey: set state to miss during cache lookup
        // from Bypass: response became cacheable, set state to miss to cache
        // from Stale: waited for cache lock, then retried and found asset was gone
        CachePhase::CacheKey | CachePhase::Bypass | CachePhase::Stale => {
            self.phase = CachePhase::Miss;
            // It's possible that we've set the meta on lookup and have come back around
            // here after not being able to acquire the cache lock, and our item has since
            // purged or expired. We should be sure that the meta is not set in this case
            // as there shouldn't be a meta set for cache misses.
            self.inner_enabled_mut().meta = None;
            self.inner_enabled_mut().traces.start_miss_span();
        }
        _ => panic!("wrong phase {:?}", self.phase),
    }
}
/// Return the [HitHandler]
/// # Panic
/// Call this after [Self::cache_found()], panic in other phases.
pub fn hit_handler(&mut self) -> &mut HitHandler {
    match self.phase {
        // All phases in which a body_reader was installed by cache_found().
        CachePhase::Hit
        | CachePhase::Stale
        | CachePhase::StaleUpdating
        | CachePhase::Revalidated
        | CachePhase::RevalidatedNoCache(_) => {
            self.inner_enabled_mut().body_reader.as_mut().unwrap()
        }
        _ => panic!("wrong phase {:?}", self.phase),
    }
}
/// Return the body reader during a cache admission (miss/expired) which decouples the downstream
/// read and upstream cache write
///
/// Returns None outside of Miss/Expired, or when the storage backend cannot
/// stream partially-written assets.
pub fn miss_body_reader(&mut self) -> Option<&mut HitHandler> {
    match self.phase {
        CachePhase::Miss | CachePhase::Expired => {
            let inner_enabled = self.inner_enabled_mut();
            if inner_enabled.storage.support_streaming_partial_write() {
                inner_enabled.body_reader.as_mut()
            } else {
                // body_reader could be set even when the storage doesn't support streaming
                // Expired cache would have the reader set.
                None
            }
        }
        _ => None,
    }
}
/// Return whether the underlying storage backend supports streaming partial write.
///
/// Returns None if cache is not enabled.
pub fn support_streaming_partial_write(&self) -> Option<bool> {
    let inner = self.inner.as_deref()?;
    let ctx = inner.enabled_ctx.as_deref()?;
    Some(ctx.storage.support_streaming_partial_write())
}
/// Call this when cache hit is fully read.
///
/// This call will release resource if any and log the timing in tracing if set.
/// # Panic
/// Panic in phases where there is no cache hit.
pub async fn finish_hit_handler(&mut self) -> Result<()> {
match self.phase {
CachePhase::Hit
| CachePhase::Miss
| CachePhase::Expired
| CachePhase::Stale
| CachePhase::StaleUpdating
| CachePhase::Revalidated
| CachePhase::RevalidatedNoCache(_) => {
let inner = self.inner_mut();
let inner_enabled = inner.enabled_ctx.as_mut().expect("cache enabled");
if inner_enabled.body_reader.is_none() {
// already finished, we allow calling this function more than once
return Ok(());
}
let body_reader = inner_enabled.body_reader.take().unwrap();
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | true |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/put.rs | pingora-cache/src/put.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Cache Put module
use crate::max_file_size::ERR_RESPONSE_TOO_LARGE;
use crate::*;
use bytes::Bytes;
use http::header;
use log::warn;
use pingora_core::protocols::http::{
v1::common::header_value_content_length, HttpTask, ServerSession,
};
use pingora_error::Error;
/// The interface to define cache put behavior
pub trait CachePut {
    /// Return whether to cache the asset according to the given response header.
    fn cacheable(&self, response: ResponseHeader) -> RespCacheable {
        // Default: parse Cache-Control from the response and run the shared
        // response cacheability filter with this implementation's defaults.
        // NOTE(review): the `false` argument's meaning is defined by
        // filters::resp_cacheable — confirm against that function.
        let cc = cache_control::CacheControl::from_resp_headers(&response);
        filters::resp_cacheable(cc.as_ref(), response, false, Self::cache_defaults())
    }
    /// Return the [CacheMetaDefaults]
    fn cache_defaults() -> &'static CacheMetaDefaults;
    /// Put interesting things in the span given the parsed response header.
    fn trace_header(&mut self, _response: &ResponseHeader) {}
}
use parse_response::ResponseParse;
/// The cache put context
pub struct CachePutCtx<C: CachePut> {
    cache_put: C, // the user defined cache put behavior
    // The cache key under which the asset is written.
    key: CacheKey,
    storage: &'static (dyn storage::Storage + Sync), // static for now
    // Optional eviction manager, notified after a successful write.
    eviction: Option<&'static (dyn eviction::EvictionManager + Sync)>,
    // Write handle, created by put_header() once the response is deemed cacheable.
    miss_handler: Option<MissHandler>,
    // Optional body-size limiter; set via set_max_file_size_bytes().
    max_file_size_tracker: Option<MaxFileSizeTracker>,
    // Parsed cache meta for the asset being written.
    meta: Option<CacheMeta>,
    // Incremental HTTP response parser fed by do_cache_put().
    parser: ResponseParse,
    // FIXME: cache put doesn't have cache lock but some storage cannot handle concurrent put
    // to the same asset.
    trace: trace::Span,
}
impl<C: CachePut> CachePutCtx<C> {
/// Create a new [CachePutCtx]
pub fn new(
cache_put: C,
key: CacheKey,
storage: &'static (dyn storage::Storage + Sync),
eviction: Option<&'static (dyn eviction::EvictionManager + Sync)>,
trace: trace::Span,
) -> Self {
CachePutCtx {
cache_put,
key,
storage,
eviction,
miss_handler: None,
max_file_size_tracker: None,
meta: None,
parser: ResponseParse::new(),
trace,
}
}
/// Set the max cacheable size limit
// Replaces any previously-configured tracker (resets the byte count).
pub fn set_max_file_size_bytes(&mut self, max_file_size_bytes: usize) {
    self.max_file_size_tracker = Some(MaxFileSizeTracker::new(max_file_size_bytes));
}
// Open the storage write handle for the asset and record its meta.
// Called once, when the response header is parsed and deemed cacheable.
async fn put_header(&mut self, meta: CacheMeta) -> Result<()> {
    let mut trace = self.trace.child("cache put header", |o| o.start());
    let miss_handler = self
        .storage
        .get_miss_handler(&self.key, &meta, &trace.handle())
        .await?;
    trace::tag_span_with_meta(&mut trace, &meta);
    self.miss_handler = Some(miss_handler);
    self.meta = Some(meta);
    Ok(())
}
// Write one chunk of response body to storage.
// Errors with ERR_RESPONSE_TOO_LARGE if the cumulative body size would
// exceed the configured max file size.
// Precondition: put_header() succeeded (miss_handler is set), else panics.
async fn put_body(&mut self, data: Bytes, eof: bool) -> Result<()> {
    // fail if writing the body would exceed the max_file_size_bytes
    if let Some(size_tracker) = self.max_file_size_tracker.as_mut() {
        let body_size_allowed = size_tracker.add_body_bytes(data.len());
        if !body_size_allowed {
            return Error::e_explain(
                ERR_RESPONSE_TOO_LARGE,
                format!(
                    "writing data of size {} bytes would exceed max file size of {} bytes",
                    data.len(),
                    size_tracker.max_file_size_bytes(),
                ),
            );
        }
    }
    let miss_handler = self.miss_handler.as_mut().unwrap();
    miss_handler.write_body(data, eof).await
}
// Finalize the cache write: commit the miss handler and notify the
// eviction manager of the new/updated asset weight.
async fn finish(&mut self) -> Result<()> {
    let Some(miss_handler) = self.miss_handler.take() else {
        // no miss_handler, uncacheable
        return Ok(());
    };
    let finish = miss_handler.finish().await?;
    if let Some(eviction) = self.eviction.as_ref() {
        let cache_key = self.key.to_compact();
        // meta is always set together with miss_handler in put_header().
        let meta = self.meta.as_ref().unwrap();
        let evicted = match finish {
            MissFinishType::Appended(delta, max_size) => {
                eviction.increment_weight(&cache_key, delta, max_size)
            }
            MissFinishType::Created(size) => {
                eviction.admit(cache_key, size, meta.0.internal.fresh_until)
            }
        };
        // actual eviction can be done async
        let trace = self
            .trace
            .child("cache put eviction", |o| o.start())
            .handle();
        let storage = self.storage;
        tokio::task::spawn(async move {
            // Best effort: purge failures are logged, not propagated.
            for item in evicted {
                if let Err(e) = storage.purge(&item, PurgeType::Eviction, &trace).await {
                    warn!("Failed to purge {item} during eviction for cache put: {e}");
                }
            }
        });
    }
    Ok(())
}
fn trace_header(&mut self, header: &ResponseHeader) {
self.trace.set_tag(|| {
Tag::new(
"cache-control",
header
.headers
.get_all(http::header::CACHE_CONTROL)
.into_iter()
.map(|v| String::from_utf8_lossy(v.as_bytes()).to_string())
.collect::<Vec<_>>()
.join(","),
)
});
}
async fn do_cache_put(&mut self, data: &[u8]) -> Result<Option<NoCacheReason>> {
let tasks = self.parser.inject_data(data)?;
for task in tasks {
match task {
HttpTask::Header(header, _eos) => {
self.trace_header(&header);
match self.cache_put.cacheable(*header) {
RespCacheable::Cacheable(meta) => {
if let Some(max_file_size_tracker) = &self.max_file_size_tracker {
let content_length_hdr = meta.headers().get(header::CONTENT_LENGTH);
if let Some(content_length) =
header_value_content_length(content_length_hdr)
{
if content_length > max_file_size_tracker.max_file_size_bytes()
{
return Ok(Some(NoCacheReason::ResponseTooLarge));
}
}
}
self.put_header(meta).await?;
}
RespCacheable::Uncacheable(reason) => {
return Ok(Some(reason));
}
}
}
HttpTask::Body(data, eos) => {
if let Some(data) = data {
self.put_body(data, eos).await?;
}
}
_ => {
panic!("unexpected HttpTask during cache put {task:?}");
}
}
}
Ok(None)
}
/// Start the cache put logic for the given request
///
/// This function will start to read the request body to put into cache.
/// Return:
/// - `Ok(None)` when the payload will be cache.
/// - `Ok(Some(reason))` when the payload is not cacheable
pub async fn cache_put(
&mut self,
session: &mut ServerSession,
) -> Result<Option<NoCacheReason>> {
let mut no_cache_reason = None;
while let Some(data) = session.read_request_body().await? {
if no_cache_reason.is_some() {
// even uncacheable, the entire body needs to be drains for 1. downstream
// not throwing errors 2. connection reuse
continue;
}
no_cache_reason = self.do_cache_put(&data).await?
}
self.parser.finish()?;
self.finish().await?;
if let Some(reason) = no_cache_reason {
self.trace
.set_tag(|| Tag::new("uncacheable_reason", reason.as_str()));
}
Ok(no_cache_reason)
}
}
#[cfg(test)]
mod test {
    use super::*;
    use cf_rustracing::span::Span;
    use once_cell::sync::Lazy;
    // Minimal CachePut impl: everything fresh for 1s, defaults elsewhere.
    struct TestCachePut();
    impl CachePut for TestCachePut {
        fn cache_defaults() -> &'static CacheMetaDefaults {
            const DEFAULT: CacheMetaDefaults =
                CacheMetaDefaults::new(|_| Some(Duration::from_secs(1)), 1, 1);
            &DEFAULT
        }
    }
    type TestCachePutCtx = CachePutCtx<TestCachePut>;
    // shared in-memory storage backend for all tests below
    static CACHE_BACKEND: Lazy<MemCache> = Lazy::new(MemCache::new);
    // Happy path: a cacheable 200 response is stored and can be looked up.
    #[tokio::test]
    async fn test_cache_put() {
        let key = CacheKey::new("", "a", "1");
        let span = Span::inactive();
        let put = TestCachePut();
        let mut ctx = TestCachePutCtx::new(put, key.clone(), &*CACHE_BACKEND, None, span);
        let payload = b"HTTP/1.1 200 OK\r\n\
        Date: Thu, 26 Apr 2018 05:42:05 GMT\r\n\
        Content-Type: text/html; charset=utf-8\r\n\
        Connection: keep-alive\r\n\
        X-Frame-Options: SAMEORIGIN\r\n\
        Cache-Control: public, max-age=1\r\n\
        Server: origin-server\r\n\
        Content-Length: 4\r\n\r\nrust";
        // here we skip mocking a real http session for simplicity
        let res = ctx.do_cache_put(payload).await.unwrap();
        assert!(res.is_none()); // cacheable
        ctx.parser.finish().unwrap();
        ctx.finish().await.unwrap();
        let span = Span::inactive();
        let (meta, mut hit) = CACHE_BACKEND
            .lookup(&key, &span.handle())
            .await
            .unwrap()
            .unwrap();
        assert_eq!(
            meta.headers().get("date").unwrap(),
            "Thu, 26 Apr 2018 05:42:05 GMT"
        );
        let data = hit.read_body().await.unwrap().unwrap();
        assert_eq!(data, "rust");
    }
    // Cache-Control: no-store must be reported as uncacheable.
    #[tokio::test]
    async fn test_cache_put_uncacheable() {
        let key = CacheKey::new("", "a", "1");
        let span = Span::inactive();
        let put = TestCachePut();
        let mut ctx = TestCachePutCtx::new(put, key.clone(), &*CACHE_BACKEND, None, span);
        let payload = b"HTTP/1.1 200 OK\r\n\
        Date: Thu, 26 Apr 2018 05:42:05 GMT\r\n\
        Content-Type: text/html; charset=utf-8\r\n\
        Connection: keep-alive\r\n\
        X-Frame-Options: SAMEORIGIN\r\n\
        Cache-Control: no-store\r\n\
        Server: origin-server\r\n\
        Content-Length: 4\r\n\r\nrust";
        // here we skip mocking a real http session for simplicity
        let no_cache = ctx.do_cache_put(payload).await.unwrap().unwrap();
        assert_eq!(no_cache, NoCacheReason::OriginNotCache);
        ctx.parser.finish().unwrap();
        ctx.finish().await.unwrap();
    }
    // A 204 with a (bogus) body: the body must be ignored, empty asset stored.
    #[tokio::test]
    async fn test_cache_put_204_invalid_body() {
        let key = CacheKey::new("", "b", "1");
        let span = Span::inactive();
        let put = TestCachePut();
        let mut ctx = TestCachePutCtx::new(put, key.clone(), &*CACHE_BACKEND, None, span);
        let payload = b"HTTP/1.1 204 OK\r\n\
        Date: Thu, 26 Apr 2018 05:42:05 GMT\r\n\
        Content-Type: text/html; charset=utf-8\r\n\
        Connection: keep-alive\r\n\
        X-Frame-Options: SAMEORIGIN\r\n\
        Cache-Control: public, max-age=1\r\n\
        Server: origin-server\r\n\
        Content-Length: 4\r\n\r\n";
        // here we skip mocking a real http session for simplicity
        let res = ctx.do_cache_put(payload).await.unwrap();
        assert!(res.is_none()); // cacheable
        // 204 should not have body, invalid client input may try to pass one
        let res = ctx.do_cache_put(b"rust").await.unwrap();
        assert!(res.is_none()); // still cacheable
        ctx.parser.finish().unwrap();
        ctx.finish().await.unwrap();
        let span = Span::inactive();
        let (meta, mut hit) = CACHE_BACKEND
            .lookup(&key, &span.handle())
            .await
            .unwrap()
            .unwrap();
        assert_eq!(
            meta.headers().get("date").unwrap(),
            "Thu, 26 Apr 2018 05:42:05 GMT"
        );
        // just treated as empty body
        // (TODO: should we reset content-length/transfer-encoding
        // headers on 204/304?)
        let data = hit.read_body().await.unwrap().unwrap();
        assert!(data.is_empty());
    }
    // Bytes beyond Content-Length must be drained but not stored.
    #[tokio::test]
    async fn test_cache_put_extra_body() {
        let key = CacheKey::new("", "c", "1");
        let span = Span::inactive();
        let put = TestCachePut();
        let mut ctx = TestCachePutCtx::new(put, key.clone(), &*CACHE_BACKEND, None, span);
        let payload = b"HTTP/1.1 200 OK\r\n\
        Date: Thu, 26 Apr 2018 05:42:05 GMT\r\n\
        Content-Type: text/html; charset=utf-8\r\n\
        Connection: keep-alive\r\n\
        X-Frame-Options: SAMEORIGIN\r\n\
        Cache-Control: public, max-age=1\r\n\
        Server: origin-server\r\n\
        Content-Length: 4\r\n\r\n";
        // here we skip mocking a real http session for simplicity
        let res = ctx.do_cache_put(payload).await.unwrap();
        assert!(res.is_none()); // cacheable
        // pass in more extra request body that needs to be drained
        let res = ctx.do_cache_put(b"rustab").await.unwrap();
        assert!(res.is_none()); // still cacheable
        let res = ctx.do_cache_put(b"cdef").await.unwrap();
        assert!(res.is_none()); // still cacheable
        ctx.parser.finish().unwrap();
        ctx.finish().await.unwrap();
        let span = Span::inactive();
        let (meta, mut hit) = CACHE_BACKEND
            .lookup(&key, &span.handle())
            .await
            .unwrap()
            .unwrap();
        assert_eq!(
            meta.headers().get("date").unwrap(),
            "Thu, 26 Apr 2018 05:42:05 GMT"
        );
        let data = hit.read_body().await.unwrap().unwrap();
        // body only contains specified content-length bounds
        assert_eq!(data, "rust");
    }
}
// maybe this can simplify some logic in pingora::h1
mod parse_response {
    //! Minimal HTTP/1.x response parser that splits a raw cache-put payload
    //! into header and body [HttpTask]s. The body is taken verbatim: chunked
    //! encoding is NOT decoded (see [body_type]).
    use super::*;
    use bstr::ByteSlice;
    use bytes::BytesMut;
    use httparse::Status;
    use pingora_error::{
        Error,
        ErrorType::{self, *},
    };
    pub const INCOMPLETE_BODY: ErrorType = ErrorType::new("IncompleteHttpBody");
    // max number of headers accepted when parsing a response head
    const MAX_HEADERS: usize = 256;
    // initial capacity of the internal accumulation buffer
    const INIT_HEADER_BUF_SIZE: usize = 4096;
    // Parser state machine. The usize payloads count body bytes seen so far
    // (plus, for content-length bodies, the expected total).
    #[derive(Debug, Clone, Copy, PartialEq)]
    enum ParseState {
        Init,
        PartialHeader,
        // (total expected from Content-Length, bytes seen so far)
        PartialBodyContentLength(usize, usize),
        // body of unknown length: (bytes seen so far)
        PartialBody(usize),
        // finished: (total body bytes)
        Done(usize),
        // header parsing failed and the parser cannot continue
        Invalid(httparse::Error),
    }
    impl ParseState {
        fn is_done(&self) -> bool {
            matches!(self, Self::Done(_))
        }
        // still expecting header bytes?
        fn read_header(&self) -> bool {
            matches!(self, Self::Init | Self::PartialHeader)
        }
        // still expecting body bytes?
        fn read_body(&self) -> bool {
            matches!(
                self,
                Self::PartialBodyContentLength(..) | Self::PartialBody(_)
            )
        }
    }
    pub(super) struct ResponseParse {
        state: ParseState,
        // input bytes not yet consumed by the parser
        buf: BytesMut,
        // the raw header bytes, frozen once the header is fully parsed
        header_bytes: Bytes,
    }
    impl ResponseParse {
        pub fn new() -> Self {
            ResponseParse {
                state: ParseState::Init,
                buf: BytesMut::with_capacity(INIT_HEADER_BUF_SIZE),
                header_bytes: Bytes::new(),
            }
        }
        // Feed more raw bytes in and return the header/body tasks that became
        // complete. Returns an error on malformed headers.
        pub fn inject_data(&mut self, data: &[u8]) -> Result<Vec<HttpTask>> {
            if self.state.is_done() {
                // just ignore extra response body after parser is done
                // could be invalid body appended to a no-content status
                // or invalid body after content-length
                // TODO: consider propagating an error to the client
                return Ok(vec![]);
            }
            self.put_data(data);
            let mut tasks = vec![];
            // drain as many complete header/body pieces from the buffer as possible
            while !self.state.is_done() {
                if self.state.read_header() {
                    let header = self.parse_header()?;
                    let Some(header) = header else {
                        break;
                    };
                    tasks.push(HttpTask::Header(Box::new(header), self.state.is_done()));
                } else if self.state.read_body() {
                    let body = self.parse_body()?;
                    let Some(body) = body else {
                        break;
                    };
                    tasks.push(HttpTask::Body(Some(body), self.state.is_done()));
                } else {
                    break;
                }
            }
            Ok(tasks)
        }
        // Append raw bytes to the buffer; only legal while parsing is ongoing.
        fn put_data(&mut self, data: &[u8]) {
            use ParseState::*;
            if matches!(self.state, Done(_) | Invalid(_)) {
                panic!("Wrong phase {:?}", self.state);
            }
            self.buf.extend_from_slice(data);
        }
        // Try to parse a complete response head out of the buffer.
        // Returns Ok(None) when more bytes are needed.
        fn parse_header(&mut self) -> Result<Option<ResponseHeader>> {
            let mut headers = [httparse::EMPTY_HEADER; MAX_HEADERS];
            let mut resp = httparse::Response::new(&mut headers);
            let mut parser = httparse::ParserConfig::default();
            parser.allow_spaces_after_header_name_in_responses(true);
            parser.allow_obsolete_multiline_headers_in_responses(true);
            let res = parser.parse_response(&mut resp, &self.buf);
            let res = match res {
                Ok(res) => res,
                Err(e) => {
                    // unrecoverable: remember the error and bail
                    self.state = ParseState::Invalid(e);
                    return Error::e_because(
                        InvalidHTTPHeader,
                        format!("buf: {:?}", self.buf.as_bstr()),
                        e,
                    );
                }
            };
            let split_to = match res {
                Status::Complete(s) => s,
                Status::Partial => {
                    self.state = ParseState::PartialHeader;
                    return Ok(None);
                }
            };
            // safe to unwrap, valid response always has code set.
            let mut response =
                ResponseHeader::build(resp.code.unwrap(), Some(resp.headers.len())).unwrap();
            for header in resp.headers {
                // TODO: consider hold a Bytes and all header values can be Bytes referencing the
                // original buffer without reallocation
                response.append_header(header.name.to_owned(), header.value.to_owned())?;
            }
            // TODO: see above, we can make header value `Bytes` referencing header_bytes
            let header_bytes = self.buf.split_to(split_to).freeze();
            self.header_bytes = header_bytes;
            self.state = body_type(&response);
            Ok(Some(response))
        }
        // Take the next chunk of body bytes out of the buffer.
        // Returns Ok(None) when no bytes are buffered.
        fn parse_body(&mut self) -> Result<Option<Bytes>> {
            use ParseState::*;
            if self.buf.is_empty() {
                return Ok(None);
            }
            match self.state {
                Init | PartialHeader | Invalid(_) => {
                    panic!("Wrong phase {:?}", self.state);
                }
                Done(_) => Ok(None),
                PartialBodyContentLength(total, mut seen) => {
                    // only take up to the declared Content-Length
                    let end = if total < self.buf.len() + seen {
                        // TODO: warn! more data than expected
                        total - seen
                    } else {
                        self.buf.len()
                    };
                    seen += end;
                    if seen >= total {
                        self.state = Done(seen);
                    } else {
                        self.state = PartialBodyContentLength(total, seen);
                    }
                    Ok(Some(self.buf.split_to(end).freeze()))
                }
                PartialBody(seen) => {
                    // unknown length: hand over everything buffered so far
                    self.state = PartialBody(seen + self.buf.len());
                    Ok(Some(self.buf.split().freeze()))
                }
            }
        }
        // Declare end of input. A length-delimited body that is still short of
        // its Content-Length is an INCOMPLETE_BODY error; an unknown-length
        // body is simply closed out.
        pub fn finish(&mut self) -> Result<()> {
            if let ParseState::PartialBody(seen) = self.state {
                self.state = ParseState::Done(seen);
            }
            if !self.state.is_done() {
                Error::e_explain(INCOMPLETE_BODY, format!("{:?}", self.state))
            } else {
                Ok(())
            }
        }
    }
    // Derive the initial body-reading state from the parsed response head.
    fn body_type(resp: &ResponseHeader) -> ParseState {
        use http::StatusCode;
        if matches!(
            resp.status,
            StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED
        ) {
            // these status codes cannot have body by definition
            return ParseState::Done(0);
        }
        if let Some(cl) = resp.headers.get(http::header::CONTENT_LENGTH) {
            // ignore invalid header value
            if let Some(cl) = std::str::from_utf8(cl.as_bytes())
                .ok()
                .and_then(|cl| cl.parse::<usize>().ok())
            {
                return if cl == 0 {
                    ParseState::Done(0)
                } else {
                    ParseState::PartialBodyContentLength(cl, 0)
                };
            }
        }
        // HTTP/1.0 and chunked encoding are both treated as PartialBody
        // The response body payload should _not_ be chunked encoded
        // even if the Transfer-Encoding: chunked header is added
        ParseState::PartialBody(0)
    }
    #[cfg(test)]
    mod test {
        use super::*;
        #[test]
        fn test_basic_response() {
            let input = b"HTTP/1.1 200 OK\r\n\r\n";
            let mut parser = ResponseParse::new();
            let output = parser.inject_data(input).unwrap();
            assert_eq!(output.len(), 1);
            let HttpTask::Header(header, eos) = &output[0] else {
                panic!("{:?}", output);
            };
            assert_eq!(header.status, 200);
            assert!(!eos);
            let body = b"abc";
            let output = parser.inject_data(body).unwrap();
            assert_eq!(output.len(), 1);
            let HttpTask::Body(data, _eos) = &output[0] else {
                panic!("{:?}", output);
            };
            assert_eq!(data.as_ref().unwrap(), &body[..]);
            parser.finish().unwrap();
        }
        #[test]
        fn test_partial_response_headers() {
            let input = b"HTTP/1.1 200 OK\r\n";
            let mut parser = ResponseParse::new();
            let output = parser.inject_data(input).unwrap();
            // header is not complete
            assert_eq!(output.len(), 0);
            let output = parser
                .inject_data("Server: pingora\r\n\r\n".as_bytes())
                .unwrap();
            assert_eq!(output.len(), 1);
            let HttpTask::Header(header, eos) = &output[0] else {
                panic!("{:?}", output);
            };
            assert_eq!(header.status, 200);
            assert_eq!(header.headers.get("Server").unwrap(), "pingora");
            assert!(!eos);
        }
        #[test]
        fn test_invalid_headers() {
            // "HTP" is an invalid protocol token
            let input = b"HTP/1.1 200 OK\r\nServer: pingora\r\n\r\n";
            let mut parser = ResponseParse::new();
            let output = parser.inject_data(input);
            // header is not complete
            assert!(output.is_err());
            match parser.state {
                ParseState::Invalid(httparse::Error::Version) => {}
                _ => panic!("should have failed to parse"),
            }
        }
        #[test]
        fn test_body_content_length() {
            let input = b"HTTP/1.1 200 OK\r\nContent-Length: 6\r\n\r\nabc";
            let mut parser = ResponseParse::new();
            let output = parser.inject_data(input).unwrap();
            assert_eq!(output.len(), 2);
            let HttpTask::Header(header, _eos) = &output[0] else {
                panic!("{:?}", output);
            };
            assert_eq!(header.status, 200);
            let HttpTask::Body(data, eos) = &output[1] else {
                panic!("{:?}", output);
            };
            assert_eq!(data.as_ref().unwrap(), "abc");
            assert!(!eos);
            let output = parser.inject_data(b"def").unwrap();
            assert_eq!(output.len(), 1);
            let HttpTask::Body(data, eos) = &output[0] else {
                panic!("{:?}", output);
            };
            assert_eq!(data.as_ref().unwrap(), "def");
            assert!(eos);
            parser.finish().unwrap();
        }
        #[test]
        fn test_body_chunked() {
            // chunked payloads are passed through verbatim, not decoded
            let input = b"HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\nrust";
            let mut parser = ResponseParse::new();
            let output = parser.inject_data(input).unwrap();
            assert_eq!(output.len(), 2);
            let HttpTask::Header(header, _eos) = &output[0] else {
                panic!("{:?}", output);
            };
            assert_eq!(header.status, 200);
            let HttpTask::Body(data, eos) = &output[1] else {
                panic!("{:?}", output);
            };
            assert_eq!(data.as_ref().unwrap(), "rust");
            assert!(!eos);
            parser.finish().unwrap();
        }
        #[test]
        fn test_body_content_length_early() {
            // finishing before Content-Length is satisfied must error
            let input = b"HTTP/1.1 200 OK\r\nContent-Length: 6\r\n\r\nabc";
            let mut parser = ResponseParse::new();
            let output = parser.inject_data(input).unwrap();
            assert_eq!(output.len(), 2);
            let HttpTask::Header(header, _eos) = &output[0] else {
                panic!("{:?}", output);
            };
            assert_eq!(header.status, 200);
            let HttpTask::Body(data, eos) = &output[1] else {
                panic!("{:?}", output);
            };
            assert_eq!(data.as_ref().unwrap(), "abc");
            assert!(!eos);
            parser.finish().unwrap_err();
        }
        #[test]
        fn test_body_content_length_more_data() {
            let input = b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nabc";
            let mut parser = ResponseParse::new();
            let output = parser.inject_data(input).unwrap();
            assert_eq!(output.len(), 2);
            let HttpTask::Header(header, _eos) = &output[0] else {
                panic!("{:?}", output);
            };
            assert_eq!(header.status, 200);
            let HttpTask::Body(data, eos) = &output[1] else {
                panic!("{:?}", output);
            };
            assert_eq!(data.as_ref().unwrap(), "ab");
            assert!(eos);
            // extra data is dropped without error
            parser.finish().unwrap();
        }
        #[test]
        fn test_body_chunked_partial_chunk() {
            let input = b"HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\nru";
            let mut parser = ResponseParse::new();
            let output = parser.inject_data(input).unwrap();
            assert_eq!(output.len(), 2);
            let HttpTask::Header(header, _eos) = &output[0] else {
                panic!("{:?}", output);
            };
            assert_eq!(header.status, 200);
            let HttpTask::Body(data, eos) = &output[1] else {
                panic!("{:?}", output);
            };
            assert_eq!(data.as_ref().unwrap(), "ru");
            assert!(!eos);
            let output = parser.inject_data(b"st\r\n").unwrap();
            assert_eq!(output.len(), 1);
            let HttpTask::Body(data, eos) = &output[0] else {
                panic!("{:?}", output);
            };
            assert_eq!(data.as_ref().unwrap(), "st\r\n");
            assert!(!eos);
        }
        #[test]
        fn test_no_body_content_length() {
            let input = b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n";
            let mut parser = ResponseParse::new();
            let output = parser.inject_data(input).unwrap();
            assert_eq!(output.len(), 1);
            let HttpTask::Header(header, eos) = &output[0] else {
                panic!("{:?}", output);
            };
            assert_eq!(header.status, 200);
            assert!(eos);
            parser.finish().unwrap();
        }
        #[test]
        fn test_no_body_304_no_content_length() {
            let input = b"HTTP/1.1 304 Not Modified\r\nCache-Control: public, max-age=10\r\n\r\n";
            let mut parser = ResponseParse::new();
            let output = parser.inject_data(input).unwrap();
            assert_eq!(output.len(), 1);
            let HttpTask::Header(header, eos) = &output[0] else {
                panic!("{:?}", output);
            };
            assert_eq!(header.status, 304);
            assert!(eos);
            parser.finish().unwrap();
        }
        #[test]
        fn test_204_with_chunked_body() {
            let input = b"HTTP/1.1 204 No Content\r\nCache-Control: public, max-age=10\r\nTransfer-Encoding: chunked\r\n\r\n";
            let mut parser = ResponseParse::new();
            let output = parser.inject_data(input).unwrap();
            assert_eq!(output.len(), 1);
            let HttpTask::Header(header, eos) = &output[0] else {
                panic!("{:?}", output);
            };
            assert_eq!(header.status, 204);
            assert!(eos);
            // 204 should not have a body, parser ignores bad input
            let output = parser.inject_data(b"4\r\nrust\r\n0\r\n\r\n").unwrap();
            assert!(output.is_empty());
            parser.finish().unwrap();
        }
        #[test]
        fn test_204_with_content_length() {
            let input = b"HTTP/1.1 204 No Content\r\nCache-Control: public, max-age=10\r\nContent-Length: 4\r\n\r\n";
            let mut parser = ResponseParse::new();
            let output = parser.inject_data(input).unwrap();
            assert_eq!(output.len(), 1);
            let HttpTask::Header(header, eos) = &output[0] else {
                panic!("{:?}", output);
            };
            assert_eq!(header.status, 204);
            assert!(eos);
            // 204 should not have a body, parser ignores bad input
            let output = parser.inject_data(b"rust").unwrap();
            assert!(output.is_empty());
            parser.finish().unwrap();
        }
        #[test]
        fn test_200_with_zero_content_length_more_data() {
            let input = b"HTTP/1.1 200 OK\r\nCache-Control: public, max-age=10\r\nContent-Length: 0\r\n\r\n";
            let mut parser = ResponseParse::new();
            let output = parser.inject_data(input).unwrap();
            assert_eq!(output.len(), 1);
            let HttpTask::Header(header, eos) = &output[0] else {
                panic!("{:?}", output);
            };
            assert_eq!(header.status, 200);
            assert!(eos);
            let output = parser.inject_data(b"rust").unwrap();
            assert!(output.is_empty());
            parser.finish().unwrap();
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/filters.rs | pingora-cache/src/filters.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Utility functions to help process HTTP headers for caching
use super::*;
use crate::cache_control::{CacheControl, Cacheable, InterpretCacheControl};
use crate::RespCacheable::*;
use cache_control::DELTA_SECONDS_OVERFLOW_VALUE;
use http::{header, HeaderValue};
use httpdate::HttpDate;
use log::debug;
use pingora_http::RequestHeader;
/// Decide if the request can be cacheable
pub fn request_cacheable(req_header: &ReqHeader) -> bool {
    // TODO: the check is incomplete
    match req_header.method {
        Method::GET | Method::HEAD => true,
        _ => false,
    }
}
/// Decide if the response is cacheable.
///
/// `cache_control` is the parsed [CacheControl] from the response header. It is a standalone
/// argument so that caller has the flexibility to choose to use, change or ignore it.
pub fn resp_cacheable(
    cache_control: Option<&CacheControl>,
    mut resp_header: ResponseHeader,
    authorization_present: bool,
    defaults: &CacheMetaDefaults,
) -> RespCacheable {
    let now = SystemTime::now();
    let expire_time = calculate_fresh_until(
        now,
        cache_control,
        &resp_header,
        authorization_present,
        defaults,
    );
    match expire_time {
        // no expiry could be derived: the origin does not want this cached
        None => Uncacheable(NoCacheReason::OriginNotCache),
        Some(fresh_until) => {
            let (stale_while_revalidate_duration, stale_if_error_duration) =
                calculate_serve_stale_durations(cache_control, defaults);
            // drop any headers that Cache-Control marks as private before storing
            if let Some(cc) = cache_control {
                cc.strip_private_headers(&mut resp_header);
            }
            Cacheable(CacheMeta::new(
                fresh_until,
                now,
                stale_while_revalidate_duration,
                stale_if_error_duration,
                resp_header,
            ))
        }
    }
}
/// Calculate the [SystemTime] at which the asset expires
///
/// Return None when not cacheable.
pub fn calculate_fresh_until(
    now: SystemTime,
    cache_control: Option<&CacheControl>,
    resp_header: &RespHeader,
    authorization_present: bool,
    defaults: &CacheMetaDefaults,
) -> Option<SystemTime> {
    // Convert a freshness TTL into an absolute expiry timestamp.
    fn freshness_ttl_to_time(now: SystemTime, fresh: Duration) -> Option<SystemTime> {
        if fresh.is_zero() {
            // ensure that the response is treated as stale
            now.checked_sub(Duration::from_secs(1))
        } else {
            now.checked_add(fresh)
        }
    }
    // A request with Authorization is normally not cacheable, unless Cache-Control allows it
    if authorization_present {
        match cache_control {
            Some(cc) if cc.allow_caching_authorized_req() => {}
            _ => return None,
        }
    }
    // an explicit "don't cache" directive wins over everything else
    if let Some(cc) = cache_control {
        if cc.is_cacheable() == Cacheable::No {
            return None;
        }
    }
    // For TTL check cache-control first, then expires header, then defaults
    if let Some(expiry) = cache_control
        .and_then(|cc| cc.fresh_duration())
        .and_then(|ttl| freshness_ttl_to_time(now, ttl))
    {
        return Some(expiry);
    }
    if let Some(expiry) = calculate_expires_header_time(resp_header) {
        return Some(expiry);
    }
    defaults
        .fresh_sec(resp_header.status)
        .and_then(|ttl| freshness_ttl_to_time(now, ttl))
}
/// Calculate the expire time from the `Expires` header only
pub fn calculate_expires_header_time(resp_header: &RespHeader) -> Option<SystemTime> {
    // according to RFC 7234:
    // https://datatracker.ietf.org/doc/html/rfc7234#section-4.2.1
    // - treat multiple expires headers as invalid
    // https://datatracker.ietf.org/doc/html/rfc7234#section-5.3
    // - "MUST interpret invalid date formats... as representing a time in the past"
    let mut expires_iter = resp_header.headers.get_all("expires").iter();
    // require exactly one Expires header
    let expires_value = expires_iter.next()?;
    if expires_iter.next().is_some() {
        return None;
    }
    let parsed = expires_value
        .to_str()
        .ok()
        .and_then(|expires| {
            expires
                .parse::<HttpDate>()
                .map_err(|e| debug!("Invalid HttpDate in Expires: {}, error: {}", expires, e))
                .ok()
        })
        .map(SystemTime::from);
    // an unparseable date counts as "a time in the past"
    Some(parsed.unwrap_or(SystemTime::UNIX_EPOCH))
}
/// Calculates stale-while-revalidate and stale-if-error seconds from Cache-Control or the [CacheMetaDefaults].
pub fn calculate_serve_stale_durations(
    cache_control: Option<&impl InterpretCacheControl>,
    defaults: &CacheMetaDefaults,
) -> (u32, u32) {
    // Clamp a duration into the u32 delta-seconds space used by Cache-Control.
    fn clamp_delta_seconds(d: Duration) -> u32 {
        d.as_secs()
            .try_into()
            .unwrap_or(DELTA_SECONDS_OVERFLOW_VALUE)
    }
    let stale_while_revalidate = cache_control
        .and_then(|cc| cc.serve_stale_while_revalidate_duration())
        .unwrap_or_else(|| Duration::from_secs(defaults.serve_stale_while_revalidate_sec() as u64));
    let stale_if_error = cache_control
        .and_then(|cc| cc.serve_stale_if_error_duration())
        .unwrap_or_else(|| Duration::from_secs(defaults.serve_stale_if_error_sec() as u64));
    (
        clamp_delta_seconds(stale_while_revalidate),
        clamp_delta_seconds(stale_if_error),
    )
}
/// Filters to run when sending requests to upstream
pub mod upstream {
    use super::*;
    /// Adjust the request header for cacheable requests
    ///
    /// This filter does the following in order to fetch the entire response to cache
    /// - Convert HEAD to GET
    /// - `If-*` headers are removed
    /// - `Range` header is removed
    ///
    /// When `meta` is set, this function will inject `If-modified-since` according to the `Last-Modified` header
    /// and inject `If-none-match` according to `Etag` header
    pub fn request_filter(req: &mut RequestHeader, meta: Option<&CacheMeta>) {
        // change HEAD to GET, HEAD itself is not semantically cacheable
        if req.method == Method::HEAD {
            req.set_method(Method::GET);
        }
        // Strip headers that could make the origin answer with a conditional
        // (304) or partial (206) response: we'd like to cache the full 200.
        // Preconditions: https://datatracker.ietf.org/doc/html/rfc7232#section-3
        // Range removal might change in the future.
        for h in [
            &header::IF_MATCH,
            &header::IF_NONE_MATCH,
            &header::IF_MODIFIED_SINCE,
            &header::IF_UNMODIFIED_SINCE,
            &header::IF_RANGE,
            &header::RANGE,
        ] {
            req.remove_header(h);
        }
        // we have a presumably staled response already, add precondition headers for revalidation
        if let Some(m) = meta {
            // rfc7232: "SHOULD send both validators in cache validation" but
            // there have been weird cases that an origin has matching etag but not Last-Modified
            if let Some(since) = m.headers().get(&header::LAST_MODIFIED) {
                req.insert_header(header::IF_MODIFIED_SINCE, since).unwrap();
            }
            if let Some(etag) = m.headers().get(&header::ETAG) {
                req.insert_header(header::IF_NONE_MATCH, etag).unwrap();
            }
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::RespCacheable::Cacheable;
use http::header::{HeaderName, CACHE_CONTROL, EXPIRES, SET_COOKIE};
use http::StatusCode;
use httpdate::fmt_http_date;
fn init_log() {
let _ = env_logger::builder().is_test(true).try_init();
}
const DEFAULTS: CacheMetaDefaults = CacheMetaDefaults::new(
|status| {
match status {
StatusCode::OK => Some(10),
StatusCode::NOT_FOUND => Some(5),
StatusCode::PARTIAL_CONTENT => None,
_ => Some(1),
}
.map(Duration::from_secs)
},
0,
DELTA_SECONDS_OVERFLOW_VALUE, /* "infinite" stale-if-error */
);
// Cache nothing, by default
const BYPASS_CACHE_DEFAULTS: CacheMetaDefaults = CacheMetaDefaults::new(|_| None, 0, 0);
fn build_response(status: u16, headers: &[(HeaderName, &str)]) -> ResponseHeader {
let mut header = ResponseHeader::build(status, Some(headers.len())).unwrap();
for (k, v) in headers {
header.append_header(k.to_string(), *v).unwrap();
}
header
}
fn resp_cacheable_wrapper(
resp: ResponseHeader,
defaults: &CacheMetaDefaults,
authorization_present: bool,
) -> Option<CacheMeta> {
if let Cacheable(meta) = resp_cacheable(
CacheControl::from_resp_headers(&resp).as_ref(),
resp,
authorization_present,
defaults,
) {
Some(meta)
} else {
None
}
}
#[test]
fn test_resp_cacheable() {
let meta = resp_cacheable_wrapper(
build_response(200, &[(CACHE_CONTROL, "max-age=12345")]),
&DEFAULTS,
false,
);
let meta = meta.unwrap();
assert!(meta.is_fresh(SystemTime::now()));
assert!(meta.is_fresh(
SystemTime::now()
.checked_add(Duration::from_secs(12))
.unwrap()
),);
assert!(!meta.is_fresh(
SystemTime::now()
.checked_add(Duration::from_secs(12346))
.unwrap()
));
}
#[test]
fn test_resp_uncacheable_directives() {
let meta = resp_cacheable_wrapper(
build_response(200, &[(CACHE_CONTROL, "private, max-age=12345")]),
&DEFAULTS,
false,
);
assert!(meta.is_none());
let meta = resp_cacheable_wrapper(
build_response(200, &[(CACHE_CONTROL, "no-store, max-age=12345")]),
&DEFAULTS,
false,
);
assert!(meta.is_none());
}
#[test]
fn test_resp_cache_authorization() {
let meta = resp_cacheable_wrapper(build_response(200, &[]), &DEFAULTS, true);
assert!(meta.is_none());
let meta = resp_cacheable_wrapper(
build_response(200, &[(CACHE_CONTROL, "max-age=10")]),
&DEFAULTS,
true,
);
assert!(meta.is_none());
let meta = resp_cacheable_wrapper(
build_response(200, &[(CACHE_CONTROL, "s-maxage=10")]),
&DEFAULTS,
true,
);
assert!(meta.unwrap().is_fresh(SystemTime::now()));
let meta = resp_cacheable_wrapper(
build_response(200, &[(CACHE_CONTROL, "public, max-age=10")]),
&DEFAULTS,
true,
);
assert!(meta.unwrap().is_fresh(SystemTime::now()));
let meta = resp_cacheable_wrapper(
build_response(200, &[(CACHE_CONTROL, "must-revalidate")]),
&DEFAULTS,
true,
);
assert!(meta.unwrap().is_fresh(SystemTime::now()));
}
#[test]
fn test_resp_zero_max_age() {
let meta = resp_cacheable_wrapper(
build_response(200, &[(CACHE_CONTROL, "max-age=0, public")]),
&DEFAULTS,
false,
);
// cacheable, but needs revalidation
assert!(!meta.unwrap().is_fresh(SystemTime::now()));
}
#[test]
fn test_resp_expires() {
let five_sec_time = SystemTime::now()
.checked_add(Duration::from_secs(5))
.unwrap();
// future expires is cacheable
let meta = resp_cacheable_wrapper(
build_response(200, &[(EXPIRES, &fmt_http_date(five_sec_time))]),
&DEFAULTS,
false,
);
let meta = meta.unwrap();
assert!(meta.is_fresh(SystemTime::now()));
assert!(!meta.is_fresh(
SystemTime::now()
.checked_add(Duration::from_secs(6))
.unwrap()
));
// even on default uncacheable statuses
let meta = resp_cacheable_wrapper(
build_response(206, &[(EXPIRES, &fmt_http_date(five_sec_time))]),
&DEFAULTS,
false,
);
assert!(meta.is_some());
}
#[test]
fn test_resp_past_expires() {
// cacheable, but expired
let meta = resp_cacheable_wrapper(
build_response(200, &[(EXPIRES, "Fri, 15 May 2015 15:34:21 GMT")]),
&BYPASS_CACHE_DEFAULTS,
false,
);
assert!(!meta.unwrap().is_fresh(SystemTime::now()));
}
#[test]
fn test_resp_nonstandard_expires() {
// init log to allow inspecting warnings
init_log();
// invalid cases, according to parser
// (but should be stale according to RFC)
let meta = resp_cacheable_wrapper(
build_response(200, &[(EXPIRES, "Mon, 13 Feb 0002 12:00:00 GMT")]),
&BYPASS_CACHE_DEFAULTS,
false,
);
assert!(!meta.unwrap().is_fresh(SystemTime::now()));
let meta = resp_cacheable_wrapper(
build_response(200, &[(EXPIRES, "Fri, 01 Dec 99999 16:00:00 GMT")]),
&BYPASS_CACHE_DEFAULTS,
false,
);
assert!(!meta.unwrap().is_fresh(SystemTime::now()));
let meta = resp_cacheable_wrapper(
build_response(200, &[(EXPIRES, "0")]),
&BYPASS_CACHE_DEFAULTS,
false,
);
assert!(!meta.unwrap().is_fresh(SystemTime::now()));
}
#[test]
fn test_resp_multiple_expires() {
let five_sec_time = SystemTime::now()
.checked_add(Duration::from_secs(5))
.unwrap();
let ten_sec_time = SystemTime::now()
.checked_add(Duration::from_secs(10))
.unwrap();
// multiple expires = uncacheable
let meta = resp_cacheable_wrapper(
build_response(
200,
&[
(EXPIRES, &fmt_http_date(five_sec_time)),
(EXPIRES, &fmt_http_date(ten_sec_time)),
],
),
&BYPASS_CACHE_DEFAULTS,
false,
);
assert!(meta.is_none());
// unless the default is cacheable
let meta = resp_cacheable_wrapper(
build_response(
200,
&[
(EXPIRES, &fmt_http_date(five_sec_time)),
(EXPIRES, &fmt_http_date(ten_sec_time)),
],
),
&DEFAULTS,
false,
);
assert!(meta.is_some());
}
#[test]
fn test_resp_cache_control_with_expires() {
    let future_expires = fmt_http_date(
        SystemTime::now()
            .checked_add(Duration::from_secs(5))
            .unwrap(),
    );
    // When both are present, Cache-Control wins over Expires:
    // max-age=0 makes the response stale despite the future Expires date.
    let meta = resp_cacheable_wrapper(
        build_response(
            200,
            &[
                (EXPIRES, future_expires.as_str()),
                (CACHE_CONTROL, "max-age=0"),
            ],
        ),
        &DEFAULTS,
        false,
    );
    assert!(!meta.unwrap().is_fresh(SystemTime::now()));
}
#[test]
fn test_resp_stale_while_revalidate() {
    // helper: a point in time `secs` seconds from now
    let at = |secs: u64| {
        SystemTime::now()
            .checked_add(Duration::from_secs(secs))
            .unwrap()
    };
    // Without a stale-while-revalidate directive the defaults apply:
    // stale responses may not be served during revalidation.
    let meta = resp_cacheable_wrapper(
        build_response(200, &[(CACHE_CONTROL, "max-age=10")]),
        &DEFAULTS,
        false,
    )
    .unwrap();
    assert!(!meta.is_fresh(at(11)));
    assert!(!meta.serve_stale_while_revalidate(SystemTime::now()));
    assert!(!meta.serve_stale_while_revalidate(at(11)));
    // stale-while-revalidate=5 permits serving stale for 5s past freshness.
    let meta = resp_cacheable_wrapper(
        build_response(
            200,
            &[(CACHE_CONTROL, "max-age=10, stale-while-revalidate=5")],
        ),
        &DEFAULTS,
        false,
    )
    .unwrap();
    assert!(!meta.is_fresh(at(11)));
    assert!(meta.serve_stale_while_revalidate(at(11)));
    assert!(!meta.serve_stale_while_revalidate(at(16)));
}
#[test]
fn test_resp_stale_if_error() {
    // helper: a point in time `secs` seconds from now
    let at = |secs: u64| {
        SystemTime::now()
            .checked_add(Duration::from_secs(secs))
            .unwrap()
    };
    // With no stale-if-error directive the default applies:
    // stale may be served on error arbitrarily far into the future.
    let meta = resp_cacheable_wrapper(
        build_response(200, &[(CACHE_CONTROL, "max-age=10")]),
        &DEFAULTS,
        false,
    )
    .unwrap();
    let fifty_years = at(86400 * 365 * 50);
    assert!(!meta.is_fresh(fifty_years));
    assert!(meta.serve_stale_if_error(fifty_years));
    // An explicit stale-if-error=60 bounds the stale-serving window.
    let meta = resp_cacheable_wrapper(
        build_response(
            200,
            &[(
                CACHE_CONTROL,
                "max-age=10, stale-while-revalidate=5, stale-if-error=60",
            )],
        ),
        &DEFAULTS,
        false,
    )
    .unwrap();
    assert!(!meta.is_fresh(at(11)));
    assert!(meta.serve_stale_if_error(SystemTime::now()));
    assert!(meta.serve_stale_if_error(at(11)));
    assert!(!meta.serve_stale_if_error(at(70)));
    // stale-if-error=0 forbids serving stale entirely.
    let meta = resp_cacheable_wrapper(
        build_response(200, &[(CACHE_CONTROL, "max-age=10, stale-if-error=0")]),
        &DEFAULTS,
        false,
    )
    .unwrap();
    assert!(!meta.is_fresh(at(11)));
    assert!(!meta.serve_stale_if_error(at(11)));
}
#[test]
fn test_resp_status_cache_defaults() {
    // helper: a point in time `secs` seconds from now
    let at = |secs: u64| {
        SystemTime::now()
            .checked_add(Duration::from_secs(secs))
            .unwrap()
    };
    // 200: cacheable by default, fresh for ~10s.
    let meta = resp_cacheable_wrapper(build_response(200, &[]), &DEFAULTS, false);
    assert!(meta.is_some());
    let meta = meta.unwrap();
    assert!(meta.is_fresh(at(9)));
    assert!(!meta.is_fresh(at(11)));
    // 404: cacheable by default, but with a shorter (~5s) ttl.
    let meta = resp_cacheable_wrapper(build_response(404, &[]), &DEFAULTS, false);
    assert!(meta.is_some());
    let meta = meta.unwrap();
    assert!(meta.is_fresh(at(4)));
    assert!(!meta.is_fresh(at(6)));
    // 206: no default cache ttl, so uncacheable without explicit headers.
    let meta = resp_cacheable_wrapper(build_response(206, &[]), &DEFAULTS, false);
    assert!(meta.is_none());
    // ...but an explicit Cache-Control makes the same status cacheable.
    let meta = resp_cacheable_wrapper(
        build_response(206, &[(CACHE_CONTROL, "public, max-age=10")]),
        &DEFAULTS,
        false,
    );
    assert!(meta.is_some());
    let meta = meta.unwrap();
    assert!(meta.is_fresh(at(9)));
    assert!(!meta.is_fresh(at(11)));
    // 416: caught by the catch-all status rule, with a short ttl.
    let meta = resp_cacheable_wrapper(build_response(416, &[]), &DEFAULTS, false);
    assert!(meta.is_some());
    let meta = meta.unwrap();
    assert!(meta.is_fresh(SystemTime::now()));
    assert!(!meta.is_fresh(at(2)));
}
#[test]
fn test_resp_cache_no_cache_fields() {
    // Header names listed in private="..." are stripped before caching,
    // while headers not listed are kept.
    let meta = resp_cacheable_wrapper(
        build_response(
            200,
            &[
                (SET_COOKIE, "my-cookie"),
                (CACHE_CONTROL, "private=\"something\", max-age=10"),
                (HeaderName::from_bytes(b"Something").unwrap(), "foo"),
            ],
        ),
        &DEFAULTS,
        false,
    )
    .unwrap();
    assert!(meta.headers().contains_key(SET_COOKIE));
    assert!(!meta.headers().contains_key("Something"));
    // no-cache="..." field names are honored too, case-insensitively.
    let meta = resp_cacheable_wrapper(
        build_response(
            200,
            &[
                (SET_COOKIE, "my-cookie"),
                (
                    CACHE_CONTROL,
                    "max-age=0, no-cache=\"meta1, SeT-Cookie ,meta2\"",
                ),
                (HeaderName::from_bytes(b"meta1").unwrap(), "foo"),
            ],
        ),
        &DEFAULTS,
        false,
    )
    .unwrap();
    assert!(!meta.headers().contains_key(SET_COOKIE));
    assert!(!meta.headers().contains_key("meta1"));
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/cache_control.rs | pingora-cache/src/cache_control.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Functions and utilities to help parse Cache-Control headers
use super::*;
use http::header::HeaderName;
use http::HeaderValue;
use indexmap::IndexMap;
use once_cell::sync::Lazy;
use pingora_error::{Error, ErrorType};
use regex::bytes::Regex;
use std::num::IntErrorKind;
use std::slice;
use std::str;
/// The max delta-second per [RFC 9111](https://datatracker.ietf.org/doc/html/rfc9111#section-1.2.2)
// "If a cache receives a delta-seconds value
// greater than the greatest integer it can represent, or if any of its
// subsequent calculations overflows, the cache MUST consider the value
// to be 2147483648 (2^31) or the greatest positive integer it can
// conveniently represent.
//
// | *Note:* The value 2147483648 is here for historical reasons,
// | represents infinity (over 68 years), and does not need to be
// | stored in binary form; an implementation could produce it as a
// | string if any overflow occurs, even if the calculations are
// | performed with an arithmetic type incapable of directly
// | representing that number. What matters here is that an
// | overflow be detected and not treated as a negative value in
// | later calculations."
//
// We choose to use i32::MAX for our overflow value to stick to the letter of the RFC.
pub const DELTA_SECONDS_OVERFLOW_VALUE: u32 = i32::MAX as u32;
/// [`DELTA_SECONDS_OVERFLOW_VALUE`] expressed as a [`Duration`].
pub const DELTA_SECONDS_OVERFLOW_DURATION: Duration =
    Duration::from_secs(DELTA_SECONDS_OVERFLOW_VALUE as u64);
/// Cache control directive key type
pub type DirectiveKey = String;
/// Cache control directive value type
#[derive(Debug)]
pub struct DirectiveValue(pub Vec<u8>);
impl AsRef<[u8]> for DirectiveValue {
    /// Borrow the raw (possibly still quoted) directive bytes.
    fn as_ref(&self) -> &[u8] {
        self.0.as_slice()
    }
}
impl DirectiveValue {
    /// The directive bytes with one enclosing pair of quotes (`"`) removed, if present.
    pub fn parse_as_bytes(&self) -> &[u8] {
        let raw = &self.0[..];
        // only strip when BOTH a leading and a trailing quote exist
        match raw
            .strip_prefix(b"\"")
            .and_then(|rest| rest.strip_suffix(b"\""))
        {
            Some(unquoted) => unquoted,
            None => raw,
        }
    }

    /// The unquoted directive bytes interpreted as UTF-8 (`str`).
    pub fn parse_as_str(&self) -> Result<&str> {
        match str::from_utf8(self.parse_as_bytes()) {
            Ok(s) => Ok(s),
            Err(e) => {
                Error::e_because(ErrorType::InternalError, "could not parse value as utf8", e)
            }
        }
    }

    /// Parse the [DirectiveValue] as delta seconds
    ///
    /// `"`s are ignored. The value is capped to [DELTA_SECONDS_OVERFLOW_VALUE].
    pub fn parse_as_delta_seconds(&self) -> Result<u32> {
        self.parse_as_str()?.parse::<u32>().or_else(|e| {
            // delta-seconds expect to handle positive overflow gracefully:
            // cap at the RFC overflow value instead of erroring
            if e.kind() == &IntErrorKind::PosOverflow {
                Ok(DELTA_SECONDS_OVERFLOW_VALUE)
            } else {
                Error::e_because(ErrorType::InternalError, "could not parse value as u32", e)
            }
        })
    }
}
/// An ordered map to store cache control key value pairs.
pub type DirectiveMap = IndexMap<DirectiveKey, Option<DirectiveValue>>;
/// Parsed Cache-Control directives
#[derive(Debug)]
pub struct CacheControl {
/// The parsed directives
pub directives: DirectiveMap,
}
/// Cacheability calculated from cache control.
#[derive(Debug, PartialEq, Eq)]
pub enum Cacheable {
/// Cacheable
Yes,
/// Not cacheable
No,
/// No directive found for explicit cacheability
Default,
}
/// An iterator over the comma-separated elements of a directive's list value
pub struct ListValueIter<'a>(slice::Split<'a, u8, fn(&u8) -> bool>);

impl<'a> ListValueIter<'a> {
    pub fn from(value: &'a DirectiveValue) -> Self {
        // split the unquoted value on commas
        fn is_comma(byte: &u8) -> bool {
            *byte == b','
        }
        ListValueIter(value.parse_as_bytes().split(is_comma as fn(&u8) -> bool))
    }
}
// Trim optional whitespace (OWS) from both ends of a byte slice.
// https://datatracker.ietf.org/doc/html/rfc9110#name-whitespace
// OWS = *(SP / HTAB); SP = 0x20, HTAB = 0x09
fn trim_ows(bytes: &[u8]) -> &[u8] {
    fn is_ows(b: &u8) -> bool {
        matches!(*b, b'\x20' | b'\x09')
    }
    // index of the first non-OWS byte; if every byte is OWS, result is empty
    let head = match bytes.iter().position(|b| !is_ows(b)) {
        Some(pos) => pos,
        None => return &bytes[..0],
    };
    // one past the last non-OWS byte; must exist because `head` was found
    let tail = bytes.iter().rposition(|b| !is_ows(b)).unwrap() + 1;
    &bytes[head..tail]
}
impl<'a> Iterator for ListValueIter<'a> {
type Item = &'a [u8];
fn next(&mut self) -> Option<Self::Item> {
Some(trim_ows(self.0.next()?))
}
}
// Originally from https://github.com/hapijs/wreck which has the following comments:
// Cache-Control = 1#cache-directive
// cache-directive = token [ "=" ( token / quoted-string ) ]
// token = [^\x00-\x20\(\)<>@\,;\:\\"\/\[\]\?\=\{\}\x7F]+
// quoted-string = "(?:[^"\\]|\\.)*"
//
// note the `token` implementation excludes disallowed ASCII ranges
// and disallowed delimiters: https://datatracker.ietf.org/doc/html/rfc9110#section-5.6.2
// though it does not forbid `obs-text`: %x80-FF
static RE_CACHE_DIRECTIVE: Lazy<Regex> =
// to break our version down further:
// `(?-u)`: unicode support disabled, which puts the regex into "ASCII compatible mode" for specifying literal bytes like \x7F: https://docs.rs/regex/1.10.4/regex/bytes/index.html#syntax
// `(?:^|(?:\s*[,;]\s*)`: allow either , or ; as a delimiter
// `([^\x00-\x20\(\)<>@,;:\\"/\[\]\?=\{\}\x7F]+)`: token (directive name capture group)
// `(?:=((?:[^\x00-\x20\(\)<>@,;:\\"/\[\]\?=\{\}\x7F]+|(?:"(?:[^"\\]|\\.)*"))))`: token OR quoted-string (directive value capture-group)
Lazy::new(|| {
Regex::new(r#"(?-u)(?:^|(?:\s*[,;]\s*))([^\x00-\x20\(\)<>@,;:\\"/\[\]\?=\{\}\x7F]+)(?:=((?:[^\x00-\x20\(\)<>@,;:\\"/\[\]\?=\{\}\x7F]+|(?:"(?:[^"\\]|\\.)*"))))?"#).unwrap()
});
impl CacheControl {
    // Our parsing strategy is more permissive than the RFC in a few ways:
    // - Allows semicolons as delimiters (in addition to commas). See the regex above.
    // - Allows octets outside of visible ASCII in `token`s, and in later RFCs, octets outside of
    //   the `quoted-string` range: https://datatracker.ietf.org/doc/html/rfc9110#section-5.6.2
    //   See the regex above.
    // - Doesn't require no-value for "boolean directives," such as must-revalidate
    // - Allows quoted-string format for numeric values.
    fn from_headers(headers: http::header::GetAll<HeaderValue>) -> Option<Self> {
        let mut directives = IndexMap::new();
        // should iterate in header line insertion order
        for line in headers {
            for captures in RE_CACHE_DIRECTIVE.captures_iter(line.as_bytes()) {
                // directive key
                // header values don't have to be utf-8, but we store keys as strings for case-insensitive hashing
                let key = captures.get(1).and_then(|cap| {
                    str::from_utf8(cap.as_bytes())
                        .ok()
                        .map(|token| token.to_lowercase())
                });
                if key.is_none() {
                    // non-UTF-8 directive names are skipped entirely (value included)
                    continue;
                }
                // directive value
                // match token or quoted-string
                let value = captures
                    .get(2)
                    .map(|cap| DirectiveValue(cap.as_bytes().to_vec()));
                // NOTE: a repeated directive key overwrites the earlier occurrence
                directives.insert(key.unwrap(), value);
            }
        }
        Some(CacheControl { directives })
    }

    /// Parse from the given header name in `headers`
    pub fn from_headers_named(header_name: &str, headers: &http::HeaderMap) -> Option<Self> {
        if !headers.contains_key(header_name) {
            return None;
        }
        Self::from_headers(headers.get_all(header_name))
    }

    /// Parse from the given header name in the [ReqHeader]
    pub fn from_req_headers_named(header_name: &str, req_header: &ReqHeader) -> Option<Self> {
        Self::from_headers_named(header_name, &req_header.headers)
    }

    /// Parse `Cache-Control` header name from the [ReqHeader]
    pub fn from_req_headers(req_header: &ReqHeader) -> Option<Self> {
        Self::from_req_headers_named("cache-control", req_header)
    }

    /// Parse from the given header name in the [RespHeader]
    pub fn from_resp_headers_named(header_name: &str, resp_header: &RespHeader) -> Option<Self> {
        Self::from_headers_named(header_name, &resp_header.headers)
    }

    /// Parse `Cache-Control` header name from the [RespHeader]
    pub fn from_resp_headers(resp_header: &RespHeader) -> Option<Self> {
        Self::from_resp_headers_named("cache-control", resp_header)
    }

    /// Whether the given directive is in the cache control.
    pub fn has_key(&self, key: &str) -> bool {
        self.directives.contains_key(key)
    }

    /// Whether the `public` directive is in the cache control.
    pub fn public(&self) -> bool {
        self.has_key("public")
    }

    /// Whether the given directive exists, and it has no value.
    fn has_key_without_value(&self, key: &str) -> bool {
        matches!(self.directives.get(key), Some(None))
    }

    /// Whether the standalone `private` exists in the cache control
    // RFC 7234: using the #field-name versions of `private`
    // means a shared cache "MUST NOT store the specified field-name(s),
    // whereas it MAY store the remainder of the response."
    // It must be a boolean form (no value) to apply to the whole response.
    // https://datatracker.ietf.org/doc/html/rfc7234#section-5.2.2.6
    pub fn private(&self) -> bool {
        self.has_key_without_value("private")
    }

    /// Iterate over the field names of the given directive's list value,
    /// or `None` if the directive is absent or has no value.
    fn get_field_names(&self, key: &str) -> Option<ListValueIter<'_>> {
        let value = self.directives.get(key)?.as_ref()?;
        Some(ListValueIter::from(value))
    }

    /// Get the values of `private=`
    pub fn private_field_names(&self) -> Option<ListValueIter<'_>> {
        self.get_field_names("private")
    }

    /// Whether the standalone `no-cache` exists in the cache control
    pub fn no_cache(&self) -> bool {
        self.has_key_without_value("no-cache")
    }

    /// Get the values of `no-cache=`
    pub fn no_cache_field_names(&self) -> Option<ListValueIter<'_>> {
        self.get_field_names("no-cache")
    }

    /// Whether `no-store` exists.
    pub fn no_store(&self) -> bool {
        self.has_key("no-store")
    }

    /// Parse the given directive's value as delta seconds.
    ///
    /// Returns `Ok(None)` when the directive is absent or has no value,
    /// `Err` when the value is present but unparseable.
    fn parse_delta_seconds(&self, key: &str) -> Result<Option<u32>> {
        if let Some(Some(dir_value)) = self.directives.get(key) {
            Ok(Some(dir_value.parse_as_delta_seconds()?))
        } else {
            Ok(None)
        }
    }

    /// Return the `max-age` seconds
    pub fn max_age(&self) -> Result<Option<u32>> {
        self.parse_delta_seconds("max-age")
    }

    /// Return the `s-maxage` seconds
    pub fn s_maxage(&self) -> Result<Option<u32>> {
        self.parse_delta_seconds("s-maxage")
    }

    /// Return the `stale-while-revalidate` seconds
    pub fn stale_while_revalidate(&self) -> Result<Option<u32>> {
        self.parse_delta_seconds("stale-while-revalidate")
    }

    /// Return the `stale-if-error` seconds
    pub fn stale_if_error(&self) -> Result<Option<u32>> {
        self.parse_delta_seconds("stale-if-error")
    }

    /// Whether `must-revalidate` exists.
    pub fn must_revalidate(&self) -> bool {
        self.has_key("must-revalidate")
    }

    /// Whether `proxy-revalidate` exists.
    pub fn proxy_revalidate(&self) -> bool {
        self.has_key("proxy-revalidate")
    }

    /// Whether `only-if-cached` exists.
    pub fn only_if_cached(&self) -> bool {
        self.has_key("only-if-cached")
    }
}
// RFC 7234: these directives forbid a shared cache from serving stale.
// https://datatracker.ietf.org/doc/html/rfc7234#section-4.2.4
fn revalidate_forbids_stale(cc: &CacheControl) -> bool {
    cc.must_revalidate() || cc.proxy_revalidate() || cc.has_key("s-maxage")
}

impl InterpretCacheControl for CacheControl {
    fn is_cacheable(&self) -> Cacheable {
        // explicit "do not store" directives win over everything else
        if self.no_store() || self.private() {
            Cacheable::No
        } else if self.public() || self.has_key("s-maxage") || self.has_key("max-age") {
            // explicit freshness lifetime or `public` means cacheable
            Cacheable::Yes
        } else {
            Cacheable::Default
        }
    }

    fn allow_caching_authorized_req(&self) -> bool {
        // RFC 7234 https://datatracker.ietf.org/doc/html/rfc7234#section-3
        // "MUST NOT" store requests with Authorization header
        // unless response contains one of these directives
        self.must_revalidate() || self.public() || self.has_key("s-maxage")
    }

    fn fresh_duration(&self) -> Option<Duration> {
        // a bare `no-cache` means the response is always treated as stale
        if self.no_cache() {
            return Some(Duration::ZERO);
        }
        // prefer s-maxage (shared cache) over max-age;
        // an s-maxage that fails to parse means no valid ttl
        let secs = match self.s_maxage() {
            Err(_) => return None,
            Ok(Some(secs)) => Some(secs),
            Ok(None) => self.max_age().unwrap_or(None),
        }?;
        Some(Duration::from_secs(secs.into()))
    }

    fn serve_stale_while_revalidate_duration(&self) -> Option<Duration> {
        if revalidate_forbids_stale(self) {
            return Some(Duration::ZERO);
        }
        self.stale_while_revalidate()
            .unwrap_or(None)
            .map(|secs| Duration::from_secs(secs.into()))
    }

    fn serve_stale_if_error_duration(&self) -> Option<Duration> {
        if revalidate_forbids_stale(self) {
            return Some(Duration::ZERO);
        }
        self.stale_if_error()
            .unwrap_or(None)
            .map(|secs| Duration::from_secs(secs.into()))
    }

    // Strip header names listed in `private` or `no-cache` directives from a response.
    //
    // We interpret `no-cache` the same way as `private`,
    // though technically it has a less restrictive requirement
    // ("MUST NOT be sent in the response to a subsequent request
    // without successful revalidation with the origin server").
    // https://datatracker.ietf.org/doc/html/rfc7234#section-5.2.2.2
    fn strip_private_headers(&self, resp_header: &mut ResponseHeader) {
        let lists = [self.private_field_names(), self.no_cache_field_names()];
        for field_names in lists.into_iter().flatten() {
            for name in field_names {
                // names that are not valid header names are silently ignored
                if let Ok(header) = HeaderName::from_bytes(name) {
                    resp_header.remove_header(&header);
                }
            }
        }
    }
}
/// `InterpretCacheControl` provides a meaningful interface to the parsed `CacheControl`.
/// These functions actually interpret the parsed cache-control directives to return
/// the freshness or other cache meta values that cache-control is signaling.
///
/// By default `CacheControl` implements an RFC-7234 compliant reading that assumes it is being
/// used with a shared (proxy) cache.
pub trait InterpretCacheControl {
    /// Does cache-control specify this response is cacheable?
    ///
    /// Note that an RFC-7234 compliant cacheability check must also
    /// check if the request contained the Authorization header and
    /// `allow_caching_authorized_req`.
    fn is_cacheable(&self) -> Cacheable;

    /// Does this cache-control allow caching a response to
    /// a request with the Authorization header?
    fn allow_caching_authorized_req(&self) -> bool;

    /// Returns freshness ttl specified in cache-control.
    ///
    /// - `Some(_)` indicates cache-control specifies a valid ttl. Some(Duration::ZERO) = always stale.
    /// - `None` means cache-control did not specify a valid ttl.
    fn fresh_duration(&self) -> Option<Duration>;

    /// Returns stale-while-revalidate ttl.
    ///
    /// The result should consider all the relevant cache directives, not just the SWR header itself.
    ///
    /// Some(0) means serving such stale is disallowed by a directive like `must-revalidate`
    /// or `stale-while-revalidate=0`.
    ///
    /// `None` indicates no SWR ttl was specified.
    fn serve_stale_while_revalidate_duration(&self) -> Option<Duration>;

    /// Returns stale-if-error ttl.
    ///
    /// The result should consider all the relevant cache directives, not just the SIE header itself.
    ///
    /// Some(0) means serving such stale is disallowed by a directive like `must-revalidate`
    /// or `stale-if-error=0`.
    ///
    /// `None` indicates no SIE ttl was specified.
    fn serve_stale_if_error_duration(&self) -> Option<Duration>;

    /// Strip header names listed in `private` or `no-cache` directives from a response,
    /// usually prior to storing that response in cache.
    fn strip_private_headers(&self, resp_header: &mut ResponseHeader);
}
#[cfg(test)]
mod tests {
use super::*;
use http::header::CACHE_CONTROL;
use http::{request, response};
fn build_response(cc_key: HeaderName, cc_value: &str) -> response::Parts {
let (parts, _) = response::Builder::new()
.header(cc_key, cc_value)
.body(())
.unwrap()
.into_parts();
parts
}
#[test]
fn test_simple_cache_control() {
let resp = build_response(CACHE_CONTROL, "public, max-age=10000");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert!(cc.public());
assert_eq!(cc.max_age().unwrap().unwrap(), 10000);
}
#[test]
fn test_private_cache_control() {
let resp = build_response(CACHE_CONTROL, "private");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert!(cc.private());
assert!(cc.max_age().unwrap().is_none());
}
#[test]
fn test_directives_across_header_lines() {
let (parts, _) = response::Builder::new()
.header(CACHE_CONTROL, "public,")
.header("cache-Control", "max-age=10000")
.body(())
.unwrap()
.into_parts();
let cc = CacheControl::from_resp_headers(&parts).unwrap();
assert!(cc.public());
assert_eq!(cc.max_age().unwrap().unwrap(), 10000);
}
#[test]
fn test_recognizes_semicolons_as_delimiters() {
let resp = build_response(CACHE_CONTROL, "public; max-age=0");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert!(cc.public());
assert_eq!(cc.max_age().unwrap().unwrap(), 0);
}
#[test]
fn test_unknown_directives() {
let resp = build_response(CACHE_CONTROL, "public,random1=random2, rand3=\"\"");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
let mut directive_iter = cc.directives.iter();
let first = directive_iter.next().unwrap();
assert_eq!(first.0, &"public");
assert!(first.1.is_none());
let second = directive_iter.next().unwrap();
assert_eq!(second.0, &"random1");
assert_eq!(second.1.as_ref().unwrap().0, "random2".as_bytes());
let third = directive_iter.next().unwrap();
assert_eq!(third.0, &"rand3");
assert_eq!(third.1.as_ref().unwrap().0, "\"\"".as_bytes());
assert!(directive_iter.next().is_none());
}
#[test]
fn test_case_insensitive_directive_keys() {
let resp = build_response(
CACHE_CONTROL,
"Public=\"something\", mAx-AGe=\"10000\", foo=cRaZyCaSe, bAr=\"inQuotes\"",
);
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert!(cc.public());
assert_eq!(cc.max_age().unwrap().unwrap(), 10000);
let mut directive_iter = cc.directives.iter();
let first = directive_iter.next().unwrap();
assert_eq!(first.0, &"public");
assert_eq!(first.1.as_ref().unwrap().0, "\"something\"".as_bytes());
let second = directive_iter.next().unwrap();
assert_eq!(second.0, &"max-age");
assert_eq!(second.1.as_ref().unwrap().0, "\"10000\"".as_bytes());
// values are still stored with casing
let third = directive_iter.next().unwrap();
assert_eq!(third.0, &"foo");
assert_eq!(third.1.as_ref().unwrap().0, "cRaZyCaSe".as_bytes());
let fourth = directive_iter.next().unwrap();
assert_eq!(fourth.0, &"bar");
assert_eq!(fourth.1.as_ref().unwrap().0, "\"inQuotes\"".as_bytes());
assert!(directive_iter.next().is_none());
}
#[test]
fn test_non_ascii() {
let resp = build_response(CACHE_CONTROL, "püblic=💖, max-age=\"💯\"");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
// Not considered valid registered directive keys / values
assert!(!cc.public());
assert_eq!(
cc.max_age().unwrap_err().context.unwrap().to_string(),
"could not parse value as u32"
);
let mut directive_iter = cc.directives.iter();
let first = directive_iter.next().unwrap();
assert_eq!(first.0, &"püblic");
assert_eq!(first.1.as_ref().unwrap().0, "💖".as_bytes());
let second = directive_iter.next().unwrap();
assert_eq!(second.0, &"max-age");
assert_eq!(second.1.as_ref().unwrap().0, "\"💯\"".as_bytes());
assert!(directive_iter.next().is_none());
}
#[test]
fn test_non_utf8_key() {
let mut resp = response::Builder::new().body(()).unwrap();
resp.headers_mut().insert(
CACHE_CONTROL,
HeaderValue::from_bytes(b"bar\xFF=\"baz\", a=b").unwrap(),
);
let (parts, _) = resp.into_parts();
let cc = CacheControl::from_resp_headers(&parts).unwrap();
// invalid bytes for key
let mut directive_iter = cc.directives.iter();
let first = directive_iter.next().unwrap();
assert_eq!(first.0, &"a");
assert_eq!(first.1.as_ref().unwrap().0, "b".as_bytes());
assert!(directive_iter.next().is_none());
}
#[test]
fn test_non_utf8_value() {
// RFC 7230: 0xFF is part of obs-text and is officially considered a valid octet in quoted-strings
let mut resp = response::Builder::new().body(()).unwrap();
resp.headers_mut().insert(
CACHE_CONTROL,
HeaderValue::from_bytes(b"max-age=ba\xFFr, bar=\"baz\xFF\", a=b").unwrap(),
);
let (parts, _) = resp.into_parts();
let cc = CacheControl::from_resp_headers(&parts).unwrap();
assert_eq!(
cc.max_age().unwrap_err().context.unwrap().to_string(),
"could not parse value as utf8"
);
let mut directive_iter = cc.directives.iter();
let first = directive_iter.next().unwrap();
assert_eq!(first.0, &"max-age");
assert_eq!(first.1.as_ref().unwrap().0, b"ba\xFFr");
let second = directive_iter.next().unwrap();
assert_eq!(second.0, &"bar");
assert_eq!(second.1.as_ref().unwrap().0, b"\"baz\xFF\"");
let third = directive_iter.next().unwrap();
assert_eq!(third.0, &"a");
assert_eq!(third.1.as_ref().unwrap().0, "b".as_bytes());
assert!(directive_iter.next().is_none());
}
#[test]
fn test_age_overflow() {
let resp = build_response(
CACHE_CONTROL,
"max-age=-99999999999999999999999999, s-maxage=99999999999999999999999999",
);
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert_eq!(
cc.s_maxage().unwrap().unwrap(),
DELTA_SECONDS_OVERFLOW_VALUE
);
// negative ages still result in errors even with overflow handling
assert_eq!(
cc.max_age().unwrap_err().context.unwrap().to_string(),
"could not parse value as u32"
);
}
#[test]
fn test_fresh_sec() {
let resp = build_response(CACHE_CONTROL, "");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert!(cc.fresh_duration().is_none());
let resp = build_response(CACHE_CONTROL, "max-age=12345");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert_eq!(cc.fresh_duration().unwrap(), Duration::from_secs(12345));
let resp = build_response(CACHE_CONTROL, "max-age=99999,s-maxage=123");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
// prefer s-maxage over max-age
assert_eq!(cc.fresh_duration().unwrap(), Duration::from_secs(123));
}
#[test]
fn test_cacheability() {
let resp = build_response(CACHE_CONTROL, "");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert_eq!(cc.is_cacheable(), Cacheable::Default);
// uncacheable
let resp = build_response(CACHE_CONTROL, "private, max-age=12345");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert_eq!(cc.is_cacheable(), Cacheable::No);
let resp = build_response(CACHE_CONTROL, "no-store, max-age=12345");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert_eq!(cc.is_cacheable(), Cacheable::No);
// cacheable
let resp = build_response(CACHE_CONTROL, "public");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert_eq!(cc.is_cacheable(), Cacheable::Yes);
let resp = build_response(CACHE_CONTROL, "max-age=0");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert_eq!(cc.is_cacheable(), Cacheable::Yes);
}
#[test]
fn test_no_cache() {
let resp = build_response(CACHE_CONTROL, "no-cache, max-age=12345");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert_eq!(cc.is_cacheable(), Cacheable::Yes);
assert_eq!(cc.fresh_duration().unwrap(), Duration::ZERO);
}
#[test]
fn test_no_cache_field_names() {
let resp = build_response(CACHE_CONTROL, "no-cache=\"set-cookie\", max-age=12345");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert!(!cc.private());
assert_eq!(cc.is_cacheable(), Cacheable::Yes);
assert_eq!(cc.fresh_duration().unwrap(), Duration::from_secs(12345));
let mut field_names = cc.no_cache_field_names().unwrap();
assert_eq!(
str::from_utf8(field_names.next().unwrap()).unwrap(),
"set-cookie"
);
assert!(field_names.next().is_none());
let mut resp = response::Builder::new().body(()).unwrap();
resp.headers_mut().insert(
CACHE_CONTROL,
HeaderValue::from_bytes(
b"private=\"\", no-cache=\"a\xFF, set-cookie, Baz\x09 , c,d ,, \"",
)
.unwrap(),
);
let (parts, _) = resp.into_parts();
let cc = CacheControl::from_resp_headers(&parts).unwrap();
let mut field_names = cc.private_field_names().unwrap();
assert_eq!(str::from_utf8(field_names.next().unwrap()).unwrap(), "");
assert!(field_names.next().is_none());
let mut field_names = cc.no_cache_field_names().unwrap();
assert!(str::from_utf8(field_names.next().unwrap()).is_err());
assert_eq!(
str::from_utf8(field_names.next().unwrap()).unwrap(),
"set-cookie"
);
assert_eq!(str::from_utf8(field_names.next().unwrap()).unwrap(), "Baz");
assert_eq!(str::from_utf8(field_names.next().unwrap()).unwrap(), "c");
assert_eq!(str::from_utf8(field_names.next().unwrap()).unwrap(), "d");
assert_eq!(str::from_utf8(field_names.next().unwrap()).unwrap(), "");
assert_eq!(str::from_utf8(field_names.next().unwrap()).unwrap(), "");
assert!(field_names.next().is_none());
}
#[test]
fn test_strip_private_headers() {
let mut resp = ResponseHeader::build(200, None).unwrap();
resp.append_header(
CACHE_CONTROL,
"no-cache=\"x-private-header\", max-age=12345",
)
.unwrap();
resp.append_header("X-Private-Header", "dropped").unwrap();
let cc = CacheControl::from_resp_headers(&resp).unwrap();
cc.strip_private_headers(&mut resp);
assert!(!resp.headers.contains_key("X-Private-Header"));
}
#[test]
fn test_stale_while_revalidate() {
let resp = build_response(CACHE_CONTROL, "max-age=12345, stale-while-revalidate=5");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert_eq!(cc.stale_while_revalidate().unwrap().unwrap(), 5);
assert_eq!(
cc.serve_stale_while_revalidate_duration().unwrap(),
Duration::from_secs(5)
);
assert!(cc.serve_stale_if_error_duration().is_none());
}
#[test]
fn test_stale_if_error() {
let resp = build_response(CACHE_CONTROL, "max-age=12345, stale-if-error=3600");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert_eq!(cc.stale_if_error().unwrap().unwrap(), 3600);
assert_eq!(
cc.serve_stale_if_error_duration().unwrap(),
Duration::from_secs(3600)
);
assert!(cc.serve_stale_while_revalidate_duration().is_none());
}
#[test]
fn test_must_revalidate() {
let resp = build_response(
CACHE_CONTROL,
"max-age=12345, stale-while-revalidate=60, stale-if-error=30, must-revalidate",
);
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert!(cc.must_revalidate());
assert_eq!(cc.stale_while_revalidate().unwrap().unwrap(), 60);
assert_eq!(cc.stale_if_error().unwrap().unwrap(), 30);
assert_eq!(
cc.serve_stale_while_revalidate_duration().unwrap(),
Duration::ZERO
);
assert_eq!(cc.serve_stale_if_error_duration().unwrap(), Duration::ZERO);
}
#[test]
fn test_proxy_revalidate() {
let resp = build_response(
CACHE_CONTROL,
"max-age=12345, stale-while-revalidate=60, stale-if-error=30, proxy-revalidate",
);
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert!(cc.proxy_revalidate());
assert_eq!(cc.stale_while_revalidate().unwrap().unwrap(), 60);
assert_eq!(cc.stale_if_error().unwrap().unwrap(), 30);
assert_eq!(
cc.serve_stale_while_revalidate_duration().unwrap(),
Duration::ZERO
);
assert_eq!(cc.serve_stale_if_error_duration().unwrap(), Duration::ZERO);
}
#[test]
fn test_s_maxage_stale() {
let resp = build_response(
CACHE_CONTROL,
"s-maxage=0, stale-while-revalidate=60, stale-if-error=30",
);
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert_eq!(cc.stale_while_revalidate().unwrap().unwrap(), 60);
assert_eq!(cc.stale_if_error().unwrap().unwrap(), 30);
assert_eq!(
cc.serve_stale_while_revalidate_duration().unwrap(),
Duration::ZERO
);
assert_eq!(cc.serve_stale_if_error_duration().unwrap(), Duration::ZERO);
}
#[test]
fn test_authorized_request() {
let resp = build_response(CACHE_CONTROL, "max-age=10");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert!(!cc.allow_caching_authorized_req());
let resp = build_response(CACHE_CONTROL, "s-maxage=10");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert!(cc.allow_caching_authorized_req());
let resp = build_response(CACHE_CONTROL, "public");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert!(cc.allow_caching_authorized_req());
let resp = build_response(CACHE_CONTROL, "must-revalidate, max-age=0");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert!(cc.allow_caching_authorized_req());
let resp = build_response(CACHE_CONTROL, "");
let cc = CacheControl::from_resp_headers(&resp).unwrap();
assert!(!cc.allow_caching_authorized_req());
}
fn build_request(cc_key: HeaderName, cc_value: &str) -> request::Parts {
let (parts, _) = request::Builder::new()
.header(cc_key, cc_value)
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | true |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/max_file_size.rs | pingora-cache/src/max_file_size.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Set limit on the largest size to cache
use pingora_error::ErrorType;
/// Error type returned when the limit is reached.
pub const ERR_RESPONSE_TOO_LARGE: ErrorType = ErrorType::Custom("response too large");
// Body bytes tracker to adjust (predicted) cacheability,
// even if cache has been disabled.
#[derive(Debug)]
pub(crate) struct MaxFileSizeTracker {
body_bytes: usize,
max_size: usize,
}
impl MaxFileSizeTracker {
// Create a new Tracker object.
pub fn new(max_size: usize) -> MaxFileSizeTracker {
MaxFileSizeTracker {
body_bytes: 0,
max_size,
}
}
// Add bytes to the tracker.
// If return value is true, the tracker bytes are under the max size allowed.
pub fn add_body_bytes(&mut self, bytes: usize) -> bool {
self.body_bytes += bytes;
self.allow_caching()
}
pub fn max_file_size_bytes(&self) -> usize {
self.max_size
}
pub fn allow_caching(&self) -> bool {
self.body_bytes <= self.max_size
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/trace.rs | pingora-cache/src/trace.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Distributed tracing helpers
use cf_rustracing_jaeger::span::SpanContextState;
use std::time::SystemTime;
use crate::{CacheMeta, CachePhase, HitStatus};
pub use cf_rustracing::tag::Tag;
pub type Span = cf_rustracing::span::Span<SpanContextState>;
pub type SpanHandle = cf_rustracing::span::SpanHandle<SpanContextState>;
#[derive(Debug)]
pub(crate) struct CacheTraceCTX {
// parent span
pub cache_span: Span,
// only spans across multiple calls need to store here
pub miss_span: Span,
pub hit_span: Span,
}
pub fn tag_span_with_meta(span: &mut Span, meta: &CacheMeta) {
fn ts2epoch(ts: SystemTime) -> f64 {
ts.duration_since(SystemTime::UNIX_EPOCH)
.unwrap_or_default() // should never overflow but be safe here
.as_secs_f64()
}
let internal = &meta.0.internal;
span.set_tags(|| {
[
Tag::new("created", ts2epoch(internal.created)),
Tag::new("fresh_until", ts2epoch(internal.fresh_until)),
Tag::new("updated", ts2epoch(internal.updated)),
Tag::new("stale_if_error_sec", internal.stale_if_error_sec as i64),
Tag::new(
"stale_while_revalidate_sec",
internal.stale_while_revalidate_sec as i64,
),
Tag::new("variance", internal.variance.is_some()),
]
});
}
impl CacheTraceCTX {
pub fn new() -> Self {
CacheTraceCTX {
cache_span: Span::inactive(),
miss_span: Span::inactive(),
hit_span: Span::inactive(),
}
}
pub fn enable(&mut self, cache_span: Span) {
self.cache_span = cache_span;
}
pub fn get_cache_span(&self) -> SpanHandle {
self.cache_span.handle()
}
#[inline]
pub fn child(&self, name: &'static str) -> Span {
self.cache_span.child(name, |o| o.start())
}
pub fn start_miss_span(&mut self) {
self.miss_span = self.child("miss");
}
pub fn get_miss_span(&self) -> SpanHandle {
self.miss_span.handle()
}
pub fn finish_miss_span(&mut self) {
self.miss_span.set_finish_time(SystemTime::now);
}
pub fn start_hit_span(&mut self, phase: CachePhase, hit_status: HitStatus) {
self.hit_span = self.child("hit");
self.hit_span.set_tag(|| Tag::new("phase", phase.as_str()));
self.hit_span
.set_tag(|| Tag::new("status", hit_status.as_str()));
}
pub fn get_hit_span(&self) -> SpanHandle {
self.hit_span.handle()
}
pub fn finish_hit_span(&mut self) {
self.hit_span.set_finish_time(SystemTime::now);
}
pub fn log_meta_in_hit_span(&mut self, meta: &CacheMeta) {
tag_span_with_meta(&mut self.hit_span, meta);
}
pub fn log_meta_in_miss_span(&mut self, meta: &CacheMeta) {
tag_span_with_meta(&mut self.miss_span, meta);
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/memory.rs | pingora-cache/src/memory.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Hash map based in memory cache
//!
//! For testing only, not for production use
//TODO: Mark this module #[test] only
use super::*;
use crate::key::CompactCacheKey;
use crate::storage::{streaming_write::U64WriteId, HandleHit, HandleMiss};
use crate::trace::SpanHandle;
use async_trait::async_trait;
use bytes::Bytes;
use parking_lot::RwLock;
use pingora_error::*;
use std::any::Any;
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use tokio::sync::watch;
type BinaryMeta = (Vec<u8>, Vec<u8>);
pub(crate) struct CacheObject {
pub meta: BinaryMeta,
pub body: Arc<Vec<u8>>,
}
pub(crate) struct TempObject {
pub meta: BinaryMeta,
// these are Arc because they need to continue to exist after this TempObject is removed
pub body: Arc<RwLock<Vec<u8>>>,
bytes_written: Arc<watch::Sender<PartialState>>, // this should match body.len()
}
impl TempObject {
fn new(meta: BinaryMeta) -> Self {
let (tx, _rx) = watch::channel(PartialState::Partial(0));
TempObject {
meta,
body: Arc::new(RwLock::new(Vec::new())),
bytes_written: Arc::new(tx),
}
}
// this is not at all optimized
fn make_cache_object(&self) -> CacheObject {
let meta = self.meta.clone();
let body = Arc::new(self.body.read().clone());
CacheObject { meta, body }
}
}
/// Hash map based in memory cache
///
/// For testing only, not for production use.
pub struct MemCache {
pub(crate) cached: Arc<RwLock<HashMap<String, CacheObject>>>,
pub(crate) temp: Arc<RwLock<HashMap<String, HashMap<u64, TempObject>>>>,
pub(crate) last_temp_id: AtomicU64,
}
impl MemCache {
/// Create a new [MemCache]
pub fn new() -> Self {
MemCache {
cached: Arc::new(RwLock::new(HashMap::new())),
temp: Arc::new(RwLock::new(HashMap::new())),
last_temp_id: AtomicU64::new(0),
}
}
}
pub enum MemHitHandler {
Complete(CompleteHit),
Partial(PartialHit),
}
#[derive(Copy, Clone)]
enum PartialState {
Partial(usize),
Complete(usize),
}
pub struct CompleteHit {
body: Arc<Vec<u8>>,
done: bool,
range_start: usize,
range_end: usize,
}
impl CompleteHit {
fn get(&mut self) -> Option<Bytes> {
if self.done {
None
} else {
self.done = true;
Some(Bytes::copy_from_slice(
&self.body.as_slice()[self.range_start..self.range_end],
))
}
}
fn seek(&mut self, start: usize, end: Option<usize>) -> Result<()> {
if start >= self.body.len() {
return Error::e_explain(
ErrorType::InternalError,
format!("seek start out of range {start} >= {}", self.body.len()),
);
}
self.range_start = start;
if let Some(end) = end {
// end over the actual last byte is allowed, we just need to return the actual bytes
self.range_end = std::cmp::min(self.body.len(), end);
}
// seek resets read so that one handler can be used for multiple ranges
self.done = false;
Ok(())
}
}
pub struct PartialHit {
body: Arc<RwLock<Vec<u8>>>,
bytes_written: watch::Receiver<PartialState>,
bytes_read: usize,
}
impl PartialHit {
async fn read(&mut self) -> Option<Bytes> {
loop {
let bytes_written = *self.bytes_written.borrow_and_update();
let bytes_end = match bytes_written {
PartialState::Partial(s) => s,
PartialState::Complete(c) => {
// no more data will arrive
if c == self.bytes_read {
return None;
}
c
}
};
assert!(bytes_end >= self.bytes_read);
// more data available to read
if bytes_end > self.bytes_read {
let new_bytes =
Bytes::copy_from_slice(&self.body.read()[self.bytes_read..bytes_end]);
self.bytes_read = bytes_end;
return Some(new_bytes);
}
// wait for more data
if self.bytes_written.changed().await.is_err() {
// err: sender dropped, body is finished
// FIXME: sender could drop because of an error
return None;
}
}
}
}
#[async_trait]
impl HandleHit for MemHitHandler {
async fn read_body(&mut self) -> Result<Option<Bytes>> {
match self {
Self::Complete(c) => Ok(c.get()),
Self::Partial(p) => Ok(p.read().await),
}
}
async fn finish(
self: Box<Self>, // because self is always used as a trait object
_storage: &'static (dyn storage::Storage + Sync),
_key: &CacheKey,
_trace: &SpanHandle,
) -> Result<()> {
Ok(())
}
fn can_seek(&self) -> bool {
match self {
Self::Complete(_) => true,
Self::Partial(_) => false, // TODO: support seeking in partial reads
}
}
fn seek(&mut self, start: usize, end: Option<usize>) -> Result<()> {
match self {
Self::Complete(c) => c.seek(start, end),
Self::Partial(_) => Error::e_explain(
ErrorType::InternalError,
"seek not supported for partial cache",
),
}
}
fn should_count_access(&self) -> bool {
match self {
// avoid counting accesses for partial reads to keep things simple
Self::Complete(_) => true,
Self::Partial(_) => false,
}
}
fn get_eviction_weight(&self) -> usize {
match self {
// FIXME: just body size, also track meta size
Self::Complete(c) => c.body.len(),
// partial read cannot be estimated since body size is unknown
Self::Partial(_) => 0,
}
}
fn as_any(&self) -> &(dyn Any + Send + Sync) {
self
}
fn as_any_mut(&mut self) -> &mut (dyn Any + Send + Sync) {
self
}
}
pub struct MemMissHandler {
body: Arc<RwLock<Vec<u8>>>,
bytes_written: Arc<watch::Sender<PartialState>>,
// these are used only in finish() to data from temp to cache
key: String,
temp_id: U64WriteId,
// key -> cache object
cache: Arc<RwLock<HashMap<String, CacheObject>>>,
// key -> (temp writer id -> temp object) to support concurrent writers
temp: Arc<RwLock<HashMap<String, HashMap<u64, TempObject>>>>,
}
#[async_trait]
impl HandleMiss for MemMissHandler {
async fn write_body(&mut self, data: bytes::Bytes, eof: bool) -> Result<()> {
let current_bytes = match *self.bytes_written.borrow() {
PartialState::Partial(p) => p,
PartialState::Complete(_) => panic!("already EOF"),
};
self.body.write().extend_from_slice(&data);
let written = current_bytes + data.len();
let new_state = if eof {
PartialState::Complete(written)
} else {
PartialState::Partial(written)
};
self.bytes_written.send_replace(new_state);
Ok(())
}
async fn finish(self: Box<Self>) -> Result<MissFinishType> {
// safe, the temp object is inserted when the miss handler is created
let cache_object = self
.temp
.read()
.get(&self.key)
.unwrap()
.get(&self.temp_id.into())
.unwrap()
.make_cache_object();
let size = cache_object.body.len(); // FIXME: this just body size, also track meta size
self.cache.write().insert(self.key.clone(), cache_object);
self.temp
.write()
.get_mut(&self.key)
.and_then(|map| map.remove(&self.temp_id.into()));
Ok(MissFinishType::Created(size))
}
fn streaming_write_tag(&self) -> Option<&[u8]> {
Some(self.temp_id.as_bytes())
}
}
impl Drop for MemMissHandler {
fn drop(&mut self) {
self.temp
.write()
.get_mut(&self.key)
.and_then(|map| map.remove(&self.temp_id.into()));
}
}
fn hit_from_temp_obj(temp_obj: &TempObject) -> Result<Option<(CacheMeta, HitHandler)>> {
let meta = CacheMeta::deserialize(&temp_obj.meta.0, &temp_obj.meta.1)?;
let partial = PartialHit {
body: temp_obj.body.clone(),
bytes_written: temp_obj.bytes_written.subscribe(),
bytes_read: 0,
};
let hit_handler = MemHitHandler::Partial(partial);
Ok(Some((meta, Box::new(hit_handler))))
}
#[async_trait]
impl Storage for MemCache {
async fn lookup(
&'static self,
key: &CacheKey,
_trace: &SpanHandle,
) -> Result<Option<(CacheMeta, HitHandler)>> {
let hash = key.combined();
// always prefer partial read otherwise fresh asset will not be visible on expired asset
// until it is fully updated
// no preference on which partial read we get (if there are multiple writers)
if let Some((_, temp_obj)) = self
.temp
.read()
.get(&hash)
.and_then(|map| map.iter().next())
{
hit_from_temp_obj(temp_obj)
} else if let Some(obj) = self.cached.read().get(&hash) {
let meta = CacheMeta::deserialize(&obj.meta.0, &obj.meta.1)?;
let hit_handler = CompleteHit {
body: obj.body.clone(),
done: false,
range_start: 0,
range_end: obj.body.len(),
};
let hit_handler = MemHitHandler::Complete(hit_handler);
Ok(Some((meta, Box::new(hit_handler))))
} else {
Ok(None)
}
}
async fn lookup_streaming_write(
&'static self,
key: &CacheKey,
streaming_write_tag: Option<&[u8]>,
_trace: &SpanHandle,
) -> Result<Option<(CacheMeta, HitHandler)>> {
let hash = key.combined();
let write_tag: U64WriteId = streaming_write_tag
.expect("tag must be set during streaming write")
.try_into()
.expect("tag must be correct length");
hit_from_temp_obj(
self.temp
.read()
.get(&hash)
.and_then(|map| map.get(&write_tag.into()))
.expect("must have partial write in progress"),
)
}
async fn get_miss_handler(
&'static self,
key: &CacheKey,
meta: &CacheMeta,
_trace: &SpanHandle,
) -> Result<MissHandler> {
let hash = key.combined();
let meta = meta.serialize()?;
let temp_obj = TempObject::new(meta);
let temp_id = self.last_temp_id.fetch_add(1, Ordering::Relaxed);
let miss_handler = MemMissHandler {
body: temp_obj.body.clone(),
bytes_written: temp_obj.bytes_written.clone(),
key: hash.clone(),
cache: self.cached.clone(),
temp: self.temp.clone(),
temp_id: temp_id.into(),
};
self.temp
.write()
.entry(hash)
.or_default()
.insert(miss_handler.temp_id.into(), temp_obj);
Ok(Box::new(miss_handler))
}
async fn purge(
&'static self,
key: &CompactCacheKey,
_type: PurgeType,
_trace: &SpanHandle,
) -> Result<bool> {
// This usually purges the primary key because, without a lookup, the variance key is usually
// empty
let hash = key.combined();
let temp_removed = self.temp.write().remove(&hash).is_some();
let cache_removed = self.cached.write().remove(&hash).is_some();
Ok(temp_removed || cache_removed)
}
async fn update_meta(
&'static self,
key: &CacheKey,
meta: &CacheMeta,
_trace: &SpanHandle,
) -> Result<bool> {
let hash = key.combined();
if let Some(obj) = self.cached.write().get_mut(&hash) {
obj.meta = meta.serialize()?;
Ok(true)
} else {
panic!("no meta found")
}
}
fn support_streaming_partial_write(&self) -> bool {
true
}
fn as_any(&self) -> &(dyn Any + Send + Sync) {
self
}
}
#[cfg(test)]
mod test {
use super::*;
use cf_rustracing::span::Span;
use once_cell::sync::Lazy;
fn gen_meta() -> CacheMeta {
let mut header = ResponseHeader::build(200, None).unwrap();
header.append_header("foo1", "bar1").unwrap();
header.append_header("foo2", "bar2").unwrap();
header.append_header("foo3", "bar3").unwrap();
header.append_header("Server", "Pingora").unwrap();
let internal = crate::meta::InternalMeta::default();
CacheMeta(Box::new(crate::meta::CacheMetaInner {
internal,
header,
extensions: http::Extensions::new(),
}))
}
#[tokio::test]
async fn test_write_then_read() {
static MEM_CACHE: Lazy<MemCache> = Lazy::new(MemCache::new);
let span = &Span::inactive().handle();
let key1 = CacheKey::new("", "a", "1");
let res = MEM_CACHE.lookup(&key1, span).await.unwrap();
assert!(res.is_none());
let cache_meta = gen_meta();
let mut miss_handler = MEM_CACHE
.get_miss_handler(&key1, &cache_meta, span)
.await
.unwrap();
miss_handler
.write_body(b"test1"[..].into(), false)
.await
.unwrap();
miss_handler
.write_body(b"test2"[..].into(), false)
.await
.unwrap();
miss_handler.finish().await.unwrap();
let (cache_meta2, mut hit_handler) = MEM_CACHE.lookup(&key1, span).await.unwrap().unwrap();
assert_eq!(
cache_meta.0.internal.fresh_until,
cache_meta2.0.internal.fresh_until
);
let data = hit_handler.read_body().await.unwrap().unwrap();
assert_eq!("test1test2", data);
let data = hit_handler.read_body().await.unwrap();
assert!(data.is_none());
}
#[tokio::test]
async fn test_read_range() {
static MEM_CACHE: Lazy<MemCache> = Lazy::new(MemCache::new);
let span = &Span::inactive().handle();
let key1 = CacheKey::new("", "a", "1");
let res = MEM_CACHE.lookup(&key1, span).await.unwrap();
assert!(res.is_none());
let cache_meta = gen_meta();
let mut miss_handler = MEM_CACHE
.get_miss_handler(&key1, &cache_meta, span)
.await
.unwrap();
miss_handler
.write_body(b"test1test2"[..].into(), false)
.await
.unwrap();
miss_handler.finish().await.unwrap();
let (cache_meta2, mut hit_handler) = MEM_CACHE.lookup(&key1, span).await.unwrap().unwrap();
assert_eq!(
cache_meta.0.internal.fresh_until,
cache_meta2.0.internal.fresh_until
);
// out of range
assert!(hit_handler.seek(10000, None).is_err());
assert!(hit_handler.seek(5, None).is_ok());
let data = hit_handler.read_body().await.unwrap().unwrap();
assert_eq!("test2", data);
let data = hit_handler.read_body().await.unwrap();
assert!(data.is_none());
assert!(hit_handler.seek(4, Some(5)).is_ok());
let data = hit_handler.read_body().await.unwrap().unwrap();
assert_eq!("1", data);
let data = hit_handler.read_body().await.unwrap();
assert!(data.is_none());
}
#[tokio::test]
async fn test_write_while_read() {
use futures::FutureExt;
static MEM_CACHE: Lazy<MemCache> = Lazy::new(MemCache::new);
let span = &Span::inactive().handle();
let key1 = CacheKey::new("", "a", "1");
let res = MEM_CACHE.lookup(&key1, span).await.unwrap();
assert!(res.is_none());
let cache_meta = gen_meta();
let mut miss_handler = MEM_CACHE
.get_miss_handler(&key1, &cache_meta, span)
.await
.unwrap();
// first reader
let (cache_meta1, mut hit_handler1) = MEM_CACHE.lookup(&key1, span).await.unwrap().unwrap();
assert_eq!(
cache_meta.0.internal.fresh_until,
cache_meta1.0.internal.fresh_until
);
// No body to read
let res = hit_handler1.read_body().now_or_never();
assert!(res.is_none());
miss_handler
.write_body(b"test1"[..].into(), false)
.await
.unwrap();
let data = hit_handler1.read_body().await.unwrap().unwrap();
assert_eq!("test1", data);
let res = hit_handler1.read_body().now_or_never();
assert!(res.is_none());
miss_handler
.write_body(b"test2"[..].into(), false)
.await
.unwrap();
let data = hit_handler1.read_body().await.unwrap().unwrap();
assert_eq!("test2", data);
// second reader
let (cache_meta2, mut hit_handler2) = MEM_CACHE.lookup(&key1, span).await.unwrap().unwrap();
assert_eq!(
cache_meta.0.internal.fresh_until,
cache_meta2.0.internal.fresh_until
);
let data = hit_handler2.read_body().await.unwrap().unwrap();
assert_eq!("test1test2", data);
let res = hit_handler2.read_body().now_or_never();
assert!(res.is_none());
let res = hit_handler1.read_body().now_or_never();
assert!(res.is_none());
miss_handler.finish().await.unwrap();
let data = hit_handler1.read_body().await.unwrap();
assert!(data.is_none());
let data = hit_handler2.read_body().await.unwrap();
assert!(data.is_none());
}
#[tokio::test]
async fn test_purge_partial() {
static MEM_CACHE: Lazy<MemCache> = Lazy::new(MemCache::new);
let cache = &MEM_CACHE;
let key = CacheKey::new("", "a", "1").to_compact();
let hash = key.combined();
let meta = (
"meta_key".as_bytes().to_vec(),
"meta_value".as_bytes().to_vec(),
);
let temp_obj = TempObject::new(meta);
let mut map = HashMap::new();
map.insert(0, temp_obj);
cache.temp.write().insert(hash.clone(), map);
assert!(cache.temp.read().contains_key(&hash));
let result = cache
.purge(&key, PurgeType::Invalidation, &Span::inactive().handle())
.await;
assert!(result.is_ok());
assert!(!cache.temp.read().contains_key(&hash));
}
#[tokio::test]
async fn test_purge_complete() {
static MEM_CACHE: Lazy<MemCache> = Lazy::new(MemCache::new);
let cache = &MEM_CACHE;
let key = CacheKey::new("", "a", "1").to_compact();
let hash = key.combined();
let meta = (
"meta_key".as_bytes().to_vec(),
"meta_value".as_bytes().to_vec(),
);
let body = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 0];
let cache_obj = CacheObject {
meta,
body: Arc::new(body),
};
cache.cached.write().insert(hash.clone(), cache_obj);
assert!(cache.cached.read().contains_key(&hash));
let result = cache
.purge(&key, PurgeType::Invalidation, &Span::inactive().handle())
.await;
assert!(result.is_ok());
assert!(!cache.cached.read().contains_key(&hash));
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/lock.rs | pingora-cache/src/lock.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Cache lock
use crate::{hashtable::ConcurrentHashTable, key::CacheHashKey, CacheKey};
use crate::{Span, Tag};
use http::Extensions;
use pingora_timeout::timeout;
use std::sync::Arc;
use std::time::Duration;
pub type CacheKeyLockImpl = dyn CacheKeyLock + Send + Sync;
pub trait CacheKeyLock {
/// Try to lock a cache fetch
///
/// If `stale_writer` is true, this fetch is to revalidate an asset already in cache.
/// Else this fetch was a cache miss (i.e. not found via lookup, or force missed).
///
/// Users should call after a cache miss before fetching the asset.
/// The returned [Locked] will tell the caller either to fetch or wait.
fn lock(&self, key: &CacheKey, stale_writer: bool) -> Locked;
/// Release a lock for the given key
///
/// When the write lock is dropped without being released, the read lock holders will consider
/// it to be failed so that they will compete for the write lock again.
fn release(&self, key: &CacheKey, permit: WritePermit, reason: LockStatus);
/// Set tags on a trace span for the cache lock wait.
fn trace_lock_wait(&self, span: &mut Span, _read_lock: &ReadLock, lock_status: LockStatus) {
let tag_value: &'static str = lock_status.into();
span.set_tag(|| Tag::new("status", tag_value));
}
}
const N_SHARDS: usize = 16;
/// The global cache locking manager
#[derive(Debug)]
pub struct CacheLock {
lock_table: ConcurrentHashTable<LockStub, N_SHARDS>,
// fixed lock timeout values for now
age_timeout_default: Duration,
}
/// A struct representing locked cache access
#[derive(Debug)]
pub enum Locked {
/// The writer is allowed to fetch the asset
Write(WritePermit),
/// The reader waits for the writer to fetch the asset
Read(ReadLock),
}
impl Locked {
/// Is this a write lock
pub fn is_write(&self) -> bool {
matches!(self, Self::Write(_))
}
}
impl CacheLock {
/// Create a new [CacheLock] with the given lock timeout
///
/// Age timeout refers to how long a writer has been holding onto a particular lock, and wait
/// timeout refers to how long a reader may hold onto any number of locks before giving up.
/// When either timeout is reached, the read locks are automatically unlocked.
pub fn new_boxed(age_timeout: Duration) -> Box<Self> {
Box::new(CacheLock {
lock_table: ConcurrentHashTable::new(),
age_timeout_default: age_timeout,
})
}
/// Create a new [CacheLock] with the given lock timeout
///
/// Age timeout refers to how long a writer has been holding onto a particular lock, and wait
/// timeout refers to how long a reader may hold onto any number of locks before giving up.
/// When either timeout is reached, the read locks are automatically unlocked.
pub fn new(age_timeout_default: Duration) -> Self {
CacheLock {
lock_table: ConcurrentHashTable::new(),
age_timeout_default,
}
}
}
impl CacheKeyLock for CacheLock {
fn lock(&self, key: &CacheKey, stale_writer: bool) -> Locked {
let hash = key.combined_bin();
let key = u128::from_be_bytes(hash); // endianness doesn't matter
let table = self.lock_table.get(key);
if let Some(lock) = table.read().get(&key) {
// already has an ongoing request
// If the lock status is dangling or timeout, the lock will _remain_ in the table
// and readers should attempt to replace it.
// In the case of writer timeout, any remaining readers that were waiting on THIS
// LockCore should have (or are about to) timed out on their own.
// Finding a Timeout status means that THIS writer's lock already expired, so future
// requests ought to recreate the lock.
if !matches!(
lock.0.lock_status(),
LockStatus::Dangling | LockStatus::AgeTimeout
) {
return Locked::Read(lock.read_lock());
}
// Dangling: the previous writer quit without unlocking the lock. Requests should
// compete for the write lock again.
}
let mut table = table.write();
// check again in case another request already added it
if let Some(lock) = table.get(&key) {
if !matches!(
lock.0.lock_status(),
LockStatus::Dangling | LockStatus::AgeTimeout
) {
return Locked::Read(lock.read_lock());
}
}
let (permit, stub) =
WritePermit::new(self.age_timeout_default, stale_writer, Extensions::new());
table.insert(key, stub);
Locked::Write(permit)
}
fn release(&self, key: &CacheKey, mut permit: WritePermit, reason: LockStatus) {
let hash = key.combined_bin();
let key = u128::from_be_bytes(hash); // endianness doesn't matter
if permit.lock.lock_status() == LockStatus::AgeTimeout {
// if lock age timed out, then readers are capable of
// replacing the lock associated with this permit from the lock table
// (see lock() implementation)
// keep the lock status as Timeout accordingly when unlocking
// (because we aren't removing it from the lock_table)
permit.unlock(LockStatus::AgeTimeout);
} else if let Some(_lock) = self.lock_table.write(key).remove(&key) {
permit.unlock(reason);
}
// these situations above should capture all possible options,
// else dangling cache lock may start
}
}
use log::warn;
use std::sync::atomic::{AtomicU8, Ordering};
use std::time::Instant;
use strum::{FromRepr, IntoStaticStr};
use tokio::sync::Semaphore;
/// Status which the read locks could possibly see.
#[derive(Debug, Copy, Clone, PartialEq, Eq, IntoStaticStr, FromRepr)]
#[repr(u8)]
pub enum LockStatus {
/// Waiting for the writer to populate the asset
Waiting = 0,
/// The writer finishes, readers can start
Done = 1,
/// The writer encountered error, such as network issue. A new writer will be elected.
TransientError = 2,
/// The writer observed that no cache lock is needed (e.g., uncacheable), readers should start
/// to fetch independently without a new writer
GiveUp = 3,
/// The write lock is dropped without being unlocked
Dangling = 4,
/// Reader has held onto cache locks for too long, give up
WaitTimeout = 5,
/// The lock is held for too long by the writer
AgeTimeout = 6,
}
impl From<LockStatus> for u8 {
fn from(l: LockStatus) -> u8 {
match l {
LockStatus::Waiting => 0,
LockStatus::Done => 1,
LockStatus::TransientError => 2,
LockStatus::GiveUp => 3,
LockStatus::Dangling => 4,
LockStatus::WaitTimeout => 5,
LockStatus::AgeTimeout => 6,
}
}
}
impl From<u8> for LockStatus {
fn from(v: u8) -> Self {
Self::from_repr(v).unwrap_or(Self::GiveUp)
}
}
#[derive(Debug)]
pub struct LockCore {
pub lock_start: Instant,
pub age_timeout: Duration,
pub(super) lock: Semaphore,
// use u8 for Atomic enum
lock_status: AtomicU8,
stale_writer: bool,
extensions: Extensions,
}
impl LockCore {
pub fn new_arc(timeout: Duration, stale_writer: bool, extensions: Extensions) -> Arc<Self> {
Arc::new(LockCore {
lock: Semaphore::new(0),
age_timeout: timeout,
lock_start: Instant::now(),
lock_status: AtomicU8::new(LockStatus::Waiting.into()),
stale_writer,
extensions,
})
}
pub fn locked(&self) -> bool {
self.lock.available_permits() == 0
}
pub fn unlock(&self, reason: LockStatus) {
assert!(
reason != LockStatus::WaitTimeout,
"WaitTimeout is not stored in LockCore"
);
self.lock_status.store(reason.into(), Ordering::SeqCst);
// Any small positive number will do, 10 is used for RwLock as well.
// No need to wake up all at once.
self.lock.add_permits(10);
}
pub fn lock_status(&self) -> LockStatus {
self.lock_status.load(Ordering::SeqCst).into()
}
/// Was this lock for a stale cache fetch writer?
pub fn stale_writer(&self) -> bool {
self.stale_writer
}
pub fn extensions(&self) -> &Extensions {
&self.extensions
}
}
// all 3 structs below are just Arc<LockCore> with different interfaces
/// ReadLock: the requests who get it need to wait until it is released
#[derive(Debug)]
pub struct ReadLock(Arc<LockCore>);
impl ReadLock {
    /// Wait for the writer to release the lock
    ///
    /// Returns immediately if the lock is already released. Otherwise waits
    /// until either the writer unlocks, or the lock's age timeout is reached,
    /// in which case this reader stores [LockStatus::AgeTimeout] on the lock.
    pub async fn wait(&self) {
        if !self.locked() {
            return;
        }
        // FIXME: for now it is the awkward responsibility of the ReadLock to set the
        // timeout status on the lock itself because the write permit cannot lock age
        // timeout on its own
        // TODO: need to be careful not to wake everyone up at the same time
        // (maybe not an issue because regular cache lock release behaves that way)
        // remaining wait budget = age_timeout - time already elapsed;
        // checked_sub returns None when the budget is already exhausted
        if let Some(duration) = self.0.age_timeout.checked_sub(self.0.lock_start.elapsed()) {
            match timeout(duration, self.0.lock.acquire()).await {
                Ok(Ok(_)) => {
                    // permit is returned to Semaphore right away
                }
                Ok(Err(e)) => {
                    // acquire() only errors if the semaphore was closed; not
                    // expected here, so just log it
                    warn!("error acquiring semaphore {e:?}")
                }
                Err(_) => {
                    // the tokio timeout fired: mark the lock as aged out
                    self.0
                        .lock_status
                        .store(LockStatus::AgeTimeout.into(), Ordering::SeqCst);
                }
            }
        } else {
            // expiration has already occurred, store timeout status
            self.0
                .lock_status
                .store(LockStatus::AgeTimeout.into(), Ordering::SeqCst);
        }
    }
    /// Test if it is still locked
    pub fn locked(&self) -> bool {
        self.0.locked()
    }
    /// Whether the lock is expired, e.g., the writer has been holding the lock for too long
    pub fn expired(&self) -> bool {
        // NOTE: this is whether the lock is currently expired
        // not whether it was timed out during wait()
        self.0.lock_start.elapsed() >= self.0.age_timeout
    }
    /// The current status of the lock
    ///
    /// A lock still marked `Waiting` that has outlived its age timeout is
    /// reported as [LockStatus::AgeTimeout] even if no reader has stored
    /// that status yet.
    pub fn lock_status(&self) -> LockStatus {
        let status = self.0.lock_status();
        if matches!(status, LockStatus::Waiting) && self.expired() {
            LockStatus::AgeTimeout
        } else {
            status
        }
    }
    /// The extensions attached to this lock
    pub fn extensions(&self) -> &Extensions {
        self.0.extensions()
    }
}
/// WritePermit: the requester who gets it needs to populate the cache and then release it
#[derive(Debug)]
pub struct WritePermit {
    lock: Arc<LockCore>,
    // set by unlock(); Drop treats an unfinished permit as dangling
    finished: bool,
}
impl WritePermit {
    /// Create a new lock, with a permit to be given to the associated writer.
    ///
    /// Returns the writer-side [WritePermit] together with a [LockStub] from
    /// which readers can obtain [ReadLock]s on the same underlying lock.
    pub fn new(
        timeout: Duration,
        stale_writer: bool,
        extensions: Extensions,
    ) -> (WritePermit, LockStub) {
        let core = LockCore::new_arc(timeout, stale_writer, extensions);
        let permit = WritePermit {
            lock: core.clone(),
            finished: false,
        };
        (permit, LockStub(core))
    }

    /// Was this lock for a stale cache fetch writer?
    pub fn stale_writer(&self) -> bool {
        self.lock.stale_writer()
    }

    /// Release the lock with the given reason and mark this permit finished.
    pub fn unlock(&mut self, reason: LockStatus) {
        self.finished = true;
        self.lock.unlock(reason);
    }

    /// The current status of the underlying lock.
    pub fn lock_status(&self) -> LockStatus {
        self.lock.lock_status()
    }

    /// The extensions attached to the underlying lock.
    pub fn extensions(&self) -> &Extensions {
        self.lock.extensions()
    }
}
impl Drop for WritePermit {
    fn drop(&mut self) {
        // A permit that was unlocked properly needs no cleanup.
        if self.finished {
            return;
        }
        // Writer exited without properly unlocking. We let others to compete for the write lock again
        debug_assert!(false, "Dangling cache lock started!");
        self.unlock(LockStatus::Dangling);
    }
}
/// LockStub: a shared handle to the [LockCore] from which [ReadLock]s are minted
#[derive(Debug)]
pub struct LockStub(pub Arc<LockCore>);
impl LockStub {
pub fn read_lock(&self) -> ReadLock {
ReadLock(self.0.clone())
}
pub fn extensions(&self) -> &Extensions {
&self.0.extensions
}
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::CacheKey;

    /// First locker gets the write permit, later lockers get read locks;
    /// after release a write permit can be handed out again.
    #[test]
    fn test_get_release() {
        let cache_lock = CacheLock::new_boxed(Duration::from_secs(1000));
        let key1 = CacheKey::new("", "a", "1");
        let locked1 = cache_lock.lock(&key1, false);
        assert!(locked1.is_write()); // write permit
        let locked2 = cache_lock.lock(&key1, false);
        assert!(!locked2.is_write()); // read lock
        if let Locked::Write(permit) = locked1 {
            cache_lock.release(&key1, permit, LockStatus::Done);
        }
        let locked3 = cache_lock.lock(&key1, false);
        assert!(locked3.is_write()); // write permit again
        if let Locked::Write(permit) = locked3 {
            cache_lock.release(&key1, permit, LockStatus::Done);
        }
    }

    /// A reader blocked in wait() observes `Done` once the writer unlocks.
    #[tokio::test]
    async fn test_lock() {
        let cache_lock = CacheLock::new_boxed(Duration::from_secs(1000));
        let key1 = CacheKey::new("", "a", "1");
        let mut permit = match cache_lock.lock(&key1, false) {
            Locked::Write(w) => w,
            _ => panic!(),
        };
        let lock = match cache_lock.lock(&key1, false) {
            Locked::Read(r) => r,
            _ => panic!(),
        };
        assert!(lock.locked());
        let handle = tokio::spawn(async move {
            lock.wait().await;
            assert_eq!(lock.lock_status(), LockStatus::Done);
        });
        permit.unlock(LockStatus::Done);
        handle.await.unwrap(); // check lock is unlocked and the task is returned
    }

    /// A reader waiting past the 1s age timeout sees `AgeTimeout`; the expired
    /// lock can then be replaced by a fresh write permit for the same key.
    #[tokio::test]
    async fn test_lock_timeout() {
        let cache_lock = CacheLock::new_boxed(Duration::from_secs(1));
        let key1 = CacheKey::new("", "a", "1");
        let mut permit = match cache_lock.lock(&key1, false) {
            Locked::Write(w) => w,
            _ => panic!(),
        };
        let lock = match cache_lock.lock(&key1, false) {
            Locked::Read(r) => r,
            _ => panic!(),
        };
        assert!(lock.locked());
        let handle = tokio::spawn(async move {
            // this reader is expected to time out
            lock.wait().await;
            assert_eq!(lock.lock_status(), LockStatus::AgeTimeout);
        });
        tokio::time::sleep(Duration::from_millis(2100)).await;
        handle.await.unwrap(); // check lock is timed out
        // expired lock - we will be able to install a new lock instead
        let mut permit2 = match cache_lock.lock(&key1, false) {
            Locked::Write(w) => w,
            _ => panic!(),
        };
        let lock2 = match cache_lock.lock(&key1, false) {
            Locked::Read(r) => r,
            _ => panic!(),
        };
        assert!(lock2.locked());
        let handle = tokio::spawn(async move {
            // this time the writer finishes normally before the timeout
            lock2.wait().await;
            assert_eq!(lock2.lock_status(), LockStatus::Done);
        });
        permit.unlock(LockStatus::Done);
        permit2.unlock(LockStatus::Done);
        handle.await.unwrap();
    }

    /// A write permit released after its lock expired still cleans up, so the
    /// key can be locked again afterwards.
    #[tokio::test]
    async fn test_lock_expired_release() {
        let cache_lock = CacheLock::new_boxed(Duration::from_secs(1));
        let key1 = CacheKey::new("", "a", "1");
        let permit = match cache_lock.lock(&key1, false) {
            Locked::Write(w) => w,
            _ => panic!(),
        };
        let lock = match cache_lock.lock(&key1, false) {
            Locked::Read(r) => r,
            _ => panic!(),
        };
        assert!(lock.locked());
        let handle = tokio::spawn(async move {
            // this reader is expected to time out
            lock.wait().await;
            assert_eq!(lock.lock_status(), LockStatus::AgeTimeout);
        });
        tokio::time::sleep(Duration::from_millis(1100)).await; // let lock age time out
        handle.await.unwrap(); // check lock is timed out
        // writer finally finishes
        cache_lock.release(&key1, permit, LockStatus::Done);
        // can reacquire after release
        let mut permit = match cache_lock.lock(&key1, false) {
            Locked::Write(w) => w,
            _ => panic!(),
        };
        assert_eq!(permit.lock.lock_status(), LockStatus::Waiting);
        let lock2 = match cache_lock.lock(&key1, false) {
            Locked::Read(r) => r,
            _ => panic!(),
        };
        assert!(lock2.locked());
        let handle = tokio::spawn(async move {
            // the new writer unlocks normally
            lock2.wait().await;
            assert_eq!(lock2.lock_status(), LockStatus::Done);
        });
        permit.unlock(LockStatus::Done);
        handle.await.unwrap();
    }

    /// With no reader waiting, the stored status stays `Waiting` after expiry;
    /// the first reader's wait() is what stamps `AgeTimeout` on the lock.
    #[tokio::test]
    async fn test_lock_expired_no_reader() {
        let cache_lock = CacheLock::new_boxed(Duration::from_secs(1));
        let key1 = CacheKey::new("", "a", "1");
        let mut permit = match cache_lock.lock(&key1, false) {
            Locked::Write(w) => w,
            _ => panic!(),
        };
        tokio::time::sleep(Duration::from_millis(1100)).await; // let lock age time out
        // lock expired without reader, but status is not yet set
        assert_eq!(permit.lock.lock_status(), LockStatus::Waiting);
        let lock = match cache_lock.lock(&key1, false) {
            Locked::Read(r) => r,
            _ => panic!(),
        };
        // reader expires write permit
        lock.wait().await;
        assert_eq!(lock.lock_status(), LockStatus::AgeTimeout);
        assert_eq!(permit.lock.lock_status(), LockStatus::AgeTimeout);
        permit.unlock(LockStatus::AgeTimeout);
    }

    /// Many tasks compete for the same key; each either wins the write permit
    /// (and releases it) or waits as a reader and retries.
    #[tokio::test]
    async fn test_lock_concurrent() {
        let _ = env_logger::builder().is_test(true).try_init();
        // Test that concurrent attempts to compete for a lock run without issues
        let cache_lock = Arc::new(CacheLock::new_boxed(Duration::from_secs(1)));
        let key1 = CacheKey::new("", "a", "1");
        let mut handles = vec![];
        const READERS: usize = 30;
        for _ in 0..READERS {
            let key1 = key1.clone();
            let cache_lock = cache_lock.clone();
            // simulate a cache lookup / lock attempt loop
            handles.push(tokio::spawn(async move {
                loop {
                    match cache_lock.lock(&key1, false) {
                        Locked::Write(permit) => {
                            // pretend to fill the cache, then release
                            let _ = tokio::time::sleep(Duration::from_millis(5)).await;
                            cache_lock.release(&key1, permit, LockStatus::Done);
                            break;
                        }
                        Locked::Read(r) => {
                            r.wait().await;
                        }
                    }
                }
            }));
        }
        for handle in handles {
            handle.await.unwrap();
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/storage.rs | pingora-cache/src/storage.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Cache backend storage abstraction
use super::{CacheKey, CacheMeta};
use crate::key::CompactCacheKey;
use crate::trace::SpanHandle;
use async_trait::async_trait;
use pingora_error::Result;
use std::any::Any;
/// The reason a purge() is called
#[derive(Debug, Clone, Copy)]
pub enum PurgeType {
    /// For eviction because the cache storage is full
    Eviction,
    /// For cache invalidation
    Invalidation,
}
/// Cache storage interface
#[async_trait]
pub trait Storage {
    // TODO: shouldn't have to be static
    /// Lookup the storage for the given [CacheKey].
    ///
    /// Returns `None` when the asset is not present in the cache.
    async fn lookup(
        &'static self,
        key: &CacheKey,
        trace: &SpanHandle,
    ) -> Result<Option<(CacheMeta, HitHandler)>>;
    /// Lookup the storage for the given [CacheKey] using a streaming write tag.
    ///
    /// When streaming partial writes is supported, the request that initiates the write will also
    /// pass an optional `streaming_write_tag` so that the storage may try to find the associated
    /// [HitHandler], for the same ongoing write.
    ///
    /// Therefore, when the write tag is set, the storage implementation should either return a
    /// [HitHandler] that can be matched to that tag, or none at all. Otherwise when the storage
    /// supports concurrent streaming writes for the same key, the calling request may receive a
    /// different body from the one it expected.
    ///
    /// By default this defers to the standard `Storage::lookup` implementation.
    async fn lookup_streaming_write(
        &'static self,
        key: &CacheKey,
        _streaming_write_tag: Option<&[u8]>,
        trace: &SpanHandle,
    ) -> Result<Option<(CacheMeta, HitHandler)>> {
        self.lookup(key, trace).await
    }
    /// Write the given [CacheMeta] to the storage. Return [MissHandler] to write the body later.
    async fn get_miss_handler(
        &'static self,
        key: &CacheKey,
        meta: &CacheMeta,
        trace: &SpanHandle,
    ) -> Result<MissHandler>;
    /// Delete the cached asset for the given key
    ///
    /// [CompactCacheKey] is used here because it is how eviction managers store the keys
    ///
    /// NOTE(review): the `bool` presumably indicates whether an asset was
    /// actually removed — confirm against concrete implementations.
    async fn purge(
        &'static self,
        key: &CompactCacheKey,
        purge_type: PurgeType,
        trace: &SpanHandle,
    ) -> Result<bool>;
    /// Update cache header and metadata for the already stored asset.
    ///
    /// NOTE(review): the `bool` presumably indicates whether the asset existed
    /// and was updated — confirm against concrete implementations.
    async fn update_meta(
        &'static self,
        key: &CacheKey,
        meta: &CacheMeta,
        trace: &SpanHandle,
    ) -> Result<bool>;
    /// Whether this storage backend supports reading partially written data
    ///
    /// This is to indicate when cache should unlock readers
    fn support_streaming_partial_write(&self) -> bool {
        false
    }
    /// Helper function to cast the trait object to concrete types
    fn as_any(&self) -> &(dyn Any + Send + Sync + 'static);
}
/// Cache hit handling trait
#[async_trait]
pub trait HandleHit {
    /// Read cached body
    ///
    /// Return `None` when no more body to read.
    async fn read_body(&mut self) -> Result<Option<bytes::Bytes>>;
    /// Finish the current cache hit
    async fn finish(
        self: Box<Self>, // because self is always used as a trait object
        storage: &'static (dyn Storage + Sync),
        key: &CacheKey,
        trace: &SpanHandle,
    ) -> Result<()>;
    /// Whether this storage allows seeking to a certain range of body for single ranges.
    fn can_seek(&self) -> bool {
        false
    }
    /// Whether this storage allows seeking to a certain range of body for multipart ranges.
    ///
    /// By default uses the `can_seek` implementation.
    fn can_seek_multipart(&self) -> bool {
        self.can_seek()
    }
    /// Try to seek to a certain range of the body for single ranges.
    ///
    /// `end: None` means to read to the end of the body.
    fn seek(&mut self, _start: usize, _end: Option<usize>) -> Result<()> {
        // to prevent impl can_seek() without impl seek
        todo!("seek() needs to be implemented")
    }
    /// Try to seek to a certain range of the body for multipart ranges.
    ///
    /// Works in an identical manner to `seek()`.
    ///
    /// `end: None` means to read to the end of the body.
    ///
    /// By default uses the `seek` implementation, but hit handlers may customize the
    /// implementation specifically to anticipate multipart requests.
    fn seek_multipart(&mut self, start: usize, end: Option<usize>) -> Result<()> {
        // defer to the single-range seek by default
        self.seek(start, end)
    }
    // TODO: fn is_stream_hit()
    /// Should we count this hit handler instance as an access in the eviction manager.
    ///
    /// Defaults to returning true to track all cache hits as accesses. Customize this if certain
    /// hits should not affect the eviction system's view of the asset.
    fn should_count_access(&self) -> bool {
        true
    }
    /// Returns the weight of the current cache hit asset to report to the eviction manager.
    ///
    /// This allows the eviction system to initialize a weight for the asset, in case it is not
    /// already tracking it (e.g. storage is out of sync with the eviction manager).
    ///
    /// Defaults to 0.
    fn get_eviction_weight(&self) -> usize {
        0
    }
    /// Helper function to cast the trait object to concrete types
    fn as_any(&self) -> &(dyn Any + Send + Sync);
    /// Helper function to cast the trait object to concrete types
    fn as_any_mut(&mut self) -> &mut (dyn Any + Send + Sync);
}
/// Hit Handler
pub type HitHandler = Box<dyn HandleHit + Sync + Send>;
/// MissFinishType
///
/// The outcome of completing a cache miss write (see [HandleMiss::finish]).
pub enum MissFinishType {
    /// A new asset was created with the given size.
    Created(usize),
    /// Appended size to existing asset, with an optional max size param.
    Appended(usize, Option<usize>),
}
/// Cache miss handling trait
#[async_trait]
pub trait HandleMiss {
    /// Write the given body to the storage
    ///
    /// `eof` marks the final chunk of the body.
    async fn write_body(&mut self, data: bytes::Bytes, eof: bool) -> Result<()>;
    /// Finish the cache admission
    ///
    /// When `self` is dropped without calling this function, the storage should consider this write
    /// failed.
    async fn finish(
        self: Box<Self>, // because self is always used as a trait object
    ) -> Result<MissFinishType>;
    /// Return a streaming write tag recognized by the underlying [`Storage`].
    ///
    /// This is an arbitrary data identifier that is used to associate this miss handler's current
    /// write with a hit handler for the same write. This identifier will be compared by the
    /// storage during `lookup_streaming_write`.
    // This write tag is essentially an borrowed data blob of bytes retrieved from the miss handler
    // and passed to storage, which means it can support strings or small data types, e.g. bytes
    // represented by a u64.
    // The downside with the current API is that such a data blob must be owned by the miss handler
    // and stored in a way that permits retrieval as a byte slice (not computed on the fly).
    // But most use cases likely only require a simple integer and may not like the overhead of a
    // Vec/String allocation or even a Cow, though such data types can also be used here.
    fn streaming_write_tag(&self) -> Option<&[u8]> {
        None
    }
}
/// Miss Handler
pub type MissHandler = Box<dyn HandleMiss + Sync + Send>;
pub mod streaming_write {
    //! Convenience write-id types for use as streaming write tags.

    /// Portable u64 (sized) write id convenience type for use with streaming writes.
    ///
    /// The value is stored as big-endian bytes, so the byte representation is
    /// identical across platforms.
    #[derive(Debug, Clone, Copy)]
    pub struct U64WriteId([u8; 8]);

    impl U64WriteId {
        /// The id as its big-endian byte representation.
        pub fn as_bytes(&self) -> &[u8] {
            self.0.as_slice()
        }
    }
    impl From<u64> for U64WriteId {
        fn from(value: u64) -> U64WriteId {
            Self(value.to_be_bytes())
        }
    }
    impl From<U64WriteId> for u64 {
        fn from(id: U64WriteId) -> u64 {
            u64::from_be_bytes(id.0)
        }
    }
    impl TryFrom<&[u8]> for U64WriteId {
        type Error = std::array::TryFromSliceError;
        fn try_from(value: &[u8]) -> std::result::Result<Self, Self::Error> {
            // fails unless the slice is exactly 8 bytes long
            let raw: [u8; 8] = value.try_into()?;
            Ok(U64WriteId(raw))
        }
    }

    /// Portable u32 (sized) write id convenience type for use with streaming writes.
    ///
    /// The value is stored as big-endian bytes, so the byte representation is
    /// identical across platforms.
    #[derive(Debug, Clone, Copy)]
    pub struct U32WriteId([u8; 4]);

    impl U32WriteId {
        /// The id as its big-endian byte representation.
        pub fn as_bytes(&self) -> &[u8] {
            self.0.as_slice()
        }
    }
    impl From<u32> for U32WriteId {
        fn from(value: u32) -> U32WriteId {
            Self(value.to_be_bytes())
        }
    }
    impl From<U32WriteId> for u32 {
        fn from(id: U32WriteId) -> u32 {
            u32::from_be_bytes(id.0)
        }
    }
    impl TryFrom<&[u8]> for U32WriteId {
        type Error = std::array::TryFromSliceError;
        fn try_from(value: &[u8]) -> std::result::Result<Self, Self::Error> {
            // fails unless the slice is exactly 4 bytes long
            let raw: [u8; 4] = value.try_into()?;
            Ok(U32WriteId(raw))
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/variance.rs | pingora-cache/src/variance.rs | use std::{borrow::Cow, collections::BTreeMap};
use blake2::Digest;
use crate::key::{Blake2b128, HashBinary};
/// A builder for variance keys, used for distinguishing multiple cached assets
/// at the same URL. This is intended to be easily passed to helper functions,
/// which can each populate a portion of the variance.
pub struct VarianceBuilder<'a> {
    // name -> serialized value; a BTreeMap keeps entries sorted by name so the
    // resulting hash is insensitive to insertion order
    values: BTreeMap<Cow<'a, str>, Cow<'a, [u8]>>,
}
impl<'a> VarianceBuilder<'a> {
    /// Create an empty variance key. Has no variance by default - add some variance using
    /// [`Self::add_value`].
    pub fn new() -> Self {
        VarianceBuilder {
            values: BTreeMap::new(),
        }
    }
    /// Add a byte string to the variance key. Not sensitive to insertion order.
    /// `value` is intended to take either `&str` or `&[u8]`.
    pub fn add_value(&mut self, name: &'a str, value: &'a (impl AsRef<[u8]> + ?Sized)) {
        self.values
            .insert(name.into(), Cow::Borrowed(value.as_ref()));
    }
    /// Move a byte string to the variance key. Not sensitive to insertion order. Useful when
    /// writing helper functions which generate a value then add said value to the VarianceBuilder.
    /// Without this, the helper function would have to move the value to the calling function
    /// to extend its lifetime to at least match the VarianceBuilder.
    pub fn add_owned_value(&mut self, name: &'a str, value: Vec<u8>) {
        self.values.insert(name.into(), Cow::Owned(value));
    }
    /// Check whether this variance key actually has variance, or just refers to the root asset
    pub fn has_variance(&self) -> bool {
        !self.values.is_empty()
    }
    /// Hash this variance key. Returns [`None`] if [`Self::has_variance`] is false.
    pub fn finalize(self) -> Option<HashBinary> {
        // single NUL byte separating names and values so that adjacent fields
        // cannot be confused (e.g. ("ab","c") vs ("a","bc"))
        const SALT: &[u8; 1] = &[0u8; 1];
        if self.has_variance() {
            let mut hash = Blake2b128::new();
            for (name, value) in self.values.iter() {
                hash.update(name.as_bytes());
                hash.update(SALT);
                hash.update(value);
                hash.update(SALT);
            }
            Some(hash.finalize().into())
        } else {
            None
        }
    }
}

/// `Default` mirrors [`VarianceBuilder::new`]: a public `new()` without a
/// `Default` impl trips clippy's `new_without_default`, and this allows the
/// builder to be used by `Default`-bound generic code.
impl<'a> Default for VarianceBuilder<'a> {
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod test {
    use super::*;

    /// Borrowed and owned values hash identically; any value produces
    /// variance while an empty builder produces none.
    #[test]
    fn test_basic() {
        let key_empty = VarianceBuilder::new().finalize();
        assert_eq!(None, key_empty);
        let mut key_value = VarianceBuilder::new();
        key_value.add_value("a", "a");
        let key_value = key_value.finalize();
        let mut key_owned_value = VarianceBuilder::new();
        key_owned_value.add_owned_value("a", "a".as_bytes().to_vec());
        let key_owned_value = key_owned_value.finalize();
        assert_ne!(key_empty, key_value);
        assert_ne!(key_empty, key_owned_value);
        assert_eq!(key_value, key_owned_value);
    }

    /// Insertion order must not affect the resulting hash.
    #[test]
    fn test_value_ordering() {
        let mut key_abc = VarianceBuilder::new();
        key_abc.add_value("a", "a");
        key_abc.add_value("b", "b");
        key_abc.add_value("c", "c");
        let key_abc = key_abc.finalize().unwrap();
        let mut key_bac = VarianceBuilder::new();
        key_bac.add_value("b", "b");
        key_bac.add_value("a", "a");
        key_bac.add_value("c", "c");
        let key_bac = key_bac.finalize().unwrap();
        let mut key_cba = VarianceBuilder::new();
        key_cba.add_value("c", "c");
        key_cba.add_value("b", "b");
        key_cba.add_value("a", "a");
        let key_cba = key_cba.finalize().unwrap();
        assert_eq!(key_abc, key_bac);
        assert_eq!(key_abc, key_cba);
    }

    /// Re-adding the same name overwrites the previous value.
    #[test]
    fn test_value_overriding() {
        let mut key_a = VarianceBuilder::new();
        key_a.add_value("a", "a");
        let key_a = key_a.finalize().unwrap();
        let mut key_b = VarianceBuilder::new();
        key_b.add_value("a", "b");
        key_b.add_value("a", "a");
        let key_b = key_b.finalize().unwrap();
        assert_eq!(key_a, key_b);
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/meta.rs | pingora-cache/src/meta.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Metadata for caching
pub use http::Extensions;
use log::warn;
use once_cell::sync::{Lazy, OnceCell};
use pingora_error::{Error, ErrorType::*, OrErr, Result};
use pingora_header_serde::HeaderSerde;
use pingora_http::{HMap, ResponseHeader};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use std::time::{Duration, SystemTime};
use crate::key::HashBinary;
/// The current (latest) internal meta schema used throughout this crate.
pub(crate) type InternalMeta = internal_meta::InternalMetaLatest;
mod internal_meta {
use super::*;
pub(crate) type InternalMetaLatest = InternalMetaV2;
/// The original, version-less on-disk meta schema.
///
/// Encoded as a fixed 4-element msgpack array; `deserialize()` below relies on
/// that array length to distinguish v0 from later versions.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub(crate) struct InternalMetaV0 {
    pub(crate) fresh_until: SystemTime,
    pub(crate) created: SystemTime,
    pub(crate) stale_while_revalidate_sec: u32,
    pub(crate) stale_if_error_sec: u32,
    // Do not add more field
}
impl InternalMetaV0 {
    // only used by tests; production code always writes the latest version
    #[allow(dead_code)]
    fn serialize(&self) -> Result<Vec<u8>> {
        rmp_serde::encode::to_vec(self).or_err(InternalError, "failed to encode cache meta")
    }
    fn deserialize(buf: &[u8]) -> Result<Self> {
        rmp_serde::decode::from_slice(buf)
            .or_err(InternalError, "failed to decode cache meta v0")
    }
}
/// Version 1 of the on-disk meta schema: adds an explicit `version` field.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub(crate) struct InternalMetaV1 {
    pub(crate) version: u8,
    pub(crate) fresh_until: SystemTime,
    pub(crate) created: SystemTime,
    pub(crate) stale_while_revalidate_sec: u32,
    pub(crate) stale_if_error_sec: u32,
    // Do not add more field
}
impl InternalMetaV1 {
    #[allow(dead_code)]
    pub const VERSION: u8 = 1;
    // only used by tests; production code always writes the latest version
    #[allow(dead_code)]
    pub fn serialize(&self) -> Result<Vec<u8>> {
        assert_eq!(self.version, 1);
        rmp_serde::encode::to_vec(self).or_err(InternalError, "failed to encode cache meta")
    }
    fn deserialize(buf: &[u8]) -> Result<Self> {
        rmp_serde::decode::from_slice(buf)
            .or_err(InternalError, "failed to decode cache meta v1")
    }
}
/// Version 2 of the on-disk meta schema: adds `updated` (last revalidation
/// time) and the optional extended `variance` field.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub(crate) struct InternalMetaV2 {
    pub(crate) version: u8,
    pub(crate) fresh_until: SystemTime,
    pub(crate) created: SystemTime,
    pub(crate) updated: SystemTime,
    pub(crate) stale_while_revalidate_sec: u32,
    pub(crate) stale_if_error_sec: u32,
    // Only the extended field to be added below. One field at a time.
    // 1. serde default in order to accept an older version schema without the field existing
    // 2. serde skip_serializing_if in order for software with only an older version of this
    //    schema to decode it
    // After full releases, remove `skip_serializing_if` so that we can add the next extended field.
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) variance: Option<HashBinary>,
}
impl Default for InternalMetaV2 {
    fn default() -> Self {
        // UNIX_EPOCH placeholders: a default meta is always already stale
        let epoch = SystemTime::UNIX_EPOCH;
        InternalMetaV2 {
            version: InternalMetaV2::VERSION,
            fresh_until: epoch,
            created: epoch,
            updated: epoch,
            stale_while_revalidate_sec: 0,
            stale_if_error_sec: 0,
            variance: None,
        }
    }
}
impl InternalMetaV2 {
    pub const VERSION: u8 = 2;
    pub fn serialize(&self) -> Result<Vec<u8>> {
        // writing any version other than the current one is a bug
        assert_eq!(self.version, Self::VERSION);
        rmp_serde::encode::to_vec(self).or_err(InternalError, "failed to encode cache meta")
    }
    fn deserialize(buf: &[u8]) -> Result<Self> {
        rmp_serde::decode::from_slice(buf)
            .or_err(InternalError, "failed to decode cache meta v2")
    }
}
impl From<InternalMetaV0> for InternalMetaV2 {
fn from(v0: InternalMetaV0) -> Self {
InternalMetaV2 {
version: InternalMetaV2::VERSION,
fresh_until: v0.fresh_until,
created: v0.created,
updated: v0.created,
stale_while_revalidate_sec: v0.stale_while_revalidate_sec,
stale_if_error_sec: v0.stale_if_error_sec,
..Default::default()
}
}
}
impl From<InternalMetaV1> for InternalMetaV2 {
fn from(v1: InternalMetaV1) -> Self {
InternalMetaV2 {
version: InternalMetaV2::VERSION,
fresh_until: v1.fresh_until,
created: v1.created,
updated: v1.created,
stale_while_revalidate_sec: v1.stale_while_revalidate_sec,
stale_if_error_sec: v1.stale_if_error_sec,
..Default::default()
}
}
}
// cross version decode
/// Decode any known on-disk version of the meta and upgrade it to
/// [InternalMetaLatest].
///
/// Version sniffing: v0 was packed as a 4-element msgpack array with no
/// version field; later versions carry the version number as the first
/// array element.
pub(crate) fn deserialize(buf: &[u8]) -> Result<InternalMetaLatest> {
    const MIN_SIZE: usize = 10; // a small number to read the first few bytes
    if buf.len() < MIN_SIZE {
        return Error::e_explain(
            InternalError,
            format!("Buf too short ({}) to be InternalMeta", buf.len()),
        );
    }
    // peek at a prefix copy so the full buffer stays intact for the real decode
    let preread_buf = &mut &buf[..MIN_SIZE];
    // the struct is always packed as a fixed size array
    match rmp::decode::read_array_len(preread_buf)
        .or_err(InternalError, "failed to decode cache meta array size")?
    {
        // v0 has 4 items and no version number
        4 => Ok(InternalMetaV0::deserialize(buf)?.into()),
        // other V should have version number encoded
        _ => {
            // rmp will encode `version` < 128 into a fixint (one byte),
            // so we use read_pfix
            let version = rmp::decode::read_pfix(preread_buf)
                .or_err(InternalError, "failed to decode meta version")?;
            match version {
                1 => Ok(InternalMetaV1::deserialize(buf)?.into()),
                2 => InternalMetaV2::deserialize(buf),
                _ => Error::e_explain(
                    InternalError,
                    format!("Unknown InternalMeta version {version}"),
                ),
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// v0 round-trips through its own serialize/deserialize.
    #[test]
    fn test_internal_meta_serde_v0() {
        let meta = InternalMetaV0 {
            fresh_until: SystemTime::now(),
            created: SystemTime::now(),
            stale_while_revalidate_sec: 0,
            stale_if_error_sec: 0,
        };
        let binary = meta.serialize().unwrap();
        let meta2 = InternalMetaV0::deserialize(&binary).unwrap();
        assert_eq!(meta.fresh_until, meta2.fresh_until);
    }

    /// v1 round-trips through its own serialize/deserialize.
    #[test]
    fn test_internal_meta_serde_v1() {
        let meta = InternalMetaV1 {
            version: InternalMetaV1::VERSION,
            fresh_until: SystemTime::now(),
            created: SystemTime::now(),
            stale_while_revalidate_sec: 0,
            stale_if_error_sec: 0,
        };
        let binary = meta.serialize().unwrap();
        let meta2 = InternalMetaV1::deserialize(&binary).unwrap();
        assert_eq!(meta.fresh_until, meta2.fresh_until);
    }

    /// v2 round-trips through its own serialize/deserialize.
    #[test]
    fn test_internal_meta_serde_v2() {
        let meta = InternalMetaV2::default();
        let binary = meta.serialize().unwrap();
        let meta2 = InternalMetaV2::deserialize(&binary).unwrap();
        assert_eq!(meta2.version, 2);
        assert_eq!(meta.fresh_until, meta2.fresh_until);
        assert_eq!(meta.created, meta2.created);
        assert_eq!(meta.updated, meta2.updated);
    }

    /// The cross-version `deserialize()` upgrades v0 and v1 payloads to v2.
    #[test]
    fn test_internal_meta_serde_across_versions() {
        let meta = InternalMetaV0 {
            fresh_until: SystemTime::now(),
            created: SystemTime::now(),
            stale_while_revalidate_sec: 0,
            stale_if_error_sec: 0,
        };
        let binary = meta.serialize().unwrap();
        let meta2 = deserialize(&binary).unwrap();
        assert_eq!(meta2.version, 2);
        assert_eq!(meta.fresh_until, meta2.fresh_until);
        let meta = InternalMetaV1 {
            version: 1,
            fresh_until: SystemTime::now(),
            created: SystemTime::now(),
            stale_while_revalidate_sec: 0,
            stale_if_error_sec: 0,
        };
        let binary = meta.serialize().unwrap();
        let meta2 = deserialize(&binary).unwrap();
        assert_eq!(meta2.version, 2);
        assert_eq!(meta.fresh_until, meta2.fresh_until);
        // `updated` == `created` when upgrading to v2
        assert_eq!(meta2.created, meta2.updated);
    }

    /// The v2 schema must stay decodable by readers that only know the base
    /// (pre-extended-field) layout, and vice versa.
    #[test]
    fn test_internal_meta_serde_v2_extend_fields() {
        // make sure that v2 format is backward compatible
        // this is the base version of v2 without any extended fields
        #[derive(Deserialize, Serialize)]
        pub(crate) struct InternalMetaV2Base {
            pub(crate) version: u8,
            pub(crate) fresh_until: SystemTime,
            pub(crate) created: SystemTime,
            pub(crate) updated: SystemTime,
            pub(crate) stale_while_revalidate_sec: u32,
            pub(crate) stale_if_error_sec: u32,
        }
        impl InternalMetaV2Base {
            pub const VERSION: u8 = 2;
            pub fn serialize(&self) -> Result<Vec<u8>> {
                assert!(self.version >= Self::VERSION);
                rmp_serde::encode::to_vec(self)
                    .or_err(InternalError, "failed to encode cache meta")
            }
            fn deserialize(buf: &[u8]) -> Result<Self> {
                rmp_serde::decode::from_slice(buf)
                    .or_err(InternalError, "failed to decode cache meta v2")
            }
        }
        // ext V2 to base v2
        let meta = InternalMetaV2::default();
        let binary = meta.serialize().unwrap();
        let meta2 = InternalMetaV2Base::deserialize(&binary).unwrap();
        assert_eq!(meta2.version, 2);
        assert_eq!(meta.fresh_until, meta2.fresh_until);
        assert_eq!(meta.created, meta2.created);
        assert_eq!(meta.updated, meta2.updated);
        // base V2 to ext v2
        let now = SystemTime::now();
        let meta = InternalMetaV2Base {
            version: InternalMetaV2::VERSION,
            fresh_until: now,
            created: now,
            updated: now,
            stale_while_revalidate_sec: 0,
            stale_if_error_sec: 0,
        };
        let binary = meta.serialize().unwrap();
        let meta2 = InternalMetaV2::deserialize(&binary).unwrap();
        assert_eq!(meta2.version, 2);
        assert_eq!(meta.fresh_until, meta2.fresh_until);
        assert_eq!(meta.created, meta2.created);
        assert_eq!(meta.updated, meta2.updated);
    }
}
}
#[derive(Debug)]
pub(crate) struct CacheMetaInner {
    // http header and Internal meta have different ways of serialization, so keep them separated
    pub(crate) internal: InternalMeta,
    pub(crate) header: ResponseHeader,
    /// An opaque type map to hold extra information for communication between cache backends
    /// and users. This field is **not** guaranteed to be persistently stored in the cache backend.
    pub extensions: Extensions,
}
/// The cacheable response header and cache metadata
// boxed inner data — NOTE(review): presumably to keep the public type cheap to move
#[derive(Debug)]
pub struct CacheMeta(pub(crate) Box<CacheMetaInner>);
impl CacheMeta {
/// Create a [CacheMeta] from the given metadata and the response header
pub fn new(
    fresh_until: SystemTime,
    created: SystemTime,
    stale_while_revalidate_sec: u32,
    stale_if_error_sec: u32,
    header: ResponseHeader,
) -> CacheMeta {
    let internal = InternalMeta {
        version: InternalMeta::VERSION,
        fresh_until,
        created,
        // a freshly admitted asset has never been revalidated
        updated: created,
        stale_while_revalidate_sec,
        stale_if_error_sec,
        ..Default::default()
    };
    CacheMeta(Box::new(CacheMetaInner {
        internal,
        header,
        extensions: Extensions::new(),
    }))
}
/// When the asset was created/admitted to cache
pub fn created(&self) -> SystemTime {
    self.0.internal.created
}
/// The last time the asset was revalidated
///
/// This value will be the same as [Self::created()] if no revalidation ever happens
pub fn updated(&self) -> SystemTime {
    self.0.internal.updated
}
/// Is the asset still valid
pub fn is_fresh(&self, time: SystemTime) -> bool {
    // NOTE: HTTP cache time resolution is second
    self.0.internal.fresh_until >= time
}
/// How long (in seconds) the asset should be fresh since its admission/revalidation
///
/// This is essentially the max-age value (or its equivalence)
pub fn fresh_sec(&self) -> u64 {
    // swallow `duration_since` error, assets that are always stale have earlier `fresh_until` than `created`
    // practically speaking we can always treat these as 0 ttl
    // XXX: return Error if `fresh_until` is much earlier than expected?
    self.0
        .internal
        .fresh_until
        .duration_since(self.0.internal.updated)
        .map_or(0, |duration| duration.as_secs())
}
/// Until when the asset is considered fresh
pub fn fresh_until(&self) -> SystemTime {
    self.0.internal.fresh_until
}
/// How old the asset is since its admission/revalidation
pub fn age(&self) -> Duration {
    // clock went backwards => treat as age 0
    SystemTime::now()
        .duration_since(self.updated())
        .unwrap_or_default()
}
/// The stale-while-revalidate limit in seconds
pub fn stale_while_revalidate_sec(&self) -> u32 {
    self.0.internal.stale_while_revalidate_sec
}
/// The stale-if-error limit in seconds
pub fn stale_if_error_sec(&self) -> u32 {
    self.0.internal.stale_if_error_sec
}
/// Can the asset be used to serve stale during revalidation at the given time.
///
/// NOTE: the serve stale functions do not check !is_fresh(time),
/// i.e. the object is already assumed to be stale.
pub fn serve_stale_while_revalidate(&self, time: SystemTime) -> bool {
self.can_serve_stale(self.0.internal.stale_while_revalidate_sec, time)
}
/// Can the asset be used to serve stale after error at the given time.
///
/// NOTE: the serve stale functions do not check !is_fresh(time),
/// i.e. the object is already assumed to be stale.
pub fn serve_stale_if_error(&self, time: SystemTime) -> bool {
    // Delegates to the shared window check with the SIE window.
    self.can_serve_stale(self.0.internal.stale_if_error_sec, time)
}
/// Disable serve stale for this asset
///
/// Zeroes both stale windows so neither stale-while-revalidate nor
/// stale-if-error can apply.
pub fn disable_serve_stale(&mut self) {
    let internal = &mut self.0.internal;
    internal.stale_while_revalidate_sec = 0;
    internal.stale_if_error_sec = 0;
}
/// Get the variance hash of this asset
///
/// `None` means this asset has no variance (not a Vary-keyed asset).
pub fn variance(&self) -> Option<HashBinary> {
    self.0.internal.variance
}
/// Set the variance key of this asset
// NOTE(review): identical in effect to [Self::set_variance()]; presumably kept
// as a separate name for API clarity/compat — confirm before consolidating.
pub fn set_variance_key(&mut self, variance_key: HashBinary) {
    self.0.internal.variance = Some(variance_key);
}
/// Set the variance (hash) of this asset
pub fn set_variance(&mut self, variance: HashBinary) {
    self.0.internal.variance = Some(variance)
}
/// Removes the variance (hash) of this asset
pub fn remove_variance(&mut self) {
    self.0.internal.variance = None
}
/// Get the response header in this asset
pub fn response_header(&self) -> &ResponseHeader {
    &self.0.header
}
/// Modify the header in this asset
pub fn response_header_mut(&mut self) -> &mut ResponseHeader {
    &mut self.0.header
}
/// Expose the extensions to read
///
/// Extensions are in-memory only; they are not serialized with the asset.
pub fn extensions(&self) -> &Extensions {
    &self.0.extensions
}
/// Expose the extensions to modify
pub fn extensions_mut(&mut self) -> &mut Extensions {
    &mut self.0.extensions
}
/// Get a copy of the response header
///
/// Allocates a clone; prefer [Self::response_header()] when a borrow suffices.
pub fn response_header_copy(&self) -> ResponseHeader {
    self.0.header.clone()
}
/// get all the headers of this asset
pub fn headers(&self) -> &HMap {
    &self.0.header.headers
}
/// Whether the asset can still be served stale at `time`, given a stale
/// window of `serve_stale_sec` seconds past `fresh_until`.
///
/// A window of 0 disables serving stale entirely.
fn can_serve_stale(&self, serve_stale_sec: u32, time: SystemTime) -> bool {
    if serve_stale_sec == 0 {
        return false;
    }
    let window = Duration::from_secs(serve_stale_sec.into());
    // `checked_add` returning `None` means the stale deadline overflowed
    // `SystemTime`; treat that as an infinite TTL.
    self.0
        .internal
        .fresh_until
        .checked_add(window)
        .map_or(true, |stale_until| stale_until >= time)
}
/// Serialize this object
///
/// Returns the (internal metadata, response header) pair as two independent
/// binary buffers; extensions are not serialized.
pub fn serialize(&self) -> Result<(Vec<u8>, Vec<u8>)> {
    let internal = self.0.internal.serialize()?;
    let header = header_serialize(&self.0.header)?;
    log::debug!("header to serialize: {:?}", &self.0.header);
    Ok((internal, header))
}
/// Deserialize from the binary format
///
/// Takes the two buffers produced by [Self::serialize()]. Extensions start
/// out empty since they are never serialized.
pub fn deserialize(internal: &[u8], header: &[u8]) -> Result<Self> {
    let internal = internal_meta::deserialize(internal)?;
    let header = header_deserialize(header)?;
    Ok(CacheMeta(Box::new(CacheMetaInner {
        internal,
        header,
        extensions: Extensions::new(),
    })))
}
}
use http::StatusCode;
/// The function to generate TTL from the given [StatusCode].
///
/// Returning `None` means responses with that status should not be cached.
pub type FreshDurationByStatusFn = fn(StatusCode) -> Option<Duration>;
/// The default settings to generate [CacheMeta]
pub struct CacheMetaDefaults {
    // if a status code is not included in fresh_sec, it's not considered cacheable by default.
    fresh_sec_fn: FreshDurationByStatusFn,
    // default stale-while-revalidate window, in seconds
    stale_while_revalidate_sec: u32,
    // TODO: allow "error" condition to be configurable?
    // default stale-if-error window, in seconds
    stale_if_error_sec: u32,
}
impl CacheMetaDefaults {
    /// Create a new [CacheMetaDefaults]
    pub const fn new(
        fresh_sec_fn: FreshDurationByStatusFn,
        stale_while_revalidate_sec: u32,
        stale_if_error_sec: u32,
    ) -> Self {
        Self {
            fresh_sec_fn,
            stale_while_revalidate_sec,
            stale_if_error_sec,
        }
    }

    /// Return the default TTL for the given [StatusCode]
    ///
    /// `None`: do no cache this code.
    pub fn fresh_sec(&self, resp_status: StatusCode) -> Option<Duration> {
        // safe guard to make sure 304 response to share the same default ttl of 200
        let effective_status = if resp_status == StatusCode::NOT_MODIFIED {
            StatusCode::OK
        } else {
            resp_status
        };
        (self.fresh_sec_fn)(effective_status)
    }

    /// The default SWR seconds
    pub fn serve_stale_while_revalidate_sec(&self) -> u32 {
        self.stale_while_revalidate_sec
    }

    /// The default SIE seconds
    pub fn serve_stale_if_error_sec(&self) -> u32 {
        self.stale_if_error_sec
    }
}
/// The dictionary content for header compression.
///
/// Used during initialization of [`HEADER_SERDE`].
/// Set-once: see [`set_compression_dict_content`] / [`set_compression_dict_path`].
static COMPRESSION_DICT_CONTENT: OnceCell<Cow<'static, [u8]>> = OnceCell::new();
/// Lazily-built header (de)serializer; snapshots the compression dictionary
/// (if any) on first use, so the dictionary must be set before first access.
static HEADER_SERDE: Lazy<HeaderSerde> = Lazy::new(|| {
    let dict_opt = COMPRESSION_DICT_CONTENT
        .get()
        .map(|dict_content| dict_content.to_vec());
    if dict_opt.is_none() {
        warn!("no header compression dictionary loaded - use set_compression_dict_content() or set_compression_dict_path() to set one");
    }
    HeaderSerde::new(dict_opt)
});
/// Serialize a response header using the (possibly dictionary-backed) serde.
pub(crate) fn header_serialize(header: &ResponseHeader) -> Result<Vec<u8>> {
    HEADER_SERDE.serialize(header)
}
/// Deserialize a response header previously produced by [`header_serialize`].
pub(crate) fn header_deserialize<T: AsRef<[u8]>>(buf: T) -> Result<ResponseHeader> {
    HEADER_SERDE.deserialize(buf.as_ref())
}
/// Load the header compression dictionary from a file, which helps serialize http header.
///
/// Returns false if it is already set or if the file could not be read.
///
/// Use [`set_compression_dict_content`] to set the dictionary from memory instead.
pub fn set_compression_dict_path(path: &str) -> bool {
    std::fs::read(path)
        .map(|dict| COMPRESSION_DICT_CONTENT.set(dict.into()).is_ok())
        .unwrap_or_else(|e| {
            warn!(
                "failed to read header compress dictionary file at {}, {:?}",
                path, e
            );
            false
        })
}
/// Set the header compression dictionary content, which helps serialize http header.
///
/// Returns false if it is already set.
///
/// This is an alernative to [`set_compression_dict_path`], allowing use of
/// a dictionary without an external file.
pub fn set_compression_dict_content(content: Cow<'static, [u8]>) -> bool {
    // OnceCell::set fails (returns Err) if a value was already stored.
    COMPRESSION_DICT_CONTENT.set(content).is_ok()
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/eviction/lru.rs | pingora-cache/src/eviction/lru.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A shared LRU cache manager
use super::EvictionManager;
use crate::key::CompactCacheKey;
use async_trait::async_trait;
use log::{info, warn};
use pingora_error::{BError, ErrorType::*, OrErr, Result};
use pingora_lru::Lru;
use rand::Rng;
use serde::de::SeqAccess;
use serde::{Deserialize, Serialize};
use std::fs::{rename, File};
use std::hash::{Hash, Hasher};
use std::io::prelude::*;
use std::path::Path;
use std::time::SystemTime;
/// A shared LRU cache manager designed to manage a large volume of assets.
///
/// - Space optimized in-memory LRU (see [pingora_lru]).
/// - Instead of a single giant LRU, this struct shards the assets into `N` independent LRUs.
///
/// This allows [EvictionManager::save()] not to lock the entire cache manager while performing
/// serialization.
///
/// Newtype over the sharded [Lru]; all operations delegate to it.
pub struct Manager<const N: usize>(Lru<CompactCacheKey, N>);
/// On-disk representation of one LRU node: (cache key, weight).
#[derive(Debug, Serialize, Deserialize)]
struct SerdeHelperNode(CompactCacheKey, usize);
impl<const N: usize> Manager<N> {
    /// Create a [Manager] with the given size limit and estimated per shard capacity.
    ///
    /// The `capacity` is for preallocating to avoid reallocation cost when the LRU grows.
    pub fn with_capacity(limit: usize, capacity: usize) -> Self {
        Manager(Lru::with_capacity(limit, capacity))
    }
    /// Create a [Manager] with an optional watermark in addition to weight limit.
    ///
    /// When `watermark` is set, the underlying LRU will also evict to keep total item count
    /// under or equal to that watermark.
    pub fn with_capacity_and_watermark(
        limit: usize,
        capacity: usize,
        watermark: Option<usize>,
    ) -> Self {
        Manager(Lru::with_capacity_and_watermark(limit, capacity, watermark))
    }
    /// Get the number of shards
    pub fn shards(&self) -> usize {
        self.0.shards()
    }
    /// Get the weight (total size) of a specific shard
    pub fn shard_weight(&self, shard: usize) -> usize {
        self.0.shard_weight(shard)
    }
    /// Get the number of items in a specific shard
    pub fn shard_len(&self, shard: usize) -> usize {
        self.0.shard_len(shard)
    }
    /// Get the shard index for a given cache key
    ///
    /// This allows callers to know which shard was affected by an operation
    /// without acquiring any locks.
    // NOTE(review): assumes the underlying Lru places keys by `hash % N`;
    // keep in sync with pingora_lru's sharding scheme.
    pub fn get_shard_for_key(&self, key: &CompactCacheKey) -> usize {
        (u64key(key) % N as u64) as usize
    }
    /// Serialize the given shard
    ///
    /// Snapshots the shard's nodes under the shard lock, then encodes them as
    /// a msgpack sequence in saved recency order.
    pub fn serialize_shard(&self, shard: usize) -> Result<Vec<u8>> {
        use rmp_serde::encode::Serializer;
        use serde::ser::SerializeSeq;
        use serde::ser::Serializer as _;
        assert!(shard < N);
        // NOTE: This could use a lot of memory to buffer the serialized data in memory
        // NOTE: This for loop could lock the LRU for too long
        let mut nodes = Vec::with_capacity(self.0.shard_len(shard));
        self.0.iter_for_each(shard, |(node, size)| {
            nodes.push(SerdeHelperNode(node.clone(), size));
        });
        // NOTE(review): shard_len is re-read here; if the shard changed since
        // the snapshot above, this length hint may not match `nodes.len()` —
        // confirm rmp-serde tolerates a mismatched seq hint.
        let mut ser = Serializer::new(vec![]);
        let mut seq = ser
            .serialize_seq(Some(self.0.shard_len(shard)))
            .or_err(InternalError, "fail to serialize node")?;
        for node in nodes {
            seq.serialize_element(&node).unwrap(); // write to vec, safe
        }
        seq.end().or_err(InternalError, "when serializing LRU")?;
        Ok(ser.into_inner())
    }
    /// Deserialize a shard
    ///
    /// Shard number is not needed because the key itself will hash to the correct shard.
    pub fn deserialize_shard(&self, buf: &[u8]) -> Result<()> {
        use rmp_serde::decode::Deserializer;
        use serde::de::Deserializer as _;
        let mut de = Deserializer::new(buf);
        let visitor = InsertToManager { lru: self };
        de.deserialize_seq(visitor)
            .or_err(InternalError, "when deserializing LRU")?;
        Ok(())
    }
    /// Peek the weight associated with a cache key without changing its LRU order.
    pub fn peek_weight(&self, item: &CompactCacheKey) -> Option<usize> {
        let key = u64key(item);
        self.0.peek_weight(key)
    }
}
/// serde visitor target that streams deserialized nodes into `lru`.
struct InsertToManager<'a, const N: usize> {
    lru: &'a Manager<N>,
}
impl<'de, const N: usize> serde::de::Visitor<'de> for InsertToManager<'_, N> {
    type Value = ();
    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("array of lru nodes")
    }
    /// Stream nodes out of the serialized sequence and append each to the
    /// LRU tail, preserving the saved recency order without buffering.
    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: SeqAccess<'de>,
    {
        while let Some(node) = seq.next_element::<SerdeHelperNode>()? {
            let key = u64key(&node.0);
            self.lru.0.insert_tail(key, node.0, node.1); // insert in the back
        }
        Ok(())
    }
}
/// Hash a [CompactCacheKey] into the `u64` key used by the sharded LRU.
///
/// Only ever computed in-memory; serialized data stores the key itself, so the
/// hash never needs to be stable across restarts.
#[inline]
fn u64key(key: &CompactCacheKey) -> u64 {
    // note that std hash is not uniform, I'm not sure if ahash is also the case
    let mut hasher = ahash::AHasher::default();
    key.hash(&mut hasher);
    hasher.finish()
}
// Base name for saved shard files; each shard is stored as "lru.data.<shard>".
const FILE_NAME: &str = "lru.data";
/// Build an error message of the form "<s> <path>" using the path's
/// lossless display form.
#[inline]
fn err_str_path(s: &str, path: &Path) -> String {
    let shown = path.display();
    format!("{s} {shown}")
}
#[async_trait]
impl<const N: usize> EvictionManager for Manager<N> {
    fn total_size(&self) -> usize {
        self.0.weight()
    }
    fn total_items(&self) -> usize {
        self.0.len()
    }
    fn evicted_size(&self) -> usize {
        self.0.evicted_weight()
    }
    fn evicted_items(&self) -> usize {
        self.0.evicted_len()
    }
    /// Admit `item` with the given weight, then evict down to the limit.
    /// Returns the keys evicted to make room.
    fn admit(
        &self,
        item: CompactCacheKey,
        size: usize,
        _fresh_until: SystemTime,
    ) -> Vec<CompactCacheKey> {
        let key = u64key(&item);
        self.0.admit(key, item, size);
        self.0
            .evict_to_limit()
            .into_iter()
            .map(|(key, _weight)| key)
            .collect()
    }
    /// Grow the weight of an existing item by `delta`, then evict down to the
    /// limit. Returns the keys evicted as a result.
    fn increment_weight(
        &self,
        item: &CompactCacheKey,
        delta: usize,
        max_weight: Option<usize>,
    ) -> Vec<CompactCacheKey> {
        let key = u64key(item);
        self.0.increment_weight(key, delta, max_weight);
        self.0
            .evict_to_limit()
            .into_iter()
            .map(|(key, _weight)| key)
            .collect()
    }
    fn remove(&self, item: &CompactCacheKey) {
        let key = u64key(item);
        self.0.remove(key);
    }
    /// Promote `item` to most-recently-used; if it is not tracked yet, admit it.
    /// Returns whether the item was already present.
    fn access(&self, item: &CompactCacheKey, size: usize, _fresh_until: SystemTime) -> bool {
        let key = u64key(item);
        if !self.0.promote(key) {
            self.0.admit(key, item.clone(), size);
            false
        } else {
            true
        }
    }
    fn peek(&self, item: &CompactCacheKey) -> bool {
        let key = u64key(item);
        self.0.peek(key)
    }
    /// Save every shard to `dir_path` ("lru.data.<shard>"), writing each via a
    /// randomized temp file + rename so concurrent writers cannot corrupt the
    /// final file. Blocking IO runs on the blocking thread pool.
    async fn save(&self, dir_path: &str) -> Result<()> {
        let dir_path_str = dir_path.to_owned();
        tokio::task::spawn_blocking(move || {
            let dir_path = Path::new(&dir_path_str);
            std::fs::create_dir_all(dir_path)
                .or_err_with(InternalError, || err_str_path("fail to create", dir_path))
        })
        .await
        .or_err(InternalError, "async blocking IO failure")??;
        for i in 0..N {
            let data = self.serialize_shard(i)?;
            let dir_path = dir_path.to_owned();
            tokio::task::spawn_blocking(move || {
                let dir_path = Path::new(&dir_path);
                let final_path = dir_path.join(format!("{}.{i}", FILE_NAME));
                // create a temporary filename using a randomized u32 hash to minimize the chance of multiple writers writing to the same tmp file
                let random_suffix: u32 = rand::thread_rng().gen();
                let temp_path =
                    dir_path.join(format!("{}.{i}.{:08x}.tmp", FILE_NAME, random_suffix));
                let mut file = File::create(&temp_path)
                    .or_err_with(InternalError, || err_str_path("fail to create", &temp_path))?;
                file.write_all(&data).or_err_with(InternalError, || {
                    err_str_path("fail to write to", &temp_path)
                })?;
                file.flush().or_err_with(InternalError, || {
                    err_str_path("fail to flush temp file", &temp_path)
                })?;
                // atomic publish of the new shard file
                rename(&temp_path, &final_path).or_err_with(InternalError, || {
                    format!(
                        "Failed to rename file from {} to {}",
                        temp_path.display(),
                        final_path.display(),
                    )
                })
            })
            .await
            .or_err(InternalError, "async blocking IO failure")??;
        }
        Ok(())
    }
    /// Load all shard files from `dir_path`. Corrupt shards are skipped with a
    /// warning; orphaned temp files are cleaned up afterwards.
    // NOTE(review): an IO error (e.g. a missing shard file) aborts the whole
    // load via `?`, while a deserialize error only skips the shard — confirm
    // this asymmetry is intended.
    async fn load(&self, dir_path: &str) -> Result<()> {
        // TODO: check the saved shards so that we load all the save files
        let mut loaded_shards = 0;
        for i in 0..N {
            let dir_path = dir_path.to_owned();
            let data = tokio::task::spawn_blocking(move || {
                let file_path = Path::new(&dir_path).join(format!("{}.{i}", FILE_NAME));
                let mut file = File::open(&file_path)
                    .or_err_with(InternalError, || err_str_path("fail to open", &file_path))?;
                let mut buffer = Vec::with_capacity(8192);
                file.read_to_end(&mut buffer)
                    .or_err_with(InternalError, || {
                        err_str_path("fail to read from", &file_path)
                    })?;
                Ok::<Vec<u8>, BError>(buffer)
            })
            .await
            .or_err(InternalError, "async blocking IO failure")??;
            if let Err(e) = self.deserialize_shard(&data) {
                warn!("Failed to deserialize shard {}: {}. Skipping shard.", i, e);
                continue; // Skip shard and move onto the next one
            }
            loaded_shards += 1;
        }
        // Log how many shards were successfully loaded
        if loaded_shards < N {
            warn!(
                "Only loaded {}/{} shards. Cache may be incomplete.",
                loaded_shards, N
            )
        } else {
            info!("Successfully loaded {}/{} shards.", loaded_shards, N)
        }
        cleanup_temp_files(dir_path);
        Ok(())
    }
}
/// Best-effort removal of leftover "lru.data*.tmp" files in `dir_path`.
///
/// Runs as a detached blocking task: the JoinHandle is dropped, so callers do
/// not wait for completion or observe errors (they are only logged).
fn cleanup_temp_files(dir_path: &str) {
    let dir_path = Path::new(dir_path).to_owned();
    tokio::task::spawn_blocking({
        move || {
            if !dir_path.exists() {
                return;
            }
            let entries = match std::fs::read_dir(&dir_path) {
                Ok(entries) => entries,
                Err(e) => {
                    warn!("Failed to read directory {}: {e}", dir_path.display());
                    return;
                }
            };
            let mut cleaned_count = 0;
            let mut error_count = 0;
            for entry in entries {
                let entry = match entry {
                    Ok(entry) => entry,
                    Err(e) => {
                        warn!(
                            "Failed to read directory entry in {}: {e}",
                            dir_path.display()
                        );
                        error_count += 1;
                        continue;
                    }
                };
                let file_name = entry.file_name();
                let file_name_str = file_name.to_string_lossy();
                // Only touch files this module created: "<FILE_NAME>...*.tmp"
                if file_name_str.starts_with(FILE_NAME) && file_name_str.ends_with(".tmp") {
                    match std::fs::remove_file(entry.path()) {
                        Ok(()) => {
                            info!("Cleaned up orphaned temp file: {}", entry.path().display());
                            cleaned_count += 1;
                        }
                        Err(e) => {
                            warn!("Failed to remove temp file {}: {e}", entry.path().display());
                            error_count += 1;
                        }
                    }
                }
            }
            if cleaned_count > 0 || error_count > 0 {
                info!(
                    "Temp file cleanup completed. Removed: {cleaned_count}, Errors: {error_count}"
                );
            }
        }
    });
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::CacheKey;
    // we use shard (N) = 1 for eviction consistency in all tests
    #[test]
    fn test_admission() {
        let lru = Manager::<1>::with_capacity(4, 10);
        let key1 = CacheKey::new("", "a", "1").to_compact();
        let until = SystemTime::now(); // unused value as a placeholder
        let v = lru.admit(key1.clone(), 1, until);
        assert_eq!(v.len(), 0);
        let key2 = CacheKey::new("", "b", "1").to_compact();
        let v = lru.admit(key2.clone(), 2, until);
        assert_eq!(v.len(), 0);
        let key3 = CacheKey::new("", "c", "1").to_compact();
        let v = lru.admit(key3, 1, until);
        assert_eq!(v.len(), 0);
        // lru is full (4) now
        let key4 = CacheKey::new("", "d", "1").to_compact();
        let v = lru.admit(key4, 2, until);
        // need to reduce used by at least 2, both key1 and key2 are evicted to make room for 3
        assert_eq!(v.len(), 2);
        assert_eq!(v[0], key1);
        assert_eq!(v[1], key2);
    }
    #[test]
    fn test_access() {
        let lru = Manager::<1>::with_capacity(4, 10);
        let key1 = CacheKey::new("", "a", "1").to_compact();
        let until = SystemTime::now(); // unused value as a placeholder
        let v = lru.admit(key1.clone(), 1, until);
        assert_eq!(v.len(), 0);
        let key2 = CacheKey::new("", "b", "1").to_compact();
        let v = lru.admit(key2.clone(), 2, until);
        assert_eq!(v.len(), 0);
        let key3 = CacheKey::new("", "c", "1").to_compact();
        let v = lru.admit(key3, 1, until);
        assert_eq!(v.len(), 0);
        // lru is full (4) now
        // make key1 most recently used
        lru.access(&key1, 1, until);
        // NOTE(review): this re-asserts the stale `v` from the last admit;
        // access() returns a bool, which is not checked here.
        assert_eq!(v.len(), 0);
        let key4 = CacheKey::new("", "d", "1").to_compact();
        let v = lru.admit(key4, 2, until);
        assert_eq!(v.len(), 1);
        assert_eq!(v[0], key2);
    }
    #[test]
    fn test_remove() {
        let lru = Manager::<1>::with_capacity(4, 10);
        let key1 = CacheKey::new("", "a", "1").to_compact();
        let until = SystemTime::now(); // unused value as a placeholder
        let v = lru.admit(key1.clone(), 1, until);
        assert_eq!(v.len(), 0);
        let key2 = CacheKey::new("", "b", "1").to_compact();
        let v = lru.admit(key2.clone(), 2, until);
        assert_eq!(v.len(), 0);
        let key3 = CacheKey::new("", "c", "1").to_compact();
        let v = lru.admit(key3, 1, until);
        assert_eq!(v.len(), 0);
        // lru is full (4) now
        // remove key1
        lru.remove(&key1);
        // key2 is the least recently used one now
        let key4 = CacheKey::new("", "d", "1").to_compact();
        let v = lru.admit(key4, 2, until);
        assert_eq!(v.len(), 1);
        assert_eq!(v[0], key2);
    }
    #[test]
    fn test_access_add() {
        let lru = Manager::<1>::with_capacity(4, 10);
        let until = SystemTime::now(); // unused value as a placeholder
        // access() on unknown keys admits them
        let key1 = CacheKey::new("", "a", "1").to_compact();
        lru.access(&key1, 1, until);
        let key2 = CacheKey::new("", "b", "1").to_compact();
        lru.access(&key2, 2, until);
        let key3 = CacheKey::new("", "c", "1").to_compact();
        lru.access(&key3, 2, until);
        let key4 = CacheKey::new("", "d", "1").to_compact();
        let v = lru.admit(key4, 2, until);
        // need to reduce used by at least 2, both key1 and key2 are evicted to make room for 3
        assert_eq!(v.len(), 2);
        assert_eq!(v[0], key1);
        assert_eq!(v[1], key2);
    }
    #[test]
    fn test_admit_update() {
        let lru = Manager::<1>::with_capacity(4, 10);
        let key1 = CacheKey::new("", "a", "1").to_compact();
        let until = SystemTime::now(); // unused value as a placeholder
        let v = lru.admit(key1.clone(), 1, until);
        assert_eq!(v.len(), 0);
        let key2 = CacheKey::new("", "b", "1").to_compact();
        let v = lru.admit(key2.clone(), 2, until);
        assert_eq!(v.len(), 0);
        let key3 = CacheKey::new("", "c", "1").to_compact();
        let v = lru.admit(key3, 1, until);
        assert_eq!(v.len(), 0);
        // lru is full (4) now
        // update key2 to reduce its size by 1
        let v = lru.admit(key2, 1, until);
        assert_eq!(v.len(), 0);
        // lru is not full anymore
        let key4 = CacheKey::new("", "d", "1").to_compact();
        let v = lru.admit(key4.clone(), 1, until);
        assert_eq!(v.len(), 0);
        // make key4 larger
        let v = lru.admit(key4, 2, until);
        // need to evict now
        assert_eq!(v.len(), 1);
        assert_eq!(v[0], key1);
    }
    #[test]
    fn test_peek() {
        let lru = Manager::<1>::with_capacity(4, 10);
        let until = SystemTime::now(); // unused value as a placeholder
        let key1 = CacheKey::new("", "a", "1").to_compact();
        lru.access(&key1, 1, until);
        let key2 = CacheKey::new("", "b", "1").to_compact();
        lru.access(&key2, 2, until);
        assert!(lru.peek(&key1));
        assert!(lru.peek(&key2));
    }
    #[test]
    fn test_serde() {
        let lru = Manager::<1>::with_capacity(4, 10);
        let key1 = CacheKey::new("", "a", "1").to_compact();
        let until = SystemTime::now(); // unused value as a placeholder
        let v = lru.admit(key1.clone(), 1, until);
        assert_eq!(v.len(), 0);
        let key2 = CacheKey::new("", "b", "1").to_compact();
        let v = lru.admit(key2.clone(), 2, until);
        assert_eq!(v.len(), 0);
        let key3 = CacheKey::new("", "c", "1").to_compact();
        let v = lru.admit(key3, 1, until);
        assert_eq!(v.len(), 0);
        // lru is full (4) now
        // make key1 most recently used
        lru.access(&key1, 1, until);
        // NOTE(review): stale `v` from the last admit, see test_access.
        assert_eq!(v.len(), 0);
        // load lru2 with lru's data
        let ser = lru.serialize_shard(0).unwrap();
        let lru2 = Manager::<1>::with_capacity(4, 10);
        lru2.deserialize_shard(&ser).unwrap();
        let key4 = CacheKey::new("", "d", "1").to_compact();
        let v = lru2.admit(key4, 2, until);
        assert_eq!(v.len(), 1);
        assert_eq!(v[0], key2);
    }
    #[tokio::test]
    async fn test_save_to_disk() {
        let until = SystemTime::now(); // unused value as a placeholder
        let lru = Manager::<2>::with_capacity(10, 10);
        lru.admit(CacheKey::new("", "a", "1").to_compact(), 1, until);
        lru.admit(CacheKey::new("", "b", "1").to_compact(), 2, until);
        lru.admit(CacheKey::new("", "c", "1").to_compact(), 1, until);
        lru.admit(CacheKey::new("", "d", "1").to_compact(), 1, until);
        lru.admit(CacheKey::new("", "e", "1").to_compact(), 2, until);
        lru.admit(CacheKey::new("", "f", "1").to_compact(), 1, until);
        // load lru2 with lru's data
        lru.save("/tmp/test_lru_save").await.unwrap();
        let lru2 = Manager::<2>::with_capacity(4, 10);
        lru2.load("/tmp/test_lru_save").await.unwrap();
        // round-trip equality checked shard by shard
        let ser0 = lru.serialize_shard(0).unwrap();
        let ser1 = lru.serialize_shard(1).unwrap();
        assert_eq!(ser0, lru2.serialize_shard(0).unwrap());
        assert_eq!(ser1, lru2.serialize_shard(1).unwrap());
    }
    #[tokio::test]
    async fn test_temp_file_cleanup() {
        let test_dir = "/tmp/test_lru_cleanup";
        let dir_path = Path::new(test_dir);
        // Create test directory
        std::fs::create_dir_all(dir_path).unwrap();
        // Create some fake temp files
        let temp_files = [
            "lru.data.0.12345678.tmp",
            "lru.data.1.abcdef00.tmp",
            "other_file.tmp", // Should not be removed
            "lru.data.2",     // Should not be removed
        ];
        for file in temp_files {
            let file_path = dir_path.join(file);
            std::fs::write(&file_path, b"test").unwrap();
        }
        // Run cleanup (detached task — sleep below gives it time to finish)
        cleanup_temp_files(test_dir);
        tokio::time::sleep(core::time::Duration::from_secs(1)).await;
        // Check results
        assert!(!dir_path.join("lru.data.0.12345678.tmp").exists());
        assert!(!dir_path.join("lru.data.1.abcdef00.tmp").exists());
        assert!(dir_path.join("other_file.tmp").exists()); // Should remain
        assert!(dir_path.join("lru.data.2").exists()); // Should remain
        // Cleanup test directory
        std::fs::remove_dir_all(dir_path).unwrap();
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/eviction/simple_lru.rs | pingora-cache/src/eviction/simple_lru.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A simple LRU cache manager built on top of the `lru` crate
use super::EvictionManager;
use crate::key::CompactCacheKey;
use async_trait::async_trait;
use lru::LruCache;
use parking_lot::RwLock;
use pingora_error::{BError, ErrorType::*, OrErr, Result};
use rand::Rng;
use serde::de::SeqAccess;
use serde::{Deserialize, Serialize};
use std::collections::hash_map::DefaultHasher;
use std::fs::File;
use std::hash::{Hash, Hasher};
use std::io::prelude::*;
use std::path::Path;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::SystemTime;
/// A single LRU entry: the cache key plus its accounted size (weight).
#[derive(Debug, Deserialize, Serialize)]
struct Node {
    key: CompactCacheKey,
    size: usize,
}
/// A simple LRU eviction manager
///
/// The implementation is not optimized. All operations require global locks.
pub struct Manager {
    // the LRU itself: hashed key -> node
    lru: RwLock<LruCache<u64, Node>>,
    // total weight limit
    limit: usize,
    // optional cap on the number of items
    items_watermark: Option<usize>,
    // current total weight
    used: AtomicUsize,
    // current number of items
    items: AtomicUsize,
    // lifetime counters of evicted weight / items
    evicted_size: AtomicUsize,
    evicted_items: AtomicUsize,
}
impl Manager {
    /// Create a new [Manager] with the given total size limit `limit`.
    pub fn new(limit: usize) -> Self {
        // Delegate to the watermark constructor to avoid duplicating field setup.
        Self::new_with_watermark(limit, None)
    }
    /// Create a new [Manager] with optional watermark in addition to size limit `limit`.
    pub fn new_with_watermark(limit: usize, items_watermark: Option<usize>) -> Self {
        Manager {
            lru: RwLock::new(LruCache::unbounded()),
            limit,
            items_watermark,
            used: AtomicUsize::new(0),
            items: AtomicUsize::new(0),
            evicted_size: AtomicUsize::new(0),
            evicted_items: AtomicUsize::new(0),
        }
    }
    /// Insert (or update) `node` under `hash_key` and update the accounting.
    ///
    /// When `reverse` is true and the key was not already present, the new
    /// entry is demoted to the LRU tail (used when restoring a serialized LRU
    /// in its saved order).
    fn insert(&self, hash_key: u64, node: CompactCacheKey, size: usize, reverse: bool) {
        use std::cmp::Ordering::*;
        let node = Node { key: node, size };
        let old = {
            let mut lru = self.lru.write();
            let old = lru.push(hash_key, node);
            if reverse && old.is_none() {
                lru.demote(&hash_key);
            }
            old
        };
        if let Some(old) = old {
            // replacing a node, just need to update used size
            match size.cmp(&old.1.size) {
                Greater => self.used.fetch_add(size - old.1.size, Ordering::Relaxed),
                Less => self.used.fetch_sub(old.1.size - size, Ordering::Relaxed),
                Equal => 0, // same size, update nothing, use 0 to match other arms' type
            };
        } else {
            self.used.fetch_add(size, Ordering::Relaxed);
            self.items.fetch_add(1, Ordering::Relaxed);
        }
    }
    /// Add `delta` to the weight of the entry at `key`, if present; no-op otherwise.
    fn increase_weight(&self, key: u64, delta: usize) {
        let mut lru = self.lru.write();
        let Some(node) = lru.get_key_value_mut(&key) else {
            return;
        };
        node.1.size += delta;
        self.used.fetch_add(delta, Ordering::Relaxed);
    }
    /// Whether either the weight limit or the item watermark is currently exceeded.
    #[inline]
    fn over_limits(&self) -> bool {
        self.used.load(Ordering::Relaxed) > self.limit
            || self
                .items_watermark
                .is_some_and(|w| self.items.load(Ordering::Relaxed) > w)
    }
    // evict items until the used capacity is below the size limit and watermark count
    fn evict(&self) -> Vec<CompactCacheKey> {
        // Fast path: within limits, nothing to evict. (Previously this
        // duplicated the negated `over_limits()` condition inline.)
        if !self.over_limits() {
            return vec![];
        }
        let mut to_evict = Vec::with_capacity(1); // we will at least pop 1 item
        while self.over_limits() {
            if let Some((_, node)) = self.lru.write().pop_lru() {
                self.used.fetch_sub(node.size, Ordering::Relaxed);
                self.items.fetch_sub(1, Ordering::Relaxed);
                self.evicted_size.fetch_add(node.size, Ordering::Relaxed);
                self.evicted_items.fetch_add(1, Ordering::Relaxed);
                to_evict.push(node.key);
            } else {
                // lru empty
                return to_evict;
            }
        }
        to_evict
    }
    // This could use a lot of memory to buffer the serialized data in memory and could lock the LRU
    // for too long
    fn serialize(&self) -> Result<Vec<u8>> {
        use rmp_serde::encode::Serializer;
        use serde::ser::SerializeSeq;
        use serde::ser::Serializer as _;
        // NOTE: This could use a lot of memory to buffer the serialized data in memory
        let mut ser = Serializer::new(vec![]);
        // NOTE: This long for loop could lock the LRU for too long
        let lru = self.lru.read();
        let mut seq = ser
            .serialize_seq(Some(lru.len()))
            .or_err(InternalError, "fail to serialize node")?;
        for item in lru.iter() {
            seq.serialize_element(item.1).unwrap(); // write to vec, safe
        }
        seq.end().or_err(InternalError, "when serializing LRU")?;
        Ok(ser.into_inner())
    }
    /// Restore entries from a buffer produced by [Self::serialize()],
    /// preserving the saved recency order.
    fn deserialize(&self, buf: &[u8]) -> Result<()> {
        use rmp_serde::decode::Deserializer;
        use serde::de::Deserializer as _;
        let mut de = Deserializer::new(buf);
        let visitor = InsertToManager { lru: self };
        de.deserialize_seq(visitor)
            .or_err(InternalError, "when deserializing LRU")?;
        Ok(())
    }
}
/// serde visitor target that streams deserialized nodes into `lru`.
struct InsertToManager<'a> {
    lru: &'a Manager,
}
impl<'de> serde::de::Visitor<'de> for InsertToManager<'_> {
    type Value = ();
    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("array of lru nodes")
    }
    /// Stream nodes out of the serialized sequence and insert each at the LRU
    /// tail (`reverse = true`), preserving the saved recency order.
    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: SeqAccess<'de>,
    {
        while let Some(node) = seq.next_element::<Node>()? {
            let key = u64key(&node.key);
            self.lru.insert(key, node.key, node.size, true); // insert in the back
        }
        Ok(())
    }
}
/// Hash a [CompactCacheKey] into the `u64` key used by the LRU map.
///
/// Only used in-memory: serialized data stores the key itself and the hash is
/// recomputed on load, so DefaultHasher's unstable-across-releases output is fine.
#[inline]
fn u64key(key: &CompactCacheKey) -> u64 {
    let mut hasher = DefaultHasher::new();
    key.hash(&mut hasher);
    hasher.finish()
}
// On-disk file name for the saved LRU state.
const FILE_NAME: &str = "simple_lru.data";
#[async_trait]
impl EvictionManager for Manager {
    fn total_size(&self) -> usize {
        self.used.load(Ordering::Relaxed)
    }
    fn total_items(&self) -> usize {
        self.items.load(Ordering::Relaxed)
    }
    fn evicted_size(&self) -> usize {
        self.evicted_size.load(Ordering::Relaxed)
    }
    fn evicted_items(&self) -> usize {
        self.evicted_items.load(Ordering::Relaxed)
    }
    /// Admit `item` with the given weight, then evict down to the limits.
    /// Returns the keys evicted to make room.
    fn admit(
        &self,
        item: CompactCacheKey,
        size: usize,
        _fresh_until: SystemTime,
    ) -> Vec<CompactCacheKey> {
        let key = u64key(&item);
        self.insert(key, item, size, false);
        self.evict()
    }
    /// Grow the weight of an existing item by `delta`, then evict down to the
    /// limits. `_max_weight` is ignored by this implementation.
    fn increment_weight(
        &self,
        item: &CompactCacheKey,
        delta: usize,
        _max_weight: Option<usize>,
    ) -> Vec<CompactCacheKey> {
        let key = u64key(item);
        self.increase_weight(key, delta);
        self.evict()
    }
    fn remove(&self, item: &CompactCacheKey) {
        let key = u64key(item);
        let node = self.lru.write().pop(&key);
        if let Some(n) = node {
            self.used.fetch_sub(n.size, Ordering::Relaxed);
            self.items.fetch_sub(1, Ordering::Relaxed);
        }
    }
    /// Promote `item` to most-recently-used; if it is not tracked yet, admit it.
    /// Returns whether the item was already present.
    fn access(&self, item: &CompactCacheKey, size: usize, _fresh_until: SystemTime) -> bool {
        let key = u64key(item);
        if self.lru.write().get(&key).is_none() {
            self.insert(key, item.clone(), size, false);
            false
        } else {
            true
        }
    }
    fn peek(&self, item: &CompactCacheKey) -> bool {
        let key = u64key(item);
        self.lru.read().peek(&key).is_some()
    }
    /// Save the LRU to `dir_path`, writing via a randomized temp file + rename
    /// so concurrent writers cannot corrupt the final file. Blocking IO runs
    /// on the blocking thread pool.
    async fn save(&self, dir_path: &str) -> Result<()> {
        let data = self.serialize()?;
        let dir_str = dir_path.to_owned();
        tokio::task::spawn_blocking(move || {
            let dir_path = Path::new(&dir_str);
            std::fs::create_dir_all(dir_path)
                .or_err_with(InternalError, || format!("fail to create {dir_str}"))?;
            let final_file_path = dir_path.join(FILE_NAME);
            // create a temporary filename using a randomized u32 hash to minimize the chance of multiple writers writing to the same tmp file
            let random_suffix: u32 = rand::thread_rng().gen();
            let temp_file_path = dir_path.join(format!("{}.{:08x}.tmp", FILE_NAME, random_suffix));
            let mut file = File::create(&temp_file_path).or_err_with(InternalError, || {
                format!("fail to create temporary file {}", temp_file_path.display())
            })?;
            file.write_all(&data).or_err_with(InternalError, || {
                format!("fail to write to {}", temp_file_path.display())
            })?;
            file.flush().or_err_with(InternalError, || {
                format!("fail to flush temp file {}", temp_file_path.display())
            })?;
            // atomic publish of the new file
            std::fs::rename(&temp_file_path, &final_file_path).or_err_with(InternalError, || {
                format!(
                    "fail to rename temporary file {} to {}",
                    temp_file_path.display(),
                    final_file_path.display()
                )
            })
        })
        .await
        .or_err(InternalError, "async blocking IO failure")?
    }
    /// Load the LRU state previously written by [Self::save()].
    async fn load(&self, dir_path: &str) -> Result<()> {
        let dir_path = dir_path.to_owned();
        let data = tokio::task::spawn_blocking(move || {
            let file_path = Path::new(&dir_path).join(FILE_NAME);
            // borrow the path; no need to clone it just for the error message
            let mut file = File::open(&file_path).or_err_with(InternalError, || {
                format!("fail to open {}", file_path.display())
            })?;
            let mut buffer = Vec::with_capacity(8192);
            // BUG FIX: this previously used `or_err(..., "fail to read from {file_path}")`,
            // which does not interpolate — the literal "{file_path}" ended up in the
            // error message. Use or_err_with + format! so the real path shows up.
            file.read_to_end(&mut buffer)
                .or_err_with(InternalError, || {
                    format!("fail to read from {}", file_path.display())
                })?;
            Ok::<Vec<u8>, BError>(buffer)
        })
        .await
        .or_err(InternalError, "async blocking IO failure")??;
        self.deserialize(&data)
    }
}
// Unit tests for the simple LRU eviction manager.
#[cfg(test)]
mod test {
    use super::*;
    use crate::CacheKey;

    #[test]
    fn test_admission() {
        let lru = Manager::new(4);
        let key1 = CacheKey::new("", "a", "1").to_compact();
        let until = SystemTime::now(); // unused value as a placeholder
        let v = lru.admit(key1.clone(), 1, until);
        assert_eq!(v.len(), 0);
        let key2 = CacheKey::new("", "b", "1").to_compact();
        let v = lru.admit(key2.clone(), 2, until);
        assert_eq!(v.len(), 0);
        let key3 = CacheKey::new("", "c", "1").to_compact();
        let v = lru.admit(key3, 1, until);
        assert_eq!(v.len(), 0);
        // lru is full (4) now
        let key4 = CacheKey::new("", "d", "1").to_compact();
        let v = lru.admit(key4, 2, until);
        // need to reduce used by at least 2, both key1 and key2 are evicted to make room for 3
        assert_eq!(v.len(), 2);
        assert_eq!(v[0], key1);
        assert_eq!(v[1], key2);
    }

    #[test]
    fn test_access() {
        let lru = Manager::new(4);
        let key1 = CacheKey::new("", "a", "1").to_compact();
        let until = SystemTime::now(); // unused value as a placeholder
        let v = lru.admit(key1.clone(), 1, until);
        assert_eq!(v.len(), 0);
        let key2 = CacheKey::new("", "b", "1").to_compact();
        let v = lru.admit(key2.clone(), 2, until);
        assert_eq!(v.len(), 0);
        let key3 = CacheKey::new("", "c", "1").to_compact();
        let v = lru.admit(key3, 1, until);
        assert_eq!(v.len(), 0);
        // lru is full (4) now
        // make key1 most recently used
        lru.access(&key1, 1, until);
        // NOTE(review): `v` is still the result of the previous admit; this
        // re-asserts the same value (access() returns bool, not a Vec)
        assert_eq!(v.len(), 0);
        let key4 = CacheKey::new("", "d", "1").to_compact();
        let v = lru.admit(key4, 2, until);
        assert_eq!(v.len(), 1);
        assert_eq!(v[0], key2);
    }

    #[test]
    fn test_remove() {
        let lru = Manager::new(4);
        let key1 = CacheKey::new("", "a", "1").to_compact();
        let until = SystemTime::now(); // unused value as a placeholder
        let v = lru.admit(key1.clone(), 1, until);
        assert_eq!(v.len(), 0);
        let key2 = CacheKey::new("", "b", "1").to_compact();
        let v = lru.admit(key2.clone(), 2, until);
        assert_eq!(v.len(), 0);
        let key3 = CacheKey::new("", "c", "1").to_compact();
        let v = lru.admit(key3, 1, until);
        assert_eq!(v.len(), 0);
        // lru is full (4) now
        // remove key1
        lru.remove(&key1);
        // key2 is the least recently used one now
        let key4 = CacheKey::new("", "d", "1").to_compact();
        let v = lru.admit(key4, 2, until);
        assert_eq!(v.len(), 1);
        assert_eq!(v[0], key2);
    }

    #[test]
    fn test_access_add() {
        // access() on untracked keys should add them without evicting
        let lru = Manager::new(4);
        let until = SystemTime::now(); // unused value as a placeholder
        let key1 = CacheKey::new("", "a", "1").to_compact();
        lru.access(&key1, 1, until);
        let key2 = CacheKey::new("", "b", "1").to_compact();
        lru.access(&key2, 2, until);
        let key3 = CacheKey::new("", "c", "1").to_compact();
        lru.access(&key3, 2, until);
        let key4 = CacheKey::new("", "d", "1").to_compact();
        let v = lru.admit(key4, 2, until);
        // need to reduce used by at least 2, both key1 and key2 are evicted to make room for 3
        assert_eq!(v.len(), 2);
        assert_eq!(v[0], key1);
        assert_eq!(v[1], key2);
    }

    #[test]
    fn test_admit_update() {
        let lru = Manager::new(4);
        let key1 = CacheKey::new("", "a", "1").to_compact();
        let until = SystemTime::now(); // unused value as a placeholder
        let v = lru.admit(key1.clone(), 1, until);
        assert_eq!(v.len(), 0);
        let key2 = CacheKey::new("", "b", "1").to_compact();
        let v = lru.admit(key2.clone(), 2, until);
        assert_eq!(v.len(), 0);
        let key3 = CacheKey::new("", "c", "1").to_compact();
        let v = lru.admit(key3, 1, until);
        assert_eq!(v.len(), 0);
        // lru is full (4) now
        // update key2 to reduce its size by 1
        let v = lru.admit(key2, 1, until);
        assert_eq!(v.len(), 0);
        // lru is not full anymore
        let key4 = CacheKey::new("", "d", "1").to_compact();
        let v = lru.admit(key4.clone(), 1, until);
        assert_eq!(v.len(), 0);
        // make key4 larger
        let v = lru.admit(key4, 2, until);
        // need to evict now
        assert_eq!(v.len(), 1);
        assert_eq!(v[0], key1);
    }

    #[test]
    fn test_serde() {
        let lru = Manager::new(4);
        let key1 = CacheKey::new("", "a", "1").to_compact();
        let until = SystemTime::now(); // unused value as a placeholder
        let v = lru.admit(key1.clone(), 1, until);
        assert_eq!(v.len(), 0);
        let key2 = CacheKey::new("", "b", "1").to_compact();
        let v = lru.admit(key2.clone(), 2, until);
        assert_eq!(v.len(), 0);
        let key3 = CacheKey::new("", "c", "1").to_compact();
        let v = lru.admit(key3, 1, until);
        assert_eq!(v.len(), 0);
        // lru is full (4) now
        // make key1 most recently used
        lru.access(&key1, 1, until);
        // NOTE(review): `v` is still the result of the previous admit
        assert_eq!(v.len(), 0);
        // load lru2 with lru's data; eviction order must survive the round trip
        let ser = lru.serialize().unwrap();
        let lru2 = Manager::new(4);
        lru2.deserialize(&ser).unwrap();
        let key4 = CacheKey::new("", "d", "1").to_compact();
        let v = lru2.admit(key4, 2, until);
        assert_eq!(v.len(), 1);
        assert_eq!(v[0], key2);
    }

    #[tokio::test]
    async fn test_save_to_disk() {
        let lru = Manager::new(4);
        let key1 = CacheKey::new("", "a", "1").to_compact();
        let until = SystemTime::now(); // unused value as a placeholder
        let v = lru.admit(key1.clone(), 1, until);
        assert_eq!(v.len(), 0);
        let key2 = CacheKey::new("", "b", "1").to_compact();
        let v = lru.admit(key2.clone(), 2, until);
        assert_eq!(v.len(), 0);
        let key3 = CacheKey::new("", "c", "1").to_compact();
        let v = lru.admit(key3, 1, until);
        assert_eq!(v.len(), 0);
        // lru is full (4) now
        // make key1 most recently used
        lru.access(&key1, 1, until);
        // NOTE(review): `v` is still the result of the previous admit
        assert_eq!(v.len(), 0);
        // load lru2 with lru's data
        // NOTE(review): hard-coded /tmp path — not portable and racy if this
        // test ever runs concurrently with itself; consider std::env::temp_dir()
        lru.save("/tmp/test_simple_lru_save").await.unwrap();
        let lru2 = Manager::new(4);
        lru2.load("/tmp/test_simple_lru_save").await.unwrap();
        let key4 = CacheKey::new("", "d", "1").to_compact();
        let v = lru2.admit(key4, 2, until);
        assert_eq!(v.len(), 1);
        assert_eq!(v[0], key2);
    }

    #[test]
    fn test_watermark_eviction() {
        const SIZE_LIMIT: usize = usize::MAX / 2;
        let lru = Manager::new_with_watermark(SIZE_LIMIT, Some(4));
        let until = SystemTime::now();
        // admit 6 items of size 1
        for name in ["a", "b", "c", "d", "e", "f"] {
            let key = CacheKey::new("", name, "1").to_compact();
            let _ = lru.admit(key, 1, until);
        }
        // test items were evicted due to watermark
        assert_eq!(lru.total_items(), 4);
        assert_eq!(lru.evicted_items(), 2);
        assert_eq!(lru.evicted_size(), 2);
        assert!(lru.total_size() <= SIZE_LIMIT);
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/src/eviction/mod.rs | pingora-cache/src/eviction/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Cache eviction module
use crate::key::CompactCacheKey;
use async_trait::async_trait;
use pingora_error::Result;
use std::time::SystemTime;
pub mod lru;
pub mod simple_lru;
/// The trait that a cache eviction algorithm needs to implement
///
/// NOTE: these trait methods require &self not &mut self, which means concurrency should
/// be handled by the implementations internally.
#[async_trait]
pub trait EvictionManager: Send + Sync {
    /// Total size of the cache in bytes tracked by this eviction manager
    fn total_size(&self) -> usize;

    /// Number of assets tracked by this eviction manager
    fn total_items(&self) -> usize;

    /// Number of bytes that are already evicted
    ///
    /// The accumulated number is returned to play well with Prometheus counter metric type.
    fn evicted_size(&self) -> usize;

    /// Number of assets that are already evicted
    ///
    /// The accumulated number is returned to play well with Prometheus counter metric type.
    fn evicted_items(&self) -> usize;

    /// Admit an item
    ///
    /// Return one or more items to evict. The sizes of these items are deducted
    /// from the total size already. The caller needs to make sure that these assets are actually
    /// removed from the storage.
    ///
    /// If the item is already admitted, A. update its freshness; B. if the new size is larger than the
    /// existing one, Some(_) might be returned for the caller to evict.
    fn admit(
        &self,
        item: CompactCacheKey,
        size: usize,
        fresh_until: SystemTime,
    ) -> Vec<CompactCacheKey>;

    /// Adjust an item's weight upwards by a delta. If the item is not already admitted,
    /// nothing will happen.
    ///
    /// An optional `max_weight` hint indicates the known max weight of the current key in case the
    /// weight should not be incremented above this amount.
    ///
    /// Return one or more items to evict. The sizes of these items are deducted
    /// from the total size already. The caller needs to make sure that these assets are actually
    /// removed from the storage.
    fn increment_weight(
        &self,
        item: &CompactCacheKey,
        delta: usize,
        max_weight: Option<usize>,
    ) -> Vec<CompactCacheKey>;

    /// Remove an item from the eviction manager.
    ///
    /// The size of the item will be deducted.
    fn remove(&self, item: &CompactCacheKey);

    /// Access an item that should already be in cache.
    ///
    /// If the item is not tracked by this [EvictionManager], track it but no eviction will happen.
    ///
    /// The call is used for asking the eviction manager to track the assets that are already admitted
    /// in the cache storage system.
    fn access(&self, item: &CompactCacheKey, size: usize, fresh_until: SystemTime) -> bool;

    /// Peek into the manager to see if the item is already tracked by the system
    ///
    /// This function should have no side-effect on the asset itself. For example, for LRU, this
    /// method shouldn't change the popularity of the asset being peeked.
    fn peek(&self, item: &CompactCacheKey) -> bool;

    /// Serialize to save the state of this eviction manager to disk
    ///
    /// This function is for preserving the eviction manager's state across server restarts.
    ///
    /// `dir_path` defines the directory on disk that the data should use.
    // dir_path is &str, not AsRef<Path>, so that trait objects can be used
    async fn save(&self, dir_path: &str) -> Result<()>;

    /// The counterpart of [Self::save()].
    async fn load(&self, dir_path: &str) -> Result<()>;
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/benches/lru_memory.rs | pingora-cache/benches/lru_memory.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Route every heap allocation through dhat so the profiler can attribute it.
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;
use pingora_cache::{
eviction::{lru::Manager, EvictionManager},
CacheKey,
};
const ITEMS: usize = 5 * usize::pow(2, 20);
/*
Total: 681,836,456 bytes (100%, 28,192,797.16/s) in 10,485,845 blocks (100%, 433,572.15/s), avg size 65.02 bytes, avg lifetime 5,935,075.17 µs (24.54% of program duration)
At t-gmax: 569,114,536 bytes (100%) in 5,242,947 blocks (100%), avg size 108.55 bytes
At t-end: 88 bytes (100%) in 3 blocks (100%), avg size 29.33 bytes
Allocated at {
#0: [root]
}
├── PP 1.1/5 {
│ Total: 293,601,280 bytes (43.06%, 12,139,921.91/s) in 5,242,880 blocks (50%, 216,784.32/s), avg size 56 bytes, avg lifetime 11,870,032.65 µs (49.08% of program duration)
│ Max: 293,601,280 bytes in 5,242,880 blocks, avg size 56 bytes
│ At t-gmax: 293,601,280 bytes (51.59%) in 5,242,880 blocks (100%), avg size 56 bytes
│ At t-end: 0 bytes (0%) in 0 blocks (0%), avg size 0 bytes
│ Allocated at {
│ #1: 0x5555703cf69c: alloc::alloc::exchange_malloc (alloc/src/alloc.rs:326:11)
│ #2: 0x5555703cf69c: alloc::boxed::Box<T>::new (alloc/src/boxed.rs:215:9)
│ #3: 0x5555703cf69c: pingora_lru::LruUnit<T>::admit (pingora-lru/src/lib.rs:201:20)
│ #4: 0x5555703cf69c: pingora_lru::Lru<T,_>::admit (pingora-lru/src/lib.rs:48:26)
│ #5: 0x5555703cf69c: <pingora_cache::eviction::lru::Manager<_> as pingora_cache::eviction::EvictionManager>::admit (src/eviction/lru.rs:114:9)
│ #6: 0x5555703cf69c: lru_memory::main (pingora-cache/benches/lru_memory.rs:78:9)
│ }
│ }
├── PP 1.2/5 {
│ Total: 203,685,456 bytes (29.87%, 8,422,052.97/s) in 50 blocks (0%, 2.07/s), avg size 4,073,709.12 bytes, avg lifetime 6,842,528.74 µs (28.29% of program duration)
│ Max: 132,906,576 bytes in 32 blocks, avg size 4,153,330.5 bytes
│ At t-gmax: 132,906,576 bytes (23.35%) in 32 blocks (0%), avg size 4,153,330.5 bytes
│ At t-end: 0 bytes (0%) in 0 blocks (0%), avg size 0 bytes
│ Allocated at {
│ #1: 0x5555703cec54: <alloc::alloc::Global as core::alloc::Allocator>::allocate (alloc/src/alloc.rs:237:9)
│ #2: 0x5555703cec54: alloc::raw_vec::RawVec<T,A>::allocate_in (alloc/src/raw_vec.rs:185:45)
│ #3: 0x5555703cec54: alloc::raw_vec::RawVec<T,A>::with_capacity_in (alloc/src/raw_vec.rs:131:9)
│ #4: 0x5555703cec54: alloc::vec::Vec<T,A>::with_capacity_in (src/vec/mod.rs:641:20)
│ #5: 0x5555703cec54: alloc::vec::Vec<T>::with_capacity (src/vec/mod.rs:483:9)
│ #6: 0x5555703cec54: pingora_lru::linked_list::Nodes::with_capacity (pingora-lru/src/linked_list.rs:50:25)
│ #7: 0x5555703cec54: pingora_lru::linked_list::LinkedList::with_capacity (pingora-lru/src/linked_list.rs:121:20)
│ #8: 0x5555703cec54: pingora_lru::LruUnit<T>::with_capacity (pingora-lru/src/lib.rs:176:20)
│ #9: 0x5555703cec54: pingora_lru::Lru<T,_>::with_capacity (pingora-lru/src/lib.rs:28:36)
│ #10: 0x5555703cec54: pingora_cache::eviction::lru::Manager<_>::with_capacity (src/eviction/lru.rs:22:17)
│ #11: 0x5555703cec54: lru_memory::main (pingora-cache/benches/lru_memory.rs:74:19)
│ }
│ }
├── PP 1.3/5 {
│ Total: 142,606,592 bytes (20.92%, 5,896,544.09/s) in 32 blocks (0%, 1.32/s), avg size 4,456,456 bytes, avg lifetime 22,056,252.88 µs (91.2% of program duration)
│ Max: 142,606,592 bytes in 32 blocks, avg size 4,456,456 bytes
│ At t-gmax: 142,606,592 bytes (25.06%) in 32 blocks (0%), avg size 4,456,456 bytes
│ At t-end: 0 bytes (0%) in 0 blocks (0%), avg size 0 bytes
│ Allocated at {
│ #1: 0x5555703ceb64: alloc::alloc::alloc (alloc/src/alloc.rs:95:14)
│ #2: 0x5555703ceb64: <hashbrown::raw::alloc::inner::Global as hashbrown::raw::alloc::inner::Allocator>::allocate (src/raw/alloc.rs:47:35)
│ #3: 0x5555703ceb64: hashbrown::raw::alloc::inner::do_alloc (src/raw/alloc.rs:62:9)
│ #4: 0x5555703ceb64: hashbrown::raw::RawTableInner<A>::new_uninitialized (src/raw/mod.rs:1080:38)
│ #5: 0x5555703ceb64: hashbrown::raw::RawTableInner<A>::fallible_with_capacity (src/raw/mod.rs:1109:30)
│ #6: 0x5555703ceb64: hashbrown::raw::RawTable<T,A>::fallible_with_capacity (src/raw/mod.rs:460:20)
│ #7: 0x5555703ceb64: hashbrown::raw::RawTable<T,A>::with_capacity_in (src/raw/mod.rs:481:15)
│ #8: 0x5555703ceb64: hashbrown::raw::RawTable<T>::with_capacity (src/raw/mod.rs:411:9)
│ #9: 0x5555703ceb64: hashbrown::map::HashMap<K,V,S>::with_capacity_and_hasher (hashbrown-0.12.3/src/map.rs:422:20)
│ #10: 0x5555703ceb64: hashbrown::map::HashMap<K,V>::with_capacity (hashbrown-0.12.3/src/map.rs:326:9)
│ #11: 0x5555703ceb64: pingora_lru::LruUnit<T>::with_capacity (pingora-lru/src/lib.rs:175:27)
│ #12: 0x5555703ceb64: pingora_lru::Lru<T,_>::with_capacity (pingora-lru/src/lib.rs:28:36)
│ #13: 0x5555703ceb64: pingora_cache::eviction::lru::Manager<_>::with_capacity (src/eviction/lru.rs:22:17)
│ #14: 0x5555703ceb64: lru_memory::main (pingora-cache/benches/lru_memory.rs:74:19)
│ }
│ }
*/
/// Memory benchmark: admit ITEMS single-byte entries into the sharded LRU
/// manager while dhat records every heap allocation.
fn main() {
    // keep the profiler alive for the whole run
    let _profiler = dhat::Profiler::new_heap();
    let manager = Manager::<32>::with_capacity(ITEMS, ITEMS / 32);
    let unused_ttl = std::time::SystemTime::now();
    (0..ITEMS).for_each(|i| {
        let key = CacheKey::new("", i.to_string(), "").to_compact();
        manager.admit(key, 1, unused_ttl);
    });
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/benches/lru_serde.rs | pingora-cache/benches/lru_serde.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::Instant;
use pingora_cache::{
eviction::{lru::Manager, EvictionManager},
CacheKey,
};
const ITEMS: usize = 5 * usize::pow(2, 20);
/// Serde benchmark: fill one sharded LRU manager, then time serializing each
/// of its 32 shards and deserializing them into a second manager.
fn main() {
    let src = Manager::<32>::with_capacity(ITEMS, ITEMS / 32);
    let dst = Manager::<32>::with_capacity(ITEMS, ITEMS / 32);
    let unused_ttl = std::time::SystemTime::now();
    (0..ITEMS).for_each(|i| {
        let key = CacheKey::new("", i.to_string(), "").to_compact();
        src.admit(key, 1, unused_ttl);
    });
    /* lru serialize shard 19 22.573338ms, 5241623 bytes
     * lru deserialize shard 19 39.260669ms, 5241623 bytes */
    for i in 0..32 {
        let start = Instant::now();
        let ser = src.serialize_shard(i).unwrap();
        let elapsed = start.elapsed();
        println!("lru serialize shard {i} {elapsed:?}, {} bytes", ser.len());
        let start = Instant::now();
        dst.deserialize_shard(&ser).unwrap();
        let elapsed = start.elapsed();
        println!("lru deserialize shard {i} {elapsed:?}, {} bytes", ser.len());
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-cache/benches/simple_lru_memory.rs | pingora-cache/benches/simple_lru_memory.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Route every heap allocation through dhat so the profiler can attribute it.
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;
use pingora_cache::{
eviction::{simple_lru::Manager, EvictionManager},
CacheKey,
};
const ITEMS: usize = 5 * usize::pow(2, 20);
/*
Total: 704,643,412 bytes (100%, 29,014,058.85/s) in 10,485,787 blocks (100%, 431,757.73/s), avg size 67.2 bytes, avg lifetime 6,163,799.09 µs (25.38% of program duration)
At t-gmax: 520,093,936 bytes (100%) in 5,242,886 blocks (100%), avg size 99.2 bytes
├── PP 1.1/4 {
│ Total: 377,487,360 bytes (53.57%, 15,543,238.31/s) in 5,242,880 blocks (50%, 215,878.31/s), avg size 72 bytes, avg lifetime 12,327,602.83 µs (50.76% of program duration)
│ Max: 377,487,360 bytes in 5,242,880 blocks, avg size 72 bytes
│ At t-gmax: 377,487,360 bytes (72.58%) in 5,242,880 blocks (100%), avg size 72 bytes
│ At t-end: 0 bytes (0%) in 0 blocks (0%), avg size 0 bytes
│ Allocated at {
│ #1: 0x5555791dd7e0: alloc::alloc::exchange_malloc (alloc/src/alloc.rs:326:11)
│ #2: 0x5555791dd7e0: alloc::boxed::Box<T>::new (alloc/src/boxed.rs:215:9)
│ #3: 0x5555791dd7e0: lru::LruCache<K,V,S>::replace_or_create_node (lru-0.8.1/src/lib.rs:391:20)
│ #4: 0x5555791dd7e0: lru::LruCache<K,V,S>::capturing_put (lru-0.8.1/src/lib.rs:355:44)
│ #5: 0x5555791dd7e0: lru::LruCache<K,V,S>::push (lru-0.8.1/src/lib.rs:334:9)
│ #6: 0x5555791dd7e0: pingora_cache::eviction::simple_lru::Manager::insert (src/eviction/simple_lru.rs:49:23)
│ #7: 0x5555791dd7e0: <pingora_cache::eviction::simple_lru::Manager as pingora_cache::eviction::EvictionManager>::admit (src/eviction/simple_lru.rs:166:9)
│ #8: 0x5555791dd7e0: simple_lru_memory::main (pingora-cache/benches/simple_lru_memory.rs:21:9)
│ }
│ }
├── PP 1.2/4 {
│ Total: 285,212,780 bytes (40.48%, 11,743,784.5/s) in 22 blocks (0%, 0.91/s), avg size 12,964,217.27 bytes, avg lifetime 1,116,774.23 µs (4.6% of program duration)
│ Max: 213,909,520 bytes in 2 blocks, avg size 106,954,760 bytes
│ At t-gmax: 142,606,344 bytes (27.42%) in 1 blocks (0%), avg size 142,606,344 bytes
│ At t-end: 0 bytes (0%) in 0 blocks (0%), avg size 0 bytes
│ Allocated at {
│ #1: 0x5555791dae20: alloc::alloc::alloc (alloc/src/alloc.rs:95:14)
│ #2: 0x5555791dae20: <hashbrown::raw::alloc::inner::Global as hashbrown::raw::alloc::inner::Allocator>::allocate (src/raw/alloc.rs:47:35)
│ #3: 0x5555791dae20: hashbrown::raw::alloc::inner::do_alloc (src/raw/alloc.rs:62:9)
│ #4: 0x5555791dae20: hashbrown::raw::RawTableInner<A>::new_uninitialized (src/raw/mod.rs:1080:38)
│ #5: 0x5555791dae20: hashbrown::raw::RawTableInner<A>::fallible_with_capacity (src/raw/mod.rs:1109:30)
│ #6: 0x5555791dae20: hashbrown::raw::RawTableInner<A>::prepare_resize (src/raw/mod.rs:1353:29)
│ #7: 0x5555791dae20: hashbrown::raw::RawTableInner<A>::resize_inner (src/raw/mod.rs:1426:29)
│ #8: 0x5555791dae20: hashbrown::raw::RawTableInner<A>::reserve_rehash_inner (src/raw/mod.rs:1403:13)
│ #9: 0x5555791dae20: hashbrown::raw::RawTable<T,A>::reserve_rehash (src/raw/mod.rs:680:13)
│ #10: 0x5555791dde50: hashbrown::raw::RawTable<T,A>::reserve (src/raw/mod.rs:646:16)
│ #11: 0x5555791dde50: hashbrown::raw::RawTable<T,A>::insert (src/raw/mod.rs:725:17)
│ #12: 0x5555791dde50: hashbrown::map::HashMap<K,V,S,A>::insert (hashbrown-0.12.3/src/map.rs:1679:13)
│ #13: 0x5555791dde50: lru::LruCache<K,V,S>::capturing_put (lru-0.8.1/src/lib.rs:361:17)
│ #14: 0x5555791dde50: lru::LruCache<K,V,S>::push (lru-0.8.1/src/lib.rs:334:9)
│ #15: 0x5555791dde50: pingora_cache::eviction::simple_lru::Manager::insert (src/eviction/simple_lru.rs:49:23)
│ #16: 0x5555791dde50: <pingora_cache::eviction::simple_lru::Manager as pingora_cache::eviction::EvictionManager>::admit (src/eviction/simple_lru.rs:166:9)
│ #17: 0x5555791dde50: simple_lru_memory::main (pingora-cache/benches/simple_lru_memory.rs:21:9)
│ }
│ }
*/
/// Memory benchmark for the simple (single-lock) LRU manager; compare the dhat
/// output against the sharded lru_memory benchmark.
fn main() {
    // keep the profiler alive for the whole run
    let _profiler = dhat::Profiler::new_heap();
    let manager = Manager::new(ITEMS);
    let unused_ttl = std::time::SystemTime::now();
    (0..ITEMS).for_each(|i| {
        let key = CacheKey::new("", i.to_string(), "").to_compact();
        manager.admit(key, 1, unused_ttl);
    });
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-runtime/src/lib.rs | pingora-runtime/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Pingora tokio runtime.
//!
//! Tokio runtime comes in two flavors: a single-threaded runtime
//! and a multi-threaded one which provides work stealing.
//! Benchmark shows that, compared to the single-threaded runtime, the multi-threaded one
//! has some overhead due to its more sophisticated work steal scheduling.
//!
//! This crate provides a third flavor: a multi-threaded runtime without work stealing.
//! This flavor is as efficient as the single-threaded runtime while allows the async
//! program to use multiple cores.
use once_cell::sync::{Lazy, OnceCell};
use rand::Rng;
use std::sync::Arc;
use std::thread::JoinHandle;
use std::time::Duration;
use thread_local::ThreadLocal;
use tokio::runtime::{Builder, Handle};
use tokio::sync::oneshot::{channel, Sender};
/// Pingora async multi-threaded runtime
///
/// The `Steal` flavor is effectively tokio multi-threaded runtime.
///
/// The `NoSteal` flavor is backed by multiple tokio single-threaded runtime.
pub enum Runtime {
    /// A regular tokio work-stealing multi-threaded runtime.
    Steal(tokio::runtime::Runtime),
    /// A pool of independent tokio current-thread runtimes (no work stealing).
    NoSteal(NoStealRuntime),
}
impl Runtime {
/// Create a `Steal` flavor runtime. This just a regular tokio runtime
pub fn new_steal(threads: usize, name: &str) -> Self {
Self::Steal(
Builder::new_multi_thread()
.enable_all()
.worker_threads(threads)
.thread_name(name)
.build()
.unwrap(),
)
}
/// Create a `NoSteal` flavor runtime. This is backed by multiple tokio current-thread runtime
pub fn new_no_steal(threads: usize, name: &str) -> Self {
Self::NoSteal(NoStealRuntime::new(threads, name))
}
/// Return the &[Handle] of the [Runtime].
/// For `Steal` flavor, it will just return the &[Handle].
/// For `NoSteal` flavor, it will return the &[Handle] of a random thread in its pool.
/// So if we want tasks to spawn on all the threads, call this function to get a fresh [Handle]
/// for each async task.
pub fn get_handle(&self) -> &Handle {
match self {
Self::Steal(r) => r.handle(),
Self::NoSteal(r) => r.get_runtime(),
}
}
/// Call tokio's `shutdown_timeout` of all the runtimes. This function is blocking until
/// all runtimes exit.
pub fn shutdown_timeout(self, timeout: Duration) {
match self {
Self::Steal(r) => r.shutdown_timeout(timeout),
Self::NoSteal(r) => r.shutdown_timeout(timeout),
}
}
}
// Only the NoSteal flavor populates this thread-local: each NoStealRuntime
// worker thread stores the shared pool of handles here so current_handle()
// can pick a sibling runtime.
static CURRENT_HANDLE: Lazy<ThreadLocal<Pools>> = Lazy::new(ThreadLocal::new);
/// Return the [Handle] of current runtime.
/// If the current thread is under a `Steal` runtime, the current [Handle] is returned.
/// If the current thread is under a `NoSteal` runtime, the [Handle] of a random thread
/// under this runtime is returned. This function will panic if called outside any runtime.
pub fn current_handle() -> Handle {
    match CURRENT_HANDLE.get() {
        Some(pools) => {
            // safety: the CURRENT_HANDLE is set when the pool is being initialized in init_pools()
            let handles = pools.get().unwrap();
            let idx = rand::thread_rng().gen_range(0..handles.len());
            handles[idx].clone()
        }
        // not NoStealRuntime, just check the current tokio runtime
        None => Handle::current(),
    }
}
// Per-thread shutdown control: the sender delivers the shutdown timeout, the
// JoinHandle joins the worker thread.
type Control = (Sender<Duration>, JoinHandle<()>);
// Lazily-initialized, shared set of runtime handles, one per worker thread.
type Pools = Arc<OnceCell<Box<[Handle]>>>;
/// Multi-threaded runtime backed by a pool of single threaded tokio runtime
pub struct NoStealRuntime {
    // number of worker threads (and single-threaded runtimes) in the pool
    threads: usize,
    // name assigned to every worker thread
    name: String,
    // Lazily init the runtimes so that they are created after pingora
    // daemonize itself. Otherwise the runtime threads are lost.
    pools: Pools,
    // shutdown senders + join handles; set by the same thread that wins the
    // race to initialize `pools`
    controls: OnceCell<Vec<Control>>,
}
impl NoStealRuntime {
    /// Create a new [NoStealRuntime]. Panic if `threads` is 0
    pub fn new(threads: usize, name: &str) -> Self {
        assert!(threads != 0);
        NoStealRuntime {
            threads,
            name: name.to_string(),
            pools: Arc::new(OnceCell::new()),
            controls: OnceCell::new(),
        }
    }

    // Spawn `self.threads` OS threads, each parking on its own current-thread
    // tokio runtime, and return the runtime handles plus shutdown controls.
    fn init_pools(&self) -> (Box<[Handle]>, Vec<Control>) {
        let mut pools = Vec::with_capacity(self.threads);
        let mut controls = Vec::with_capacity(self.threads);
        for _ in 0..self.threads {
            let rt = Builder::new_current_thread().enable_all().build().unwrap();
            let handler = rt.handle().clone();
            // channel used to deliver the shutdown timeout to this worker
            let (tx, rx) = channel::<Duration>();
            let pools_ref = self.pools.clone();
            let join = std::thread::Builder::new()
                .name(self.name.clone())
                .spawn(move || {
                    // publish the shared pool in this worker's thread-local so
                    // current_handle() can find a sibling runtime
                    CURRENT_HANDLE.get_or(|| pools_ref);
                    // drive the runtime until a shutdown timeout arrives
                    if let Ok(timeout) = rt.block_on(rx) {
                        rt.shutdown_timeout(timeout);
                    } // else Err(_): tx is dropped, just exit
                })
                .unwrap();
            pools.push(handler);
            controls.push((tx, join));
        }
        (pools.into_boxed_slice(), controls)
    }

    /// Return the &[Handle] of a random thread of this runtime
    pub fn get_runtime(&self) -> &Handle {
        let mut rng = rand::thread_rng();
        let index = rng.gen_range(0..self.threads);
        self.get_runtime_at(index)
    }

    /// Return the number of threads of this runtime
    pub fn threads(&self) -> usize {
        self.threads
    }

    // Return the pool of handles, initializing it on first use.
    fn get_pools(&self) -> &[Handle] {
        if let Some(p) = self.pools.get() {
            p
        } else {
            // TODO: use a mutex to avoid creating a lot threads only to drop them
            let (pools, controls) = self.init_pools();
            // there could be another thread racing with this one to init the pools
            match self.pools.try_insert(pools) {
                Ok(p) => {
                    // unwrap to make sure that this is the one that init both pools and controls
                    self.controls.set(controls).unwrap();
                    p
                }
                // another thread already set it, just return it
                // (the losing thread's pools/controls are dropped here, which
                // drops the txs and lets its worker threads exit)
                Err((p, _my_pools)) => p,
            }
        }
    }

    /// Return the &[Handle] of a given thread of this runtime
    pub fn get_runtime_at(&self, index: usize) -> &Handle {
        let pools = self.get_pools();
        &pools[index]
    }

    /// Call tokio's `shutdown_timeout` of all the runtimes. This function is blocking until
    /// all runtimes exit.
    pub fn shutdown_timeout(mut self, timeout: Duration) {
        if let Some(controls) = self.controls.take() {
            let (txs, joins): (Vec<Sender<_>>, Vec<JoinHandle<()>>) = controls.into_iter().unzip();
            // first signal every worker, then join them all
            for tx in txs {
                let _ = tx.send(timeout); // Err() when rx is dropped
            }
            for join in joins {
                let _ = join.join(); // ignore thread error
            }
        } // else, the controls and the runtimes are not even init yet, just return;
    }

    // TODO: runtime metrics
}
// Sanity check for the Steal flavor: block_on works and tasks spawned via
// current_handle() complete.
#[test]
fn test_steal_runtime() {
    use tokio::time::{sleep, Duration};

    let threads = 2;
    let rt = Runtime::new_steal(threads, "test");
    let handle = rt.get_handle();
    let ret = handle.block_on(async {
        sleep(Duration::from_secs(1)).await;
        let handle = current_handle();
        let join = handle.spawn(async {
            sleep(Duration::from_secs(1)).await;
        });
        join.await.unwrap();
        1
    });
    // worker-count metric only asserted on Linux
    #[cfg(target_os = "linux")]
    assert_eq!(handle.metrics().num_workers(), threads);
    assert_eq!(ret, 1);
}
// Sanity check for the NoSteal flavor: block_on works and tasks spawned via
// current_handle() (which picks a random pool thread) complete.
#[test]
fn test_no_steal_runtime() {
    use tokio::time::{sleep, Duration};

    let rt = Runtime::new_no_steal(2, "test");
    let handle = rt.get_handle();
    let ret = handle.block_on(async {
        sleep(Duration::from_secs(1)).await;
        let handle = current_handle();
        let join = handle.spawn(async {
            sleep(Duration::from_secs(1)).await;
        });
        join.await.unwrap();
        1
    });
    assert_eq!(ret, 1);
}
// Same as test_no_steal_runtime, but also exercises the blocking
// shutdown_timeout() path to make sure all worker threads exit.
#[test]
fn test_no_steal_shutdown() {
    use tokio::time::{sleep, Duration};

    let rt = Runtime::new_no_steal(2, "test");
    let handle = rt.get_handle();
    let ret = handle.block_on(async {
        sleep(Duration::from_secs(1)).await;
        let handle = current_handle();
        let join = handle.spawn(async {
            sleep(Duration::from_secs(1)).await;
        });
        join.await.unwrap();
        1
    });
    assert_eq!(ret, 1);
    rt.shutdown_timeout(Duration::from_secs(1));
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-runtime/benches/hello.rs | pingora-runtime/benches/hello.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Pingora tokio runtime.
//!
//! Tokio runtime comes in two flavors: a single-threaded runtime
//! and a multi-threaded one which provides work stealing.
//! Benchmark shows that, compared to the single-threaded runtime, the multi-threaded one
//! has some overhead due to its more sophisticated work steal scheduling.
//!
//! This crate provides a third flavor: a multi-threaded runtime without work stealing.
//! This flavor is as efficient as the single-threaded runtime while allowing the async
//! program to use multiple cores.
use pingora_runtime::{current_handle, Runtime};
use std::error::Error;
use std::{thread, time};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpListener;
/// Minimal keep-alive HTTP server used by this benchmark.
///
/// Binds to `127.0.0.1:{port}` and answers every read with a fixed
/// `Hello world!` response until the peer closes the connection or a
/// read error occurs. Never returns under normal operation.
async fn hello_server(port: usize) -> Result<(), Box<dyn Error + Send>> {
    let addr = format!("127.0.0.1:{port}");
    let listener = TcpListener::bind(&addr).await.unwrap();
    println!("Listening on: {}", addr);
    loop {
        let (mut socket, _) = listener.accept().await.unwrap();
        socket.set_nodelay(true).unwrap();
        // Serve each connection as a task on the runtime owning this thread.
        current_handle().spawn(async move {
            loop {
                let mut buf = [0; 1024];
                match socket.read(&mut buf).await {
                    // Peer closed the connection (0 bytes) or the read failed: stop serving.
                    Ok(0) | Err(_) => return,
                    Ok(_) => {}
                }
                let _ = socket
                    .write_all(
                        b"HTTP/1.1 200 OK\r\ncontent-length: 12\r\nconnection: keep-alive\r\n\r\nHello world!",
                    )
                    .await;
            }
        });
    }
}
/* On M1 macbook pro
wrk -t40 -c1000 -d10 http://127.0.0.1:3001 --latency
Running 10s test @ http://127.0.0.1:3001
40 threads and 1000 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 3.53ms 0.87ms 17.12ms 84.99%
Req/Sec 7.09k 1.29k 33.11k 93.30%
Latency Distribution
50% 3.56ms
75% 3.95ms
90% 4.37ms
99% 5.38ms
2844034 requests in 10.10s, 203.42MB read
Requests/sec: 281689.27
Transfer/sec: 20.15MB
wrk -t40 -c1000 -d10 http://127.0.0.1:3000 --latency
Running 10s test @ http://127.0.0.1:3000
40 threads and 1000 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 12.16ms 16.29ms 112.29ms 83.40%
Req/Sec 5.47k 2.01k 48.85k 83.67%
Latency Distribution
50% 2.09ms
75% 20.23ms
90% 37.11ms
99% 65.16ms
2190869 requests in 10.10s, 156.70MB read
Requests/sec: 216918.71
Transfer/sec: 15.52MB
*/
/// Benchmark entry point: one server per runtime flavor, driven externally (e.g. with wrk).
fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Work-stealing runtime serving on :3000.
    let steal_rt = Runtime::new_steal(2, "");
    steal_rt.get_handle().spawn(hello_server(3000));
    // No-steal runtime serving on :3001.
    let no_steal_rt = Runtime::new_no_steal(2, "");
    no_steal_rt.get_handle().spawn(hello_server(3001));
    // Park the main thread forever; both runtimes keep serving in the background.
    thread::sleep(time::Duration::from_secs(999999999));
    Ok(())
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/lib.rs | pingora-core/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![warn(clippy::all)]
#![allow(clippy::new_without_default)]
#![allow(clippy::type_complexity)]
#![allow(clippy::match_wild_err_arm)]
#![allow(clippy::missing_safety_doc)]
#![allow(clippy::upper_case_acronyms)]
//! # Pingora
//!
//! Pingora is a collection of service frameworks and network libraries battle-tested by the Internet.
//! It is used to build robust, scalable and secure network infrastructures and services at Internet scale.
//!
//! # Features
//! - Http 1.x and Http 2
//! - Modern TLS with OpenSSL or BoringSSL (FIPS compatible)
//! - Zero downtime upgrade
//!
//! # Usage
//! This crate provides low level service and protocol implementation and abstraction.
//!
//! If looking to build a (reverse) proxy, see [`pingora-proxy`](https://docs.rs/pingora-proxy) crate.
//!
//! # Optional features
//!
//! ## TLS backends (mutually exclusive)
//! - `openssl`: Use OpenSSL as the TLS library (default if no TLS feature is specified)
//! - `boringssl`: Use BoringSSL as the TLS library (FIPS compatible)
//! - `rustls`: Use Rustls as the TLS library
//!
//! ## Additional features
//! - `connection_filter`: Enable early TCP connection filtering before TLS handshake.
//! This allows implementing custom logic to accept/reject connections based on peer address
//! with zero overhead when disabled.
//! - `sentry`: Enable Sentry error reporting integration
//! - `patched_http1`: Enable patched HTTP/1 parser
//!
//! # Connection Filtering
//!
//! With the `connection_filter` feature enabled, you can implement early connection filtering
//! at the TCP level, before any TLS handshake or HTTP processing occurs. This is useful for:
//! - IP-based access control
//! - Rate limiting at the connection level
//! - Geographic restrictions
//! - DDoS mitigation
//!
//! ## Example
//!
//! ```rust,ignore
//! # #[cfg(feature = "connection_filter")]
//! # {
//! use async_trait::async_trait;
//! use pingora_core::listeners::ConnectionFilter;
//! use std::net::SocketAddr;
//! use std::sync::Arc;
//!
//! #[derive(Debug)]
//! struct MyFilter;
//!
//! #[async_trait]
//! impl ConnectionFilter for MyFilter {
//! async fn should_accept(&self, addr: &SocketAddr) -> bool {
//! // Custom logic to filter connections
//! !is_blocked_ip(addr.ip())
//! }
//! }
//!
//! // Apply the filter to a service
//! let mut service = my_service();
//! service.set_connection_filter(Arc::new(MyFilter));
//! # }
//! ```
//!
//! When the `connection_filter` feature is disabled, the filter API remains available
//! but becomes a no-op, ensuring zero overhead for users who don't need this functionality.
// This enables the feature that labels modules that are only available with
// certain pingora features
#![cfg_attr(docsrs, feature(doc_cfg))]
pub mod apps;
pub mod connectors;
pub mod listeners;
pub mod modules;
pub mod protocols;
pub mod server;
pub mod services;
pub mod upstreams;
pub mod utils;
pub use pingora_error::{ErrorType::*, *};
// If both openssl and boringssl are enabled, prefer boringssl.
// This is to make sure that boringssl can override the default openssl feature
// when this crate is used indirectly by other crates.
#[cfg(feature = "boringssl")]
pub use pingora_boringssl as tls;
#[cfg(feature = "openssl")]
pub use pingora_openssl as tls;
#[cfg(feature = "rustls")]
pub use pingora_rustls as tls;
#[cfg(feature = "s2n")]
pub use pingora_s2n as tls;
#[cfg(not(feature = "any_tls"))]
pub use protocols::tls::noop_tls as tls;
/// Re-exports of commonly used pingora-core types, intended for glob import:
/// `use pingora_core::prelude::*;`.
pub mod prelude {
    pub use crate::server::configuration::Opt;
    pub use crate::server::Server;
    pub use crate::services::background::background_service;
    pub use crate::upstreams::peer::HttpPeer;
    pub use pingora_error::{ErrorType::*, *};
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/services/listening.rs | pingora-core/src/services/listening.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The listening service
//!
//! A [Service] (listening service) responds to incoming requests on its endpoints.
//! Each [Service] can be configured with custom application logic (e.g. an `HTTPProxy`) and one or
//! more endpoints to listen to.
use crate::apps::ServerApp;
use crate::listeners::tls::TlsSettings;
#[cfg(feature = "connection_filter")]
use crate::listeners::AcceptAllFilter;
use crate::listeners::{
ConnectionFilter, Listeners, ServerAddress, TcpSocketOptions, TransportStack,
};
use crate::protocols::Stream;
#[cfg(unix)]
use crate::server::ListenFds;
use crate::server::ShutdownWatch;
use crate::services::Service as ServiceTrait;
use async_trait::async_trait;
use log::{debug, error, info};
use pingora_error::Result;
use pingora_runtime::current_handle;
use pingora_timeout::timeout;
use std::fs::Permissions;
use std::sync::Arc;
use std::time::Duration;
/// The type of service that is associated with a list of listening endpoints and a particular application
pub struct Service<A> {
    // Human-readable service name; returned by `name()` for logging/thread naming.
    name: String,
    // The set of endpoints this service listens on.
    listeners: Listeners,
    // The application logic; moved out (taken) when the service starts,
    // which is why starting twice is not possible.
    app_logic: Option<A>,
    /// The number of preferred threads. `None` to follow global setting.
    pub threads: Option<usize>,
    // Filter applied to each incoming TCP connection before any handshake.
    #[cfg(feature = "connection_filter")]
    connection_filter: Arc<dyn ConnectionFilter>,
}
impl<A> Service<A> {
    /// Create a new [`Service`] with the given application (see [`crate::apps`]).
    pub fn new(name: String, app_logic: A) -> Self {
        Service {
            name,
            listeners: Listeners::new(),
            app_logic: Some(app_logic),
            threads: None,
            // Default filter accepts every connection.
            #[cfg(feature = "connection_filter")]
            connection_filter: Arc::new(AcceptAllFilter),
        }
    }

    /// Create a new [`Service`] with the given application (see [`crate::apps`]) and the given
    /// [`Listeners`].
    pub fn with_listeners(name: String, listeners: Listeners, app_logic: A) -> Self {
        Service {
            name,
            listeners,
            app_logic: Some(app_logic),
            threads: None,
            // Default filter accepts every connection.
            #[cfg(feature = "connection_filter")]
            connection_filter: Arc::new(AcceptAllFilter),
        }
    }

    /// Set a custom connection filter for this service.
    ///
    /// The connection filter will be applied to all incoming connections
    /// on all endpoints of this service. Connections that don't pass the
    /// filter will be dropped immediately at the TCP level, before TLS
    /// handshake or any HTTP processing.
    ///
    /// # Feature Flag
    ///
    /// This method requires the `connection_filter` feature to be enabled.
    /// When the feature is disabled, this method is a no-op.
    ///
    /// # Example
    ///
    /// ```rust,no_run
    /// # use std::sync::Arc;
    /// # use pingora_core::listeners::{ConnectionFilter, AcceptAllFilter};
    /// # struct MyService;
    /// # impl MyService {
    /// # fn new() -> Self { MyService }
    /// # }
    /// let mut service = MyService::new();
    /// let filter = Arc::new(AcceptAllFilter);
    /// service.set_connection_filter(filter);
    /// ```
    #[cfg(feature = "connection_filter")]
    pub fn set_connection_filter(&mut self, filter: Arc<dyn ConnectionFilter>) {
        // Keep a handle on the service itself and propagate the filter to the listeners.
        self.connection_filter = filter.clone();
        self.listeners.set_connection_filter(filter);
    }

    // No-op stand-in so callers still compile when the feature is disabled.
    #[cfg(not(feature = "connection_filter"))]
    pub fn set_connection_filter(&mut self, _filter: Arc<dyn ConnectionFilter>) {}

    /// Get the [`Listeners`], mostly to add more endpoints.
    pub fn endpoints(&mut self) -> &mut Listeners {
        &mut self.listeners
    }

    // The following add* functions have no effect if the server is already started.

    /// Add a TCP listening endpoint with the given address (e.g., `127.0.0.1:8000`).
    pub fn add_tcp(&mut self, addr: &str) {
        self.listeners.add_tcp(addr);
    }

    /// Add a TCP listening endpoint with the given [`TcpSocketOptions`].
    pub fn add_tcp_with_settings(&mut self, addr: &str, sock_opt: TcpSocketOptions) {
        self.listeners.add_tcp_with_settings(addr, sock_opt);
    }

    /// Add a Unix domain socket listening endpoint with the given path.
    ///
    /// Optionally take a permission of the socket file. The default is read and write access for
    /// everyone (0o666).
    #[cfg(unix)]
    pub fn add_uds(&mut self, addr: &str, perm: Option<Permissions>) {
        self.listeners.add_uds(addr, perm);
    }

    /// Add a TLS listening endpoint with the given certificate and key paths.
    pub fn add_tls(&mut self, addr: &str, cert_path: &str, key_path: &str) -> Result<()> {
        self.listeners.add_tls(addr, cert_path, key_path)
    }

    /// Add a TLS listening endpoint with the given [`TlsSettings`] and [`TcpSocketOptions`].
    pub fn add_tls_with_settings(
        &mut self,
        addr: &str,
        sock_opt: Option<TcpSocketOptions>,
        settings: TlsSettings,
    ) {
        self.listeners
            .add_tls_with_settings(addr, sock_opt, settings)
    }

    /// Add an endpoint according to the given [`ServerAddress`]
    pub fn add_address(&mut self, addr: ServerAddress) {
        self.listeners.add_address(addr);
    }

    /// Get a reference to the application inside this service
    pub fn app_logic(&self) -> Option<&A> {
        self.app_logic.as_ref()
    }

    /// Get a mutable reference to the application inside this service
    pub fn app_logic_mut(&mut self) -> Option<&mut A> {
        self.app_logic.as_mut()
    }
}
impl<A: ServerApp + Send + Sync + 'static> Service<A> {
    /// Drive one accepted (and handshaked) connection through the app logic,
    /// looping for as long as the app hands back a reusable stream
    /// (`process_new` returning `Some`).
    pub async fn handle_event(event: Stream, app_logic: Arc<A>, shutdown: ShutdownWatch) {
        debug!("new event!");
        let mut reuse_event = app_logic.process_new(event, &shutdown).await;
        while let Some(event) = reuse_event {
            // TODO: with no steal runtime, consider spawn() the next event on
            // another thread for more evenly load balancing
            debug!("new reusable event!");
            reuse_event = app_logic.process_new(event, &shutdown).await;
        }
    }

    // The accept loop for one endpoint: accept connections until shutdown,
    // spawning one task per connection that performs the handshake (with a
    // timeout) and then hands the stream to `handle_event`.
    async fn run_endpoint(
        app_logic: Arc<A>,
        mut stack: TransportStack,
        mut shutdown: ShutdownWatch,
    ) {
        // the accept loop, until the system is shutting down
        loop {
            let new_io = tokio::select! { // TODO: consider biased for perf reason?
                new_io = stack.accept() => new_io,
                shutdown_signal = shutdown.changed() => {
                    match shutdown_signal {
                        Ok(()) => {
                            if !*shutdown.borrow() {
                                // happen in the initial read
                                continue;
                            }
                            info!("Shutting down {}", stack.as_str());
                            break;
                        }
                        Err(e) => {
                            error!("shutdown_signal error {e}");
                            break;
                        }
                    }
                }
            };
            match new_io {
                Ok(io) => {
                    let app = app_logic.clone();
                    let shutdown = shutdown.clone();
                    current_handle().spawn(async move {
                        let peer_addr = io.peer_addr();
                        // Bound the handshake so a stalled peer cannot hold this task forever.
                        match timeout(Duration::from_secs(60), io.handshake()).await {
                            Ok(handshake) => {
                                match handshake {
                                    Ok(io) => Self::handle_event(io, app, shutdown).await,
                                    Err(e) => {
                                        // TODO: Maybe IOApp trait needs a fn to handle/filter out this error
                                        if let Some(addr) = peer_addr {
                                            error!("Downstream handshake error from {}: {e}", addr);
                                        } else {
                                            error!("Downstream handshake error: {e}");
                                        }
                                    }
                                }
                            }
                            Err(_) => {
                                error!("Downstream handshake timeout");
                            }
                        }
                    });
                }
                Err(e) => {
                    error!("Accept() failed {e}");
                    if let Some(io_error) = e
                        .root_cause()
                        .downcast_ref::<std::io::Error>()
                        .and_then(|e| e.raw_os_error())
                    {
                        // 24: too many open files. In this case accept() will continue to return
                        // this error without blocking, which could use up all the resources
                        if io_error == 24 {
                            // call sleep to calm the thread down and wait for others to release
                            // some resources
                            tokio::time::sleep(std::time::Duration::from_secs(1)).await;
                        }
                    }
                }
            }
        }
        // Endpoint-specific teardown once the accept loop exits.
        stack.cleanup();
    }
}
#[async_trait]
impl<A: ServerApp + Send + Sync + 'static> ServiceTrait for Service<A> {
    /// Build all listeners, spawn one accept task per endpoint (replicated
    /// `listeners_per_fd` times), and wait for all of them to finish before
    /// cleaning up the listeners and the application.
    async fn start_service(
        &mut self,
        #[cfg(unix)] fds: Option<ListenFds>,
        shutdown: ShutdownWatch,
        listeners_per_fd: usize,
    ) {
        let runtime = current_handle();
        let endpoints = self
            .listeners
            .build(
                #[cfg(unix)]
                fds,
            )
            .await
            .expect("Failed to build listeners");
        // Taking the app out means the service can only be started once.
        let app_logic = Arc::new(
            self.app_logic
                .take()
                .expect("can only start_service() once"),
        );
        let mut accept_tasks = Vec::new();
        for endpoint in endpoints {
            for _ in 0..listeners_per_fd {
                let shutdown = shutdown.clone();
                let app = app_logic.clone();
                let endpoint = endpoint.clone();
                accept_tasks.push(runtime.spawn(async move {
                    Self::run_endpoint(app, endpoint, shutdown).await;
                }));
            }
        }
        futures::future::join_all(accept_tasks).await;
        self.listeners.cleanup();
        app_logic.cleanup().await;
    }

    fn name(&self) -> &str {
        &self.name
    }

    fn threads(&self) -> Option<usize> {
        self.threads
    }
}
use crate::apps::prometheus_http_app::PrometheusServer;

impl Service<PrometheusServer> {
    /// The Prometheus HTTP server
    ///
    /// The HTTP server endpoint that reports Prometheus metrics collected in the entire service
    pub fn prometheus_http_service() -> Self {
        // A regular listening service wrapping the built-in Prometheus app.
        Service::new(
            "Prometheus metric HTTP".to_string(),
            PrometheusServer::new(),
        )
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/services/mod.rs | pingora-core/src/services/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The service interface
//!
//! A service to the pingora server is just something that runs forever until the server is shutting
//! down.
//!
//! Two types of services are particularly useful
//! - services that are listening to some (TCP) endpoints
//! - services that are just running in the background.
use async_trait::async_trait;
#[cfg(unix)]
use crate::server::ListenFds;
use crate::server::ShutdownWatch;
pub mod background;
pub mod listening;
/// The service interface
#[async_trait]
pub trait Service: Sync + Send {
    /// This function will be called when the server is ready to start the service.
    ///
    /// - `fds` (Unix only): a collection of listening file descriptors. During zero downtime restart
    ///   the `fds` would contain the listening sockets passed from the old service, services should
    ///   take the sockets they need to use then. If the sockets the service looks for don't appear in
    ///   the collection, the service should create its own listening sockets and then put them into
    ///   the collection in order for them to be passed to the next server.
    /// - `shutdown`: the shutdown signal this server would receive.
    /// - `listeners_per_fd`: how many listener tasks an implementation should run per
    ///   endpoint/file descriptor (listening services replicate their accept loop this
    ///   many times; background services may ignore it).
    async fn start_service(
        &mut self,
        #[cfg(unix)] fds: Option<ListenFds>,
        mut shutdown: ShutdownWatch,
        listeners_per_fd: usize,
    );

    /// The name of the service, just for logging and naming the threads assigned to this service
    ///
    /// Note that due to the limit of the underlying system, only the first 16 chars will be used
    fn name(&self) -> &str;

    /// The preferred number of threads to run this service
    ///
    /// If `None`, the global setting will be used
    fn threads(&self) -> Option<usize> {
        None
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/services/background.rs | pingora-core/src/services/background.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The background service
//!
//! A [BackgroundService] can be run as part of a Pingora application to add supporting logic that
//! exists outside of the request/response lifecycle.
//! Examples might include service discovery (load balancing) and background updates such as
//! push-style metrics.
use async_trait::async_trait;
use std::sync::Arc;
use super::Service;
#[cfg(unix)]
use crate::server::ListenFds;
use crate::server::ShutdownWatch;
/// The background service interface
#[async_trait]
pub trait BackgroundService {
    /// This function is called when the pingora server tries to start all the
    /// services. The background service can return at any time or wait for the
    /// `shutdown` signal.
    async fn start(&self, mut shutdown: ShutdownWatch);
}
/// A generic type of background service
pub struct GenBackgroundService<A> {
    // Name of the service
    name: String,
    // Task the service will execute
    task: Arc<A>,
    /// The number of threads. Default is 1
    pub threads: Option<usize>,
}

impl<A> GenBackgroundService<A> {
    /// Generates a background service that can run in the pingora runtime
    pub fn new(name: String, task: Arc<A>) -> Self {
        GenBackgroundService {
            name,
            task,
            // Background work defaults to a single thread.
            threads: Some(1),
        }
    }

    /// Return the task behind [Arc] to be shared with other logic.
    pub fn task(&self) -> Arc<A> {
        // Cheap refcount bump; the underlying task is not copied.
        Arc::clone(&self.task)
    }
}
#[async_trait]
impl<A> Service for GenBackgroundService<A>
where
    A: BackgroundService + Send + Sync + 'static,
{
    async fn start_service(
        &mut self,
        #[cfg(unix)] _fds: Option<ListenFds>,
        shutdown: ShutdownWatch,
        _listeners_per_fd: usize,
    ) {
        // Listening sockets are irrelevant for background work; just run the
        // task until it returns or reacts to the shutdown signal.
        self.task.start(shutdown).await;
    }

    fn name(&self) -> &str {
        &self.name
    }

    fn threads(&self) -> Option<usize> {
        self.threads
    }
}
/// Helper function to create a background service with a human readable name.
pub fn background_service<SV>(name: &str, task: SV) -> GenBackgroundService<SV> {
    // Prefix the name so background services are recognizable in logs/thread names.
    let labeled = format!("BG {name}");
    GenBackgroundService::new(labeled, Arc::new(task))
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/tls/mod.rs | pingora-core/src/tls/mod.rs | // Copyright 2024 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! This module contains a dummy TLS implementation for the scenarios where real TLS
//! implementations are unavailable.
// Generate a no-op `Display` impl (writes nothing) for the given type, so the
// stub types below can satisfy trait bounds that require `Display`.
macro_rules! impl_display {
    ($ty:ty) => {
        impl std::fmt::Display for $ty {
            fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
                Ok(())
            }
        }
    };
}
// Generate `Deref`/`DerefMut` impls from `$from` to `$to` that unconditionally
// panic; this dummy backend only needs the conversions to type-check.
macro_rules! impl_deref {
    ($from:ty => $to:ty) => {
        impl std::ops::Deref for $from {
            type Target = $to;

            fn deref(&self) -> &$to {
                panic!("Not implemented");
            }
        }

        impl std::ops::DerefMut for $from {
            fn deref_mut(&mut self) -> &mut $to {
                panic!("Not implemented");
            }
        }
    };
}
/// Stub mirror of an `openssl`-style `ssl` module API.
///
/// Every function here panics with "Not implemented" (the associated constants
/// are inert placeholders); this module exists only so the crate type-checks
/// when no real TLS backend is available.
pub mod ssl {
    use super::error::ErrorStack;
    use super::x509::verify::X509VerifyParamRef;
    use super::x509::{X509VerifyResult, X509};

    /// An error returned from an ALPN selection callback.
    pub struct AlpnError;

    impl AlpnError {
        /// Terminate the handshake with a fatal alert.
        pub const ALERT_FATAL: AlpnError = Self {};

        /// Do not select a protocol, but continue the handshake.
        pub const NOACK: AlpnError = Self {};
    }

    /// A type which allows for configuration of a client-side TLS session before connection.
    pub struct ConnectConfiguration;
    impl_deref! {ConnectConfiguration => SslRef}

    impl ConnectConfiguration {
        /// Configures the use of Server Name Indication (SNI) when connecting.
        pub fn set_use_server_name_indication(&mut self, _use_sni: bool) {
            panic!("Not implemented");
        }

        /// Configures the use of hostname verification when connecting.
        pub fn set_verify_hostname(&mut self, _verify_hostname: bool) {
            panic!("Not implemented");
        }

        /// Returns an `Ssl` configured to connect to the provided domain.
        pub fn into_ssl(self, _domain: &str) -> Result<Ssl, ErrorStack> {
            panic!("Not implemented");
        }

        /// Like `SslContextBuilder::set_verify`.
        pub fn set_verify(&mut self, _mode: SslVerifyMode) {
            panic!("Not implemented");
        }

        /// Like `SslContextBuilder::set_alpn_protos`.
        pub fn set_alpn_protos(&mut self, _protocols: &[u8]) -> Result<(), ErrorStack> {
            panic!("Not implemented");
        }

        /// Returns a mutable reference to the X509 verification configuration.
        pub fn param_mut(&mut self) -> &mut X509VerifyParamRef {
            panic!("Not implemented");
        }
    }

    /// An SSL error.
    #[derive(Debug)]
    pub struct Error;
    impl_display!(Error);

    impl Error {
        pub fn code(&self) -> ErrorCode {
            panic!("Not implemented");
        }
    }

    /// An error code returned from SSL functions.
    #[derive(PartialEq)]
    pub struct ErrorCode(i32);

    impl ErrorCode {
        /// An error occurred in the SSL library.
        pub const SSL: ErrorCode = Self(0);
    }

    /// An identifier of a session name type.
    pub struct NameType;

    impl NameType {
        pub const HOST_NAME: NameType = Self {};
    }

    /// The state of an SSL/TLS session.
    pub struct Ssl;

    impl Ssl {
        /// Creates a new `Ssl`.
        pub fn new(_ctx: &SslContextRef) -> Result<Ssl, ErrorStack> {
            panic!("Not implemented");
        }
    }
    impl_deref! {Ssl => SslRef}

    /// A type which wraps server-side streams in a TLS session.
    pub struct SslAcceptor;

    impl SslAcceptor {
        /// Creates a new builder configured to connect to non-legacy clients. This should
        /// generally be considered a reasonable default choice.
        pub fn mozilla_intermediate_v5(
            _method: SslMethod,
        ) -> Result<SslAcceptorBuilder, ErrorStack> {
            panic!("Not implemented");
        }
    }

    /// A builder for `SslAcceptor`s.
    pub struct SslAcceptorBuilder;

    impl SslAcceptorBuilder {
        /// Consumes the builder, returning a `SslAcceptor`.
        pub fn build(self) -> SslAcceptor {
            panic!("Not implemented");
        }

        /// Sets the callback used by a server to select a protocol for Application Layer Protocol
        /// Negotiation (ALPN).
        pub fn set_alpn_select_callback<F>(&mut self, _callback: F)
        where
            F: for<'a> Fn(&mut SslRef, &'a [u8]) -> Result<&'a [u8], AlpnError>
                + 'static
                + Sync
                + Send,
        {
            panic!("Not implemented");
        }

        /// Loads a certificate chain from a file.
        pub fn set_certificate_chain_file<P: AsRef<std::path::Path>>(
            &mut self,
            _file: P,
        ) -> Result<(), ErrorStack> {
            panic!("Not implemented");
        }

        /// Loads the private key from a file.
        pub fn set_private_key_file<P: AsRef<std::path::Path>>(
            &mut self,
            _file: P,
            _file_type: SslFiletype,
        ) -> Result<(), ErrorStack> {
            panic!("Not implemented");
        }

        /// Sets the maximum supported protocol version.
        pub fn set_max_proto_version(
            &mut self,
            _version: Option<SslVersion>,
        ) -> Result<(), ErrorStack> {
            panic!("Not implemented");
        }
    }

    /// Reference to an [`SslCipher`].
    pub struct SslCipherRef;

    impl SslCipherRef {
        /// Returns the name of the cipher.
        pub fn name(&self) -> &'static str {
            panic!("Not implemented");
        }
    }

    /// A type which wraps client-side streams in a TLS session.
    pub struct SslConnector;

    impl SslConnector {
        /// Creates a new builder for TLS connections.
        pub fn builder(_method: SslMethod) -> Result<SslConnectorBuilder, ErrorStack> {
            panic!("Not implemented");
        }

        /// Returns a structure allowing for configuration of a single TLS session before connection.
        pub fn configure(&self) -> Result<ConnectConfiguration, ErrorStack> {
            panic!("Not implemented");
        }

        /// Returns a shared reference to the inner raw `SslContext`.
        pub fn context(&self) -> &SslContextRef {
            panic!("Not implemented");
        }
    }

    /// A builder for `SslConnector`s.
    pub struct SslConnectorBuilder;

    impl SslConnectorBuilder {
        /// Consumes the builder, returning an `SslConnector`.
        pub fn build(self) -> SslConnector {
            panic!("Not implemented");
        }

        /// Sets the list of supported ciphers for protocols before TLSv1.3.
        pub fn set_cipher_list(&mut self, _cipher_list: &str) -> Result<(), ErrorStack> {
            panic!("Not implemented");
        }

        /// Sets the context’s supported signature algorithms.
        pub fn set_sigalgs_list(&mut self, _sigalgs: &str) -> Result<(), ErrorStack> {
            panic!("Not implemented");
        }

        /// Sets the minimum supported protocol version.
        pub fn set_min_proto_version(
            &mut self,
            _version: Option<SslVersion>,
        ) -> Result<(), ErrorStack> {
            panic!("Not implemented");
        }

        /// Sets the maximum supported protocol version.
        pub fn set_max_proto_version(
            &mut self,
            _version: Option<SslVersion>,
        ) -> Result<(), ErrorStack> {
            panic!("Not implemented");
        }

        /// Use the default locations of trusted certificates for verification.
        pub fn set_default_verify_paths(&mut self) -> Result<(), ErrorStack> {
            panic!("Not implemented");
        }

        /// Loads trusted root certificates from a file.
        pub fn set_ca_file<P: AsRef<std::path::Path>>(
            &mut self,
            _file: P,
        ) -> Result<(), ErrorStack> {
            panic!("Not implemented");
        }

        /// Loads a leaf certificate from a file.
        pub fn set_certificate_file<P: AsRef<std::path::Path>>(
            &mut self,
            _file: P,
            _file_type: SslFiletype,
        ) -> Result<(), ErrorStack> {
            panic!("Not implemented");
        }

        /// Loads the private key from a file.
        pub fn set_private_key_file<P: AsRef<std::path::Path>>(
            &mut self,
            _file: P,
            _file_type: SslFiletype,
        ) -> Result<(), ErrorStack> {
            panic!("Not implemented");
        }

        /// Sets the TLS key logging callback.
        pub fn set_keylog_callback<F>(&mut self, _callback: F)
        where
            F: Fn(&SslRef, &str) + 'static + Sync + Send,
        {
            panic!("Not implemented");
        }
    }

    /// A context object for TLS streams.
    pub struct SslContext;

    impl SslContext {
        /// Creates a new builder object for an `SslContext`.
        pub fn builder(_method: SslMethod) -> Result<SslContextBuilder, ErrorStack> {
            panic!("Not implemented");
        }
    }
    impl_deref! {SslContext => SslContextRef}

    /// A builder for `SslContext`s.
    pub struct SslContextBuilder;

    impl SslContextBuilder {
        /// Consumes the builder, returning a new `SslContext`.
        pub fn build(self) -> SslContext {
            panic!("Not implemented");
        }
    }

    /// Reference to [`SslContext`]
    pub struct SslContextRef;

    /// An identifier of the format of a certificate or key file.
    pub struct SslFiletype;

    impl SslFiletype {
        /// The PEM format.
        pub const PEM: SslFiletype = Self {};
    }

    /// A type specifying the kind of protocol an `SslContext` will speak.
    pub struct SslMethod;

    impl SslMethod {
        /// Support all versions of the TLS protocol.
        pub fn tls() -> SslMethod {
            panic!("Not implemented");
        }
    }

    /// Reference to an [`Ssl`].
    pub struct SslRef;

    impl SslRef {
        /// Like [`SslContextBuilder::set_verify`].
        pub fn set_verify(&mut self, _mode: SslVerifyMode) {
            panic!("Not implemented");
        }

        /// Returns the current cipher if the session is active.
        pub fn current_cipher(&self) -> Option<&SslCipherRef> {
            panic!("Not implemented");
        }

        /// Sets the host name to be sent to the server for Server Name Indication (SNI).
        pub fn set_hostname(&mut self, _hostname: &str) -> Result<(), ErrorStack> {
            panic!("Not implemented");
        }

        /// Returns the peer’s certificate, if present.
        pub fn peer_certificate(&self) -> Option<X509> {
            panic!("Not implemented");
        }

        /// Returns the certificate verification result.
        pub fn verify_result(&self) -> X509VerifyResult {
            panic!("Not implemented");
        }

        /// Returns a string describing the protocol version of the session.
        pub fn version_str(&self) -> &'static str {
            panic!("Not implemented");
        }

        /// Returns the protocol selected via Application Layer Protocol Negotiation (ALPN).
        pub fn selected_alpn_protocol(&self) -> Option<&[u8]> {
            panic!("Not implemented");
        }

        /// Returns the servername sent by the client via Server Name Indication (SNI).
        pub fn servername(&self, _type_: NameType) -> Option<&str> {
            panic!("Not implemented");
        }
    }

    /// Options controlling the behavior of certificate verification.
    pub struct SslVerifyMode;

    impl SslVerifyMode {
        /// Verifies that the peer’s certificate is trusted.
        pub const PEER: Self = Self {};

        /// Disables verification of the peer’s certificate.
        pub const NONE: Self = Self {};
    }

    /// An SSL/TLS protocol version.
    pub struct SslVersion;

    impl SslVersion {
        /// TLSv1.0
        pub const TLS1: SslVersion = Self {};

        /// TLSv1.2
        pub const TLS1_2: SslVersion = Self {};

        /// TLSv1.3
        pub const TLS1_3: SslVersion = Self {};
    }

    /// A standard implementation of protocol selection for Application Layer Protocol Negotiation
    /// (ALPN).
    pub fn select_next_proto<'a>(_server: &[u8], _client: &'a [u8]) -> Option<&'a [u8]> {
        panic!("Not implemented");
    }
}
/// Minimal stand-ins for the OpenSSL `X509_V_*` verification result codes.
pub mod ssl_sys {
    // 0 matches OpenSSL's X509_V_OK (certificate verification succeeded).
    pub const X509_V_OK: i32 = 0;
    // 69 matches OpenSSL's X509_V_ERR_INVALID_CALL.
    pub const X509_V_ERR_INVALID_CALL: i32 = 69;
}
/// Stub error types: accessors panic because no real TLS backend is compiled in.
pub mod error {
    use super::ssl::Error;

    /// Collection of [`Errors`] from OpenSSL.
    #[derive(Debug)]
    pub struct ErrorStack;
    impl_display!(ErrorStack);
    impl std::error::Error for ErrorStack {}

    impl ErrorStack {
        /// Returns the contents of the OpenSSL error stack.
        pub fn get() -> ErrorStack {
            panic!("Not implemented");
        }

        /// Returns the errors in the stack.
        pub fn errors(&self) -> &[Error] {
            panic!("Not implemented");
        }
    }
}
pub mod x509 {
use super::asn1::{Asn1IntegerRef, Asn1StringRef, Asn1TimeRef};
use super::error::ErrorStack;
use super::hash::{DigestBytes, MessageDigest};
use super::nid::Nid;
/// An `X509` public key certificate.
#[derive(Debug, Clone)]
pub struct X509;
impl_deref! {X509 => X509Ref}
impl X509 {
/// Deserializes a PEM-encoded X509 structure.
pub fn from_pem(_pem: &[u8]) -> Result<X509, ErrorStack> {
panic!("Not implemented");
}
}
/// A type to destructure and examine an `X509Name`.
pub struct X509NameEntries<'a> {
marker: std::marker::PhantomData<&'a ()>,
}
impl<'a> Iterator for X509NameEntries<'a> {
type Item = &'a X509NameEntryRef;
fn next(&mut self) -> Option<&'a X509NameEntryRef> {
panic!("Not implemented");
}
}
/// Reference to `X509NameEntry`.
pub struct X509NameEntryRef;
impl X509NameEntryRef {
pub fn data(&self) -> &Asn1StringRef {
panic!("Not implemented");
}
}
/// Reference to `X509Name`.
pub struct X509NameRef;
impl X509NameRef {
/// Returns the name entries by the nid.
pub fn entries_by_nid(&self, _nid: Nid) -> X509NameEntries<'_> {
panic!("Not implemented");
}
}
/// Reference to `X509`.
pub struct X509Ref;
impl X509Ref {
/// Returns this certificate’s subject name.
pub fn subject_name(&self) -> &X509NameRef {
panic!("Not implemented");
}
/// Returns a digest of the DER representation of the certificate.
pub fn digest(&self, _hash_type: MessageDigest) -> Result<DigestBytes, ErrorStack> {
panic!("Not implemented");
}
/// Returns the certificate’s Not After validity period.
pub fn not_after(&self) -> &Asn1TimeRef {
panic!("Not implemented");
}
/// Returns this certificate’s serial number.
pub fn serial_number(&self) -> &Asn1IntegerRef {
panic!("Not implemented");
}
}
/// The result of peer certificate verification.
pub struct X509VerifyResult;
impl X509VerifyResult {
/// Return the integer representation of an `X509VerifyResult`.
pub fn as_raw(&self) -> i32 {
panic!("Not implemented");
}
}
pub mod store {
use super::super::error::ErrorStack;
use super::X509;
/// A builder type used to construct an `X509Store`.
pub struct X509StoreBuilder;
impl X509StoreBuilder {
/// Returns a builder for a certificate store..
pub fn new() -> Result<X509StoreBuilder, ErrorStack> {
panic!("Not implemented");
}
/// Constructs the `X509Store`.
pub fn build(self) -> X509Store {
panic!("Not implemented");
}
/// Adds a certificate to the certificate store.
pub fn add_cert(&mut self, _cert: X509) -> Result<(), ErrorStack> {
panic!("Not implemented");
}
}
/// A certificate store to hold trusted X509 certificates.
pub struct X509Store;
impl_deref! {X509Store => X509StoreRef}
/// Reference to an `X509Store`.
pub struct X509StoreRef;
}
pub mod verify {
/// Reference to `X509VerifyParam`.
pub struct X509VerifyParamRef;
}
}
pub mod nid {
/// A numerical identifier for an OpenSSL object.
pub struct Nid;
impl Nid {
pub const COMMONNAME: Nid = Self {};
pub const ORGANIZATIONNAME: Nid = Self {};
pub const ORGANIZATIONALUNITNAME: Nid = Self {};
}
}
pub mod pkey {
use super::error::ErrorStack;
/// A public or private key.
#[derive(Clone)]
pub struct PKey<T> {
marker: std::marker::PhantomData<T>,
}
impl<T> std::ops::Deref for PKey<T> {
type Target = PKeyRef<T>;
fn deref(&self) -> &PKeyRef<T> {
panic!("Not implemented");
}
}
impl<T> std::ops::DerefMut for PKey<T> {
fn deref_mut(&mut self) -> &mut PKeyRef<T> {
panic!("Not implemented");
}
}
impl PKey<Private> {
pub fn private_key_from_pem(_pem: &[u8]) -> Result<PKey<Private>, ErrorStack> {
panic!("Not implemented");
}
}
/// Reference to `PKey`.
pub struct PKeyRef<T> {
marker: std::marker::PhantomData<T>,
}
/// A tag type indicating that a key has private components.
#[derive(Clone)]
pub enum Private {}
unsafe impl HasPrivate for Private {}
/// A trait indicating that a key has private components.
pub unsafe trait HasPrivate {}
}
pub mod hash {
/// A message digest algorithm.
pub struct MessageDigest;
impl MessageDigest {
pub fn sha256() -> MessageDigest {
panic!("Not implemented");
}
}
/// The resulting bytes of a digest.
pub struct DigestBytes;
impl AsRef<[u8]> for DigestBytes {
fn as_ref(&self) -> &[u8] {
panic!("Not implemented");
}
}
}
pub mod asn1 {
use super::bn::BigNum;
use super::error::ErrorStack;
/// A reference to an `Asn1Integer`.
pub struct Asn1IntegerRef;
impl Asn1IntegerRef {
/// Converts the integer to a `BigNum`.
pub fn to_bn(&self) -> Result<BigNum, ErrorStack> {
panic!("Not implemented");
}
}
/// A reference to an `Asn1String`.
pub struct Asn1StringRef;
impl Asn1StringRef {
pub fn as_utf8(&self) -> Result<&str, ErrorStack> {
panic!("Not implemented");
}
}
/// Reference to an `Asn1Time`
pub struct Asn1TimeRef;
impl_display! {Asn1TimeRef}
}
pub mod bn {
use super::error::ErrorStack;
/// Dynamically sized large number implementation
pub struct BigNum;
impl BigNum {
/// Returns a hexadecimal string representation of `self`.
pub fn to_hex_str(&self) -> Result<&str, ErrorStack> {
panic!("Not implemented");
}
}
}
pub mod ext {
use super::error::ErrorStack;
use super::pkey::{HasPrivate, PKeyRef};
use super::ssl::{Ssl, SslAcceptor, SslRef};
use super::x509::store::X509StoreRef;
use super::x509::verify::X509VerifyParamRef;
use super::x509::X509Ref;
/// Add name as an additional reference identifier that can match the peer's certificate
pub fn add_host(_verify_param: &mut X509VerifyParamRef, _host: &str) -> Result<(), ErrorStack> {
panic!("Not implemented");
}
/// Set the verify cert store of `_ssl`
pub fn ssl_set_verify_cert_store(
_ssl: &mut SslRef,
_cert_store: &X509StoreRef,
) -> Result<(), ErrorStack> {
panic!("Not implemented");
}
/// Load the certificate into `_ssl`
pub fn ssl_use_certificate(_ssl: &mut SslRef, _cert: &X509Ref) -> Result<(), ErrorStack> {
panic!("Not implemented");
}
/// Load the private key into `_ssl`
pub fn ssl_use_private_key<T>(_ssl: &mut SslRef, _key: &PKeyRef<T>) -> Result<(), ErrorStack>
where
T: HasPrivate,
{
panic!("Not implemented");
}
/// Clear the error stack
pub fn clear_error_stack() {}
/// Create a new [Ssl] from &[SslAcceptor]
pub fn ssl_from_acceptor(_acceptor: &SslAcceptor) -> Result<Ssl, ErrorStack> {
panic!("Not implemented");
}
/// Suspend the TLS handshake when a certificate is needed.
pub fn suspend_when_need_ssl_cert(_ssl: &mut SslRef) {
panic!("Not implemented");
}
/// Unblock a TLS handshake after the certificate is set.
pub fn unblock_ssl_cert(_ssl: &mut SslRef) {
panic!("Not implemented");
}
/// Whether the TLS error is SSL_ERROR_WANT_X509_LOOKUP
pub fn is_suspended_for_cert(_error: &super::ssl::Error) -> bool {
panic!("Not implemented");
}
/// Add the certificate into the cert chain of `_ssl`
pub fn ssl_add_chain_cert(_ssl: &mut SslRef, _cert: &X509Ref) -> Result<(), ErrorStack> {
panic!("Not implemented");
}
/// Set renegotiation
pub fn ssl_set_renegotiate_mode_freely(_ssl: &mut SslRef) {}
/// Set the curves/groups of `_ssl`
pub fn ssl_set_groups_list(_ssl: &mut SslRef, _groups: &str) -> Result<(), ErrorStack> {
panic!("Not implemented");
}
/// Sets whether a second keyshare to be sent in client hello when PQ is used.
pub fn ssl_use_second_key_share(_ssl: &mut SslRef, _enabled: bool) {}
/// Get a mutable SslRef ouf of SslRef, which is a missing functionality even when holding &mut SslStream
/// # Safety
pub unsafe fn ssl_mut(_ssl: &SslRef) -> &mut SslRef {
panic!("Not implemented");
}
}
pub mod tokio_ssl {
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use super::error::ErrorStack;
use super::ssl::{Error, Ssl, SslRef};
/// A TLS session over a stream.
#[derive(Debug)]
pub struct SslStream<S> {
marker: std::marker::PhantomData<S>,
}
impl<S> SslStream<S> {
/// Creates a new `SslStream`.
pub fn new(_ssl: Ssl, _stream: S) -> Result<Self, ErrorStack> {
panic!("Not implemented");
}
/// Initiates a client-side TLS handshake.
pub async fn connect(self: Pin<&mut Self>) -> Result<(), Error> {
panic!("Not implemented");
}
/// Initiates a server-side TLS handshake.
pub async fn accept(self: Pin<&mut Self>) -> Result<(), Error> {
panic!("Not implemented");
}
/// Returns a shared reference to the `Ssl` object associated with this stream.
pub fn ssl(&self) -> &SslRef {
panic!("Not implemented");
}
/// Returns a shared reference to the underlying stream.
pub fn get_ref(&self) -> &S {
panic!("Not implemented");
}
/// Returns a mutable reference to the underlying stream.
pub fn get_mut(&mut self) -> &mut S {
panic!("Not implemented");
}
}
impl<S> AsyncRead for SslStream<S>
where
S: AsyncRead + AsyncWrite,
{
fn poll_read(
self: Pin<&mut Self>,
_ctx: &mut Context<'_>,
_buf: &mut ReadBuf<'_>,
) -> Poll<std::io::Result<()>> {
panic!("Not implemented");
}
}
impl<S> AsyncWrite for SslStream<S>
where
S: AsyncRead + AsyncWrite,
{
fn poll_write(
self: Pin<&mut Self>,
_ctx: &mut Context<'_>,
_buf: &[u8],
) -> Poll<std::io::Result<usize>> {
panic!("Not implemented");
}
fn poll_flush(self: Pin<&mut Self>, _ctx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
panic!("Not implemented");
}
fn poll_shutdown(
self: Pin<&mut Self>,
_ctx: &mut Context<'_>,
) -> Poll<std::io::Result<()>> {
panic!("Not implemented");
}
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/apps/prometheus_http_app.rs | pingora-core/src/apps/prometheus_http_app.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! An HTTP application that reports Prometheus metrics.
use async_trait::async_trait;
use http::Response;
use prometheus::{Encoder, TextEncoder};
use super::http_app::HttpServer;
use crate::apps::http_app::ServeHttp;
use crate::modules::http::compression::ResponseCompressionBuilder;
use crate::protocols::http::ServerSession;
/// An HTTP application that reports Prometheus metrics.
///
/// This application will report all the [static metrics](https://docs.rs/prometheus/latest/prometheus/index.html#static-metrics)
/// collected via the [Prometheus](https://docs.rs/prometheus/) crate;
pub struct PrometheusHttpApp;
#[async_trait]
impl ServeHttp for PrometheusHttpApp {
async fn response(&self, _http_session: &mut ServerSession) -> Response<Vec<u8>> {
let encoder = TextEncoder::new();
let metric_families = prometheus::gather();
let mut buffer = vec![];
encoder.encode(&metric_families, &mut buffer).unwrap();
Response::builder()
.status(200)
.header(http::header::CONTENT_TYPE, encoder.format_type())
.header(http::header::CONTENT_LENGTH, buffer.len())
.body(buffer)
.unwrap()
}
}
/// The [HttpServer] for [PrometheusHttpApp]
///
/// This type provides the functionality of [PrometheusHttpApp] with compression enabled
pub type PrometheusServer = HttpServer<PrometheusHttpApp>;
impl PrometheusServer {
pub fn new() -> Self {
let mut server = Self::new_app(PrometheusHttpApp);
// enable gzip level 7 compression
server.add_module(ResponseCompressionBuilder::enable(7));
server
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/apps/http_app.rs | pingora-core/src/apps/http_app.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A simple HTTP application trait that maps a request to a response
use async_trait::async_trait;
use http::Response;
use log::{debug, error, trace};
use pingora_http::ResponseHeader;
use std::sync::Arc;
use crate::apps::{HttpPersistentSettings, HttpServerApp, HttpServerOptions, ReusedHttpStream};
use crate::modules::http::{HttpModules, ModuleBuilder};
use crate::protocols::http::v2::server::H2Options;
use crate::protocols::http::HttpTask;
use crate::protocols::http::ServerSession;
use crate::server::ShutdownWatch;
/// This trait defines how to map a request to a response
#[async_trait]
pub trait ServeHttp {
/// Define the mapping from a request to a response.
/// Note that the request header is already read, but the implementation needs to read the
/// request body if any.
///
/// # Limitation
/// In this API, the entire response has to be generated before the end of this call.
/// So it is not suitable for streaming response or interactive communications.
/// Users need to implement their own [`super::HttpServerApp`] for those use cases.
async fn response(&self, http_session: &mut ServerSession) -> Response<Vec<u8>>;
}
// TODO: remove this in favor of HttpServer?
#[async_trait]
impl<SV> HttpServerApp for SV
where
SV: ServeHttp + Send + Sync,
{
async fn process_new_http(
self: &Arc<Self>,
mut http: ServerSession,
shutdown: &ShutdownWatch,
) -> Option<ReusedHttpStream> {
match http.read_request().await {
Ok(res) => match res {
false => {
debug!("Failed to read request header");
return None;
}
true => {
debug!("Successfully get a new request");
}
},
Err(e) => {
error!("HTTP server fails to read from downstream: {e}");
return None;
}
}
trace!("{:?}", http.req_header());
if *shutdown.borrow() {
http.set_keepalive(None);
} else {
http.set_keepalive(Some(60));
}
let new_response = self.response(&mut http).await;
let (parts, body) = new_response.into_parts();
let resp_header: ResponseHeader = parts.into();
match http.write_response_header(Box::new(resp_header)).await {
Ok(()) => {
debug!("HTTP response header done.");
}
Err(e) => {
error!(
"HTTP server fails to write to downstream: {e}, {}",
http.request_summary()
);
}
}
if !body.is_empty() {
// TODO: check if chunked encoding is needed
match http.write_response_body(body.into(), true).await {
Ok(_) => debug!("HTTP response written."),
Err(e) => error!(
"HTTP server fails to write to downstream: {e}, {}",
http.request_summary()
),
}
}
let persistent_settings = HttpPersistentSettings::for_session(&http);
match http.finish().await {
Ok(c) => c.map(|s| ReusedHttpStream::new(s, Some(persistent_settings))),
Err(e) => {
error!("HTTP server fails to finish the request: {e}");
None
}
}
}
}
/// A helper struct for HTTP server with http modules embedded
pub struct HttpServer<SV> {
app: SV,
modules: HttpModules,
pub server_options: Option<HttpServerOptions>,
pub h2_options: Option<H2Options>,
}
impl<SV> HttpServer<SV> {
/// Create a new [HttpServer] with the given app which implements [ServeHttp]
pub fn new_app(app: SV) -> Self {
HttpServer {
app,
modules: HttpModules::new(),
server_options: None,
h2_options: None,
}
}
/// Add [ModuleBuilder] to this [HttpServer]
pub fn add_module(&mut self, module: ModuleBuilder) {
self.modules.add_module(module)
}
}
#[async_trait]
impl<SV> HttpServerApp for HttpServer<SV>
where
SV: ServeHttp + Send + Sync,
{
async fn process_new_http(
self: &Arc<Self>,
mut http: ServerSession,
shutdown: &ShutdownWatch,
) -> Option<ReusedHttpStream> {
match http.read_request().await {
Ok(res) => match res {
false => {
debug!("Failed to read request header");
return None;
}
true => {
debug!("Successfully get a new request");
}
},
Err(e) => {
error!("HTTP server fails to read from downstream: {e}");
return None;
}
}
trace!("{:?}", http.req_header());
if *shutdown.borrow() {
http.set_keepalive(None);
} else {
http.set_keepalive(Some(60));
}
let mut module_ctx = self.modules.build_ctx();
let req = http.req_header_mut();
module_ctx.request_header_filter(req).await.ok()?;
let new_response = self.app.response(&mut http).await;
let (parts, body) = new_response.into_parts();
let mut resp_header: ResponseHeader = parts.into();
module_ctx
.response_header_filter(&mut resp_header, body.is_empty())
.await
.ok()?;
let task = HttpTask::Header(Box::new(resp_header), body.is_empty());
trace!("{task:?}");
match http.response_duplex_vec(vec![task]).await {
Ok(_) => {
debug!("HTTP response header done.");
}
Err(e) => {
error!(
"HTTP server fails to write to downstream: {e}, {}",
http.request_summary()
);
}
}
let mut body = Some(body.into());
module_ctx.response_body_filter(&mut body, true).ok()?;
let task = HttpTask::Body(body, true);
trace!("{task:?}");
// TODO: check if chunked encoding is needed
match http.response_duplex_vec(vec![task]).await {
Ok(_) => debug!("HTTP response written."),
Err(e) => error!(
"HTTP server fails to write to downstream: {e}, {}",
http.request_summary()
),
}
let persistent_settings = HttpPersistentSettings::for_session(&http);
match http.finish().await {
Ok(c) => c.map(|s| ReusedHttpStream::new(s, Some(persistent_settings))),
Err(e) => {
error!("HTTP server fails to finish the request: {e}");
None
}
}
}
fn h2_options(&self) -> Option<H2Options> {
self.h2_options.clone()
}
fn server_options(&self) -> Option<&HttpServerOptions> {
self.server_options.as_ref()
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/apps/mod.rs | pingora-core/src/apps/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The abstraction and implementation interface for service application logic
pub mod http_app;
pub mod prometheus_http_app;
use crate::server::ShutdownWatch;
use async_trait::async_trait;
use log::{debug, error};
use std::future::poll_fn;
use std::sync::Arc;
use crate::protocols::http::v2::server;
use crate::protocols::http::ServerSession;
use crate::protocols::Digest;
use crate::protocols::Stream;
use crate::protocols::ALPN;
// https://datatracker.ietf.org/doc/html/rfc9113#section-3.4
const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
#[async_trait]
/// This trait defines the interface of a transport layer (TCP or TLS) application.
pub trait ServerApp {
/// Whenever a new connection is established, this function will be called with the established
/// [`Stream`] object provided.
///
/// The application can do whatever it wants with the `session`.
///
/// After processing the `session`, if the `session`'s connection is reusable, This function
/// can return it to the service by returning `Some(session)`. The returned `session` will be
/// fed to another [`Self::process_new()`] for another round of processing.
/// If not reusable, `None` should be returned.
///
/// The `shutdown` argument will change from `false` to `true` when the server receives a
/// signal to shutdown. This argument allows the application to react accordingly.
async fn process_new(
self: &Arc<Self>,
mut session: Stream,
// TODO: make this ShutdownWatch so that all task can await on this event
shutdown: &ShutdownWatch,
) -> Option<Stream>;
/// This callback will be called once after the service stops listening to its endpoints.
async fn cleanup(&self) {}
}
#[non_exhaustive]
#[derive(Default)]
/// HTTP Server options that control how the server handles some transport types.
pub struct HttpServerOptions {
/// Use HTTP/2 for plaintext.
pub h2c: bool,
#[doc(hidden)]
pub force_custom: bool,
}
#[derive(Debug, Clone)]
pub struct HttpPersistentSettings {
keepalive_timeout: Option<u64>,
}
impl HttpPersistentSettings {
pub fn for_session(session: &ServerSession) -> Self {
HttpPersistentSettings {
keepalive_timeout: session.get_keepalive(),
}
}
pub fn apply_to_session(&self, session: &mut ServerSession) {
session.set_keepalive(self.keepalive_timeout);
}
}
#[derive(Debug)]
pub struct ReusedHttpStream {
stream: Stream,
persistent_settings: Option<HttpPersistentSettings>,
}
impl ReusedHttpStream {
pub fn new(stream: Stream, persistent_settings: Option<HttpPersistentSettings>) -> Self {
ReusedHttpStream {
stream,
persistent_settings,
}
}
pub fn consume(self) -> (Stream, Option<HttpPersistentSettings>) {
(self.stream, self.persistent_settings)
}
}
/// This trait defines the interface of an HTTP application.
#[async_trait]
pub trait HttpServerApp {
/// Similar to the [`ServerApp`], this function is called whenever a new HTTP session is established.
///
/// After successful processing, [`ServerSession::finish()`] can be called to return an optionally reusable
/// connection back to the service. The caller needs to make sure that the connection is in a reusable state
/// i.e., no error or incomplete read or write headers or bodies. Otherwise a `None` should be returned.
async fn process_new_http(
self: &Arc<Self>,
mut session: ServerSession,
// TODO: make this ShutdownWatch so that all task can await on this event
shutdown: &ShutdownWatch,
) -> Option<ReusedHttpStream>;
/// Provide options on how HTTP/2 connection should be established. This function will be called
/// every time a new HTTP/2 **connection** needs to be established.
///
/// A `None` means to use the built-in default options. See [`server::H2Options`] for more details.
fn h2_options(&self) -> Option<server::H2Options> {
None
}
/// Provide HTTP server options used to override default behavior. This function will be called
/// every time a new connection is processed.
///
/// A `None` means no server options will be applied.
fn server_options(&self) -> Option<&HttpServerOptions> {
None
}
async fn http_cleanup(&self) {}
#[doc(hidden)]
async fn process_custom_session(
self: Arc<Self>,
_stream: Stream,
_shutdown: &ShutdownWatch,
) -> Option<Stream> {
None
}
}
#[async_trait]
impl<T> ServerApp for T
where
T: HttpServerApp + Send + Sync + 'static,
{
async fn process_new(
self: &Arc<Self>,
mut stream: Stream,
shutdown: &ShutdownWatch,
) -> Option<Stream> {
let mut h2c = self.server_options().as_ref().map_or(false, |o| o.h2c);
let custom = self
.server_options()
.as_ref()
.map_or(false, |o| o.force_custom);
// try to read h2 preface
if h2c && !custom {
let mut buf = [0u8; H2_PREFACE.len()];
let peeked = stream
.try_peek(&mut buf)
.await
.map_err(|e| {
// this error is normal when h1 reuse and close the connection
debug!("Read error while peeking h2c preface {e}");
e
})
.ok()?;
// not all streams support peeking
if peeked {
// turn off h2c (use h1) if h2 preface doesn't exist
h2c = buf == H2_PREFACE;
}
}
if h2c || matches!(stream.selected_alpn_proto(), Some(ALPN::H2)) {
// create a shared connection digest
let digest = Arc::new(Digest {
ssl_digest: stream.get_ssl_digest(),
// TODO: log h2 handshake time
timing_digest: stream.get_timing_digest(),
proxy_digest: stream.get_proxy_digest(),
socket_digest: stream.get_socket_digest(),
});
let h2_options = self.h2_options();
let h2_conn = server::handshake(stream, h2_options).await;
let mut h2_conn = match h2_conn {
Err(e) => {
error!("H2 handshake error {e}");
return None;
}
Ok(c) => c,
};
let mut shutdown = shutdown.clone();
loop {
// this loop ends when the client decides to close the h2 conn
// TODO: add a timeout?
let h2_stream = tokio::select! {
_ = shutdown.changed() => {
h2_conn.graceful_shutdown();
let _ = poll_fn(|cx| h2_conn.poll_closed(cx))
.await.map_err(|e| error!("H2 error waiting for shutdown {e}"));
return None;
}
h2_stream = server::HttpSession::from_h2_conn(&mut h2_conn, digest.clone()) => h2_stream
};
let h2_stream = match h2_stream {
Err(e) => {
// It is common for the client to just disconnect TCP without properly
// closing H2. So we don't log the errors here
debug!("H2 error when accepting new stream {e}");
return None;
}
Ok(s) => s?, // None means the connection is ready to be closed
};
let app = self.clone();
let shutdown = shutdown.clone();
pingora_runtime::current_handle().spawn(async move {
// Note, `PersistentSettings` not currently relevant for h2
app.process_new_http(ServerSession::new_http2(h2_stream), &shutdown)
.await;
});
}
} else if custom || matches!(stream.selected_alpn_proto(), Some(ALPN::Custom(_))) {
return self.clone().process_custom_session(stream, shutdown).await;
} else {
// No ALPN or ALPN::H1 and h2c was not configured, fallback to HTTP/1.1
let mut session = ServerSession::new_http1(stream);
if *shutdown.borrow() {
// stop downstream from reusing if this service is shutting down soon
session.set_keepalive(None);
} else {
// default 60s
session.set_keepalive(Some(60));
}
let mut result = self.process_new_http(session, shutdown).await;
while let Some((stream, persistent_settings)) = result.map(|r| r.consume()) {
let mut session = ServerSession::new_http1(stream);
if let Some(persistent_settings) = persistent_settings {
persistent_settings.apply_to_session(&mut session);
}
result = self.process_new_http(session, shutdown).await;
}
}
None
}
async fn cleanup(&self) {
self.http_cleanup().await;
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/upstreams/mod.rs | pingora-core/src/upstreams/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The interface to connect to a remote server
pub mod peer;
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/upstreams/peer.rs | pingora-core/src/upstreams/peer.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Defines where to connect to and how to connect to a remote server
use crate::connectors::{l4::BindTo, L4Connect};
use crate::protocols::l4::socket::SocketAddr;
use crate::protocols::tls::CaType;
#[cfg(feature = "openssl_derived")]
use crate::protocols::tls::HandshakeCompleteHook;
#[cfg(feature = "s2n")]
use crate::protocols::tls::PskType;
#[cfg(unix)]
use crate::protocols::ConnFdReusable;
use crate::protocols::TcpKeepalive;
use crate::utils::tls::{get_organization_unit, CertKey};
use ahash::AHasher;
use derivative::Derivative;
use pingora_error::{
ErrorType::{InternalError, SocketError},
OrErr, Result,
};
#[cfg(feature = "s2n")]
use pingora_s2n::S2NPolicy;
use std::collections::BTreeMap;
use std::fmt::{Display, Formatter, Result as FmtResult};
use std::hash::{Hash, Hasher};
use std::net::{IpAddr, SocketAddr as InetSocketAddr, ToSocketAddrs as ToInetSocketAddrs};
#[cfg(unix)]
use std::os::unix::{net::SocketAddr as UnixSocketAddr, prelude::AsRawFd};
#[cfg(windows)]
use std::os::windows::io::AsRawSocket;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use tokio::net::TcpSocket;
pub use crate::protocols::tls::ALPN;
/// A hook function that may generate user data for [`crate::protocols::raw_connect::ProxyDigest`].
///
/// Takes the request and response headers from the proxy connection establishment, and may produce
/// arbitrary data to be stored in ProxyDigest's user_data field.
///
/// This can be useful when, for example, you want to store some parameter(s) from the request or
/// response headers from when the proxy connection was first established.
pub type ProxyDigestUserDataHook = Arc<
dyn Fn(
&http::request::Parts, // request headers
&pingora_http::ResponseHeader, // response headers
) -> Option<Box<dyn std::any::Any + Send + Sync>>
+ Send
+ Sync
+ 'static,
>;
/// The interface to trace the connection
pub trait Tracing: Send + Sync + std::fmt::Debug {
/// This method is called when successfully connected to a remote server
fn on_connected(&self);
/// This method is called when the connection is disconnected.
fn on_disconnected(&self);
/// A way to clone itself
fn boxed_clone(&self) -> Box<dyn Tracing>;
}
/// An object-safe version of Tracing object that can use Clone
#[derive(Debug)]
pub struct Tracer(pub Box<dyn Tracing>);
impl Clone for Tracer {
fn clone(&self) -> Self {
Tracer(self.0.boxed_clone())
}
}
/// [`Peer`] defines the interface to communicate with the [`crate::connectors`] regarding where to
/// connect to and how to connect to it.
pub trait Peer: Display + Clone {
    /// The remote address to connect to
    fn address(&self) -> &SocketAddr;
    /// If TLS should be used;
    fn tls(&self) -> bool;
    /// The SNI to send, if TLS is used
    fn sni(&self) -> &str;
    /// To decide whether a [`Peer`] can use the connection established by another [`Peer`].
    ///
    /// The connections to two peers are considered reusable to each other if their reuse hashes are
    /// the same
    fn reuse_hash(&self) -> u64;
    /// Get the proxy setting to connect to the remote server
    fn get_proxy(&self) -> Option<&Proxy> {
        None
    }
    /// Get the additional options to connect to the peer.
    ///
    /// See [`PeerOptions`] for more details
    fn get_peer_options(&self) -> Option<&PeerOptions> {
        None
    }
    /// Get the additional options for modification.
    fn get_mut_peer_options(&mut self) -> Option<&mut PeerOptions> {
        None
    }
    /// Whether the TLS handshake should validate the cert of the server.
    ///
    /// Defaults to `false` when no [`PeerOptions`] are provided.
    fn verify_cert(&self) -> bool {
        match self.get_peer_options() {
            Some(opt) => opt.verify_cert,
            None => false,
        }
    }
    /// Whether the TLS handshake should verify that the server cert matches the SNI.
    ///
    /// Defaults to `false` when no [`PeerOptions`] are provided.
    fn verify_hostname(&self) -> bool {
        match self.get_peer_options() {
            Some(opt) => opt.verify_hostname,
            None => false,
        }
    }
    /// Whether the system trust store should be loaded and used when verifying certificates
    #[cfg(feature = "s2n")]
    fn use_system_certs(&self) -> bool {
        match self.get_peer_options() {
            Some(opt) => opt.use_system_certs,
            None => false,
        }
    }
    /// The alternative common name to use to verify the server cert.
    ///
    /// If the server cert doesn't match the SNI, this name will be used to
    /// verify the cert.
    fn alternative_cn(&self) -> Option<&String> {
        match self.get_peer_options() {
            Some(opt) => opt.alternative_cn.as_ref(),
            None => None,
        }
    }
    /// Information about the local source address this connection should be bound to.
    fn bind_to(&self) -> Option<&BindTo> {
        match self.get_peer_options() {
            Some(opt) => opt.bind_to.as_ref(),
            None => None,
        }
    }
    /// How long connect() call should be wait before it returns a timeout error.
    fn connection_timeout(&self) -> Option<Duration> {
        match self.get_peer_options() {
            Some(opt) => opt.connection_timeout,
            None => None,
        }
    }
    /// How long the overall connection establishment should take before a timeout error is returned.
    fn total_connection_timeout(&self) -> Option<Duration> {
        match self.get_peer_options() {
            Some(opt) => opt.total_connection_timeout,
            None => None,
        }
    }
    /// If the connection can be reused, how long the connection should wait to be reused before it
    /// shuts down.
    fn idle_timeout(&self) -> Option<Duration> {
        self.get_peer_options().and_then(|o| o.idle_timeout)
    }
    /// Get the ALPN preference.
    fn get_alpn(&self) -> Option<&ALPN> {
        self.get_peer_options().map(|opt| &opt.alpn)
    }
    /// Get the CA cert to use to validate the server cert.
    ///
    /// If not set, the default CAs will be used.
    fn get_ca(&self) -> Option<&Arc<CaType>> {
        match self.get_peer_options() {
            Some(opt) => opt.ca.as_ref(),
            None => None,
        }
    }
    /// Get the client cert and key for mutual TLS if any
    fn get_client_cert_key(&self) -> Option<&Arc<CertKey>> {
        None
    }
    /// Get the PSK (pre-shared key) to use to validate the connection
    ///
    /// If not set, PSK validation will not be used
    #[cfg(feature = "s2n")]
    fn get_psk(&self) -> Option<&Arc<PskType>> {
        match self.get_peer_options() {
            Some(opt) => opt.psk.as_ref(),
            None => None,
        }
    }
    /// Get the Security Policy to use for this connection (S2N only)
    ///
    /// If not set, the default policy "default_tls13" will be used
    /// https://aws.github.io/s2n-tls/usage-guide/ch06-security-policies.html
    #[cfg(feature = "s2n")]
    fn get_s2n_security_policy(&self) -> Option<&S2NPolicy> {
        match self.get_peer_options() {
            Some(opt) => opt.s2n_security_policy.as_ref(),
            None => None,
        }
    }
    /// S2N-TLS will delay a response up to the max blinding delay (default 30)
    /// seconds whenever an error triggered by a peer occurs to mitigate against
    /// timing side channels.
    #[cfg(feature = "s2n")]
    fn get_max_blinding_delay(&self) -> Option<u32> {
        match self.get_peer_options() {
            Some(opt) => opt.max_blinding_delay,
            None => None,
        }
    }
    /// The TCP keepalive setting that should be applied to this connection
    fn tcp_keepalive(&self) -> Option<&TcpKeepalive> {
        self.get_peer_options()
            .and_then(|o| o.tcp_keepalive.as_ref())
    }
    /// The interval H2 pings to send to the server if any
    fn h2_ping_interval(&self) -> Option<Duration> {
        self.get_peer_options().and_then(|o| o.h2_ping_interval)
    }
    /// The size of the TCP receive buffer should be limited to. See SO_RCVBUF for more details.
    fn tcp_recv_buf(&self) -> Option<usize> {
        self.get_peer_options().and_then(|o| o.tcp_recv_buf)
    }
    /// The DSCP value that should be applied to the send side of this connection.
    /// See the [RFC](https://datatracker.ietf.org/doc/html/rfc2474) for more details.
    fn dscp(&self) -> Option<u8> {
        self.get_peer_options().and_then(|o| o.dscp)
    }
    /// Whether to enable TCP fast open.
    fn tcp_fast_open(&self) -> bool {
        self.get_peer_options()
            .map(|o| o.tcp_fast_open)
            .unwrap_or_default()
    }
    /// Whether the given raw fd matches this peer's address, as decided by
    /// `SocketAddr::check_fd_match` (unix only).
    #[cfg(unix)]
    fn matches_fd<V: AsRawFd>(&self, fd: V) -> bool {
        self.address().check_fd_match(fd)
    }
    /// Whether the given raw socket matches this peer's address, as decided by
    /// `check_sock_match` (windows only).
    #[cfg(windows)]
    fn matches_sock<V: AsRawSocket>(&self, sock: V) -> bool {
        use crate::protocols::ConnSockReusable;
        self.address().check_sock_match(sock)
    }
    /// Get the [`Tracer`] attached to this peer, if any. Defaults to `None`.
    fn get_tracer(&self) -> Option<Tracer> {
        None
    }
    /// Returns a hook that should be run before an upstream TCP connection is connected.
    ///
    /// This hook can be used to set additional socket options.
    fn upstream_tcp_sock_tweak_hook(
        &self,
    ) -> Option<&Arc<dyn Fn(&TcpSocket) -> Result<()> + Send + Sync + 'static>> {
        self.get_peer_options()?
            .upstream_tcp_sock_tweak_hook
            .as_ref()
    }
    /// Returns a [`ProxyDigestUserDataHook`] that may generate user data for
    /// [`crate::protocols::raw_connect::ProxyDigest`] when establishing a new proxy connection.
    fn proxy_digest_user_data_hook(&self) -> Option<&ProxyDigestUserDataHook> {
        self.get_peer_options()?
            .proxy_digest_user_data_hook
            .as_ref()
    }
    /// Returns a hook that should be run on TLS handshake completion.
    ///
    /// Any value returned from the returned hook (other than `None`) will be stored in the
    /// `extension` field of `SslDigest`. This allows you to attach custom application-specific
    /// data to the TLS connection, which will be accessible from the HTTP layer via the
    /// `SslDigest` attached to the session digest.
    ///
    /// Currently only enabled for openssl variants with meaningful `TlsRef`s.
    #[cfg(feature = "openssl_derived")]
    fn upstream_tls_handshake_complete_hook(&self) -> Option<&HandshakeCompleteHook> {
        self.get_peer_options()?
            .upstream_tls_handshake_complete_hook
            .as_ref()
    }
}
/// A simple TCP or TLS peer without many complicated settings.
#[derive(Debug, Clone)]
pub struct BasicPeer {
    /// The remote address to connect to.
    pub _address: SocketAddr,
    /// The SNI to send; an empty string means TLS is disabled for this peer
    /// (see the `tls()` impl below).
    pub sni: String,
    /// Connection options for this peer.
    pub options: PeerOptions,
}
impl BasicPeer {
    /// Create a new [`BasicPeer`].
    ///
    /// # Panics
    ///
    /// Panics if `address` is not a valid `ip:port` inet socket address string.
    pub fn new(address: &str) -> Self {
        // TODO: return a Result instead of panicking on malformed input
        let addr = SocketAddr::Inet(
            address
                .parse()
                .unwrap_or_else(|e| panic!("invalid socket address {address}: {e}")),
        );
        Self::new_from_sockaddr(addr)
    }
    /// Create a new [`BasicPeer`] with the given path to a Unix domain socket.
    #[cfg(unix)]
    pub fn new_uds<P: AsRef<Path>>(path: P) -> Result<Self> {
        let addr = SocketAddr::Unix(
            UnixSocketAddr::from_pathname(path.as_ref())
                .or_err(InternalError, "while creating BasicPeer")?,
        );
        Ok(Self::new_from_sockaddr(addr))
    }
    /// Shared constructor: no SNI (so TLS disabled) and default [`PeerOptions`].
    fn new_from_sockaddr(sockaddr: SocketAddr) -> Self {
        BasicPeer {
            _address: sockaddr,
            sni: "".to_string(), // TODO: add support for SNI
            options: PeerOptions::new(),
        }
    }
}
impl Display for BasicPeer {
    /// `BasicPeer` has no custom display form; fall back to its `Debug` output.
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        write!(f, "{self:?}")
    }
}
impl Peer for BasicPeer {
    fn address(&self) -> &SocketAddr {
        &self._address
    }
    /// TLS is considered enabled whenever an SNI is present.
    fn tls(&self) -> bool {
        !self.sni.is_empty()
    }
    /// A `BasicPeer` never binds to a specific local source address.
    fn bind_to(&self) -> Option<&BindTo> {
        None
    }
    fn sni(&self) -> &str {
        self.sni.as_str()
    }
    // TODO: change connection pool to accept u64 instead of String
    /// The reuse hash is derived from the remote address alone.
    fn reuse_hash(&self) -> u64 {
        let mut h = AHasher::default();
        self._address.hash(&mut h);
        h.finish()
    }
    fn get_peer_options(&self) -> Option<&PeerOptions> {
        Some(&self.options)
    }
}
/// Define whether to connect via http or https
#[derive(Hash, Clone, Debug, PartialEq)]
pub enum Scheme {
    /// Plaintext HTTP (no TLS).
    HTTP,
    /// HTTP over TLS.
    HTTPS,
}
impl Display for Scheme {
    /// Write the scheme name in uppercase, mirroring the variant names.
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        let name = match self {
            Scheme::HTTP => "HTTP",
            Scheme::HTTPS => "HTTPS",
        };
        write!(f, "{name}")
    }
}
impl Scheme {
    /// Map a TLS flag to a scheme: `true` → `HTTPS`, `false` → `HTTP`.
    pub fn from_tls_bool(tls: bool) -> Self {
        match tls {
            true => Self::HTTPS,
            false => Self::HTTP,
        }
    }
}
/// The preferences to connect to a remote server
///
/// See [`Peer`] for the meaning of the fields
#[non_exhaustive]
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct PeerOptions {
    pub bind_to: Option<BindTo>,
    pub connection_timeout: Option<Duration>,
    pub total_connection_timeout: Option<Duration>,
    // NOTE(review): read/write timeouts have no accessor on the `Peer` trait in this
    // module — presumably consumed elsewhere; confirm.
    pub read_timeout: Option<Duration>,
    pub idle_timeout: Option<Duration>,
    pub write_timeout: Option<Duration>,
    pub verify_cert: bool,
    pub verify_hostname: bool,
    #[cfg(feature = "s2n")]
    pub use_system_certs: bool,
    /* accept the cert if it's CN matches the SNI or this name */
    pub alternative_cn: Option<String>,
    pub alpn: ALPN,
    pub ca: Option<Arc<CaType>>,
    pub tcp_keepalive: Option<TcpKeepalive>,
    pub tcp_recv_buf: Option<usize>,
    pub dscp: Option<u8>,
    pub h2_ping_interval: Option<Duration>,
    #[cfg(feature = "s2n")]
    pub psk: Option<Arc<PskType>>,
    #[cfg(feature = "s2n")]
    pub s2n_security_policy: Option<S2NPolicy>,
    #[cfg(feature = "s2n")]
    pub max_blinding_delay: Option<u32>,
    // how many concurrent h2 stream are allowed in the same connection
    pub max_h2_streams: usize,
    // NOTE(review): presumably extra headers for the proxy CONNECT request
    // (cf. `Proxy::headers`) — usage not shown here; confirm.
    pub extra_proxy_headers: BTreeMap<String, Vec<u8>>,
    // The list of curve the tls connection should advertise
    // if `None`, the default curves will be used
    pub curves: Option<&'static str>,
    // see ssl_use_second_key_share
    pub second_keyshare: bool,
    // whether to enable TCP fast open
    pub tcp_fast_open: bool,
    // use Arc because Clone is required but not allowed in trait object
    pub tracer: Option<Tracer>,
    // A custom L4 connector to use to establish new L4 connections
    pub custom_l4: Option<Arc<dyn L4Connect + Send + Sync>>,
    // hook run before an upstream TCP connection is connected, e.g. to set
    // additional socket options (see `Peer::upstream_tcp_sock_tweak_hook`)
    #[derivative(Debug = "ignore")]
    pub upstream_tcp_sock_tweak_hook:
        Option<Arc<dyn Fn(&TcpSocket) -> Result<()> + Send + Sync + 'static>>,
    #[derivative(Debug = "ignore")]
    pub proxy_digest_user_data_hook: Option<ProxyDigestUserDataHook>,
    /// Hook that allows returning an optional `SslDigestExtension`.
    /// Any returned value will be saved into the `SslDigest`.
    ///
    /// Currently only enabled for openssl variants with meaningful `TlsRef`s.
    #[cfg(feature = "openssl_derived")]
    #[derivative(Debug = "ignore")]
    pub upstream_tls_handshake_complete_hook: Option<HandshakeCompleteHook>,
}
impl PeerOptions {
    /// Create a new [`PeerOptions`]
    ///
    /// Defaults: no timeouts, certificate and hostname verification enabled,
    /// ALPN `H1`, one h2 stream per connection, and TCP fast open disabled.
    pub fn new() -> Self {
        PeerOptions {
            bind_to: None,
            connection_timeout: None,
            total_connection_timeout: None,
            read_timeout: None,
            idle_timeout: None,
            write_timeout: None,
            verify_cert: true,
            verify_hostname: true,
            #[cfg(feature = "s2n")]
            use_system_certs: true,
            alternative_cn: None,
            alpn: ALPN::H1,
            ca: None,
            tcp_keepalive: None,
            tcp_recv_buf: None,
            dscp: None,
            h2_ping_interval: None,
            #[cfg(feature = "s2n")]
            psk: None,
            #[cfg(feature = "s2n")]
            s2n_security_policy: None,
            #[cfg(feature = "s2n")]
            max_blinding_delay: None,
            max_h2_streams: 1,
            extra_proxy_headers: BTreeMap::new(),
            curves: None,
            second_keyshare: true, // default true and noop when not using PQ curves
            tcp_fast_open: false,
            tracer: None,
            custom_l4: None,
            upstream_tcp_sock_tweak_hook: None,
            proxy_digest_user_data_hook: None,
            #[cfg(feature = "openssl_derived")]
            upstream_tls_handshake_complete_hook: None,
        }
    }
    /// Set the ALPN according to the `max` and `min` constrains.
    pub fn set_http_version(&mut self, max: u8, min: u8) {
        self.alpn = ALPN::new(max, min);
    }
}
impl Display for PeerOptions {
    // Writes a compact single-line summary; unset options and false flags are skipped.
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        if let Some(b) = self.bind_to.as_ref() {
            write!(f, "bind_to: {:?},", b)?;
        }
        if let Some(t) = self.connection_timeout {
            write!(f, "conn_timeout: {:?},", t)?;
        }
        if let Some(t) = self.total_connection_timeout {
            write!(f, "total_conn_timeout: {:?},", t)?;
        }
        if self.verify_cert {
            write!(f, "verify_cert: true,")?;
        }
        if self.verify_hostname {
            write!(f, "verify_hostname: true,")?;
        }
        #[cfg(feature = "s2n")]
        if self.use_system_certs {
            write!(f, "use_system_certs: true,")?;
        }
        if let Some(cn) = &self.alternative_cn {
            write!(f, "alt_cn: {},", cn)?;
        }
        write!(f, "alpn: {},", self.alpn)?;
        if let Some(cas) = &self.ca {
            for ca in cas.iter() {
                write!(
                    f,
                    "CA: {}, expire: {},",
                    get_organization_unit(ca).unwrap_or_default(),
                    ca.not_after()
                )?;
            }
        }
        #[cfg(feature = "s2n")]
        if let Some(policy) = &self.s2n_security_policy {
            write!(f, "s2n_security_policy: {:?}, ", policy)?;
        }
        #[cfg(feature = "s2n")]
        if let Some(psk_config) = &self.psk {
            for psk in &psk_config.keys {
                // NOTE(review): unlike the other fields, no trailing comma separator is
                // written here — confirm whether that is intended.
                write!(
                    f,
                    "psk_identity: {}",
                    String::from_utf8_lossy(psk.identity.as_slice())
                )?;
            }
        }
        if let Some(tcp_keepalive) = &self.tcp_keepalive {
            write!(f, "tcp_keepalive: {},", tcp_keepalive)?;
        }
        if let Some(h2_ping_interval) = self.h2_ping_interval {
            write!(f, "h2_ping_interval: {:?},", h2_ping_interval)?;
        }
        Ok(())
    }
}
/// A peer representing the remote HTTP server to connect to
#[derive(Debug, Clone)]
pub struct HttpPeer {
    /// The remote address to connect to.
    pub _address: SocketAddr,
    /// Whether to speak plain HTTP or HTTP over TLS to this peer.
    pub scheme: Scheme,
    /// The SNI to send when TLS is used.
    pub sni: String,
    /// Optional CONNECT proxy to reach the peer through.
    pub proxy: Option<Proxy>,
    /// Client certificate and key for mutual TLS, if any.
    pub client_cert_key: Option<Arc<CertKey>>,
    /// a custom field to isolate connection reuse. Requests with different group keys
    /// cannot share connections with each other.
    pub group_key: u64,
    /// Connection options for this peer.
    pub options: PeerOptions,
}
impl HttpPeer {
    // These methods are pretty ad-hoc
    /// Whether this peer is connected to over TLS (i.e. its scheme is HTTPS).
    pub fn is_tls(&self) -> bool {
        matches!(self.scheme, Scheme::HTTPS)
    }
    /// Shared constructor: no proxy, no client cert, group key 0, default options.
    fn new_from_sockaddr(address: SocketAddr, tls: bool, sni: String) -> Self {
        HttpPeer {
            _address: address,
            scheme: Scheme::from_tls_bool(tls),
            sni,
            proxy: None,
            client_cert_key: None,
            group_key: 0,
            options: PeerOptions::new(),
        }
    }
    /// Create a new [`HttpPeer`] with the given socket address and TLS settings.
    ///
    /// # Panics
    ///
    /// Panics if `address` cannot be resolved or resolves to no socket addresses.
    pub fn new<A: ToInetSocketAddrs>(address: A, tls: bool, sni: String) -> Self {
        // TODO: return a Result instead of panicking on resolution failure
        let mut addrs_iter = address
            .to_socket_addrs()
            .expect("failed to resolve peer address");
        let addr = addrs_iter
            .next()
            .expect("peer address resolved to no socket addresses");
        Self::new_from_sockaddr(SocketAddr::Inet(addr), tls, sni)
    }
    /// Create a new [`HttpPeer`] with the given path to Unix domain socket and TLS settings.
    #[cfg(unix)]
    pub fn new_uds(path: &str, tls: bool, sni: String) -> Result<Self> {
        let addr = SocketAddr::Unix(
            UnixSocketAddr::from_pathname(Path::new(path)).or_err(SocketError, "invalid path")?,
        );
        Ok(Self::new_from_sockaddr(addr, tls, sni))
    }
    /// Create a new [`HttpPeer`] that uses a proxy to connect to the upstream IP and port
    /// combination.
    pub fn new_proxy(
        next_hop: &str,
        ip_addr: IpAddr,
        port: u16,
        tls: bool,
        sni: &str,
        headers: BTreeMap<String, Vec<u8>>,
    ) -> Self {
        HttpPeer {
            _address: SocketAddr::Inet(InetSocketAddr::new(ip_addr, port)),
            scheme: Scheme::from_tls_bool(tls),
            sni: sni.to_string(),
            proxy: Some(Proxy {
                next_hop: PathBuf::from(next_hop).into(),
                host: ip_addr.to_string(),
                port,
                headers,
            }),
            client_cert_key: None,
            group_key: 0,
            options: PeerOptions::new(),
        }
    }
    /// Hash of the full peer identity (see the `Hash` impl); used as the reuse hash.
    fn peer_hash(&self) -> u64 {
        let mut hasher = AHasher::default();
        self.hash(&mut hasher);
        hasher.finish()
    }
}
impl Hash for HttpPeer {
    // Hashes every field that decides whether two peers' connections are
    // interchangeable; this feeds `Peer::reuse_hash` via `peer_hash`.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self._address.hash(state);
        self.scheme.hash(state);
        self.proxy.hash(state);
        self.sni.hash(state);
        // client cert serial
        self.client_cert_key.hash(state);
        // origin server cert verification
        self.verify_cert().hash(state);
        self.verify_hostname().hash(state);
        self.alternative_cn().hash(state);
        #[cfg(feature = "s2n")]
        self.get_psk().hash(state);
        self.group_key.hash(state);
        // max h2 stream settings
        self.options.max_h2_streams.hash(state);
    }
}
impl Display for HttpPeer {
    /// Summarize the peer: address and scheme always, optional parts only when set.
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        write!(f, "addr: {}, scheme: {}", self._address, self.scheme)?;
        if !self.sni.is_empty() {
            write!(f, ", sni: {}", self.sni)?;
        }
        if let Some(proxy) = &self.proxy {
            write!(f, ", proxy: {proxy}")?;
        }
        if let Some(cert) = self.client_cert_key.as_ref() {
            write!(f, ", client cert: {cert}")?;
        }
        Ok(())
    }
}
impl Peer for HttpPeer {
    fn address(&self) -> &SocketAddr {
        &self._address
    }
    fn tls(&self) -> bool {
        self.is_tls()
    }
    fn sni(&self) -> &str {
        &self.sni
    }
    // TODO: change connection pool to accept u64 instead of String
    fn reuse_hash(&self) -> u64 {
        self.peer_hash()
    }
    fn get_peer_options(&self) -> Option<&PeerOptions> {
        Some(&self.options)
    }
    fn get_mut_peer_options(&mut self) -> Option<&mut PeerOptions> {
        Some(&mut self.options)
    }
    fn get_proxy(&self) -> Option<&Proxy> {
        self.proxy.as_ref()
    }
    /// When a proxy is configured, match against the proxy's next hop instead of
    /// the peer address.
    #[cfg(unix)]
    fn matches_fd<V: AsRawFd>(&self, fd: V) -> bool {
        if let Some(proxy) = self.get_proxy() {
            proxy.next_hop.check_fd_match(fd)
        } else {
            self.address().check_fd_match(fd)
        }
    }
    #[cfg(windows)]
    fn matches_sock<V: AsRawSocket>(&self, sock: V) -> bool {
        use crate::protocols::ConnSockReusable;
        // previously `if let Some(proxy) = ...` left `proxy` unused (compiler warning)
        if self.get_proxy().is_some() {
            panic!("windows do not support peers with proxy")
        } else {
            self.address().check_sock_match(sock)
        }
    }
    fn get_client_cert_key(&self) -> Option<&Arc<CertKey>> {
        self.client_cert_key.as_ref()
    }
    fn get_tracer(&self) -> Option<Tracer> {
        self.options.tracer.clone()
    }
}
/// The proxy settings to connect to the remote server, CONNECT only for now
#[derive(Debug, Hash, Clone)]
pub struct Proxy {
    /// For now this will be the path to the UDS of the proxy.
    pub next_hop: Box<Path>,
    /// The proxied host. Could be either IP addr or hostname.
    pub host: String,
    /// The port to proxy to.
    pub port: u16,
    /// The additional headers to add to CONNECT.
    pub headers: BTreeMap<String, Vec<u8>>,
}
impl Display for Proxy {
    /// Summarize the proxy target: next hop, proxied host and port.
    fn fmt(&self, f: &mut Formatter) -> FmtResult {
        let next_hop = self.next_hop.display();
        write!(
            f,
            "next_hop: {}, host: {}, port: {}",
            next_hop, self.host, self.port
        )
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/utils/mod.rs | pingora-core/src/utils/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! This module contains various types that make it easier to work with bytes and X509
//! certificates.
#[cfg(feature = "any_tls")]
pub mod tls;
#[cfg(not(feature = "any_tls"))]
pub use crate::tls::utils as tls;
use bytes::Bytes;
/// A `BufRef` is a reference to a buffer of bytes. It removes the need for self-referential data
/// structures. It is safe to use as long as the underlying buffer does not get mutated.
///
/// The two fields are the start (inclusive) and end (exclusive) byte offsets into
/// the referenced buffer.
///
/// # Panics
///
/// This will panic if an index is out of bounds.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BufRef(pub usize, pub usize);
impl BufRef {
    /// Borrow the referenced region of `buf` as a sub-slice.
    pub fn get<'a>(&self, buf: &'a [u8]) -> &'a [u8] {
        let (start, end) = (self.0, self.1);
        &buf[start..end]
    }
    /// Return an owned `Bytes` slice of `buf`. O(1): only bumps the reference count.
    pub fn get_bytes(&self, buf: &Bytes) -> Bytes {
        let (start, end) = (self.0, self.1);
        buf.slice(start..end)
    }
    /// Number of bytes referenced.
    pub fn len(&self) -> usize {
        self.1 - self.0
    }
    /// Whether the referenced region is empty.
    pub fn is_empty(&self) -> bool {
        self.0 == self.1
    }
}
impl BufRef {
    /// Build a `BufRef` spanning `len` bytes beginning at index `start`.
    pub fn new(start: usize, len: usize) -> Self {
        let end = start + len;
        BufRef(start, end)
    }
}
/// A `KVRef` contains a key name and value pair, stored as two [BufRef] types.
#[derive(Clone)]
pub struct KVRef {
    // where the key name lives in the underlying buffer
    name: BufRef,
    // where the value lives in the underlying buffer
    value: BufRef,
}
impl KVRef {
/// Like [BufRef::get] for the name.
pub fn get_name<'a>(&self, buf: &'a [u8]) -> &'a [u8] {
self.name.get(buf)
}
/// Like [BufRef::get] for the value.
pub fn get_value<'a>(&self, buf: &'a [u8]) -> &'a [u8] {
self.value.get(buf)
}
/// Like [BufRef::get_bytes] for the name.
pub fn get_name_bytes(&self, buf: &Bytes) -> Bytes {
self.name.get_bytes(buf)
}
/// Like [BufRef::get_bytes] for the value.
pub fn get_value_bytes(&self, buf: &Bytes) -> Bytes {
self.value.get_bytes(buf)
}
/// Return a new `KVRef` with name and value start indices and lengths.
pub fn new(name_s: usize, name_len: usize, value_s: usize, value_len: usize) -> Self {
KVRef {
name: BufRef(name_s, name_s + name_len),
value: BufRef(value_s, value_s + value_len),
}
}
/// Return a reference to the value.
pub fn value(&self) -> &BufRef {
&self.value
}
}
/// A [KVRef] which contains empty sub-slices.
///
/// Both name and value reference the empty range `[0, 0)`.
pub const EMPTY_KV_REF: KVRef = KVRef {
    name: BufRef(0, 0),
    value: BufRef(0, 0),
};
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/utils/tls/s2n.rs | pingora-core/src/utils/tls/s2n.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use ouroboros::self_referencing;
use pingora_error::Result;
use std::hash::{Hash, Hasher};
use x509_parser::{
pem::Pem,
prelude::{FromDer, X509Certificate},
};
// Internal helper: organization (may be absent) plus serial for a parsed cert.
fn get_organization_serial_x509(
    x509cert: &X509Certificate<'_>,
) -> Result<(Option<String>, String)> {
    // Both parts are infallible; the Result return matches the public API shape.
    Ok((get_organization_x509(x509cert), x509cert.raw_serial_as_string()))
}
/// Get the serial number associated with the given certificate
/// see https://en.wikipedia.org/wiki/X.509#Structure_of_a_certificate
pub fn get_serial(x509cert: &WrappedX509) -> Result<String> {
    let serial = x509cert.borrow_cert().raw_serial_as_string();
    Ok(serial)
}
/// Return the organization associated with the X509 certificate.
/// see https://en.wikipedia.org/wiki/X.509#Structure_of_a_certificate
pub fn get_organization(x509cert: &WrappedX509) -> Option<String> {
    let cert = x509cert.borrow_cert();
    get_organization_x509(cert)
}
/// Return the organization associated with the X509 certificate.
///
/// Multiple organization entries are concatenated without a separator.
/// see https://en.wikipedia.org/wiki/X.509#Structure_of_a_certificate
pub fn get_organization_x509(x509cert: &X509Certificate<'_>) -> Option<String> {
    x509cert
        .subject
        .iter_organization()
        .filter_map(|attr| attr.as_str().ok())
        .fold(None, |acc: Option<String>, part| match acc {
            Some(mut joined) => {
                joined.push_str(part);
                Some(joined)
            }
            None => Some(part.to_string()),
        })
}
/// Return the organization associated with the X509 certificate (as bytes).
/// see https://en.wikipedia.org/wiki/X.509#Structure_of_a_certificate
///
/// NOTE(review): despite returning `Result`, malformed DER input currently panics
/// via `expect` rather than returning `Err` — confirm whether this should be a
/// proper error instead.
pub fn get_organization_serial_bytes(cert: &[u8]) -> Result<(Option<String>, String)> {
    let (_, x509cert) = x509_parser::certificate::X509Certificate::from_der(cert)
        .expect("Failed to parse certificate from DER format.");
    get_organization_serial_x509(&x509cert)
}
/// Return the organization unit associated with the X509 certificate.
///
/// Multiple entries are concatenated without a separator.
/// see https://en.wikipedia.org/wiki/X.509#Structure_of_a_certificate
pub fn get_organization_unit(x509cert: &WrappedX509) -> Option<String> {
    let mut joined: Option<String> = None;
    for unit in x509cert
        .borrow_cert()
        .subject
        .iter_organizational_unit()
        .filter_map(|attr| attr.as_str().ok())
    {
        match joined.as_mut() {
            Some(acc) => acc.push_str(unit),
            None => joined = Some(unit.to_string()),
        }
    }
    joined
}
/// Get a combination of the common names for the given certificate
///
/// Multiple CN entries are concatenated without a separator.
/// see https://en.wikipedia.org/wiki/X.509#Structure_of_a_certificate
pub fn get_common_name(x509cert: &WrappedX509) -> Option<String> {
    let mut combined: Option<String> = None;
    for cn in x509cert
        .borrow_cert()
        .subject
        .iter_common_name()
        .filter_map(|attr| attr.as_str().ok())
    {
        match combined.as_mut() {
            Some(acc) => acc.push_str(cn),
            None => combined = Some(cn.to_string()),
        }
    }
    combined
}
/// Get the `not_after` field for the valid time period for the given cert
/// see https://en.wikipedia.org/wiki/X.509#Structure_of_a_certificate
pub fn get_not_after(x509cert: &WrappedX509) -> String {
    let not_after = &x509cert.borrow_cert().validity.not_after;
    not_after.to_string()
}
/// This type contains a list of one or more certificates and an associated private key. The leaf
/// certificate should always be first.
pub struct CertKey {
    // The private key in raw bytes; intentionally hidden from Debug output.
    key: Vec<u8>,
    // The PEM bundle: raw bytes plus the certificates parsed from them, leaf first.
    pem: X509Pem,
}
impl CertKey {
    /// Create a new `CertKey` given a list of certificates and a private key.
    ///
    /// # Panics
    ///
    /// Panics if `pem_bytes` does not contain at least one certificate.
    pub fn new(pem_bytes: Vec<u8>, key: Vec<u8>) -> CertKey {
        let pem = X509Pem::new(pem_bytes);
        assert!(
            !pem.certs.is_empty(),
            "expected at least one certificate in PEM"
        );
        CertKey { key, pem }
    }
    /// Peek at the leaf certificate.
    pub fn leaf(&self) -> &WrappedX509 {
        // This is safe due to the assertion in creation of a `CertKey`
        &self.pem.certs[0]
    }
    /// Return the key.
    pub fn key(&self) -> &Vec<u8> {
        &self.key
    }
    /// Return a slice of intermediate certificates. An empty slice means there are none.
    pub fn intermediates(&self) -> Vec<&WrappedX509> {
        self.pem.certs.iter().skip(1).collect()
    }
    /// Return the organization from the leaf certificate.
    pub fn organization(&self) -> Option<String> {
        get_organization(self.leaf())
    }
    /// Return the serial from the leaf certificate.
    // `get_serial` in this module is infallible, so the unwrap cannot fire.
    pub fn serial(&self) -> String {
        get_serial(self.leaf()).unwrap()
    }
    /// The raw PEM bytes this bundle was created from.
    pub fn raw_pem(&self) -> &[u8] {
        &self.pem.raw_pem
    }
}
/// A parsed PEM bundle: the raw PEM bytes alongside each certificate parsed from them.
#[derive(Debug)]
pub struct X509Pem {
    pub raw_pem: Vec<u8>,
    pub certs: Vec<WrappedX509>,
}
impl X509Pem {
    /// Parse every certificate section out of `raw_pem`.
    ///
    /// # Panics
    ///
    /// Panics if any PEM section fails to parse.
    pub fn new(raw_pem: Vec<u8>) -> Self {
        let mut certs = Vec::new();
        for section in Pem::iter_from_buffer(&raw_pem) {
            let raw_cert = section.expect("Failed to parse PEM").contents;
            certs.push(WrappedX509::new(raw_cert, parse_x509));
        }
        X509Pem { raw_pem, certs }
    }
    /// Iterate over the parsed certificates.
    pub fn iter(&self) -> std::slice::Iter<'_, WrappedX509> {
        self.certs.iter()
    }
}
// Parse a DER-encoded certificate, panicking on malformed input.
fn parse_x509<C>(raw_cert: &C) -> X509Certificate<'_>
where
    C: AsRef<[u8]>,
{
    let (_rest, cert) = X509Certificate::from_der(raw_cert.as_ref())
        .expect("Failed to parse certificate from DER format.");
    cert
}
/// Self-referential wrapper that owns the raw DER bytes of a certificate together
/// with the `X509Certificate` parsed from (and borrowing) those same bytes.
#[self_referencing]
#[derive(Debug)]
pub struct WrappedX509 {
    raw_cert: Vec<u8>,
    #[borrows(raw_cert)]
    #[covariant]
    cert: X509Certificate<'this>,
}
impl WrappedX509 {
    /// The certificate's `not_after` validity bound, formatted as a string.
    pub fn not_after(&self) -> String {
        format!("{}", self.borrow_cert().validity.not_after)
    }
}
// hide private key
impl std::fmt::Debug for CertKey {
    /// Debug output deliberately shows only the leaf certificate, never the key.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut builder = f.debug_struct("CertKey");
        builder.field("X509", &self.leaf());
        builder.finish()
    }
}
impl std::fmt::Display for CertKey {
    // Shows the leaf cert's CN (or org unit as fallback) and its expiry.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let leaf = self.leaf();
        if let Some(cn) = get_common_name(leaf) {
            // Write CN if it exists
            write!(f, "CN: {cn},")?;
        } else if let Some(org_unit) = get_organization_unit(leaf) {
            // CA cert might not have CN, so print its unit name instead
            write!(f, "Org Unit: {org_unit},")?;
        }
        // NOTE(review): when a CN/org-unit segment was written above, the output
        // contains ",," (both segments carry a comma) — confirm intended.
        write!(f, ", expire: {}", get_not_after(leaf))
        // ignore the details of the private key
    }
}
impl Hash for X509Pem {
    /// Hash each certificate by its serial number.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.certs
            .iter()
            .filter_map(|cert| get_serial(cert).ok())
            .for_each(|serial| serial.hash(state));
    }
}
impl Hash for CertKey {
    /// Hashing a `CertKey` hashes only its certificate bundle, never the key.
    fn hash<H: Hasher>(&self, state: &mut H) {
        Hash::hash(&self.pem, state)
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/utils/tls/rustls.rs | pingora-core/src/utils/tls/rustls.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use ouroboros::self_referencing;
use pingora_error::Result;
use pingora_rustls::CertificateDer;
use std::hash::{Hash, Hasher};
use x509_parser::prelude::{FromDer, X509Certificate};
/// Get the organization and serial number associated with the given certificate
/// see https://en.wikipedia.org/wiki/X.509#Structure_of_a_certificate
pub fn get_organization_serial(x509cert: &WrappedX509) -> Result<(Option<String>, String)> {
    let org = get_organization(x509cert);
    let serial = get_serial(x509cert)?;
    Ok((org, serial))
}
// Internal helper: organization (may be absent) plus serial for a parsed cert.
fn get_organization_serial_x509(
    x509cert: &X509Certificate<'_>,
) -> Result<(Option<String>, String)> {
    // Both parts are infallible; the Result return matches the public API shape.
    Ok((get_organization_x509(x509cert), x509cert.raw_serial_as_string()))
}
/// Get the serial number associated with the given certificate
/// see https://en.wikipedia.org/wiki/X.509#Structure_of_a_certificate
pub fn get_serial(x509cert: &WrappedX509) -> Result<String> {
    let serial = x509cert.borrow_cert().raw_serial_as_string();
    Ok(serial)
}
/// Return the organization associated with the X509 certificate.
/// see https://en.wikipedia.org/wiki/X.509#Structure_of_a_certificate
pub fn get_organization(x509cert: &WrappedX509) -> Option<String> {
    let parsed = x509cert.borrow_cert();
    get_organization_x509(parsed)
}
/// Return the organization associated with the X509 certificate.
///
/// Multiple organization entries are concatenated without a separator.
/// see https://en.wikipedia.org/wiki/X.509#Structure_of_a_certificate
pub fn get_organization_x509(x509cert: &X509Certificate<'_>) -> Option<String> {
    x509cert
        .subject
        .iter_organization()
        .filter_map(|attr| attr.as_str().ok())
        .fold(None, |acc: Option<String>, part| match acc {
            Some(mut joined) => {
                joined.push_str(part);
                Some(joined)
            }
            None => Some(part.to_string()),
        })
}
/// Return the organization associated with the X509 certificate (as bytes).
/// see https://en.wikipedia.org/wiki/X.509#Structure_of_a_certificate
///
/// NOTE(review): despite returning `Result`, malformed DER input currently panics
/// via `expect` rather than returning `Err` — confirm whether this should be a
/// proper error instead.
pub fn get_organization_serial_bytes(cert: &[u8]) -> Result<(Option<String>, String)> {
    let (_, x509cert) = x509_parser::certificate::X509Certificate::from_der(cert)
        .expect("Failed to parse certificate from DER format.");
    get_organization_serial_x509(&x509cert)
}
/// Return the organization unit associated with the X509 certificate.
///
/// Multiple entries are concatenated without a separator.
/// see https://en.wikipedia.org/wiki/X.509#Structure_of_a_certificate
pub fn get_organization_unit(x509cert: &WrappedX509) -> Option<String> {
    let mut joined: Option<String> = None;
    for unit in x509cert
        .borrow_cert()
        .subject
        .iter_organizational_unit()
        .filter_map(|attr| attr.as_str().ok())
    {
        match joined.as_mut() {
            Some(acc) => acc.push_str(unit),
            None => joined = Some(unit.to_string()),
        }
    }
    joined
}
/// Get a combination of the common names for the given certificate
///
/// Multiple CN entries are concatenated without a separator.
/// see https://en.wikipedia.org/wiki/X.509#Structure_of_a_certificate
pub fn get_common_name(x509cert: &WrappedX509) -> Option<String> {
    let mut combined: Option<String> = None;
    for cn in x509cert
        .borrow_cert()
        .subject
        .iter_common_name()
        .filter_map(|attr| attr.as_str().ok())
    {
        match combined.as_mut() {
            Some(acc) => acc.push_str(cn),
            None => combined = Some(cn.to_string()),
        }
    }
    combined
}
/// Get the `not_after` field for the valid time period for the given cert
/// see https://en.wikipedia.org/wiki/X.509#Structure_of_a_certificate
pub fn get_not_after(x509cert: &WrappedX509) -> String {
    format!("{}", x509cert.borrow_cert().validity.not_after)
}
/// This type contains a list of one or more certificates and an associated private key. The leaf
/// certificate should always be first.
pub struct CertKey {
    // The private key in raw bytes; intentionally hidden from Debug output.
    key: Vec<u8>,
    // Parsed certificate chain, leaf first (see the assertion in `CertKey::new`).
    certificates: Vec<WrappedX509>,
}
/// Self-referential wrapper that owns the raw DER bytes of a certificate together
/// with the `X509Certificate` parsed from (and borrowing) those same bytes.
#[self_referencing]
#[derive(Debug)]
pub struct WrappedX509 {
    raw_cert: Vec<u8>,
    #[borrows(raw_cert)]
    #[covariant]
    cert: X509Certificate<'this>,
}
// Parse a DER-encoded certificate, panicking on malformed input.
fn parse_x509<C>(raw_cert: &C) -> X509Certificate<'_>
where
    C: AsRef<[u8]>,
{
    let (_rest, cert) = X509Certificate::from_der(raw_cert.as_ref())
        .expect("Failed to parse certificate from DER format.");
    cert
}
impl Clone for CertKey {
    /// Deep-clone: each certificate is re-parsed from its raw DER bytes.
    fn clone(&self) -> Self {
        let certificates = self
            .certificates
            .iter()
            .map(|cert| WrappedX509::new(cert.borrow_raw_cert().clone(), parse_x509))
            .collect::<Vec<_>>();
        CertKey {
            key: self.key.clone(),
            certificates,
        }
    }
}
impl CertKey {
    /// Create a new `CertKey` given a list of certificates and a private key.
    ///
    /// # Panics
    ///
    /// Panics if `certificates` is empty or its first entry is empty.
    pub fn new(certificates: Vec<Vec<u8>>, key: Vec<u8>) -> CertKey {
        assert!(
            !certificates.is_empty() && !certificates.first().unwrap().is_empty(),
            "expected a non-empty vector of certificates in CertKey::new"
        );
        CertKey {
            key,
            certificates: certificates
                .into_iter()
                .map(|raw_cert| WrappedX509::new(raw_cert, parse_x509))
                .collect::<Vec<_>>(),
        }
    }
    /// Peek at the leaf certificate.
    pub fn leaf(&self) -> &WrappedX509 {
        // This is safe due to the assertion in creation of a `CertKey`
        &self.certificates[0]
    }
    /// Return the key.
    pub fn key(&self) -> &Vec<u8> {
        &self.key
    }
    /// Return a slice of intermediate certificates. An empty slice means there are none.
    pub fn intermediates(&self) -> Vec<&WrappedX509> {
        self.certificates.iter().skip(1).collect()
    }
    /// Return the organization from the leaf certificate.
    pub fn organization(&self) -> Option<String> {
        get_organization(self.leaf())
    }
    /// Return the serial from the leaf certificate.
    // `get_serial` in this module is infallible, so the unwrap cannot fire.
    pub fn serial(&self) -> String {
        get_serial(self.leaf()).unwrap()
    }
}
impl WrappedX509 {
    /// The certificate's `not_after` validity bound, formatted as a string.
    pub fn not_after(&self) -> String {
        format!("{}", self.borrow_cert().validity.not_after)
    }
}
// hide private key
impl std::fmt::Debug for CertKey {
    /// Debug output deliberately shows only the leaf certificate, never the key.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut builder = f.debug_struct("CertKey");
        builder.field("X509", &self.leaf());
        builder.finish()
    }
}
impl std::fmt::Display for CertKey {
    // Shows the leaf cert's CN (or org unit as fallback) and its expiry.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let leaf = self.leaf();
        if let Some(cn) = get_common_name(leaf) {
            // Write CN if it exists
            write!(f, "CN: {cn},")?;
        } else if let Some(org_unit) = get_organization_unit(leaf) {
            // CA cert might not have CN, so print its unit name instead
            write!(f, "Org Unit: {org_unit},")?;
        }
        // NOTE(review): when a CN/org-unit segment was written above, the output
        // contains ",," (both segments carry a comma) — confirm intended.
        write!(f, ", expire: {}", get_not_after(leaf))
        // ignore the details of the private key
    }
}
impl Hash for CertKey {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Identity is the set of certificate serials; certs whose serial
        // cannot be read are simply skipped.
        self.certificates
            .iter()
            .filter_map(|certificate| get_serial(certificate).ok())
            .for_each(|serial| serial.hash(state));
    }
}
impl<'a> From<&'a WrappedX509> for CertificateDer<'static> {
    fn from(value: &'a WrappedX509) -> Self {
        // Copy the raw DER bytes out so the resulting cert owns its data
        // (hence the 'static lifetime).
        let der: Vec<u8> = value.borrow_raw_cert().clone();
        CertificateDer::from(der)
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/utils/tls/mod.rs | pingora-core/src/utils/tls/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(feature = "openssl_derived")]
mod boringssl_openssl;
#[cfg(feature = "openssl_derived")]
pub use boringssl_openssl::*;
#[cfg(feature = "rustls")]
mod rustls;
#[cfg(feature = "rustls")]
pub use rustls::*;
#[cfg(feature = "s2n")]
mod s2n;
#[cfg(feature = "s2n")]
pub use s2n::*;
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/utils/tls/boringssl_openssl.rs | pingora-core/src/utils/tls/boringssl_openssl.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::tls::{nid::Nid, pkey::PKey, pkey::Private, x509::X509};
use crate::Result;
use pingora_error::{ErrorType::*, OrErr};
use std::hash::{Hash, Hasher};
/// Look up the first subject-name entry of `name_type` (e.g. CN, O, OU) on `cert`.
///
/// Returns `None` when the subject has no such entry, and an empty string when
/// the entry exists but cannot be decoded as UTF-8.
fn get_subject_name(cert: &X509, name_type: Nid) -> Option<String> {
    let entry = cert.subject_name().entries_by_nid(name_type).next()?;
    let text = entry
        .data()
        .as_utf8()
        .map(|s| s.to_string())
        .unwrap_or_default();
    Some(text)
}
/// Return the organization associated with the X509 certificate.
pub fn get_organization(cert: &X509) -> Option<String> {
    get_subject_name(cert, Nid::ORGANIZATIONNAME)
}
/// Return the common name associated with the X509 certificate.
pub fn get_common_name(cert: &X509) -> Option<String> {
    get_subject_name(cert, Nid::COMMONNAME)
}
/// Return the organizational unit associated with the X509 certificate.
// Fix: the doc comment was a copy-paste of "common name", but this looks up
// `Nid::ORGANIZATIONALUNITNAME` (OU), not CN.
pub fn get_organization_unit(cert: &X509) -> Option<String> {
    get_subject_name(cert, Nid::ORGANIZATIONALUNITNAME)
}
/// Return the serial number associated with the X509 certificate as a hexadecimal value.
///
/// Errors with `InvalidCert` when the serial cannot be converted to a big
/// number or rendered as hex.
pub fn get_serial(cert: &X509) -> Result<String> {
    let hex = cert
        .serial_number()
        .to_bn()
        .or_err(InvalidCert, "Invalid serial")?
        .to_hex_str()
        .or_err(InvalidCert, "Invalid serial")?;
    // Go through &str explicitly: OpensslString exposes the text via AsRef<str>.
    let as_str: &str = hex.as_ref();
    Ok(as_str.to_owned())
}
/// This type contains a list of one or more certificates and an associated private key. The leaf
/// certificate should always be first.
#[derive(Clone)]
pub struct CertKey {
    // Leaf certificate first, followed by any intermediates (see `intermediates()`).
    certificates: Vec<X509>,
    // The private key associated with the certificate chain.
    key: PKey<Private>,
}
impl CertKey {
    /// Create a new `CertKey` given a list of certificates and a private key.
    /// The first certificate is treated as the leaf.
    ///
    /// Panics when `certificates` is empty.
    pub fn new(certificates: Vec<X509>, key: PKey<Private>) -> CertKey {
        assert!(
            !certificates.is_empty(),
            "expected a non-empty vector of certificates in CertKey::new"
        );
        CertKey { certificates, key }
    }

    /// Peek at the leaf certificate.
    pub fn leaf(&self) -> &X509 {
        // Indexing is safe: `new` asserts at least one certificate is present.
        &self.certificates[0]
    }

    /// Return the key.
    pub fn key(&self) -> &PKey<Private> {
        &self.key
    }

    /// Return a slice of intermediate certificates. An empty slice means there are none.
    pub fn intermediates(&self) -> &[X509] {
        // `get(1..)` is None only for an empty list, which `new` rules out;
        // for a single-cert chain it yields an empty slice.
        self.certificates.get(1..).unwrap_or(&[])
    }

    /// Return the organization from the leaf certificate.
    pub fn organization(&self) -> Option<String> {
        get_organization(self.leaf())
    }

    /// Return the serial from the leaf certificate.
    pub fn serial(&self) -> Result<String> {
        get_serial(self.leaf())
    }
}
impl Hash for CertKey {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash every serial we can extract; certificates with unreadable
        // serials are skipped rather than failing the hash.
        self.certificates
            .iter()
            .filter_map(|cert| get_serial(cert).ok())
            .for_each(|serial| serial.hash(state));
    }
}
// hide private key
impl std::fmt::Debug for CertKey {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Debug shows only the leaf certificate; the private key is
        // intentionally never printed.
        let mut dbg = f.debug_struct("CertKey");
        dbg.field("X509", &self.leaf());
        dbg.finish()
    }
}
impl std::fmt::Display for CertKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let leaf = self.leaf();
if let Some(cn) = get_common_name(leaf) {
// Write CN if it exists
write!(f, "CN: {cn},")?;
} else if let Some(org_unit) = get_organization_unit(leaf) {
// CA cert might not have CN, so print its unit name instead
write!(f, "Org Unit: {org_unit},")?;
}
write!(f, ", expire: {}", leaf.not_after())
// ignore the details of the private key
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/connectors/l4.rs | pingora-core/src/connectors/l4.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(unix)]
use crate::protocols::l4::ext::connect_uds;
use crate::protocols::l4::ext::{
connect_with as tcp_connect, set_dscp, set_recv_buf, set_tcp_fastopen_connect,
};
use crate::protocols::l4::socket::SocketAddr;
use crate::protocols::l4::stream::Stream;
use crate::protocols::{GetSocketDigest, SocketDigest};
use crate::upstreams::peer::Peer;
use async_trait::async_trait;
use log::debug;
use pingora_error::{Context, Error, ErrorType::*, OrErr, Result};
use rand::seq::SliceRandom;
use std::net::SocketAddr as InetSocketAddr;
#[cfg(unix)]
use std::os::unix::io::AsRawFd;
#[cfg(windows)]
use std::os::windows::io::AsRawSocket;
/// The interface to establish a L4 connection
#[async_trait]
pub trait Connect: std::fmt::Debug {
    /// Establish a connection to `addr`, returning the resulting stream.
    async fn connect(&self, addr: &SocketAddr) -> Result<Stream>;
}
/// Settings for binding on connect
#[derive(Clone, Debug, Default)]
pub struct BindTo {
    /// Local source address to bind to before connecting (port is usually 0).
    pub addr: Option<InetSocketAddr>,
    // (low, high) local port range; `Some((0, 0))` means reset/unset.
    // Validated by `set_port_range`.
    port_range: Option<(u16, u16)>,
    // whether we fallback and try again on bind errors when a port range is set
    fallback: bool,
}
impl BindTo {
/// Sets the port range we will bind to where the first item in the tuple is the lower bound
/// and the second item is the upper bound.
///
/// Note this bind option is only supported on Linux since 6.3, this is a no-op on other systems.
/// To reset the range, pass a `None` or `Some((0,0))`, more information can be found [here](https://man7.org/linux/man-pages/man7/ip.7.html)
pub fn set_port_range(&mut self, range: Option<(u16, u16)>) -> Result<()> {
if range.is_none() && self.port_range.is_none() {
// nothing to do
return Ok(());
}
match range {
// 0,0 is valid for resets
None | Some((0, 0)) => self.port_range = Some((0, 0)),
// set the port range if valid
Some((low, high)) if low > 0 && low < high => {
self.port_range = Some((low, high));
}
_ => return Error::e_explain(SocketError, "invalid port range: {range}"),
}
Ok(())
}
/// Set whether we fallback on no address available if a port range is set
pub fn set_fallback(&mut self, fallback: bool) {
self.fallback = fallback
}
/// Configured bind port range
pub fn port_range(&self) -> Option<(u16, u16)> {
self.port_range
}
/// Whether we attempt to fallback on no address available
pub fn will_fallback(&self) -> bool {
self.fallback && self.port_range.is_some()
}
}
/// Establish a connection (l4) to the given peer using its settings and an optional bind address.
pub(crate) async fn connect<P>(peer: &P, bind_to: Option<BindTo>) -> Result<Stream>
where
    P: Peer + Send + Sync,
{
    // Peers configured with a CONNECT proxy take a separate code path.
    if peer.get_proxy().is_some() {
        return proxy_connect(peer)
            .await
            .err_context(|| format!("Fail to establish CONNECT proxy: {}", peer));
    }
    let peer_addr = peer.address();
    // A user-supplied L4 connector (`custom_l4` peer option) replaces the
    // built-in TCP/UDS connect logic below.
    let mut stream: Stream =
        if let Some(custom_l4) = peer.get_peer_options().and_then(|o| o.custom_l4.as_ref()) {
            custom_l4.connect(peer_addr).await?
        } else {
            match peer_addr {
                SocketAddr::Inet(addr) => {
                    // Socket options (fast open, recv buffer, DSCP, tweak hook)
                    // are applied to the raw socket before connect().
                    let connect_future = tcp_connect(addr, bind_to.as_ref(), |socket| {
                        #[cfg(unix)]
                        let raw = socket.as_raw_fd();
                        #[cfg(windows)]
                        let raw = socket.as_raw_socket();
                        if peer.tcp_fast_open() {
                            set_tcp_fastopen_connect(raw)?;
                        }
                        if let Some(recv_buf) = peer.tcp_recv_buf() {
                            debug!("Setting recv buf size");
                            set_recv_buf(raw, recv_buf)?;
                        }
                        if let Some(dscp) = peer.dscp() {
                            debug!("Setting dscp");
                            set_dscp(raw, dscp)?;
                        }
                        if let Some(tweak_hook) = peer
                            .get_peer_options()
                            .and_then(|o| o.upstream_tcp_sock_tweak_hook.clone())
                        {
                            tweak_hook(socket)?;
                        }
                        Ok(())
                    });
                    let conn_res = match peer.connection_timeout() {
                        Some(t) => pingora_timeout::timeout(t, connect_future)
                            .await
                            .explain_err(ConnectTimedout, |_| {
                                format!("timeout {t:?} connecting to server {peer}")
                            })?,
                        None => connect_future.await,
                    };
                    match conn_res {
                        Ok(socket) => {
                            debug!("connected to new server: {}", peer.address());
                            Ok(socket.into())
                        }
                        Err(e) => {
                            let c = format!("Fail to connect to {peer}");
                            // Socket/bind failures indicate local (config) issues,
                            // so reclassify them as internal errors.
                            match e.etype() {
                                SocketError | BindError => Error::e_because(InternalError, c, e),
                                _ => Err(e.more_context(c)),
                            }
                        }
                    }
                }
                #[cfg(unix)]
                SocketAddr::Unix(addr) => {
                    let connect_future = connect_uds(
                        addr.as_pathname()
                            .expect("non-pathname unix sockets not supported as peer"),
                    );
                    let conn_res = match peer.connection_timeout() {
                        Some(t) => pingora_timeout::timeout(t, connect_future)
                            .await
                            .explain_err(ConnectTimedout, |_| {
                                format!("timeout {t:?} connecting to server {peer}")
                            })?,
                        None => connect_future.await,
                    };
                    match conn_res {
                        Ok(socket) => {
                            debug!("connected to new server: {}", peer.address());
                            Ok(socket.into())
                        }
                        Err(e) => {
                            let c = format!("Fail to connect to {peer}");
                            match e.etype() {
                                SocketError | BindError => Error::e_because(InternalError, c, e),
                                _ => Err(e.more_context(c)),
                            }
                        }
                    }
                }
            }?
        };
    // Notify the tracer (if any) of the new connection and attach it to the stream.
    let tracer = peer.get_tracer();
    if let Some(t) = tracer {
        t.0.on_connected();
        stream.tracer = Some(t);
    }
    // settings applied based on stream type
    if let Some(ka) = peer.tcp_keepalive() {
        stream.set_keepalive(ka)?;
    }
    stream.set_nodelay()?;
    // Build the socket digest and record the peer address in it.
    #[cfg(unix)]
    let digest = SocketDigest::from_raw_fd(stream.as_raw_fd());
    #[cfg(windows)]
    let digest = SocketDigest::from_raw_socket(stream.as_raw_socket());
    digest
        .peer_addr
        .set(Some(peer_addr.clone()))
        .expect("newly created OnceCell must be empty");
    stream.set_socket_digest(digest);
    Ok(stream)
}
pub(crate) fn bind_to_random<P: Peer>(
    peer: &P,
    v4_list: &[InetSocketAddr],
    v6_list: &[InetSocketAddr],
) -> Option<BindTo> {
    // Pick a source address at random from the candidate list, if any.
    fn pick_ip(ips: &[InetSocketAddr]) -> Option<InetSocketAddr> {
        if ips.len() > 1 {
            ips.choose(&mut rand::thread_rng()).copied()
        } else {
            ips.first().copied()
        }
    }
    let mut bind_to = peer.get_peer_options().and_then(|o| o.bind_to.clone());
    // NOTE(review): the original condition `.map(|b| b.addr).is_some()` is
    // true for *any* present BindTo, even one whose `addr` is None, so we
    // return early whenever peer options carry a BindTo at all (making the
    // addr-filling branch below unreachable). Behavior preserved; confirm
    // whether `.and_then(|b| b.addr)` was intended.
    if bind_to.is_some() {
        // already have a bind address selected
        return bind_to;
    }
    let addr = match peer.address() {
        SocketAddr::Inet(InetSocketAddr::V4(_)) => pick_ip(v4_list),
        SocketAddr::Inet(InetSocketAddr::V6(_)) => pick_ip(v6_list),
        #[cfg(unix)]
        SocketAddr::Unix(_) => None,
    };
    if let Some(selected) = addr {
        match bind_to.as_mut() {
            Some(b) => b.addr = Some(selected),
            None => {
                bind_to = Some(BindTo {
                    addr: Some(selected),
                    ..Default::default()
                })
            }
        }
    }
    bind_to
}
use crate::protocols::raw_connect;
#[cfg(unix)]
// Establish a connection by tunneling through the peer's configured CONNECT
// proxy, which is reached over a unix domain socket.
async fn proxy_connect<P: Peer>(peer: &P) -> Result<Stream> {
    // safe to unwrap: the caller only takes this path when get_proxy() is Some
    let proxy = peer.get_proxy().unwrap();
    let options = peer.get_peer_options().unwrap();
    // combine required and optional headers
    let mut headers = proxy
        .headers
        .iter()
        .chain(options.extra_proxy_headers.iter());
    // not likely to timeout during connect() to UDS
    let stream: Box<Stream> = Box::new(
        connect_uds(&proxy.next_hop)
            .await
            .or_err_with(ConnectError, || {
                format!("CONNECT proxy connect() error to {:?}", &proxy.next_hop)
            })?
            .into(),
    );
    // Send the HTTP CONNECT request to the proxy and wait for it to establish
    // the tunnel, honoring the peer's connection timeout if one is set.
    let req_header = raw_connect::generate_connect_header(&proxy.host, proxy.port, &mut headers)?;
    let fut = raw_connect::connect(stream, &req_header, peer);
    let (mut stream, digest) = match peer.connection_timeout() {
        Some(t) => pingora_timeout::timeout(t, fut)
            .await
            .explain_err(ConnectTimedout, |_| "establishing CONNECT proxy")?,
        None => fut.await,
    }
    .map_err(|mut e| {
        // http protocol may ask to retry if reused client
        e.retry.decide_reuse(false);
        e
    })?;
    debug!("CONNECT proxy established: {:?}", proxy);
    stream.set_proxy_digest(digest);
    let stream = stream.into_any().downcast::<Stream>().unwrap(); // safe, it is Stream from above
    Ok(*stream)
}
#[cfg(windows)]
async fn proxy_connect<P: Peer>(peer: &P) -> Result<Stream> {
    // The CONNECT proxy path dials the proxy over a unix domain socket (see the
    // unix implementation above), which is unavailable on Windows.
    panic!("peer proxy not supported on windows")
}
// Unit tests for the L4 connect path. These use loopback TCP and unix-domain
// mock listeners, so several are timing sensitive (see `wait_for_peer`).
#[cfg(test)]
mod tests {
    use super::*;
    use crate::upstreams::peer::{BasicPeer, HttpPeer, Proxy};
    use pingora_error::ErrorType;
    use std::collections::BTreeMap;
    use std::path::PathBuf;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    use std::time::{Duration, Instant};
    use tokio::io::AsyncWriteExt;
    #[cfg(unix)]
    use tokio::net::UnixListener;
    use tokio::time::sleep;
    /// Some of the tests below are flaky when making new connections to mock
    /// servers. The servers are simple tokio listeners, so failures there are
    /// not indicative of real errors. This function will retry the peer/server
    /// in increasing intervals until it either succeeds in connecting or a long
    /// timeout expires (max 10sec)
    #[cfg(unix)]
    async fn wait_for_peer<P>(peer: &P)
    where
        P: Peer + Send + Sync,
    {
        use ErrorType as E;
        let start = Instant::now();
        let mut res = connect(peer, None).await;
        // exponential backoff: 5ms, 10ms, 20ms, ... until connected or 10s elapsed
        let mut delay = Duration::from_millis(5);
        let max_delay = Duration::from_secs(10);
        while start.elapsed() < max_delay {
            match &res {
                Err(e) if e.etype == E::ConnectRefused => {}
                _ => break,
            }
            sleep(delay).await;
            delay *= 2;
            res = connect(peer, None).await;
        }
    }
    #[tokio::test]
    async fn test_conn_error_refused() {
        let peer = BasicPeer::new("127.0.0.1:79"); // hopefully port 79 is not used
        let new_session = connect(&peer, None).await;
        assert_eq!(new_session.unwrap_err().etype(), &ConnectRefused)
    }
    // TODO broken on arm64
    #[ignore]
    #[tokio::test]
    async fn test_conn_error_no_route() {
        let peer = BasicPeer::new("[::3]:79"); // no route
        let new_session = connect(&peer, None).await;
        assert_eq!(new_session.unwrap_err().etype(), &ConnectNoRoute)
    }
    #[tokio::test]
    async fn test_conn_error_addr_not_avail() {
        // 192.0.2.0/24 is reserved (TEST-NET-1), so it cannot be a local bind address
        let peer = HttpPeer::new("127.0.0.1:121".to_string(), false, "".to_string());
        let addr = "192.0.2.2:0".parse().ok();
        let bind_to = BindTo {
            addr,
            ..Default::default()
        };
        let new_session = connect(&peer, Some(bind_to)).await;
        assert_eq!(new_session.unwrap_err().etype(), &InternalError)
    }
    #[tokio::test]
    async fn test_conn_error_other() {
        let peer = HttpPeer::new("240.0.0.1:80".to_string(), false, "".to_string()); // non localhost
        let addr = "127.0.0.1:0".parse().ok();
        // create an error: cannot send from src addr: localhost to dst addr: a public IP
        let bind_to = BindTo {
            addr,
            ..Default::default()
        };
        let new_session = connect(&peer, Some(bind_to)).await;
        let error = new_session.unwrap_err();
        // XXX: some system will allow the socket to bind and connect without error, only to timeout
        assert!(
            error.etype() == &ConnectError
                || error.etype() == &ConnectTimedout
                // The error seen on mac: https://github.com/cloudflare/pingora/pull/679
                || (error.etype() == &InternalError),
            "{error:?}"
        )
    }
    #[tokio::test]
    async fn test_conn_timeout() {
        // 192.0.2.1 is effectively a blackhole
        let mut peer = BasicPeer::new("192.0.2.1:79");
        peer.options.connection_timeout = Some(std::time::Duration::from_millis(1)); //1ms
        let new_session = connect(&peer, None).await;
        assert_eq!(new_session.unwrap_err().etype(), &ConnectTimedout)
    }
    #[tokio::test]
    async fn test_tweak_hook() {
        // verify the tweak hook runs exactly once by toggling a flag in it
        const INIT_FLAG: bool = false;
        let flag = Arc::new(AtomicBool::new(INIT_FLAG));
        let mut peer = BasicPeer::new("1.1.1.1:80");
        let move_flag = Arc::clone(&flag);
        peer.options.upstream_tcp_sock_tweak_hook = Some(Arc::new(move |_| {
            move_flag.fetch_xor(true, Ordering::SeqCst);
            Ok(())
        }));
        connect(&peer, None).await.unwrap();
        assert_eq!(!INIT_FLAG, flag.load(Ordering::SeqCst));
    }
    #[tokio::test]
    async fn test_custom_connect() {
        #[derive(Debug)]
        struct MyL4;
        #[async_trait]
        impl Connect for MyL4 {
            async fn connect(&self, _addr: &SocketAddr) -> Result<Stream> {
                tokio::net::TcpStream::connect("1.1.1.1:80")
                    .await
                    .map(|s| s.into())
                    .or_fail()
            }
        }
        // :79 shouldn't be able to be connected to
        let mut peer = BasicPeer::new("1.1.1.1:79");
        peer.options.custom_l4 = Some(std::sync::Arc::new(MyL4 {}));
        let new_session = connect(&peer, None).await;
        // but MyL4 connects to :80 instead
        assert!(new_session.is_ok());
    }
    #[cfg(unix)]
    #[tokio::test]
    async fn test_connect_proxy_fail() {
        let mut peer = HttpPeer::new("1.1.1.1:80".to_string(), false, "".to_string());
        let mut path = PathBuf::new();
        path.push("/tmp/123");
        peer.proxy = Some(Proxy {
            next_hop: path.into(),
            host: "1.1.1.1".into(),
            port: 80,
            headers: BTreeMap::new(),
        });
        let new_session = connect(&peer, None).await;
        let e = new_session.unwrap_err();
        assert_eq!(e.etype(), &ConnectError);
        assert!(!e.retry());
    }
    #[cfg(unix)]
    const MOCK_UDS_PATH: &str = "/tmp/test_unix_connect_proxy.sock";
    // one-off mock server
    #[cfg(unix)]
    async fn mock_connect_server() {
        let _ = std::fs::remove_file(MOCK_UDS_PATH);
        let listener = UnixListener::bind(MOCK_UDS_PATH).unwrap();
        if let Ok((mut stream, _addr)) = listener.accept().await {
            stream.write_all(b"HTTP/1.1 200 OK\r\n\r\n").await.unwrap();
            // wait a bit so that the client can read
            tokio::time::sleep(std::time::Duration::from_millis(100)).await;
        }
        let _ = std::fs::remove_file(MOCK_UDS_PATH);
    }
    #[tokio::test(flavor = "multi_thread")]
    async fn test_connect_proxy_work() {
        tokio::spawn(async {
            mock_connect_server().await;
        });
        // wait for the server to start
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
        let mut peer = HttpPeer::new("1.1.1.1:80".to_string(), false, "".to_string());
        let mut path = PathBuf::new();
        path.push(MOCK_UDS_PATH);
        peer.proxy = Some(Proxy {
            next_hop: path.into(),
            host: "1.1.1.1".into(),
            port: 80,
            headers: BTreeMap::new(),
        });
        let new_session = connect(&peer, None).await;
        assert!(new_session.is_ok());
    }
    #[cfg(unix)]
    const MOCK_BAD_UDS_PATH: &str = "/tmp/test_unix_bad_connect_proxy.sock";
    // one-off mock bad proxy
    // closes connection upon accepting
    #[cfg(unix)]
    async fn mock_connect_bad_server() {
        let _ = std::fs::remove_file(MOCK_BAD_UDS_PATH);
        let listener = UnixListener::bind(MOCK_BAD_UDS_PATH).unwrap();
        if let Ok((mut stream, _addr)) = listener.accept().await {
            stream.shutdown().await.unwrap();
            tokio::time::sleep(std::time::Duration::from_millis(100)).await;
        }
        let _ = std::fs::remove_file(MOCK_BAD_UDS_PATH);
    }
    #[cfg(unix)]
    #[tokio::test(flavor = "multi_thread")]
    async fn test_connect_proxy_conn_closed() {
        tokio::spawn(async {
            mock_connect_bad_server().await;
        });
        // wait for the server to start
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
        let mut peer = HttpPeer::new("1.1.1.1:80".to_string(), false, "".to_string());
        let mut path = PathBuf::new();
        path.push(MOCK_BAD_UDS_PATH);
        peer.proxy = Some(Proxy {
            next_hop: path.into(),
            host: "1.1.1.1".into(),
            port: 80,
            headers: BTreeMap::new(),
        });
        let new_session = connect(&peer, None).await;
        let err = new_session.unwrap_err();
        assert_eq!(err.etype(), &ConnectionClosed);
        assert!(!err.retry());
    }
    #[cfg(target_os = "linux")]
    #[tokio::test(flavor = "multi_thread")]
    async fn test_bind_to_port_range_on_connect() {
        fn get_ip_local_port_range() -> (u16, u16) {
            let path = "/proc/sys/net/ipv4/ip_local_port_range";
            let file = std::fs::read_to_string(path).unwrap();
            let mut parts = file.split_whitespace();
            (
                parts.next().unwrap().parse().unwrap(),
                parts.next().unwrap().parse().unwrap(),
            )
        }
        // one-off mock server
        async fn mock_inet_connect_server() -> u16 {
            use tokio::net::TcpListener;
            let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
            let port = listener.local_addr().unwrap().port();
            tokio::spawn(async move {
                if let Ok((mut stream, _addr)) = listener.accept().await {
                    stream.write_all(b"HTTP/1.1 200 OK\r\n\r\n").await.unwrap();
                    // wait a bit so that the client can read
                    tokio::time::sleep(std::time::Duration::from_millis(100)).await;
                }
            });
            port
        }
        fn in_port_range(session: Stream, lower: u16, upper: u16) -> bool {
            let digest = session.get_socket_digest();
            let local_addr = digest
                .as_ref()
                .and_then(|s| s.local_addr())
                .unwrap()
                .as_inet()
                .unwrap();
            // assert range
            local_addr.port() >= lower && local_addr.port() <= upper
        }
        let port = mock_inet_connect_server().await;
        // need to read /proc/sys/net/ipv4/ip_local_port_range for this test to work
        // IP_LOCAL_PORT_RANGE clamp only works on ports in /proc/sys/net/ipv4/ip_local_port_range
        let (low, _) = get_ip_local_port_range();
        let high = low + 1;
        let peer = HttpPeer::new(format!("127.0.0.1:{port}"), false, "".to_string());
        let mut bind_to = BindTo {
            addr: "127.0.0.1:0".parse().ok(),
            ..Default::default()
        };
        // wait for the server to start
        wait_for_peer(&peer).await;
        bind_to.set_port_range(Some((low, high))).unwrap();
        let mut success_count = 0;
        let mut address_unavailable_count = 0;
        // Issue a bunch of requests at once and ensure that all successful
        // requests have ports in the right range and that there is at least
        // one address-unavailable error because we are restricting the number
        // of ports so heavily
        for _ in 0..10 {
            match connect(&peer, Some(bind_to.clone())).await {
                Ok(session) => {
                    assert!(in_port_range(session, low, high));
                    success_count += 1;
                }
                Err(e) if format!("{e:?}").contains("AddrNotAvailable") => {
                    address_unavailable_count += 1;
                }
                Err(e) => {
                    panic!("Unexpected error {e:?}")
                }
            }
        }
        assert!(address_unavailable_count > 0);
        assert!(success_count >= (high - low));
        // enable fallback, assert not in port range but successful
        bind_to.set_fallback(true);
        let session4 = connect(&peer, Some(bind_to.clone())).await.unwrap();
        assert!(!in_port_range(session4, low, high));
        // works without bind IP, shift up to use new ports
        let low = low + 2;
        let high = low + 1;
        let mut bind_to = BindTo::default();
        bind_to.set_port_range(Some((low, high))).unwrap();
        let session5 = connect(&peer, Some(bind_to.clone())).await.unwrap();
        assert!(in_port_range(session5, low, high));
    }
    #[test]
    fn test_bind_to_port_ranges() {
        let addr = "127.0.0.1:0".parse().ok();
        let mut bind_to = BindTo {
            addr,
            ..Default::default()
        };
        // None because the previous value was None
        bind_to.set_port_range(None).unwrap();
        assert!(bind_to.port_range.is_none());
        // zeroes are handled
        bind_to.set_port_range(Some((0, 0))).unwrap();
        assert_eq!(bind_to.port_range, Some((0, 0)));
        // zeroes because the previous value was Some
        bind_to.set_port_range(None).unwrap();
        assert_eq!(bind_to.port_range, Some((0, 0)));
        // low > high is error
        assert!(bind_to.set_port_range(Some((2000, 1000))).is_err());
        // low < high success
        bind_to.set_port_range(Some((1000, 2000))).unwrap();
        assert_eq!(bind_to.port_range, Some((1000, 2000)));
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/connectors/mod.rs | pingora-core/src/connectors/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Connecting to servers
pub mod http;
pub mod l4;
mod offload;
#[cfg(feature = "any_tls")]
mod tls;
#[cfg(not(feature = "any_tls"))]
use crate::tls::connectors as tls;
use crate::protocols::Stream;
use crate::server::configuration::ServerConf;
use crate::upstreams::peer::{Peer, ALPN};
pub use l4::Connect as L4Connect;
use l4::{connect as l4_connect, BindTo};
use log::{debug, error, warn};
use offload::OffloadRuntime;
use parking_lot::RwLock;
use pingora_error::{Error, ErrorType::*, OrErr, Result};
use pingora_pool::{ConnectionMeta, ConnectionPool};
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;
use tls::TlsConnector;
use tokio::sync::Mutex;
/// The options to configure a [TransportConnector]
#[derive(Clone)]
pub struct ConnectorOptions {
    /// Path to the CA file used to validate server certs.
    ///
    /// If `None`, the CA in the [default](https://www.openssl.org/docs/manmaster/man3/SSL_CTX_set_default_verify_paths.html)
    /// locations will be loaded
    pub ca_file: Option<String>,
    /// The maximum number of unique s2n configs to cache. Creating a new s2n config is an
    /// expensive operation, so we cache and re-use config objects with identical configurations.
    /// Defaults to a cache size of 10. A value of 0 disables the cache.
    ///
    /// WARNING: Disabling the s2n config cache can result in poor performance
    #[cfg(feature = "s2n")]
    pub s2n_config_cache_size: Option<usize>,
    /// The default client cert and key to use for mTLS
    ///
    /// Each individual connection can use their own cert key to override this.
    pub cert_key_file: Option<(String, String)>,
    /// When enabled allows TLS keys to be written to a file specified by the SSLKEYLOG
    /// env variable. This can be used by tools like Wireshark to decrypt traffic
    /// for debugging purposes.
    pub debug_ssl_keylog: bool,
    /// How many connections to keepalive
    pub keepalive_pool_size: usize,
    /// Optionally offload the connection establishment to dedicated thread pools
    ///
    /// TCP and TLS connection establishment can be CPU intensive. Sometimes such tasks can slow
    /// down the entire service, which causes timeouts which leads to more connections which
    /// snowballs the issue. Use this option to isolate these CPU intensive tasks from impacting
    /// other traffic.
    ///
    /// Syntax: (#pools, #thread in each pool)
    pub offload_threadpool: Option<(usize, usize)>,
    // Fix: the doc comments on these two fields were swapped (IPv6 text on the
    // v4 field and vice versa).
    /// Bind to any of the given source IPv4 addresses
    pub bind_to_v4: Vec<SocketAddr>,
    /// Bind to any of the given source IPv6 addresses
    pub bind_to_v6: Vec<SocketAddr>,
}
impl ConnectorOptions {
    /// Derive the [ConnectorOptions] from a [ServerConf]
    pub fn from_server_conf(server_conf: &ServerConf) -> Self {
        // offloading is enabled only when both the pool count and the
        // threads-per-pool are configured as Some(>0)
        let offload_threadpool = server_conf
            .upstream_connect_offload_threadpools
            .zip(server_conf.upstream_connect_offload_thread_per_pool)
            .filter(|(pools, threads)| *pools > 0 && *threads > 0);
        // each configured source IP becomes a SocketAddr with port 0
        // (the actual port is chosen at bind time)
        let as_bind_addr = |ip: &String| {
            let parsed = ip.parse().unwrap();
            SocketAddr::new(parsed, 0)
        };
        let bind_to_v4 = server_conf
            .client_bind_to_ipv4
            .iter()
            .map(as_bind_addr)
            .collect();
        let bind_to_v6 = server_conf
            .client_bind_to_ipv6
            .iter()
            .map(as_bind_addr)
            .collect();
        ConnectorOptions {
            ca_file: server_conf.ca_file.clone(),
            cert_key_file: None, // TODO: use it
            #[cfg(feature = "s2n")]
            s2n_config_cache_size: server_conf.s2n_config_cache_size,
            debug_ssl_keylog: server_conf.upstream_debug_ssl_keylog,
            keepalive_pool_size: server_conf.upstream_keepalive_pool_size,
            offload_threadpool,
            bind_to_v4,
            bind_to_v6,
        }
    }
    /// Create a new [ConnectorOptions] with the given keepalive pool size
    pub fn new(keepalive_pool_size: usize) -> Self {
        ConnectorOptions {
            keepalive_pool_size,
            ca_file: None,
            cert_key_file: None,
            #[cfg(feature = "s2n")]
            s2n_config_cache_size: None,
            debug_ssl_keylog: false,
            offload_threadpool: None,
            bind_to_v4: Vec::new(),
            bind_to_v6: Vec::new(),
        }
    }
}
/// [TransportConnector] provides APIs to connect to servers via TCP or TLS with connection reuse
pub struct TransportConnector {
    // TLS context used when establishing TLS streams.
    tls_ctx: tls::Connector,
    // Pool of idle streams, keyed by the peer's reuse hash.
    connection_pool: Arc<ConnectionPool<Arc<Mutex<Stream>>>>,
    // Optional dedicated runtimes for offloading connection establishment.
    offload: Option<OffloadRuntime>,
    // Candidate source IPv4 addresses; one is picked at random per connection.
    bind_to_v4: Vec<SocketAddr>,
    // Candidate source IPv6 addresses; one is picked at random per connection.
    bind_to_v6: Vec<SocketAddr>,
    // Per-peer preferred HTTP version (used as the ALPN override in `new_stream`).
    preferred_http_version: PreferredHttpVersion,
}
// Fallback keepalive pool size when no [ConnectorOptions] is provided.
const DEFAULT_POOL_SIZE: usize = 128;
impl TransportConnector {
    /// Create a new [TransportConnector] with the given [ConnectorOptions]
    pub fn new(mut options: Option<ConnectorOptions>) -> Self {
        let pool_size = options
            .as_ref()
            .map_or(DEFAULT_POOL_SIZE, |c| c.keepalive_pool_size);
        // Take the offloading setting there because this layer has implement offloading,
        // so no need for stacks at lower layer to offload again.
        let offload = options.as_mut().and_then(|o| o.offload_threadpool.take());
        let bind_to_v4 = options
            .as_ref()
            .map_or_else(Vec::new, |o| o.bind_to_v4.clone());
        let bind_to_v6 = options
            .as_ref()
            .map_or_else(Vec::new, |o| o.bind_to_v6.clone());
        TransportConnector {
            // the remaining options are consumed by the TLS connector
            tls_ctx: tls::Connector::new(options),
            connection_pool: Arc::new(ConnectionPool::new(pool_size)),
            offload: offload.map(|v| OffloadRuntime::new(v.0, v.1)),
            bind_to_v4,
            bind_to_v6,
            preferred_http_version: PreferredHttpVersion::new(),
        }
    }
    /// Connect to the given server [Peer]
    ///
    /// No connection is reused.
    pub async fn new_stream<P: Peer + Send + Sync + 'static>(&self, peer: &P) -> Result<Stream> {
        let rt = self
            .offload
            .as_ref()
            .map(|o| o.get_runtime(peer.reuse_hash()));
        let bind_to = l4::bind_to_random(peer, &self.bind_to_v4, &self.bind_to_v6);
        let alpn_override = self.preferred_http_version.get(peer);
        // When offloading is configured, run the (potentially CPU intensive)
        // connection establishment on the dedicated runtime for this peer.
        let stream = if let Some(rt) = rt {
            let peer = peer.clone();
            let tls_ctx = self.tls_ctx.clone();
            rt.spawn(async move { do_connect(&peer, bind_to, alpn_override, &tls_ctx.ctx).await })
                .await
                .or_err(InternalError, "offload runtime failure")??
        } else {
            do_connect(peer, bind_to, alpn_override, &self.tls_ctx.ctx).await?
        };
        Ok(stream)
    }
    /// Try to find a reusable connection to the given server [Peer]
    pub async fn reused_stream<P: Peer + Send + Sync>(&self, peer: &P) -> Option<Stream> {
        match self.connection_pool.get(&peer.reuse_hash()) {
            Some(s) => {
                debug!("find reusable stream, trying to acquire it");
                {
                    let _ = s.lock().await;
                } // wait for the idle poll to release it
                // Only reuse the stream when we are the sole owner, i.e. the
                // idle poll task has dropped its clone of the Arc.
                match Arc::try_unwrap(s) {
                    Ok(l) => {
                        let mut stream = l.into_inner();
                        // test_reusable_stream: we assume server would never actively send data
                        // first on an idle stream.
                        #[cfg(unix)]
                        if peer.matches_fd(stream.id()) && test_reusable_stream(&mut stream) {
                            Some(stream)
                        } else {
                            None
                        }
                        #[cfg(windows)]
                        {
                            use std::os::windows::io::{AsRawSocket, RawSocket};
                            // adapter so the raw id can be handed to matches_sock
                            struct WrappedRawSocket(RawSocket);
                            impl AsRawSocket for WrappedRawSocket {
                                fn as_raw_socket(&self) -> RawSocket {
                                    self.0
                                }
                            }
                            if peer.matches_sock(WrappedRawSocket(stream.id() as RawSocket))
                                && test_reusable_stream(&mut stream)
                            {
                                Some(stream)
                            } else {
                                None
                            }
                        }
                    }
                    Err(_) => {
                        error!("failed to acquire reusable stream");
                        None
                    }
                }
            }
            None => {
                debug!("No reusable connection found for {peer}");
                None
            }
        }
    }
/// Return the [Stream] to the [TransportConnector] for connection reuse.
///
/// Not all TCP/TLS connections can be reused. It is the caller's responsibility to make sure
/// that protocol over the [Stream] supports connection reuse and the [Stream] itself is ready
/// to be reused.
///
/// If a [Stream] is dropped instead of being returned via this function, it will be closed.
pub fn release_stream(
    &self,
    mut stream: Stream,
    key: u64, // usually peer.reuse_hash()
    idle_timeout: Option<std::time::Duration>,
) {
    // Don't pool streams that are already closed or have unexpected data pending.
    if !test_reusable_stream(&mut stream) {
        return;
    }
    let id = stream.id();
    let meta = ConnectionMeta::new(key, id);
    debug!("Try to keepalive client session");
    let stream = Arc::new(Mutex::new(stream));
    // Hold the owned lock while the stream sits in the pool: reused_stream()
    // awaits this lock to know when the idle poll has released the stream.
    let locked_stream = stream.clone().try_lock_owned().unwrap(); // safe as we just created it
    let (notify_close, watch_use) = self.connection_pool.put(&meta, stream);
    let pool = self.connection_pool.clone(); //clone the arc
    let rt = pingora_runtime::current_handle();
    // Background task: watch the idle stream until it is picked up for reuse
    // or should be closed (idle timeout / close notification).
    rt.spawn(async move {
        pool.idle_poll(locked_stream, &meta, idle_timeout, notify_close, watch_use)
            .await;
    });
}
/// Get a stream to the given server [Peer]
///
/// A reusable [Stream] is preferred; only when none is available is a new
/// connection made to the server.
///
/// The returned boolean indicates whether the stream was reused.
pub async fn get_stream<P: Peer + Send + Sync + 'static>(
    &self,
    peer: &P,
) -> Result<(Stream, bool)> {
    match self.reused_stream(peer).await {
        Some(stream) => Ok((stream, true)),
        None => self.new_stream(peer).await.map(|stream| (stream, false)),
    }
}
/// Tell the connector to always send h1 for ALPN for the given peer in the future.
///
/// Records version 1 for the peer's reuse hash; subsequent handshakes will
/// advertise HTTP/1.x only instead of trying h2 first.
pub fn prefer_h1(&self, peer: &impl Peer) {
    self.preferred_http_version.add(peer, 1);
}
}
// Perform the actual L4 and tls connection steps while respecting the peer's
// connection timeout if there is one
async fn do_connect<P: Peer + Send + Sync>(
    peer: &P,
    bind_to: Option<BindTo>,
    alpn_override: Option<ALPN>,
    tls_ctx: &TlsConnector,
) -> Result<Stream> {
    // Build the future lazily; it is only wrapped in a timeout when the peer
    // has a total connection timeout configured.
    let fut = do_connect_inner(peer, bind_to, alpn_override, tls_ctx);
    if let Some(t) = peer.total_connection_timeout() {
        match pingora_timeout::timeout(t, fut).await {
            Ok(res) => res,
            Err(_) => Error::e_explain(
                ConnectTimedout,
                format!("connecting to server {peer}, total-connection timeout {t:?}"),
            ),
        }
    } else {
        fut.await
    }
}
// Perform the actual L4 and tls connection steps with no timeout
async fn do_connect_inner<P: Peer + Send + Sync>(
    peer: &P,
    bind_to: Option<BindTo>,
    alpn_override: Option<ALPN>,
    tls_ctx: &TlsConnector,
) -> Result<Stream> {
    // L4 (TCP/UDS) first; plain streams are returned as-is.
    let stream = l4_connect(peer, bind_to).await?;
    if !peer.tls() {
        return Ok(Box::new(stream));
    }
    // Peer wants TLS: wrap the L4 stream with a TLS handshake.
    let tls_stream = tls::connect(stream, peer, alpn_override, tls_ctx).await?;
    Ok(Box::new(tls_stream))
}
/// Per-peer record of which HTTP version ALPN should prefer.
struct PreferredHttpVersion {
    // TODO: shard to avoid the global lock
    versions: RwLock<HashMap<u64, u8>>, // <hash of peer, version>
}

// TODO: limit the size of this
impl PreferredHttpVersion {
    pub fn new() -> Self {
        PreferredHttpVersion {
            versions: RwLock::default(),
        }
    }

    /// Record the preferred HTTP version for this peer (1 means h1-only).
    pub fn add(&self, peer: &impl Peer, version: u8) {
        self.versions.write().insert(peer.reuse_hash(), version);
    }

    /// Look up the recorded ALPN preference for this peer, if any.
    pub fn get(&self, peer: &impl Peer) -> Option<ALPN> {
        let versions = self.versions.read();
        versions
            .get(&peer.reuse_hash())
            .copied()
            .map(|v| match v {
                1 => ALPN::H1,
                _ => ALPN::H2H1,
            })
    }
}
use futures::future::FutureExt;
use tokio::io::AsyncReadExt;
/// Test whether a stream is already closed or not reusable (server sent unexpected data)
///
/// Returns `true` only when a non-blocking read would not complete, i.e. the
/// connection is still idle with nothing pending.
fn test_reusable_stream(stream: &mut Stream) -> bool {
    let mut probe = [0; 1];
    // tokio::task::unconstrained because now_or_never may yield None when the future is ready
    let poll = tokio::task::unconstrained(stream.read(&mut probe[..])).now_or_never();
    match poll {
        // Read would block: connection is quiet and reusable.
        None => true,
        Some(Ok(0)) => {
            debug!("Idle connection is closed");
            false
        }
        Some(Ok(_)) => {
            warn!("Unexpected data read in idle connection");
            false
        }
        Some(Err(e)) => {
            debug!("Idle connection is broken: {e:?}");
            false
        }
    }
}
#[cfg(test)]
#[cfg(feature = "any_tls")]
mod tests {
    use pingora_error::ErrorType;
    use tls::Connector;

    use super::*;
    use crate::upstreams::peer::BasicPeer;
    use tokio::io::AsyncWriteExt;
    #[cfg(unix)]
    use tokio::net::UnixListener;

    // NOTE: several tests below require outbound network access (1.1.1.1).

    // 192.0.2.1 is effectively a black hole
    // (TEST-NET-1, RFC 5737: reserved, so connects hang until they time out)
    const BLACK_HOLE: &str = "192.0.2.1:79";

    #[tokio::test]
    async fn test_connect() {
        let connector = TransportConnector::new(None);
        let peer = BasicPeer::new("1.1.1.1:80");
        // make a new connection to 1.1.1.1
        let stream = connector.new_stream(&peer).await.unwrap();
        connector.release_stream(stream, peer.reuse_hash(), None);

        // the released stream should be picked up for reuse
        let (_, reused) = connector.get_stream(&peer).await.unwrap();
        assert!(reused);
    }

    #[tokio::test]
    async fn test_connect_tls() {
        let connector = TransportConnector::new(None);
        let mut peer = BasicPeer::new("1.1.1.1:443");
        // BasicPeer will use tls when SNI is set
        peer.sni = "one.one.one.one".to_string();
        // make a new connection to https://1.1.1.1
        let stream = connector.new_stream(&peer).await.unwrap();
        connector.release_stream(stream, peer.reuse_hash(), None);

        let (_, reused) = connector.get_stream(&peer).await.unwrap();
        assert!(reused);
    }

    #[cfg(unix)]
    const MOCK_UDS_PATH: &str = "/tmp/test_unix_transport_connector.sock";

    // one-off mock server
    #[cfg(unix)]
    async fn mock_connect_server() {
        let _ = std::fs::remove_file(MOCK_UDS_PATH);
        let listener = UnixListener::bind(MOCK_UDS_PATH).unwrap();
        if let Ok((mut stream, _addr)) = listener.accept().await {
            stream.write_all(b"it works!").await.unwrap();
            // wait a bit so that the client can read
            tokio::time::sleep(std::time::Duration::from_millis(100)).await;
        }
        let _ = std::fs::remove_file(MOCK_UDS_PATH);
    }

    // multi_thread flavor so the mock server task can run concurrently with
    // the client under test
    #[tokio::test(flavor = "multi_thread")]
    async fn test_connect_uds() {
        tokio::spawn(async {
            mock_connect_server().await;
        });
        // create a new service at /tmp
        let connector = TransportConnector::new(None);
        let peer = BasicPeer::new_uds(MOCK_UDS_PATH).unwrap();
        // make a new connection to mock uds
        let mut stream = connector.new_stream(&peer).await.unwrap();
        let mut buf = [0; 9];
        let _ = stream.read(&mut buf).await.unwrap();
        assert_eq!(&buf, b"it works!");
        connector.release_stream(stream, peer.reuse_hash(), None);

        let (_, reused) = connector.get_stream(&peer).await.unwrap();
        assert!(reused);
    }

    // Shared body: connecting to the black hole with a 1ms timeout must fail
    // with ConnectTimedout, with or without offloading.
    async fn do_test_conn_timeout(conf: Option<ConnectorOptions>) {
        let connector = TransportConnector::new(conf);
        let mut peer = BasicPeer::new(BLACK_HOLE);
        peer.options.connection_timeout = Some(std::time::Duration::from_millis(1));
        let stream = connector.new_stream(&peer).await;
        match stream {
            Ok(_) => panic!("should throw an error"),
            Err(e) => assert_eq!(e.etype(), &ConnectTimedout),
        }
    }

    #[tokio::test]
    async fn test_conn_timeout() {
        do_test_conn_timeout(None).await;
    }

    #[tokio::test]
    async fn test_conn_timeout_with_offload() {
        let mut conf = ConnectorOptions::new(8);
        conf.offload_threadpool = Some((2, 2));
        do_test_conn_timeout(Some(conf)).await;
    }

    #[tokio::test]
    async fn test_connector_bind_to() {
        // connect to remote while bind to localhost will fail
        let peer = BasicPeer::new("240.0.0.1:80");
        let mut conf = ConnectorOptions::new(1);
        conf.bind_to_v4.push("127.0.0.1:0".parse().unwrap());
        let connector = TransportConnector::new(Some(conf));

        let stream = connector.new_stream(&peer).await;
        let error = stream.unwrap_err();
        // XXX: some systems will allow the socket to bind and connect without error, only to timeout
        assert!(error.etype() == &ConnectError || error.etype() == &ConnectTimedout)
    }

    /// Helper function for testing error handling in the `do_connect` function.
    /// This assumes that the connection will fail on the peer and returns
    /// the decomposed error type and message
    async fn get_do_connect_failure_with_peer(peer: &BasicPeer) -> (ErrorType, String) {
        let tls_connector = Connector::new(None);
        let stream = do_connect(peer, None, None, &tls_connector.ctx).await;
        match stream {
            Ok(_) => panic!("should throw an error"),
            Err(e) => (
                e.etype().clone(),
                e.context
                    .as_ref()
                    .map(|ctx| ctx.as_str().to_owned())
                    .unwrap_or_default(),
            ),
        }
    }

    #[tokio::test]
    async fn test_do_connect_with_total_timeout() {
        let mut peer = BasicPeer::new(BLACK_HOLE);
        peer.options.total_connection_timeout = Some(std::time::Duration::from_millis(1));
        let (etype, context) = get_do_connect_failure_with_peer(&peer).await;
        assert_eq!(etype, ConnectTimedout);
        assert!(context.contains("total-connection timeout"));
    }

    #[tokio::test]
    async fn test_tls_connect_timeout_supersedes_total() {
        let mut peer = BasicPeer::new(BLACK_HOLE);
        peer.options.total_connection_timeout = Some(std::time::Duration::from_millis(10));
        // the shorter per-step connection timeout should fire first, so the
        // error context must NOT mention the total-connection timeout
        peer.options.connection_timeout = Some(std::time::Duration::from_millis(1));
        let (etype, context) = get_do_connect_failure_with_peer(&peer).await;
        assert_eq!(etype, ConnectTimedout);
        assert!(!context.contains("total-connection timeout"));
    }

    #[tokio::test]
    async fn test_do_connect_without_total_timeout() {
        let peer = BasicPeer::new(BLACK_HOLE);
        let (etype, context) = get_do_connect_failure_with_peer(&peer).await;
        assert!(etype != ConnectTimedout || !context.contains("total-connection timeout"));
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/connectors/offload.rs | pingora-core/src/connectors/offload.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use log::debug;
use once_cell::sync::OnceCell;
use rand::Rng;
use tokio::runtime::{Builder, Handle};
use tokio::sync::oneshot::{channel, Sender};
// TODO: use pingora_runtime
// a shared runtime (thread pools)
pub(crate) struct OffloadRuntime {
    // number of shards; a shard is selected by the caller-provided hash
    shards: usize,
    // number of single-threaded runtimes per shard
    thread_per_shard: usize,
    // Lazily init the runtimes so that they are created after pingora
    // daemonize itself. Otherwise the runtime threads are lost.
    // Each entry pairs a runtime handle with the Sender whose drop makes the
    // runtime's thread exit.
    pools: OnceCell<Box<[(Handle, Sender<()>)]>>,
}
impl OffloadRuntime {
    /// Create a lazily-initialized pool of `shards * thread_per_shard`
    /// single-threaded runtimes. Both dimensions must be non-zero.
    pub fn new(shards: usize, thread_per_shard: usize) -> Self {
        assert!(shards != 0);
        assert!(thread_per_shard != 0);
        OffloadRuntime {
            shards,
            thread_per_shard,
            pools: OnceCell::new(),
        }
    }

    fn init_pools(&self) -> Box<[(Handle, Sender<()>)]> {
        let threads = self.shards * self.thread_per_shard;
        (0..threads)
            .map(|_| {
                // We use single thread runtimes to reduce the scheduling overhead of multithread
                // tokio runtime, which can be 50% of the on CPU time of the runtimes
                let rt = Builder::new_current_thread().enable_all().build().unwrap();
                let handle = rt.handle().clone();
                let (tx, rx) = channel::<()>();
                std::thread::Builder::new()
                    .name("Offload thread".to_string())
                    .spawn(move || {
                        debug!("Offload thread started");
                        // the thread that calls block_on() will drive the runtime
                        // rx will return when tx is dropped so this runtime and thread will exit
                        rt.block_on(rx)
                    })
                    .unwrap();
                (handle, tx)
            })
            .collect()
    }

    pub fn get_runtime(&self, hash: u64) -> &Handle {
        // choose a shard based on hash and a random thread within that shard
        // e.g. say thread_per_shard=2, shard 1 thread 1 is 1 * 2 + 1 = 3
        // [[th0, th1], [th2, th3], ...]
        let shard = hash as usize % self.shards;
        let thread_in_shard = rand::thread_rng().gen_range(0..self.thread_per_shard);
        let pools = self.pools.get_or_init(|| self.init_pools());
        &pools[shard * self.thread_per_shard + thread_in_shard].0
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/connectors/tls/mod.rs | pingora-core/src/connectors/tls/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(feature = "openssl_derived")]
mod boringssl_openssl;
#[cfg(feature = "openssl_derived")]
pub use boringssl_openssl::*;
#[cfg(feature = "s2n")]
mod s2n;
#[cfg(feature = "s2n")]
pub use s2n::*;
#[cfg(feature = "rustls")]
mod rustls;
#[cfg(feature = "rustls")]
pub use rustls::*;
/// OpenSSL considers underscores in hostnames non-compliant.
/// We replace the underscore in the leftmost label as we must support these
/// hostnames for wildcard matches and we have not patched OpenSSL.
///
/// https://github.com/openssl/openssl/issues/12566
///
/// > The labels must follow the rules for ARPANET host names. They must
/// > start with a letter, end with a letter or digit, and have as interior
/// > characters only letters, digits, and hyphen. There are also some
/// > restrictions on the length. Labels must be 63 characters or less.
/// - https://datatracker.ietf.org/doc/html/rfc1034#section-3.5
#[cfg(feature = "any_tls")]
pub fn replace_leftmost_underscore(sni: &str) -> Option<String> {
    // Split off the leftmost label; no dot at all means nothing to rewrite.
    let (leftmost, rest) = sni.split_once('.')?;
    // Only rewrite when this is a true subdomain (`rest` itself contains a
    // dot) and the leftmost label actually carries an underscore.
    if rest.contains('.') && leftmost.contains('_') {
        Some(format!("{}.{}", leftmost.replace('_', "-"), rest))
    } else {
        None
    }
}
#[cfg(feature = "any_tls")]
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_replace_leftmost_underscore() {
        // Inputs that must be left alone (no rewrite performed).
        let none_cases = [
            "",
            "some",
            "some.com",
            "1.1.1.1:5050",
            "dog.dot.com",
            "dog.d_t.com",
            "dog.dot.c_m",
            "d_g.com",
            "_",
            "dog.c_m",
        ];
        for case in none_cases {
            assert!(replace_leftmost_underscore(case).is_none(), "{}", case);
        }

        // Inputs whose leftmost label gets every underscore replaced.
        let rewrite_cases = [
            ("bb_b.some.com", "bb-b.some.com"),
            ("a_a_a.some.com", "a-a-a.some.com"),
            ("_.some.com", "-.some.com"),
        ];
        for (input, expected) in rewrite_cases {
            assert_eq!(
                Some(expected.to_string()),
                replace_leftmost_underscore(input)
            );
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/connectors/tls/boringssl_openssl/mod.rs | pingora-core/src/connectors/tls/boringssl_openssl/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use log::debug;
use pingora_error::{Error, ErrorType::*, OrErr, Result};
use std::sync::{Arc, Once};
use crate::connectors::tls::replace_leftmost_underscore;
use crate::connectors::ConnectorOptions;
use crate::protocols::tls::client::handshake;
use crate::protocols::tls::SslStream;
use crate::protocols::IO;
use crate::tls::ext::{
add_host, clear_error_stack, ssl_add_chain_cert, ssl_set_groups_list,
ssl_set_renegotiate_mode_freely, ssl_set_verify_cert_store, ssl_use_certificate,
ssl_use_private_key, ssl_use_second_key_share,
};
#[cfg(feature = "boringssl")]
use crate::tls::ssl::SslCurve;
use crate::tls::ssl::{SslConnector, SslFiletype, SslMethod, SslVerifyMode, SslVersion};
use crate::tls::x509::store::X509StoreBuilder;
use crate::upstreams::peer::{Peer, ALPN};
/// Alias so the generic connector code can refer to the backend's context type.
pub type TlsConnector = SslConnector;

// Cipher suites offered for TLS handshakes (TLS 1.3 suites first, then
// ECDHE and legacy RSA/3DES suites for compatibility).
const CIPHER_LIST: &str = "AES-128-GCM-SHA256\
    :AES-256-GCM-SHA384\
    :CHACHA20-POLY1305-SHA256\
    :ECDHE-ECDSA-AES128-GCM-SHA256\
    :ECDHE-ECDSA-AES256-GCM-SHA384\
    :ECDHE-RSA-AES128-GCM-SHA256\
    :ECDHE-RSA-AES256-GCM-SHA384\
    :ECDHE-RSA-AES128-SHA\
    :ECDHE-RSA-AES256-SHA384\
    :AES128-GCM-SHA256\
    :AES256-GCM-SHA384\
    :AES128-SHA\
    :AES256-SHA\
    :DES-CBC3-SHA";

/**
 * Enabled signature algorithms for signing/verification (ECDSA).
 * As of 4/10/2023, the only addition to boringssl's defaults is ECDSA_SECP521R1_SHA512.
 */
const SIGALG_LIST: &str = "ECDSA_SECP256R1_SHA256\
    :RSA_PSS_RSAE_SHA256\
    :RSA_PKCS1_SHA256\
    :ECDSA_SECP384R1_SHA384\
    :RSA_PSS_RSAE_SHA384\
    :RSA_PKCS1_SHA384\
    :RSA_PSS_RSAE_SHA512\
    :RSA_PKCS1_SHA512\
    :RSA_PKCS1_SHA1\
    :ECDSA_SECP521R1_SHA512";

/**
 * Enabled curves for ECDHE (signature key exchange).
 * As of 4/10/2023, the only addition to boringssl's defaults is SECP521R1.
 *
 * N.B. The ordering of these curves is important. The boringssl library will select the first one
 * as a guess when negotiating a handshake with a server using TLSv1.3. We should opt for curves
 * that are both computationally cheaper and more supported.
 */
#[cfg(feature = "boringssl")]
const BORINGSSL_CURVE_LIST: &[SslCurve] = &[
    SslCurve::X25519,
    SslCurve::SECP256R1,
    SslCurve::SECP384R1,
    SslCurve::SECP521R1,
];
static INIT_CA_ENV: Once = Once::new();

// One-time probe for the system root certificate locations.
fn init_ssl_cert_env_vars() {
    // this sets env vars to pick up the root certs
    // it is universal across openssl and boringssl
    // safety: although impossible to prove safe we assume it's safe since the call is
    // wrapped in a call_once and it's unlikely other threads are reading these vars
    INIT_CA_ENV.call_once(|| unsafe { openssl_probe::init_openssl_env_vars() });
}
/// TLS connector backed by a shared boringssl/openssl `SslConnector` context.
#[derive(Clone)]
pub struct Connector {
    pub(crate) ctx: Arc<SslConnector>, // Arc to support clone
}
impl Connector {
    /// Build the shared SSL context from the optional [`ConnectorOptions`].
    ///
    /// Without options, the system default trust store is used and no client
    /// certificate is configured. Panics on invalid cert/key/CA inputs.
    pub fn new(options: Option<ConnectorOptions>) -> Self {
        let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
        // TODO: make these conf
        // Set supported ciphers.
        builder.set_cipher_list(CIPHER_LIST).unwrap();
        // Set supported signature algorithms and ECDH (key exchange) curves.
        builder
            .set_sigalgs_list(&SIGALG_LIST.to_lowercase())
            .unwrap();
        #[cfg(feature = "boringssl")]
        builder.set_curves(BORINGSSL_CURVE_LIST).unwrap();
        builder
            .set_max_proto_version(Some(SslVersion::TLS1_3))
            .unwrap();
        builder
            .set_min_proto_version(Some(SslVersion::TLS1))
            .unwrap();
        if let Some(conf) = options.as_ref() {
            if let Some(ca_file_path) = conf.ca_file.as_ref() {
                builder.set_ca_file(ca_file_path).unwrap();
            } else {
                init_ssl_cert_env_vars();
                // load from default system wide trust location. (the name is misleading)
                builder.set_default_verify_paths().unwrap();
            }
            if let Some((cert, key)) = conf.cert_key_file.as_ref() {
                builder.set_certificate_chain_file(cert).unwrap();
                builder.set_private_key_file(key, SslFiletype::PEM).unwrap();
            }
            if conf.debug_ssl_keylog {
                // write TLS keys to file specified by SSLKEYLOGFILE if it exists
                if let Some(keylog) = std::env::var_os("SSLKEYLOGFILE").and_then(|path| {
                    std::fs::OpenOptions::new()
                        .append(true)
                        .create(true)
                        .open(path)
                        .ok()
                }) {
                    use std::io::Write;
                    builder.set_keylog_callback(move |_, line| {
                        let _ = writeln!(&keylog, "{}", line);
                    });
                }
            }
        } else {
            init_ssl_cert_env_vars();
            builder.set_default_verify_paths().unwrap();
        }
        Connector {
            ctx: Arc::new(builder.build()),
        }
    }
}
/// Perform the TLS handshake over `stream` to `peer`.
///
/// Applies the peer's TLS options (CA store, client cert/key chain, curves,
/// key shares, ALPN, verification policy) to a fresh SSL config, then runs the
/// handshake under the peer's per-attempt connection timeout, if any.
pub(crate) async fn connect<T, P>(
    stream: T,
    peer: &P,
    alpn_override: Option<ALPN>,
    tls_ctx: &SslConnector,
) -> Result<SslStream<T>>
where
    T: IO,
    P: Peer + Send + Sync,
{
    let mut ssl_conf = tls_ctx.configure().unwrap();

    ssl_set_renegotiate_mode_freely(&mut ssl_conf);

    // Set up CA/verify cert store
    // TODO: store X509Store in the peer directly
    if let Some(ca_list) = peer.get_ca() {
        let mut store_builder = X509StoreBuilder::new().unwrap();
        for ca in &***ca_list {
            store_builder.add_cert(ca.clone()).unwrap();
        }
        ssl_set_verify_cert_store(&mut ssl_conf, &store_builder.build())
            .or_err(InternalError, "failed to load cert store")?;
    }

    // Set up client cert/key
    if let Some(key_pair) = peer.get_client_cert_key() {
        debug!("setting client cert and key");
        ssl_use_certificate(&mut ssl_conf, key_pair.leaf())
            .or_err(InternalError, "invalid client cert")?;
        ssl_use_private_key(&mut ssl_conf, key_pair.key())
            .or_err(InternalError, "invalid client key")?;

        let intermediates = key_pair.intermediates();
        if !intermediates.is_empty() {
            debug!("adding intermediate certificates for mTLS chain");
            for int in intermediates {
                ssl_add_chain_cert(&mut ssl_conf, int)
                    .or_err(InternalError, "invalid intermediate client cert")?;
            }
        }
    }

    if let Some(curve) = peer.get_peer_options().and_then(|o| o.curves) {
        ssl_set_groups_list(&mut ssl_conf, curve).or_err(InternalError, "invalid curves")?;
    }

    // second_keyshare is default true
    if !peer.get_peer_options().is_none_or(|o| o.second_keyshare) {
        ssl_use_second_key_share(&mut ssl_conf, false);
    }

    // disable verification if sni does not exist
    // XXX: verify on empty string cause null string seg fault
    if peer.sni().is_empty() {
        ssl_conf.set_use_server_name_indication(false);
        /* NOTE: technically we can still verify who signs the cert but turn it off to be
        consistent with nginx's behavior */
        ssl_conf.set_verify(SslVerifyMode::NONE);
    } else if peer.verify_cert() {
        if peer.verify_hostname() {
            let verify_param = ssl_conf.param_mut();
            add_host(verify_param, peer.sni()).or_err(InternalError, "failed to add host")?;
            // if sni had underscores in leftmost label replace and add
            if let Some(sni_s) = replace_leftmost_underscore(peer.sni()) {
                add_host(verify_param, sni_s.as_ref()).unwrap();
            }
            if let Some(alt_cn) = peer.alternative_cn() {
                if !alt_cn.is_empty() {
                    add_host(verify_param, alt_cn).unwrap();
                    // if alt_cn had underscores in leftmost label replace and add
                    if let Some(alt_cn_s) = replace_leftmost_underscore(alt_cn) {
                        add_host(verify_param, alt_cn_s.as_ref()).unwrap();
                    }
                }
            }
        }
        ssl_conf.set_verify(SslVerifyMode::PEER);
    } else {
        ssl_conf.set_verify(SslVerifyMode::NONE);
    }

    /*
    We always set set_verify_hostname(false) here because:
    - verify case.) otherwise ssl.connect calls X509_VERIFY_PARAM_set1_host
    which overrides the names added by add_host. Verify is
    essentially on as long as the names are added.
    - off case.) the non verify hostname case should have it disabled
    */
    ssl_conf.set_verify_hostname(false);

    if let Some(alpn) = alpn_override.as_ref().or(peer.get_alpn()) {
        ssl_conf.set_alpn_protos(alpn.to_wire_preference()).unwrap();
    }

    clear_error_stack();
    let complete_hook = peer
        .get_peer_options()
        .and_then(|o| o.upstream_tls_handshake_complete_hook.clone());
    let connect_future = handshake(ssl_conf, peer.sni(), stream, complete_hook);

    // Honor the per-attempt connection timeout, if configured.
    match peer.connection_timeout() {
        Some(t) => match pingora_timeout::timeout(t, connect_future).await {
            Ok(res) => res,
            Err(_) => Error::e_explain(
                ConnectTimedout,
                format!("connecting to server {}, timeout {:?}", peer, t),
            ),
        },
        None => connect_future.await,
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/connectors/tls/rustls/mod.rs | pingora-core/src/connectors/tls/rustls/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use log::debug;
use pingora_error::{
Error,
ErrorType::{ConnectTimedout, InvalidCert},
OrErr, Result,
};
use pingora_rustls::{
load_ca_file_into_store, load_certs_and_key_files, load_platform_certs_incl_env_into_store,
version, CertificateDer, CertificateError, ClientConfig as RusTlsClientConfig,
DigitallySignedStruct, PrivateKeyDer, RootCertStore, RusTlsError, ServerName, SignatureScheme,
TlsConnector as RusTlsConnector, UnixTime, WebPkiServerVerifier,
};
// Uses custom certificate verification from rustls's 'danger' module.
use pingora_rustls::{
HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier as RusTlsServerCertVerifier,
};
use crate::protocols::tls::{client::handshake, TlsStream};
use crate::{connectors::ConnectorOptions, listeners::ALPN, protocols::IO, upstreams::peer::Peer};
use super::replace_leftmost_underscore;
/// Rustls-backed TLS connector; clones share the same [`TlsConnector`] context.
#[derive(Clone)]
pub struct Connector {
    pub ctx: Arc<TlsConnector>,
}

impl Connector {
    /// Create a new connector based on the optional configurations. If no
    /// configurations are provided, no customized certificates or keys will be
    /// used
    ///
    /// Panics when the options cannot be turned into a valid TLS config
    /// (e.g. unreadable CA file or cert/key files).
    pub fn new(config_opt: Option<ConnectorOptions>) -> Self {
        TlsConnector::build_connector(config_opt).unwrap()
    }
}
/// Shared rustls context for the connector.
pub struct TlsConnector {
    // Base client config; per-connection variants are cloned from it.
    config: Arc<RusTlsClientConfig>,
    // Root store kept separately so per-peer configs can be rebuilt from it.
    ca_certs: Arc<RootCertStore>,
}
impl TlsConnector {
    /// Assemble a [`Connector`] from the optional options: load the CA store
    /// (from file or platform/env), optionally load a client cert/key pair,
    /// and build the base rustls client config.
    pub(crate) fn build_connector(options: Option<ConnectorOptions>) -> Result<Connector>
    where
        Self: Sized,
    {
        // NOTE: Rustls only supports TLS 1.2 & 1.3
        // TODO: currently using Rustls defaults
        // - support SSLKEYLOGFILE
        // - set supported ciphers/algorithms/curves
        // - add options for CRL/OCSP validation
        let (ca_certs, certs_key) = {
            let mut ca_certs = RootCertStore::empty();
            let mut certs_key = None;

            if let Some(conf) = options.as_ref() {
                if let Some(ca_file_path) = conf.ca_file.as_ref() {
                    load_ca_file_into_store(ca_file_path, &mut ca_certs)?;
                } else {
                    load_platform_certs_incl_env_into_store(&mut ca_certs)?;
                }

                if let Some((cert, key)) = conf.cert_key_file.as_ref() {
                    certs_key = load_certs_and_key_files(cert, key)?;
                }
                // TODO: support SSLKEYLOGFILE
            } else {
                load_platform_certs_incl_env_into_store(&mut ca_certs)?;
            }
            (ca_certs, certs_key)
        };

        // TODO: WebPkiServerVerifier for CRL/OCSP validation
        let builder =
            RusTlsClientConfig::builder_with_protocol_versions(&[&version::TLS12, &version::TLS13])
                .with_root_certificates(ca_certs.clone());

        let config = match certs_key {
            Some((certs, key)) => {
                match builder.with_client_auth_cert(certs.clone(), key.clone_key()) {
                    Ok(config) => config,
                    Err(err) => {
                        // TODO: is there a viable alternative to the panic?
                        // falling back to no client auth... does not seem to be reasonable.
                        panic!("Failed to configure client auth cert/key. Error: {}", err);
                    }
                }
            }
            None => builder.with_no_client_auth(),
        };

        Ok(Connector {
            ctx: Arc::new(TlsConnector {
                config: Arc::new(config),
                ca_certs: Arc::new(ca_certs),
            }),
        })
    }
}
/// Perform the rustls handshake over `stream` to `peer`.
///
/// Starts from the shared base config and clones/patches it only when the peer
/// requires it (client cert, ALPN override, relaxed verification), then runs
/// the handshake under the peer's per-attempt connection timeout, if any.
pub async fn connect<T, P>(
    stream: T,
    peer: &P,
    alpn_override: Option<ALPN>,
    tls_ctx: &TlsConnector,
) -> Result<TlsStream<T>>
where
    T: IO,
    P: Peer + Send + Sync,
{
    let config = &tls_ctx.config;

    // TODO: setup CA/verify cert store from peer
    // peer.get_ca() returns None by default. It must be replaced by the
    // implementation of `peer`
    let key_pair = peer.get_client_cert_key();
    // When the peer supplies a client cert/key, build a fresh config carrying
    // the mTLS chain; otherwise keep using the shared base config.
    let mut updated_config_opt: Option<RusTlsClientConfig> = match key_pair {
        None => None,
        Some(key_arc) => {
            debug!("setting client cert and key");
            let mut cert_chain = vec![];

            debug!("adding leaf certificate to mTLS cert chain");
            cert_chain.push(key_arc.leaf());

            debug!("adding intermediate certificates to mTLS cert chain");
            key_arc
                .intermediates()
                .to_owned()
                .iter()
                .copied()
                .for_each(|i| cert_chain.push(i));

            let certs: Vec<CertificateDer> = cert_chain.into_iter().map(|c| c.into()).collect();
            let private_key: PrivateKeyDer =
                key_arc.key().as_slice().to_owned().try_into().unwrap();

            let builder = RusTlsClientConfig::builder_with_protocol_versions(&[
                &version::TLS12,
                &version::TLS13,
            ])
            .with_root_certificates(Arc::clone(&tls_ctx.ca_certs));
            debug!("added root ca certificates");

            let updated_config = builder.with_client_auth_cert(certs, private_key).or_err(
                InvalidCert,
                "Failed to use peer cert/key to update Rustls config",
            )?;
            Some(updated_config)
        }
    };

    // Apply the ALPN preference (override takes precedence over the peer's),
    // cloning the base config if no patched config exists yet.
    if let Some(alpn) = alpn_override.as_ref().or(peer.get_alpn()) {
        let alpn_protocols = alpn.to_wire_protocols();
        if let Some(updated_config) = updated_config_opt.as_mut() {
            updated_config.alpn_protocols = alpn_protocols;
        } else {
            let mut updated_config = RusTlsClientConfig::clone(config);
            updated_config.alpn_protocols = alpn_protocols;
            updated_config_opt = Some(updated_config);
        }
    }

    let mut domain = peer.sni().to_string();
    if let Some(updated_config) = updated_config_opt.as_mut() {
        let verification_mode = if peer.sni().is_empty() {
            updated_config.enable_sni = false;
            /* NOTE: technically we can still verify who signs the cert but turn it off to be
            consistent with nginx's behavior */
            Some(VerificationMode::SkipAll) // disable verification if sni does not exist
        } else if !peer.verify_cert() {
            Some(VerificationMode::SkipAll)
        } else if !peer.verify_hostname() {
            Some(VerificationMode::SkipHostname)
        } else {
            // if sni had underscores in leftmost label replace and add
            if let Some(sni_s) = replace_leftmost_underscore(peer.sni()) {
                domain = sni_s;
            }
            None
            // to use the custom verifier for the full verify:
            // Some(VerificationMode::Full)
        };

        // Builds the custom_verifier when verification_mode is set.
        if let Some(mode) = verification_mode {
            let delegate = WebPkiServerVerifier::builder(Arc::clone(&tls_ctx.ca_certs))
                .build()
                .or_err(InvalidCert, "Failed to build WebPkiServerVerifier")?;
            let custom_verifier = Arc::new(CustomServerCertVerifier::new(delegate, mode));
            updated_config
                .dangerous()
                .set_certificate_verifier(custom_verifier);
        }
    }

    // TODO: curve setup from peer
    // - second key share from peer, currently only used in boringssl with PQ features

    // Patch config for dangerous verifier if needed, but only in test builds.
    #[cfg(test)]
    if !peer.verify_cert() || !peer.verify_hostname() {
        use crate::connectors::http::rustls_no_verify::apply_no_verify;
        if let Some(cfg) = updated_config_opt.as_mut() {
            apply_no_verify(cfg);
        } else {
            let mut tmp = RusTlsClientConfig::clone(config);
            apply_no_verify(&mut tmp);
            updated_config_opt = Some(tmp);
        }
    }

    // Use the patched config when one was built; otherwise the shared base.
    let tls_conn = if let Some(cfg) = updated_config_opt {
        RusTlsConnector::from(Arc::new(cfg))
    } else {
        RusTlsConnector::from(Arc::clone(config))
    };
    let connect_future = handshake(&tls_conn, &domain, stream);

    match peer.connection_timeout() {
        Some(t) => match pingora_timeout::timeout(t, connect_future).await {
            Ok(res) => res,
            Err(_) => Error::e_explain(
                ConnectTimedout,
                format!("connecting to server {}, timeout {:?}", peer, t),
            ),
        },
        None => connect_future.await,
    }
}
#[derive(Debug)]
enum VerificationMode {
    // Verify the certificate chain but tolerate hostname mismatches.
    SkipHostname,
    // Accept any certificate without verification.
    SkipAll,
    // Delegate everything to the wrapped verifier.
    Full,
    // Note: "Full" Included for completeness, making this verifier self-contained
    // and explicit about all possible verification modes, not just exceptions.
}
#[derive(Debug)]
pub struct CustomServerCertVerifier {
    // The real verifier that signature checks (and, depending on the mode,
    // certificate checks) are forwarded to.
    delegate: Arc<WebPkiServerVerifier>,
    // Which parts of certificate verification to apply or skip.
    verification_mode: VerificationMode,
}

impl CustomServerCertVerifier {
    /// Wrap `delegate` so certificate verification follows `verification_mode`.
    pub fn new(delegate: Arc<WebPkiServerVerifier>, verification_mode: VerificationMode) -> Self {
        Self {
            delegate,
            verification_mode,
        }
    }
}
// CustomServerCertVerifier delegates TLS signature verification and allows 3 VerificationMode:
// Full: delegates all verification to the original WebPkiServerVerifier
// SkipHostname: same as "Full" but ignores "NotValidForName" certificate errors
// SkipAll: all certificate verification checks are skipped.
impl RusTlsServerCertVerifier for CustomServerCertVerifier {
    fn verify_server_cert(
        &self,
        _end_entity: &CertificateDer<'_>,
        _intermediates: &[CertificateDer<'_>],
        _server_name: &ServerName<'_>,
        _ocsp: &[u8],
        _now: UnixTime,
    ) -> Result<ServerCertVerified, RusTlsError> {
        match self.verification_mode {
            // Forward everything to the delegate unchanged.
            VerificationMode::Full => self.delegate.verify_server_cert(
                _end_entity,
                _intermediates,
                _server_name,
                _ocsp,
                _now,
            ),
            VerificationMode::SkipHostname => {
                match self.delegate.verify_server_cert(
                    _end_entity,
                    _intermediates,
                    _server_name,
                    _ocsp,
                    _now,
                ) {
                    Ok(scv) => Ok(scv),
                    // Tolerate name-mismatch errors only. rustls reports either
                    // the legacy `NotValidForName` or the newer context-carrying
                    // `NotValidForNameContext` depending on version/code path, so
                    // accept both; the original matched only the context variant.
                    Err(RusTlsError::InvalidCertificate(
                        CertificateError::NotValidForName
                        | CertificateError::NotValidForNameContext { .. },
                    )) => Ok(ServerCertVerified::assertion()),
                    // Any other certificate error is still fatal.
                    Err(e) => Err(e),
                }
            }
            // Trust the peer unconditionally (dangerous; intended for test setups).
            VerificationMode::SkipAll => Ok(ServerCertVerified::assertion()),
        }
    }
    // Signature verification is always delegated: skipping it would break the
    // handshake itself, not just certificate trust.
    fn verify_tls12_signature(
        &self,
        message: &[u8],
        cert: &CertificateDer<'_>,
        dss: &DigitallySignedStruct,
    ) -> Result<HandshakeSignatureValid, RusTlsError> {
        self.delegate.verify_tls12_signature(message, cert, dss)
    }
    fn verify_tls13_signature(
        &self,
        message: &[u8],
        cert: &CertificateDer<'_>,
        dss: &DigitallySignedStruct,
    ) -> Result<HandshakeSignatureValid, RusTlsError> {
        self.delegate.verify_tls13_signature(message, cert, dss)
    }
    fn supported_verify_schemes(&self) -> Vec<SignatureScheme> {
        self.delegate.supported_verify_schemes()
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/connectors/tls/s2n/mod.rs | pingora-core/src/connectors/tls/s2n/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::hash::{Hash, Hasher};
use std::num::NonZero;
use std::sync::{Arc, Mutex};
use ahash::AHasher;
use lru::LruCache;
use pingora_error::{Error, Result};
use pingora_error::{ErrorType::*, OrErr};
use pingora_s2n::{
load_pem_file, ClientAuthType, Config, IgnoreVerifyHostnameCallback,
TlsConnector as S2NTlsConnector, DEFAULT_TLS13,
};
use crate::utils::tls::{CertKey, X509Pem};
use crate::{
connectors::ConnectorOptions,
listeners::ALPN,
protocols::{
tls::{client::handshake, S2NConnectionBuilder, TlsStream},
IO,
},
upstreams::peer::Peer,
};
const DEFAULT_CONFIG_CACHE_SIZE: NonZero<usize> = NonZero::new(10).unwrap();
/// TLS connector that establishes s2n-tls sessions to upstream peers.
#[derive(Clone)]
pub struct Connector {
    // Shared state: connector-wide defaults plus the s2n_config LRU cache.
    pub ctx: TlsConnector,
}
impl Connector {
    /// Create a new connector based on the optional configurations. If no
    /// configurations are provided, no customized certificates or keys will be
    /// used
    pub fn new(options: Option<ConnectorOptions>) -> Self {
        Self {
            ctx: TlsConnector::new(options),
        }
    }
}
/// Holds default options for configuring a TLS connection and an LRU cache for `s2n_config`.
///
/// In `s2n-tls`, each connection requires an associated `s2n_config`, which is expensive to create.
/// Although `s2n_config` objects can be cheaply cloned, they are immutable once built.
///
/// To avoid the overhead of constructing a new config for every connection, we maintain a cache
/// that stores previously built configs. Configs are retrieved from the cache based on the
/// configuration options used to create them.
#[derive(Clone)]
pub struct TlsConnector {
    // LRU cache of built configs keyed by `S2NConfigOptions::config_hash()`.
    // `None` means config caching is disabled.
    config_cache: Option<Arc<Mutex<LruCache<u64, Config>>>>,
    // Connector-wide defaults (CA file, client cert/key, cache size).
    options: Option<ConnectorOptions>,
}
impl TlsConnector {
    /// Create a new [`TlsConnector`], optionally seeded with connector-wide options.
    pub fn new(options: Option<ConnectorOptions>) -> Self {
        TlsConnector {
            config_cache: Self::create_config_cache(&options),
            options,
        }
    }
    /// Provided with a set of config options, either creates a new s2n config or
    /// fetches one from the LRU Cache.
    fn load_config(&self, config_options: S2NConfigOptions) -> Result<Config> {
        // Caching disabled: always build a fresh config.
        if self.config_cache.is_none() {
            return create_s2n_config(&self.options, config_options);
        }
        let config_hash = config_options.config_hash();
        if let Some(config) = self.load_config_from_cache(config_hash) {
            return Ok(config);
        }
        // Cache miss: build the config, then populate the cache for later lookups.
        let config = create_s2n_config(&self.options, config_options)?;
        self.put_config_in_cache(config_hash, config.clone());
        Ok(config)
    }
    /// Look up a previously built config by its option hash.
    fn load_config_from_cache(&self, config_hash: u64) -> Option<Config> {
        let config_cache = self.config_cache.as_ref()?;
        let mut cache = config_cache.lock().unwrap();
        cache.get(&config_hash).cloned()
    }
    /// Store a config under its option hash, evicting the LRU entry when full.
    fn put_config_in_cache(&self, config_hash: u64, config: Config) {
        if let Some(config_cache) = &self.config_cache {
            let mut cache = config_cache.lock().unwrap();
            cache.put(config_hash, config);
        }
    }
    /// Build the shared config cache.
    ///
    /// Returns `None` (caching disabled) when a cache size of zero is configured.
    fn create_config_cache(
        options: &Option<ConnectorOptions>,
    ) -> Option<Arc<Mutex<LruCache<u64, Config>>>> {
        let mut cache_size = DEFAULT_CONFIG_CACHE_SIZE;
        if let Some(opts) = options {
            if let Some(configured) = opts.s2n_config_cache_size {
                // Zero disables caching: `NonZero::new` yields `None` there, which
                // `?` propagates. This replaces the original `<= 0` test (the size
                // is unsigned, so that was just a zero check) and its `unwrap()`.
                cache_size = NonZero::new(configured)?;
            }
        }
        Some(Arc::new(Mutex::new(LruCache::new(cache_size))))
    }
}
/// Perform an s2n-tls client handshake with `peer` over the established `stream`.
///
/// The s2n config is loaded (or built and cached) from the peer's options; the
/// handshake target name is the peer's alternative CN if set, otherwise its SNI.
/// Honors the peer's connection timeout, if any.
pub(crate) async fn connect<T, P>(
    stream: T,
    peer: &P,
    alpn_override: Option<ALPN>,
    tls_ctx: &TlsConnector,
) -> Result<TlsStream<T>>
where
    T: IO,
    P: Peer + Send + Sync,
{
    // Default security policy with TLS 1.3 support
    // https://aws.github.io/s2n-tls/usage-guide/ch06-security-policies.html
    let security_policy = peer.get_s2n_security_policy().unwrap_or(&DEFAULT_TLS13);
    let config_options = S2NConfigOptions::from_peer(peer, alpn_override);
    let config = tls_ctx.load_config(config_options)?;
    let connection_builder = S2NConnectionBuilder {
        // field shorthand (was the redundant `config: config`)
        config,
        psk_config: peer.get_psk().cloned(),
        security_policy: Some(security_policy.clone()),
    };
    // Verify against the alternative CN when provided, falling back to SNI.
    let domain = peer
        .alternative_cn()
        .map(|s| s.as_str())
        .unwrap_or(peer.sni());
    let connector = S2NTlsConnector::new(connection_builder);
    let connect_future = handshake(&connector, domain, stream);
    match peer.connection_timeout() {
        Some(t) => match pingora_timeout::timeout(t, connect_future).await {
            Ok(res) => res,
            Err(_) => Error::e_explain(
                ConnectTimedout,
                format!("connecting to server {}, timeout {:?}", peer, t),
            ),
        },
        None => connect_future.await,
    }
}
/// Build an `s2n_config` from connector-wide defaults plus per-peer options.
///
/// Connector options contribute the trust store and client cert/key; peer
/// options contribute blinding delay, CA, client cert, ALPN, and the
/// verification/system-cert toggles.
fn create_s2n_config(
    connector_options: &Option<ConnectorOptions>,
    config_options: S2NConfigOptions,
) -> Result<Config> {
    let mut builder = Config::builder();
    if let Some(conf) = connector_options.as_ref() {
        if let Some(ca_file_path) = conf.ca_file.as_ref() {
            // (dropped the redundant extra borrow; matches the cert_file call below)
            let ca_pem = load_pem_file(ca_file_path)?;
            builder
                .trust_pem(&ca_pem)
                .or_err(InternalError, "failed to load ca cert")?;
        }
        if let Some((cert_file, key_file)) = conf.cert_key_file.as_ref() {
            let cert = load_pem_file(cert_file)?;
            let key = load_pem_file(key_file)?;
            builder
                .load_pem(&cert, &key)
                .or_err(InternalError, "failed to load client cert")?;
            // Presenting a client cert implies mutual TLS is required.
            builder
                .set_client_auth_type(ClientAuthType::Required)
                .or_err(InternalError, "failed to load client key")?;
        }
    }
    if let Some(max_blinding_delay) = config_options.max_blinding_delay {
        builder
            .set_max_blinding_delay(max_blinding_delay)
            .or_err(InternalError, "failed to set max blinding delay")?;
    }
    if let Some(ca) = config_options.ca {
        builder
            .trust_pem(&ca.raw_pem)
            .or_err(InternalError, "invalid peer ca cert")?;
    }
    if let Some(client_cert_key) = config_options.client_cert_key {
        builder
            .load_pem(&client_cert_key.raw_pem(), &client_cert_key.key())
            .or_err(InternalError, "invalid peer client cert or key")?;
    }
    if let Some(alpn) = config_options.alpn {
        builder
            .set_application_protocol_preference(alpn.to_wire_protocols())
            .or_err(InternalError, "failed to set peer alpn")?;
    }
    if !config_options.verify_cert {
        // Disabling x509 verification is considered unsafe
        unsafe {
            builder
                .disable_x509_verification()
                .or_err(InternalError, "failed to disable certificate verification")?;
        }
    }
    if !config_options.verify_hostname {
        // Set verify hostname callback that always returns success
        builder
            .set_verify_host_callback(IgnoreVerifyHostnameCallback::new())
            .or_err(InternalError, "failed to disable hostname verification")?;
    }
    if !config_options.use_system_certs {
        builder.with_system_certs(false).or_err(
            InternalError,
            "failed to disable system certificate loading",
        )?;
    }
    // Return directly instead of the needless `Ok(... ?)` wrapper.
    builder
        .build()
        .or_err(InternalError, "failed to build s2n config")
}
/// The per-peer options that determine the contents of a built `s2n_config`.
///
/// Two peers whose options hash equal (see the `Hash` impl below) can share a
/// cached config.
#[derive(Clone)]
struct S2NConfigOptions {
    // Upper bound on s2n's error-response blinding delay, if overridden.
    max_blinding_delay: Option<u32>,
    // ALPN preference to advertise (override takes precedence over the peer's).
    alpn: Option<ALPN>,
    // Whether to verify the server certificate chain.
    verify_cert: bool,
    // Whether to verify the server hostname.
    verify_hostname: bool,
    // Whether to load the system trust store.
    use_system_certs: bool,
    // Extra CA to trust for this peer.
    ca: Option<Arc<X509Pem>>,
    // Client certificate and key for mutual TLS.
    client_cert_key: Option<Arc<CertKey>>,
}
impl S2NConfigOptions {
    /// Collect the config-affecting options from `peer`, with `alpn_override`
    /// taking precedence over the peer's own ALPN preference.
    fn from_peer<P>(peer: &P, alpn_override: Option<ALPN>) -> Self
    where
        P: Peer + Send + Sync,
    {
        S2NConfigOptions {
            max_blinding_delay: peer.get_max_blinding_delay(),
            // `or_else` so the peer's ALPN is only cloned when there is no
            // override (`or` evaluated — and cloned — its argument eagerly).
            alpn: alpn_override.or_else(|| peer.get_alpn().cloned()),
            verify_cert: peer.verify_cert(),
            verify_hostname: peer.verify_hostname(),
            use_system_certs: peer.use_system_certs(),
            ca: peer.get_ca().cloned(),
            client_cert_key: peer.get_client_cert_key().cloned(),
        }
    }
    /// Hash of all config-affecting options; used as the config-cache key.
    fn config_hash(&self) -> u64 {
        let mut hasher = AHasher::default();
        self.hash(&mut hasher);
        hasher.finish()
    }
}
// Manual Hash implementation covering every field that influences the built
// s2n config, so the hash can serve as the config-cache key.
// NOTE(review): keep this in sync with the fields of S2NConfigOptions — a new
// field left out here would make differing configs collide in the cache.
impl Hash for S2NConfigOptions {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.max_blinding_delay.hash(state);
        self.alpn.hash(state);
        self.verify_cert.hash(state);
        self.verify_hostname.hash(state);
        self.use_system_certs.hash(state);
        self.ca.hash(state);
        self.client_cert_key.hash(state);
    }
}
// Unit tests for the s2n config cache: one cache-hit test for identical
// options, then one cache-miss test per field of S2NConfigOptions to verify
// each field participates in the cache key.
#[cfg(test)]
mod tests {
    use std::{fs, sync::Arc};
    use crate::{
        connectors::tls::{s2n::S2NConfigOptions, TlsConnector},
        listeners::ALPN,
        utils::tls::{CertKey, X509Pem},
    };
    const CA_CERT_FILE: &str = "tests/certs/ca.crt";
    const ALT_CA_CERT_FILE: &str = "tests/certs/alt-ca.crt";
    const CERT_FILE: &str = "tests/certs/server.crt";
    const ALT_CERT_FILE: &str = "tests/certs/alt-server.crt";
    const KEY_FILE: &str = "tests/certs/server.key";
    fn read_file(file: &str) -> Vec<u8> {
        fs::read(file).unwrap()
    }
    fn load_pem_from_file(file: &str) -> X509Pem {
        X509Pem::new(read_file(file))
    }
    // Baseline options; each miss test below mutates exactly one field.
    fn create_config_options() -> S2NConfigOptions {
        S2NConfigOptions {
            max_blinding_delay: Some(10),
            alpn: Some(ALPN::H1),
            verify_cert: true,
            verify_hostname: true,
            use_system_certs: true,
            ca: Some(Arc::new(load_pem_from_file(CA_CERT_FILE))),
            client_cert_key: Some(Arc::new(CertKey::new(
                read_file(CERT_FILE),
                read_file(KEY_FILE),
            ))),
        }
    }
    #[test]
    fn config_cache_hit_identical() {
        let connector = TlsConnector::new(None);
        let config_options = create_config_options();
        let config = connector.load_config(config_options.clone()).unwrap();
        let cached_config = connector.load_config_from_cache(config_options.config_hash());
        assert!(cached_config.is_some());
        assert_eq!(config, cached_config.unwrap());
    }
    #[test]
    fn config_cache_miss_max_blinding_delay_changed() {
        let connector = TlsConnector::new(None);
        let mut config_options = create_config_options();
        let _config = connector.load_config(config_options.clone()).unwrap();
        config_options.max_blinding_delay = Some(20);
        let cached_config = connector.load_config_from_cache(config_options.config_hash());
        assert!(cached_config.is_none());
    }
    #[test]
    fn config_cache_miss_alpn_changed() {
        let connector = TlsConnector::new(None);
        let mut config_options = create_config_options();
        let _config = connector.load_config(config_options.clone()).unwrap();
        config_options.alpn = Some(ALPN::H2H1);
        let cached_config = connector.load_config_from_cache(config_options.config_hash());
        assert!(cached_config.is_none());
    }
    #[test]
    fn config_cache_miss_verify_cert_changed() {
        let connector = TlsConnector::new(None);
        let mut config_options = create_config_options();
        let _config = connector.load_config(config_options.clone()).unwrap();
        config_options.verify_cert = false;
        let cached_config = connector.load_config_from_cache(config_options.config_hash());
        assert!(cached_config.is_none());
    }
    #[test]
    fn config_cache_miss_verify_hostname_changed() {
        let connector = TlsConnector::new(None);
        let mut config_options = create_config_options();
        let _config = connector.load_config(config_options.clone()).unwrap();
        config_options.verify_hostname = false;
        let cached_config = connector.load_config_from_cache(config_options.config_hash());
        assert!(cached_config.is_none());
    }
    #[test]
    fn config_cache_miss_use_system_certs_changed() {
        let connector = TlsConnector::new(None);
        let mut config_options = create_config_options();
        let _config = connector.load_config(config_options.clone()).unwrap();
        config_options.use_system_certs = false;
        let cached_config = connector.load_config_from_cache(config_options.config_hash());
        assert!(cached_config.is_none());
    }
    #[test]
    fn config_cache_miss_ca_changed() {
        let connector = TlsConnector::new(None);
        let mut config_options = create_config_options();
        let _config = connector.load_config(config_options.clone()).unwrap();
        config_options.ca = Some(Arc::new(load_pem_from_file(ALT_CA_CERT_FILE)));
        let cached_config = connector.load_config_from_cache(config_options.config_hash());
        assert!(cached_config.is_none());
    }
    #[test]
    fn config_cache_miss_client_cert_key_changed() {
        let connector = TlsConnector::new(None);
        let mut config_options = create_config_options();
        let _config = connector.load_config(config_options.clone()).unwrap();
        config_options.client_cert_key = Some(Arc::new(CertKey::new(
            read_file(ALT_CERT_FILE),
            read_file(KEY_FILE),
        )));
        let cached_config = connector.load_config_from_cache(config_options.config_hash());
        assert!(cached_config.is_none());
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/connectors/http/v2.rs | pingora-core/src/connectors/http/v2.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::HttpSession;
use crate::connectors::{ConnectorOptions, TransportConnector};
use crate::protocols::http::custom::client::Session;
use crate::protocols::http::v1::client::HttpSession as Http1Session;
use crate::protocols::http::v2::client::{drive_connection, Http2Session};
use crate::protocols::{Digest, Stream, UniqueIDType};
use crate::upstreams::peer::{Peer, ALPN};
use bytes::Bytes;
use h2::client::SendRequest;
use log::debug;
use parking_lot::{Mutex, RwLock};
use pingora_error::{Error, ErrorType::*, OrErr, Result};
use pingora_pool::{ConnectionMeta, ConnectionPool, PoolNode};
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::watch;
struct Stub(SendRequest<Bytes>);
impl Stub {
    /// Clone the underlying `SendRequest` handle and wait until it is ready
    /// to open a new h2 stream.
    async fn new_stream(&self) -> Result<SendRequest<Bytes>> {
        self.0
            .clone()
            .ready()
            .await
            .or_err(H2Error, "while creating new stream")
    }
}
// Shared state behind a [`ConnectionRef`]: one entry per established h2
// connection, tracking stream budget and liveness.
pub(crate) struct ConnectionRefInner {
    // Handle used to open new streams on this connection.
    connection_stub: Stub,
    // Becomes true when the connection driver task exits.
    closed: watch::Receiver<bool>,
    // Set by the driver when an h2 PING timed out.
    ping_timeout_occurred: Arc<AtomicBool>,
    id: UniqueIDType,
    // max concurrent streams this connection is allowed to create
    max_streams: usize,
    // how many concurrent streams already active
    current_streams: AtomicUsize,
    // The connection is gracefully shutting down, no more stream is allowed
    shutting_down: AtomicBool,
    // because `SendRequest` doesn't actually have access to the underlying Stream,
    // we log info about timing and tcp info here.
    pub(crate) digest: Digest,
    // To serialize certain operations when trying to release the connect back to the pool,
    pub(crate) release_lock: Arc<Mutex<()>>,
}
/// A cheap-to-clone shared handle to an established h2 connection.
#[derive(Clone)]
pub struct ConnectionRef(Arc<ConnectionRefInner>);
impl ConnectionRef {
    /// Wrap a freshly handshaked h2 connection. `max_streams` caps how many
    /// concurrent streams this connection may spawn.
    pub fn new(
        send_req: SendRequest<Bytes>,
        closed: watch::Receiver<bool>,
        ping_timeout_occurred: Arc<AtomicBool>,
        id: UniqueIDType,
        max_streams: usize,
        digest: Digest,
    ) -> Self {
        ConnectionRef(Arc::new(ConnectionRefInner {
            connection_stub: Stub(send_req),
            closed,
            ping_timeout_occurred,
            id,
            max_streams,
            current_streams: AtomicUsize::new(0),
            shutting_down: false.into(),
            digest,
            release_lock: Arc::new(Mutex::new(())),
        }))
    }
    /// Whether this connection has budget for another stream: not shutting
    /// down, under our configured cap, and under the server-advertised cap.
    pub fn more_streams_allowed(&self) -> bool {
        let current = self.0.current_streams.load(Ordering::Relaxed);
        !self.is_shutting_down()
            && self.0.max_streams > current
            && self.0.connection_stub.0.current_max_send_streams() > current
    }
    /// True when no streams are currently active on this connection.
    pub fn is_idle(&self) -> bool {
        self.0.current_streams.load(Ordering::Relaxed) == 0
    }
    /// Give back one unit of stream budget (a stream finished).
    pub fn release_stream(&self) {
        self.0.current_streams.fetch_sub(1, Ordering::SeqCst);
    }
    pub fn id(&self) -> UniqueIDType {
        self.0.id
    }
    pub fn digest(&self) -> &Digest {
        &self.0.digest
    }
    /// Mutable access to the digest; only succeeds when this is the sole ref.
    pub fn digest_mut(&mut self) -> Option<&mut Digest> {
        Arc::get_mut(&mut self.0).map(|inner| &mut inner.digest)
    }
    pub fn ping_timedout(&self) -> bool {
        self.0.ping_timeout_occurred.load(Ordering::Relaxed)
    }
    pub fn is_closed(&self) -> bool {
        *self.0.closed.borrow()
    }
    // different from is_closed, existing streams can still be processed but can no longer create
    // new stream.
    pub fn is_shutting_down(&self) -> bool {
        self.0.shutting_down.load(Ordering::Relaxed)
    }
    // spawn a stream if more stream is allowed, otherwise return Ok(None)
    pub async fn spawn_stream(&self) -> Result<Option<Http2Session>> {
        // Atomically check if the current_stream is over the limit
        // load(), compare and then fetch_add() cannot guarantee the same
        let current_streams = self.0.current_streams.fetch_add(1, Ordering::SeqCst);
        if current_streams >= self.0.max_streams {
            // already over the limit, reset the counter to the previous value
            self.0.current_streams.fetch_sub(1, Ordering::SeqCst);
            return Ok(None);
        }
        match self.0.connection_stub.new_stream().await {
            Ok(send_req) => Ok(Some(Http2Session::new(send_req, self.clone()))),
            Err(e) => {
                // fail to create the stream, reset the counter
                self.0.current_streams.fetch_sub(1, Ordering::SeqCst);
                // Remote sends GOAWAY(NO_ERROR): graceful shutdown: this connection no longer
                // accepts new streams. We can still try to create new connection.
                if e.root_cause()
                    .downcast_ref::<h2::Error>()
                    .map(|e| {
                        e.is_go_away() && e.is_remote() && e.reason() == Some(h2::Reason::NO_ERROR)
                    })
                    .unwrap_or(false)
                {
                    // Mark so future callers stop trying this connection.
                    self.0.shutting_down.store(true, Ordering::Relaxed);
                    Ok(None)
                } else {
                    Err(e)
                }
            }
        }
    }
}
/// Pool of h2 connections that currently have in-flight streams, keyed by the
/// peer's reuse hash.
pub struct InUsePool {
    // TODO: use pingora hashmap to shard the lock contention
    pools: RwLock<HashMap<u64, PoolNode<ConnectionRef>>>,
}
impl InUsePool {
    fn new() -> Self {
        InUsePool {
            pools: RwLock::new(HashMap::new()),
        }
    }
    /// Insert `conn` under its reuse hash so other callers can find it.
    pub fn insert(&self, reuse_hash: u64, conn: ConnectionRef) {
        {
            // Fast path: the pool node for this hash already exists, so only the
            // shared read lock is needed.
            let pools = self.pools.read();
            if let Some(pool) = pools.get(&reuse_hash) {
                pool.insert(conn.id(), conn);
                return;
            }
        } // drop read lock
        // Slow path: create the node under the write lock. Use the entry API
        // rather than a blind `HashMap::insert()`: between dropping the read
        // lock and acquiring the write lock another thread may have created the
        // node for this hash, and overwriting it would silently drop its pooled
        // connections.
        let mut pools = self.pools.write();
        pools
            .entry(reuse_hash)
            .or_insert_with(PoolNode::new)
            .insert(conn.id(), conn);
    }
    // retrieve a h2 conn ref to create a new stream
    // the caller should return the conn ref to this pool if there are still
    // capacity left for more streams
    pub fn get(&self, reuse_hash: u64) -> Option<ConnectionRef> {
        let pools = self.pools.read();
        pools.get(&reuse_hash)?.get_any().map(|v| v.1)
    }
    // release a h2_stream, this functional will cause an ConnectionRef to be returned (if exist)
    // the caller should update the ref and then decide where to put it (in use pool or idle)
    pub fn release(&self, reuse_hash: u64, id: UniqueIDType) -> Option<ConnectionRef> {
        let pools = self.pools.read();
        if let Some(pool) = pools.get(&reuse_hash) {
            pool.remove(id)
        } else {
            None
        }
    }
}
const DEFAULT_POOL_SIZE: usize = 128;
/// Http2 connector
///
/// Establishes new h2 (or fallback h1) sessions and pools established h2
/// connections so their streams can be reused across requests.
pub struct Connector {
    // just for creating connections, the Stream of h2 should be reused
    transport: TransportConnector,
    // the h2 connection idle pool
    idle_pool: Arc<ConnectionPool<ConnectionRef>>,
    // the pool of h2 connections that have ongoing streams
    in_use_pool: InUsePool,
}
impl Connector {
    /// Create a new [Connector] from the given [ConnectorOptions]
    pub fn new(options: Option<ConnectorOptions>) -> Self {
        let pool_size = options
            .as_ref()
            .map_or(DEFAULT_POOL_SIZE, |o| o.keepalive_pool_size);
        // connection offload is handled by the [TransportConnector]
        Connector {
            transport: TransportConnector::new(options),
            idle_pool: Arc::new(ConnectionPool::new(pool_size)),
            in_use_pool: InUsePool::new(),
        }
    }
    pub fn transport(&self) -> &TransportConnector {
        &self.transport
    }
    pub fn idle_pool(&self) -> &Arc<ConnectionPool<ConnectionRef>> {
        &self.idle_pool
    }
    pub fn in_use_pool(&self) -> &InUsePool {
        &self.in_use_pool
    }
    /// Create a new Http2 connection to the given server
    ///
    /// Either an Http2 or Http1 session can be returned depending on the server's preference.
    pub async fn new_http_session<P: Peer + Send + Sync + 'static, C: Session>(
        &self,
        peer: &P,
    ) -> Result<HttpSession<C>> {
        let stream = self.transport.new_stream(peer).await?;
        // check alpn
        match stream.selected_alpn_proto() {
            Some(ALPN::H2) => { /* continue */ }
            Some(_) => {
                // H2 not supported
                return Ok(HttpSession::H1(Http1Session::new(stream)));
            }
            None => {
                // if tls but no ALPN, default to h1
                // else if plaintext and min http version is 1, this is most likely h1
                if peer.tls()
                    || peer
                        .get_peer_options()
                        .is_none_or(|o| o.alpn.get_min_http_version() == 1)
                {
                    return Ok(HttpSession::H1(Http1Session::new(stream)));
                }
                // else: min http version=H2 over plaintext, there is no ALPN anyways, we trust
                // the caller that the server speaks h2c
            }
        }
        let max_h2_stream = peer.get_peer_options().map_or(1, |o| o.max_h2_streams);
        let conn = handshake(stream, max_h2_stream, peer.h2_ping_interval()).await?;
        let h2_stream = conn
            .spawn_stream()
            .await?
            .expect("newly created connections should have at least one free stream");
        // Only track the connection if it can host more concurrent streams.
        if conn.more_streams_allowed() {
            self.in_use_pool.insert(peer.reuse_hash(), conn);
        }
        Ok(HttpSession::H2(h2_stream))
    }
    /// Try to create a new http2 stream from any existing H2 connection.
    ///
    /// None means there is no "free" connection left.
    pub async fn reused_http_session<P: Peer + Send + Sync + 'static>(
        &self,
        peer: &P,
    ) -> Result<Option<Http2Session>> {
        // check in use pool first so that we use fewer total connections
        // then idle pool
        let reuse_hash = peer.reuse_hash();
        // NOTE: We grab a conn from the pools, create a new stream and put the conn back if the
        // conn has more free streams. During this process another caller could arrive but is not
        // able to find the conn even the conn has free stream to use.
        // We accept this false negative to keep the implementation simple. This false negative
        // makes an actual impact when there are only a few connection.
        // Alternative design 1. given each free stream a conn object: a lot of Arc<>
        // Alternative design 2. mutex the pool, which creates lock contention when concurrency is high
        // Alternative design 3. do not pop conn from the pool so that multiple callers can grab it
        // which will cause issue where spawn_stream() could return None because others call it
        // first. Thus a caller might have to retry or give up. This issue is more likely to happen
        // when concurrency is high.
        let maybe_conn = self
            .in_use_pool
            .get(reuse_hash)
            .or_else(|| self.idle_pool.get(&reuse_hash));
        if let Some(conn) = maybe_conn {
            let h2_stream = conn.spawn_stream().await?;
            if conn.more_streams_allowed() {
                self.in_use_pool.insert(reuse_hash, conn);
            }
            Ok(h2_stream)
        } else {
            Ok(None)
        }
    }
    /// Release a finished h2 stream.
    ///
    /// This function will terminate the [Http2Session]. The corresponding h2 connection will now
    /// have one more free stream to use.
    ///
    /// The h2 connection will be closed after `idle_timeout` if it has no active streams.
    pub fn release_http_session<P: Peer + Send + Sync + 'static>(
        &self,
        session: Http2Session,
        peer: &P,
        idle_timeout: Option<Duration>,
    ) {
        let id = session.conn.id();
        let reuse_hash = peer.reuse_hash();
        // get a ref to the connection, which we might need below, before dropping the h2
        let conn = session.conn();
        // The lock here is to make sure that in_use_pool.insert() below cannot be called after
        // in_use_pool.release(), which would have put the conn entry in both pools.
        // It also makes sure that only one conn will trigger the conn.is_idle() condition, which
        // avoids putting the same conn into the idle_pool more than once.
        let locked = conn.0.release_lock.lock_arc();
        // this drop() will both drop the actual stream and call the conn.release_stream()
        drop(session);
        // find and remove the conn stored in in_use_pool so that it could be put in the idle pool
        // if necessary
        let conn = self.in_use_pool.release(reuse_hash, id).unwrap_or(conn);
        if conn.is_closed() || conn.is_shutting_down() {
            // should never be put back to the pool
            return;
        }
        if conn.is_idle() {
            // No active streams: park the connection in the idle pool, with an
            // optional timer task that evicts it after `idle_timeout`.
            drop(locked);
            let meta = ConnectionMeta {
                key: reuse_hash,
                id,
            };
            let closed = conn.0.closed.clone();
            let (notify_evicted, watch_use) = self.idle_pool.put(&meta, conn);
            if let Some(to) = idle_timeout {
                let pool = self.idle_pool.clone(); //clone the arc
                let rt = pingora_runtime::current_handle();
                rt.spawn(async move {
                    pool.idle_timeout(&meta, to, notify_evicted, closed, watch_use)
                        .await;
                });
            }
        } else {
            // Still has active streams: keep it discoverable in the in-use pool.
            self.in_use_pool.insert(reuse_hash, conn);
            drop(locked);
        }
    }
    /// Tell the connector to always send h1 for ALPN for the given peer in the future.
    pub fn prefer_h1(&self, peer: &impl Peer) {
        self.transport.prefer_h1(peer);
    }
    pub(crate) fn h1_is_preferred(&self, peer: &impl Peer) -> bool {
        self.transport
            .preferred_http_version
            .get(peer)
            .is_some_and(|v| matches!(v, ALPN::H1))
    }
}
// The h2 library we use has unbounded internal buffering, which will cause excessive memory
// consumption when the downstream is slower than upstream. This window size caps the buffering by
// limiting how much data can be inflight. However, setting this value will also cap the max
// download speed by limiting the bandwidth-delay product of a link.
// Long term, we should advertising large window but shrink it when a small buffer is full.
// 8 Mbytes = 80 Mbytes X 100ms, which should be enough for most links.
const H2_WINDOW_SIZE: u32 = 1 << 23;
/// Perform the client-side h2 handshake over an established `stream`.
///
/// Spawns a background task that drives the connection (and its optional PING
/// keepalive) and returns a [`ConnectionRef`] capped at the smaller of
/// `max_streams` and the server-advertised concurrent-stream limit.
///
/// # Errors
/// Fails if `max_streams` is zero, the h2 handshake fails, or the server
/// advertises a zero stream limit.
pub async fn handshake(
    stream: Stream,
    max_streams: usize,
    h2_ping_interval: Option<Duration>,
) -> Result<ConnectionRef> {
    use h2::client::Builder;
    use pingora_runtime::current_handle;
    // Safe guard: new_http_session() assumes there should be at least one free stream
    if max_streams == 0 {
        return Error::e_explain(H2Error, "zero max_stream configured");
    }
    let id = stream.id();
    let digest = Digest {
        // NOTE: this field is always false because the digest is shared across all streams
        // The streams should log their own reuse info
        ssl_digest: stream.get_ssl_digest(),
        // TODO: log h2 handshake time
        timing_digest: stream.get_timing_digest(),
        proxy_digest: stream.get_proxy_digest(),
        socket_digest: stream.get_socket_digest(),
    };
    // TODO: make these configurable
    let (send_req, connection) = Builder::new()
        .enable_push(false)
        .initial_max_send_streams(max_streams)
        // The limit for the server. Server push is not allowed, so this value doesn't matter
        .max_concurrent_streams(1)
        .max_frame_size(64 * 1024) // advise server to send larger frames
        .initial_window_size(H2_WINDOW_SIZE)
        // should this be max_streams * H2_WINDOW_SIZE?
        .initial_connection_window_size(H2_WINDOW_SIZE)
        .handshake(stream)
        .await
        .or_err(HandshakeError, "during H2 handshake")?;
    debug!("H2 handshake to server done.");
    let ping_timeout_occurred = Arc::new(AtomicBool::new(false));
    let ping_timeout_clone = ping_timeout_occurred.clone();
    // Respect the server-advertised stream limit in addition to ours.
    let max_allowed_streams = std::cmp::min(max_streams, connection.max_concurrent_send_streams());
    // Safe guard: new_http_session() assumes there should be at least one free stream
    // The server won't commonly advertise 0 max stream.
    if max_allowed_streams == 0 {
        return Error::e_explain(H2Error, "zero max_concurrent_send_streams received");
    }
    let (closed_tx, closed_rx) = watch::channel(false);
    // Background task: drives the h2 connection I/O and PING keepalive; flips
    // `closed_tx` when the connection terminates.
    current_handle().spawn(async move {
        drive_connection(
            connection,
            id,
            closed_tx,
            h2_ping_interval,
            ping_timeout_clone,
        )
        .await;
    });
    Ok(ConnectionRef::new(
        send_req,
        closed_rx,
        ping_timeout_occurred,
        id,
        max_allowed_streams,
        digest,
    ))
}
// TODO(slava): add custom unit tests
// Integration-style tests that connect to a live endpoint (1.1.1.1); they
// exercise ALPN negotiation and the single-/multi-stream pooling behavior.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::upstreams::peer::HttpPeer;
    #[tokio::test]
    #[cfg(feature = "any_tls")]
    async fn test_connect_h2() {
        let connector = Connector::new(None);
        let mut peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
        peer.options.set_http_version(2, 2);
        let h2 = connector
            .new_http_session::<HttpPeer, ()>(&peer)
            .await
            .unwrap();
        match h2 {
            HttpSession::H1(_) => panic!("expect h2"),
            HttpSession::H2(h2_stream) => assert!(!h2_stream.ping_timedout()),
            HttpSession::Custom(_) => panic!("expect h2"),
        }
    }
    #[tokio::test]
    #[cfg(feature = "any_tls")]
    async fn test_connect_h1() {
        let connector = Connector::new(None);
        let mut peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
        // a hack to force h1, new_http_session() in the future might validate this setting
        peer.options.set_http_version(1, 1);
        let h2 = connector
            .new_http_session::<HttpPeer, ()>(&peer)
            .await
            .unwrap();
        match h2 {
            HttpSession::H1(_) => {}
            HttpSession::H2(_) => panic!("expect h1"),
            HttpSession::Custom(_) => panic!("expect h1"),
        }
    }
    #[tokio::test]
    async fn test_connect_h1_plaintext() {
        let connector = Connector::new(None);
        let mut peer = HttpPeer::new(("1.1.1.1", 80), false, "".into());
        peer.options.set_http_version(2, 1);
        let h2 = connector
            .new_http_session::<HttpPeer, ()>(&peer)
            .await
            .unwrap();
        match h2 {
            HttpSession::H1(_) => {}
            HttpSession::H2(_) => panic!("expect h1"),
            HttpSession::Custom(_) => panic!("expect h1"),
        }
    }
    #[tokio::test]
    #[cfg(feature = "any_tls")]
    async fn test_h2_single_stream() {
        let connector = Connector::new(None);
        let mut peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
        peer.options.set_http_version(2, 2);
        peer.options.max_h2_streams = 1;
        let h2 = connector
            .new_http_session::<HttpPeer, ()>(&peer)
            .await
            .unwrap();
        let h2_1 = match h2 {
            HttpSession::H1(_) => panic!("expect h2"),
            HttpSession::H2(h2_stream) => h2_stream,
            HttpSession::Custom(_) => panic!("expect h2"),
        };
        let id = h2_1.conn.id();
        // With max_h2_streams = 1, no second stream until the first is released.
        assert!(connector
            .reused_http_session(&peer)
            .await
            .unwrap()
            .is_none());
        connector.release_http_session(h2_1, &peer, None);
        let h2_2 = connector.reused_http_session(&peer).await.unwrap().unwrap();
        assert_eq!(id, h2_2.conn.id());
        connector.release_http_session(h2_2, &peer, None);
        let h2_3 = connector.reused_http_session(&peer).await.unwrap().unwrap();
        assert_eq!(id, h2_3.conn.id());
    }
    #[tokio::test]
    #[cfg(feature = "any_tls")]
    async fn test_h2_multiple_stream() {
        let connector = Connector::new(None);
        let mut peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
        peer.options.set_http_version(2, 2);
        peer.options.max_h2_streams = 3;
        let h2 = connector
            .new_http_session::<HttpPeer, ()>(&peer)
            .await
            .unwrap();
        let h2_1 = match h2 {
            HttpSession::H1(_) => panic!("expect h2"),
            HttpSession::H2(h2_stream) => h2_stream,
            HttpSession::Custom(_) => panic!("expect h2"),
        };
        let id = h2_1.conn.id();
        let h2_2 = connector.reused_http_session(&peer).await.unwrap().unwrap();
        assert_eq!(id, h2_2.conn.id());
        let h2_3 = connector.reused_http_session(&peer).await.unwrap().unwrap();
        assert_eq!(id, h2_3.conn.id());
        // max stream is 3 for now
        assert!(connector
            .reused_http_session(&peer)
            .await
            .unwrap()
            .is_none());
        connector.release_http_session(h2_1, &peer, None);
        let h2_4 = connector.reused_http_session(&peer).await.unwrap().unwrap();
        assert_eq!(id, h2_4.conn.id());
        connector.release_http_session(h2_2, &peer, None);
        connector.release_http_session(h2_3, &peer, None);
        connector.release_http_session(h2_4, &peer, None);
        // all streams are released, now the connection is idle
        let h2_5 = connector.reused_http_session(&peer).await.unwrap().unwrap();
        assert_eq!(id, h2_5.conn.id());
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/connectors/http/mod.rs | pingora-core/src/connectors/http/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Connecting to HTTP servers
use crate::connectors::http::custom::Connection;
use crate::connectors::ConnectorOptions;
use crate::listeners::ALPN;
use crate::protocols::http::client::HttpSession;
use crate::protocols::http::v1::client::HttpSession as Http1Session;
use crate::upstreams::peer::Peer;
use pingora_error::Result;
use std::time::Duration;
pub mod custom;
pub mod v1;
pub mod v2;
pub struct Connector<C = ()>
where
C: custom::Connector,
{
h1: v1::Connector,
h2: v2::Connector,
custom: C,
}
impl Connector<()> {
pub fn new(options: Option<ConnectorOptions>) -> Self {
Connector {
h1: v1::Connector::new(options.clone()),
h2: v2::Connector::new(options.clone()),
custom: Default::default(),
}
}
}
impl<C> Connector<C>
where
C: custom::Connector,
{
pub fn new_custom(options: Option<ConnectorOptions>, custom: C) -> Self {
Connector {
h1: v1::Connector::new(options.clone()),
h2: v2::Connector::new(options.clone()),
custom,
}
}
/// Get an [HttpSession] to the given server.
///
/// The second return value indicates whether the session is connected via a reused stream.
pub async fn get_http_session<P: Peer + Send + Sync + 'static>(
&self,
peer: &P,
) -> Result<(HttpSession<C::Session>, bool)> {
let peer_opts = peer.get_peer_options();
// Switch to custom protocol as early as possible
if peer_opts.is_some_and(|o| matches!(o.alpn, ALPN::Custom(_))) {
// We create the Connector before TLS, so we need to make sure that the server also supports the same custom protocol.
// We will first check for sessions that we can reuse, if not we will create a new one based on the negotiated protocol
// Step 1: Look for reused Custom Session
if let Some(session) = self.custom.reused_http_session(peer).await {
return Ok((HttpSession::Custom(session), true));
}
// Step 2: Check reuse pool for reused H1 session
if let Some(h1) = self.h1.reused_http_session(peer).await {
return Ok((HttpSession::H1(h1), true));
}
// Step 3: Try and create a new Custom session
let (connection, reused) = self.custom.get_http_session(peer).await?;
// We create the Connector before TLS, so we need to make sure that the server also supports the same custom protocol
match connection {
Connection::Session(s) => {
return Ok((HttpSession::Custom(s), reused));
}
// Negotiated ALPN is not custom, create a new H1 session
Connection::Stream(s) => {
return Ok((HttpSession::H1(Http1Session::new(s)), false));
}
}
}
// NOTE: maybe TODO: we do not yet enforce that only TLS traffic can use h2, which is the
// de facto requirement for h2, because non TLS traffic lack the negotiation mechanism.
// We assume no peer option == no ALPN == h1 only
let h1_only = peer
.get_peer_options()
.is_none_or(|o| o.alpn.get_max_http_version() == 1);
if h1_only {
let (h1, reused) = self.h1.get_http_session(peer).await?;
Ok((HttpSession::H1(h1), reused))
} else {
// the peer allows h2, we first check the h2 reuse pool
let reused_h2 = self.h2.reused_http_session(peer).await?;
if let Some(h2) = reused_h2 {
return Ok((HttpSession::H2(h2), true));
}
let h2_only = peer
.get_peer_options()
.is_some_and(|o| o.alpn.get_min_http_version() == 2)
&& !self.h2.h1_is_preferred(peer);
if !h2_only {
// We next check the reuse pool for h1 before creating a new h2 connection.
// This is because the server may not support h2 at all, connections to
// the server could all be h1.
if let Some(h1) = self.h1.reused_http_session(peer).await {
return Ok((HttpSession::H1(h1), true));
}
}
let session = self.h2.new_http_session(peer).await?;
Ok((session, false))
}
}
pub async fn release_http_session<P: Peer + Send + Sync + 'static>(
&self,
session: HttpSession<C::Session>,
peer: &P,
idle_timeout: Option<Duration>,
) {
match session {
HttpSession::H1(h1) => self.h1.release_http_session(h1, peer, idle_timeout).await,
HttpSession::H2(h2) => self.h2.release_http_session(h2, peer, idle_timeout),
HttpSession::Custom(c) => {
self.custom
.release_http_session(c, peer, idle_timeout)
.await;
}
}
}
/// Tell the connector to always send h1 for ALPN for the given peer in the future.
pub fn prefer_h1(&self, peer: &impl Peer) {
self.h2.prefer_h1(peer);
}
}
#[cfg(test)]
#[cfg(feature = "any_tls")]
mod tests {
use super::*;
use crate::connectors::TransportConnector;
use crate::listeners::tls::TlsSettings;
use crate::listeners::{Listeners, TransportStack, ALPN};
use crate::protocols::http::v1::client::HttpSession as Http1Session;
use crate::protocols::tls::CustomALPN;
use crate::upstreams::peer::HttpPeer;
use crate::upstreams::peer::PeerOptions;
use async_trait::async_trait;
use pingora_http::RequestHeader;
use std::sync::Arc;
use std::sync::Mutex;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpListener;
use tokio::task::JoinHandle;
use tokio::time::sleep;
async fn get_http(http: &mut Http1Session, expected_status: u16) {
let mut req = Box::new(RequestHeader::build("GET", b"/", None).unwrap());
req.append_header("Host", "one.one.one.one").unwrap();
http.write_request_header(req).await.unwrap();
http.read_response().await.unwrap();
http.respect_keepalive();
assert_eq!(http.get_status().unwrap(), expected_status);
while http.read_body_bytes().await.unwrap().is_some() {}
}
#[tokio::test]
async fn test_connect_h2() {
let connector = Connector::new(None);
let mut peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
peer.options.set_http_version(2, 2);
let (h2, reused) = connector.get_http_session(&peer).await.unwrap();
assert!(!reused);
match &h2 {
HttpSession::H1(_) => panic!("expect h2"),
HttpSession::H2(h2_stream) => assert!(!h2_stream.ping_timedout()),
HttpSession::Custom(_) => panic!("expect h2"),
}
connector.release_http_session(h2, &peer, None).await;
let (h2, reused) = connector.get_http_session(&peer).await.unwrap();
// reused this time
assert!(reused);
match &h2 {
HttpSession::H1(_) => panic!("expect h2"),
HttpSession::H2(h2_stream) => assert!(!h2_stream.ping_timedout()),
HttpSession::Custom(_) => panic!("expect h2"),
}
}
#[tokio::test]
async fn test_connect_h1() {
let connector = Connector::new(None);
let mut peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
peer.options.set_http_version(1, 1);
let (mut h1, reused) = connector.get_http_session(&peer).await.unwrap();
assert!(!reused);
match &mut h1 {
HttpSession::H1(http) => {
get_http(http, 200).await;
}
HttpSession::H2(_) => panic!("expect h1"),
HttpSession::Custom(_) => panic!("expect h1"),
}
connector.release_http_session(h1, &peer, None).await;
let (mut h1, reused) = connector.get_http_session(&peer).await.unwrap();
// reused this time
assert!(reused);
match &mut h1 {
HttpSession::H1(_) => {}
HttpSession::H2(_) => panic!("expect h1"),
HttpSession::Custom(_) => panic!("expect h1"),
}
}
#[tokio::test]
async fn test_connect_h2_fallback_h1_reuse() {
// this test verify that if the server doesn't support h2, the Connector will reuse the
// h1 session instead.
let connector = Connector::new(None);
let mut peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
// As it is hard to find a server that support only h1, we use the following hack to trick
// the connector to think the server supports only h1. We force ALPN to use h1 and then
// return the connection to the Connector. And then we use a Peer that allows h2
peer.options.set_http_version(1, 1);
let (mut h1, reused) = connector.get_http_session(&peer).await.unwrap();
assert!(!reused);
match &mut h1 {
HttpSession::H1(http) => {
get_http(http, 200).await;
}
HttpSession::H2(_) => panic!("expect h1"),
HttpSession::Custom(_) => panic!("expect h1"),
}
connector.release_http_session(h1, &peer, None).await;
let mut peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
peer.options.set_http_version(2, 1);
let (mut h1, reused) = connector.get_http_session(&peer).await.unwrap();
// reused this time
assert!(reused);
match &mut h1 {
HttpSession::H1(_) => {}
HttpSession::H2(_) => panic!("expect h1"),
HttpSession::Custom(_) => panic!("expect h1"),
}
}
#[tokio::test]
async fn test_connect_prefer_h1() {
let connector = Connector::new(None);
let mut peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
peer.options.set_http_version(2, 1);
connector.prefer_h1(&peer);
let (mut h1, reused) = connector.get_http_session(&peer).await.unwrap();
assert!(!reused);
match &mut h1 {
HttpSession::H1(http) => {
get_http(http, 200).await;
}
HttpSession::H2(_) => panic!("expect h1"),
HttpSession::Custom(_) => panic!("expect h1"),
}
connector.release_http_session(h1, &peer, None).await;
peer.options.set_http_version(2, 2);
let (mut h1, reused) = connector.get_http_session(&peer).await.unwrap();
// reused this time
assert!(reused);
match &mut h1 {
HttpSession::H1(_) => {}
HttpSession::H2(_) => panic!("expect h1"),
HttpSession::Custom(_) => panic!("expect h1"),
}
}
// Track the flow of calls when using a custom protocol. For this we need to create a Mock Connector
struct MockConnector {
transport: TransportConnector,
reusable: Arc<Mutex<bool>>, // Mock for tracking reusable sessions
}
#[async_trait]
impl custom::Connector for MockConnector {
type Session = ();
async fn get_http_session<P: Peer + Send + Sync + 'static>(
&self,
peer: &P,
) -> Result<(Connection<Self::Session>, bool)> {
let (stream, _) = self.transport.get_stream(peer).await?;
match stream.selected_alpn_proto() {
Some(ALPN::Custom(_)) => Ok((custom::Connection::Session(()), false)),
_ => Ok(((custom::Connection::Stream(stream)), false)),
}
}
async fn reused_http_session<P: Peer + Send + Sync + 'static>(
&self,
_peer: &P,
) -> Option<Self::Session> {
let mut flag = self.reusable.lock().unwrap();
if *flag {
*flag = false;
Some(())
} else {
None
}
}
async fn release_http_session<P: Peer + Send + Sync + 'static>(
&self,
_session: Self::Session,
_peer: &P,
_idle_timeout: Option<Duration>,
) {
let mut flag = self.reusable.lock().unwrap();
*flag = true;
}
}
// Finds an available TCP port on localhost for test server setup.
async fn get_available_port() -> u16 {
TcpListener::bind("127.0.0.1:0")
.await
.unwrap()
.local_addr()
.unwrap()
.port()
}
// Creates a test connector for integration/unit tests.
// For rustls, only ConnectorOptions are used here; the actual dangerous verifier is patched in the TLS connector.
fn create_test_connector() -> Connector<MockConnector> {
#[cfg(feature = "rustls")]
let custom_transport = {
let options = ConnectorOptions::new(1);
TransportConnector::new(Some(options))
};
#[cfg(not(feature = "rustls"))]
let custom_transport = TransportConnector::new(None);
Connector {
h1: v1::Connector::new(None),
h2: v2::Connector::new(None),
custom: MockConnector {
transport: custom_transport,
reusable: Arc::new(Mutex::new(false)),
},
}
}
// Creates a test peer that uses a custom ALPN protocol and disables cert/hostname verification for tests.
fn create_peer_with_custom_proto(port: u16, proto: &[u8]) -> HttpPeer {
let mut peer = HttpPeer::new(("127.0.0.1", port), true, "localhost".into());
let mut options = PeerOptions::new();
options.alpn = ALPN::Custom(CustomALPN::new(proto.to_vec()));
// Disable cert verification for this test (self-signed or invalid certs are OK)
options.verify_cert = false;
options.verify_hostname = false;
peer.options = options;
peer
}
async fn build_custom_tls_listener(port: u16, custom_alpn: CustomALPN) -> TransportStack {
let cert_path = format!("{}/tests/keys/server.crt", env!("CARGO_MANIFEST_DIR"));
let key_path = format!("{}/tests/keys/key.pem", env!("CARGO_MANIFEST_DIR"));
let addr = format!("127.0.0.1:{}", port);
let mut listeners = Listeners::new();
let mut tls_settings = TlsSettings::intermediate(&cert_path, &key_path).unwrap();
tls_settings.set_alpn(ALPN::Custom(custom_alpn));
listeners.add_tls_with_settings(&addr, None, tls_settings);
listeners
.build(
#[cfg(unix)]
None,
)
.await
.unwrap()
.pop()
.unwrap()
}
// Spawn a simple TLS Server
fn spawn_test_tls_server(listener: TransportStack) -> JoinHandle<()> {
tokio::spawn(async move {
loop {
let stream = match listener.accept().await {
Ok(stream) => stream,
Err(_) => break, // Exit if listener is closed
};
let mut stream = stream.handshake().await.unwrap();
let _ = stream.write_all(b"CUSTOM").await; // Ignore write errors
}
})
}
// Both server and client are using the same custom protocol
#[tokio::test]
async fn test_custom_client_custom_upstream() {
let port = get_available_port().await;
let custom_protocol = b"custom".to_vec();
let listener =
build_custom_tls_listener(port, CustomALPN::new(custom_protocol.clone())).await;
let server_handle = spawn_test_tls_server(listener);
// Wait for server to start up
sleep(Duration::from_millis(100)).await;
let connector = create_test_connector();
let peer = create_peer_with_custom_proto(port, &custom_protocol);
// Check that the agreed ALPN is custom and matches the expected value
if let Ok((stream, reused)) = connector.custom.transport.get_stream(&peer).await {
assert!(!reused);
match stream.selected_alpn_proto() {
Some(ALPN::Custom(protocol)) => {
assert_eq!(
protocol.protocol(),
custom_protocol.as_slice(),
"Negotiated custom ALPN does not match expected value"
);
}
other => panic!("Expected custom ALPN, got {:?}", other),
}
} else {
panic!("Should be able to create a stream");
}
let (custom, reused) = connector.get_http_session(&peer).await.unwrap();
assert!(!reused);
match custom {
HttpSession::H1(_) => panic!("expect custom"),
HttpSession::H2(_) => panic!("expect custom"),
HttpSession::Custom(_) => {}
}
connector.release_http_session(custom, &peer, None).await;
// Assert it returns a reused custom session this time
let (custom, reused) = connector.get_http_session(&peer).await.unwrap();
assert!(reused);
match custom {
HttpSession::H1(_) => panic!("expect custom"),
HttpSession::H2(_) => panic!("expect custom"),
HttpSession::Custom(_) => {}
}
// Kill the server task
server_handle.abort();
sleep(Duration::from_millis(100)).await;
}
// Both client and server are using custom protocols, but different ones - we should create H1 sessions as fallback.
// For RusTLS if there is no agreed protocol, the handshake directly fails, so this won't work
// TODO: If no ALPN is matched, rustls should return None instead of failing the handshake.
#[cfg(not(feature = "rustls"))]
#[tokio::test]
async fn test_incompatible_custom_client_custom_upstream() {
let port = get_available_port().await;
let custom_protocol = b"custom".to_vec();
let listener =
build_custom_tls_listener(port, CustomALPN::new(b"different_custom".to_vec())).await;
let server_handle = spawn_test_tls_server(listener);
// Wait for server to start up
sleep(Duration::from_millis(100)).await;
let connector = create_test_connector();
let peer = create_peer_with_custom_proto(port, &custom_protocol);
// Verify that there is no agreed ALPN
if let Ok((stream, reused)) = connector.custom.transport.get_stream(&peer).await {
assert!(!reused);
assert!(stream.selected_alpn_proto().is_none());
} else {
panic!("Should be able to create a stream");
}
let (h1, reused) = connector.get_http_session(&peer).await.unwrap();
assert!(!reused);
match h1 {
HttpSession::H1(_) => {}
HttpSession::H2(_) => panic!("expect h1"),
HttpSession::Custom(_) => panic!("expect h1"),
}
// Not testing session reuse logic here as we haven't implemented it. Next test will test this.
// Kill the server task
server_handle.abort();
sleep(Duration::from_millis(100)).await;
}
// Client thinks server is custom but server is not Custom. Should fallback to H1
#[tokio::test]
async fn test_custom_client_non_custom_upstream() {
let custom_proto = b"custom".to_vec();
let connector = create_test_connector();
// Upstream supports H1 and H2
let mut peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
// Client sets upstream ALPN as custom protocol
peer.options.alpn = ALPN::Custom(CustomALPN::new(custom_proto));
// Verify that there is no agreed ALPN
if let Ok((stream, reused)) = connector.custom.transport.get_stream(&peer).await {
assert!(!reused);
assert!(stream.selected_alpn_proto().is_none());
} else {
panic!("Should be able to create a stream");
}
let (mut h1, reused) = connector.get_http_session(&peer).await.unwrap();
// Assert it returns a new H1 session
assert!(!reused);
match &mut h1 {
HttpSession::H1(http) => {
get_http(http, 200).await;
}
HttpSession::H2(_) => panic!("expect h1"),
HttpSession::Custom(_) => panic!("expect h1"),
}
connector.release_http_session(h1, &peer, None).await;
// Assert it returns a reused h1 session this time
let (mut h1, reused) = connector.get_http_session(&peer).await.unwrap();
assert!(reused);
match &mut h1 {
HttpSession::H1(_) => {}
HttpSession::H2(_) => panic!("expect h1"),
HttpSession::Custom(_) => panic!("expect h1"),
}
}
}
// Used for disabling certificate/hostname verification in rustls for tests and custom ALPN/self-signed scenarios.
#[cfg(all(test, feature = "rustls"))]
pub mod rustls_no_verify {
use rustls::client::danger::{ServerCertVerified, ServerCertVerifier};
use rustls::pki_types::{CertificateDer, ServerName};
use rustls::Error as TLSError;
use std::sync::Arc;
#[derive(Debug)]
pub struct NoCertificateVerification;
impl ServerCertVerifier for NoCertificateVerification {
fn verify_server_cert(
&self,
_end_entity: &CertificateDer,
_intermediates: &[CertificateDer],
_server_name: &ServerName,
_scts: &[u8],
_now: rustls::pki_types::UnixTime,
) -> Result<ServerCertVerified, TLSError> {
Ok(ServerCertVerified::assertion())
}
fn verify_tls12_signature(
&self,
_message: &[u8],
_cert: &CertificateDer,
_dss: &rustls::DigitallySignedStruct,
) -> Result<rustls::client::danger::HandshakeSignatureValid, TLSError> {
Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
}
fn verify_tls13_signature(
&self,
_message: &[u8],
_cert: &CertificateDer,
_dss: &rustls::DigitallySignedStruct,
) -> Result<rustls::client::danger::HandshakeSignatureValid, TLSError> {
Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
}
fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
vec![rustls::SignatureScheme::ECDSA_NISTP256_SHA256]
}
}
pub fn apply_no_verify(config: &mut rustls::ClientConfig) {
config
.dangerous()
.set_certificate_verifier(Arc::new(NoCertificateVerification));
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/connectors/http/v1.rs | pingora-core/src/connectors/http/v1.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::connectors::{ConnectorOptions, TransportConnector};
use crate::protocols::http::v1::client::HttpSession;
use crate::upstreams::peer::Peer;
use pingora_error::Result;
use std::time::Duration;
pub struct Connector {
transport: TransportConnector,
}
impl Connector {
pub fn new(options: Option<ConnectorOptions>) -> Self {
Connector {
transport: TransportConnector::new(options),
}
}
pub async fn get_http_session<P: Peer + Send + Sync + 'static>(
&self,
peer: &P,
) -> Result<(HttpSession, bool)> {
let (stream, reused) = self.transport.get_stream(peer).await?;
let http = HttpSession::new(stream);
Ok((http, reused))
}
pub async fn reused_http_session<P: Peer + Send + Sync + 'static>(
&self,
peer: &P,
) -> Option<HttpSession> {
self.transport
.reused_stream(peer)
.await
.map(HttpSession::new)
}
pub async fn release_http_session<P: Peer + Send + Sync + 'static>(
&self,
mut session: HttpSession,
peer: &P,
idle_timeout: Option<Duration>,
) {
session.respect_keepalive();
if let Some(stream) = session.reuse().await {
self.transport
.release_stream(stream, peer.reuse_hash(), idle_timeout);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::protocols::l4::socket::SocketAddr;
use crate::upstreams::peer::HttpPeer;
use pingora_http::RequestHeader;
async fn get_http(http: &mut HttpSession, expected_status: u16) {
let mut req = Box::new(RequestHeader::build("GET", b"/", None).unwrap());
req.append_header("Host", "one.one.one.one").unwrap();
http.write_request_header(req).await.unwrap();
http.read_response().await.unwrap();
http.respect_keepalive();
assert_eq!(http.get_status().unwrap(), expected_status);
while http.read_body_bytes().await.unwrap().is_some() {}
}
#[tokio::test]
async fn test_connect() {
let connector = Connector::new(None);
let peer = HttpPeer::new(("1.1.1.1", 80), false, "".into());
// make a new connection to 1.1.1.1
let (http, reused) = connector.get_http_session(&peer).await.unwrap();
let server_addr = http.server_addr().unwrap();
assert_eq!(*server_addr, "1.1.1.1:80".parse::<SocketAddr>().unwrap());
assert!(!reused);
// this http is not even used, so not be able to reuse
connector.release_http_session(http, &peer, None).await;
let (mut http, reused) = connector.get_http_session(&peer).await.unwrap();
assert!(!reused);
get_http(&mut http, 301).await;
connector.release_http_session(http, &peer, None).await;
let (_, reused) = connector.get_http_session(&peer).await.unwrap();
assert!(reused);
}
#[tokio::test]
#[cfg(feature = "any_tls")]
async fn test_connect_tls() {
let connector = Connector::new(None);
let peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
// make a new connection to https://1.1.1.1
let (http, reused) = connector.get_http_session(&peer).await.unwrap();
let server_addr = http.server_addr().unwrap();
assert_eq!(*server_addr, "1.1.1.1:443".parse::<SocketAddr>().unwrap());
assert!(!reused);
// this http is not even used, so not be able to reuse
connector.release_http_session(http, &peer, None).await;
let (mut http, reused) = connector.get_http_session(&peer).await.unwrap();
assert!(!reused);
get_http(&mut http, 200).await;
connector.release_http_session(http, &peer, None).await;
let (_, reused) = connector.get_http_session(&peer).await.unwrap();
assert!(reused);
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/connectors/http/custom/mod.rs | pingora-core/src/connectors/http/custom/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use async_trait::async_trait;
use std::time::Duration;
use pingora_error::Result;
use crate::{
protocols::{http::custom::client::Session, Stream},
upstreams::peer::Peer,
};
// Either returns a Custom Session or the Stream for creating a new H1 session as a fallback.
pub enum Connection<S: Session> {
Session(S),
Stream(Stream),
}
#[doc(hidden)]
#[async_trait]
pub trait Connector: Send + Sync + Unpin + 'static {
type Session: Session;
async fn get_http_session<P: Peer + Send + Sync + 'static>(
&self,
peer: &P,
) -> Result<(Connection<Self::Session>, bool)>;
async fn reused_http_session<P: Peer + Send + Sync + 'static>(
&self,
peer: &P,
) -> Option<Self::Session>;
async fn release_http_session<P: Peer + Send + Sync + 'static>(
&self,
mut session: Self::Session,
peer: &P,
idle_timeout: Option<Duration>,
);
}
#[doc(hidden)]
#[async_trait]
impl Connector for () {
type Session = ();
async fn get_http_session<P: Peer + Send + Sync + 'static>(
&self,
_peer: &P,
) -> Result<(Connection<Self::Session>, bool)> {
unreachable!("connector: get_http_session")
}
async fn reused_http_session<P: Peer + Send + Sync + 'static>(
&self,
_peer: &P,
) -> Option<Self::Session> {
unreachable!("connector: reused_http_session")
}
async fn release_http_session<P: Peer + Send + Sync + 'static>(
&self,
_session: Self::Session,
_peer: &P,
_idle_timeout: Option<Duration>,
) {
unreachable!("connector: release_http_session")
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/raw_connect.rs | pingora-core/src/protocols/raw_connect.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! CONNECT protocol over http 1.1 via raw Unix domain socket
//!
//! This mod implements the most rudimentary CONNECT client over raw stream.
//! The idea is to yield raw stream once the CONNECT handshake is complete
//! so that the protocol encapsulated can use the stream directly.
//! This idea only works for CONNECT over HTTP 1.1 and localhost (or where the server is close by).
use std::any::Any;
use super::http::v1::client::HttpSession;
use super::http::v1::common::*;
use super::Stream;
use bytes::{BufMut, BytesMut};
use http::request::Parts as ReqHeader;
use http::Version;
use pingora_error::{Error, ErrorType::*, OrErr, Result};
use pingora_http::ResponseHeader;
use tokio::io::AsyncWriteExt;
/// Try to establish a CONNECT proxy via the given `stream`.
///
/// `request_header` should include the necessary request headers for the CONNECT protocol.
///
/// When successful, a [`Stream`] will be returned which is the established CONNECT proxy connection.
pub async fn connect<P>(
stream: Stream,
request_header: &ReqHeader,
peer: &P,
) -> Result<(Stream, ProxyDigest)>
where
P: crate::upstreams::peer::Peer,
{
let mut http = HttpSession::new(stream);
// We write to stream directly because HttpSession doesn't write req header in auth form
let to_wire = http_req_header_to_wire_auth_form(request_header);
http.underlying_stream
.write_all(to_wire.as_ref())
.await
.or_err(WriteError, "while writing request headers")?;
http.underlying_stream
.flush()
.await
.or_err(WriteError, "while flushing request headers")?;
// TODO: set http.read_timeout
let resp_header = http.read_resp_header_parts().await?;
Ok((
http.underlying_stream,
validate_connect_response(resp_header, peer, request_header)?,
))
}
/// Generate the CONNECT header for the given destination
pub fn generate_connect_header<'a, H, S>(
host: &str,
port: u16,
headers: H,
) -> Result<Box<ReqHeader>>
where
S: AsRef<[u8]>,
H: Iterator<Item = (S, &'a Vec<u8>)>,
{
// TODO: valid that host doesn't have port
let authority = if host.parse::<std::net::Ipv6Addr>().is_ok() {
format!("[{host}]:{port}")
} else {
format!("{host}:{port}")
};
let req = http::request::Builder::new()
.version(http::Version::HTTP_11)
.method(http::method::Method::CONNECT)
.uri(format!("https://{authority}/")) // scheme doesn't matter
.header(http::header::HOST, &authority);
let (mut req, _) = match req.body(()) {
Ok(r) => r.into_parts(),
Err(e) => {
return Err(e).or_err(InvalidHTTPHeader, "Invalid CONNECT request");
}
};
for (k, v) in headers {
let header_name = http::header::HeaderName::from_bytes(k.as_ref())
.or_err(InvalidHTTPHeader, "Invalid CONNECT request")?;
let header_value = http::header::HeaderValue::from_bytes(v.as_slice())
.or_err(InvalidHTTPHeader, "Invalid CONNECT request")?;
req.headers.insert(header_name, header_value);
}
Ok(Box::new(req))
}
/// The information about the CONNECT proxy.
#[derive(Debug)]
pub struct ProxyDigest {
/// The response header the proxy returns
pub response: Box<ResponseHeader>,
/// Optional arbitrary data.
pub user_data: Option<Box<dyn Any + Send + Sync>>,
}
impl ProxyDigest {
pub fn new(
response: Box<ResponseHeader>,
user_data: Option<Box<dyn Any + Send + Sync>>,
) -> Self {
ProxyDigest {
response,
user_data,
}
}
}
/// The error returned when the CONNECT proxy fails to establish.
#[derive(Debug)]
pub struct ConnectProxyError {
/// The response header the proxy returns
pub response: Box<ResponseHeader>,
}
impl ConnectProxyError {
pub fn boxed_new(response: Box<ResponseHeader>) -> Box<Self> {
Box::new(ConnectProxyError { response })
}
}
impl std::fmt::Display for ConnectProxyError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
const PROXY_STATUS: &str = "proxy-status";
let reason = self
.response
.headers
.get(PROXY_STATUS)
.and_then(|s| s.to_str().ok())
.unwrap_or("missing proxy-status header value");
write!(
f,
"Failed CONNECT Response: status {}, proxy-status {reason}",
&self.response.status
)
}
}
impl std::error::Error for ConnectProxyError {}
/// Serialize a CONNECT request header into its on-wire (authority-form)
/// HTTP/1.x byte representation.
#[inline]
fn http_req_header_to_wire_auth_form(req: &ReqHeader) -> BytesMut {
    let mut buf = BytesMut::with_capacity(512);

    // Request-Line: "<METHOD> <authority> <version>\r\n"
    buf.put_slice(req.method.as_str().as_bytes());
    buf.put_u8(b' ');
    // NOTE: CONNECT doesn't need URI path so we just skip that
    if let Some(authority) = req.uri.authority() {
        buf.put_slice(authority.as_str().as_bytes());
    }
    buf.put_u8(b' ');
    let version_str = match req.version {
        Version::HTTP_10 => "HTTP/1.0",
        Version::HTTP_11 => "HTTP/1.1",
        // HTTP/0.9 and any unrecognized version fall back to "HTTP/0.9"
        _ => "HTTP/0.9",
    };
    buf.put_slice(version_str.as_bytes());
    buf.put_slice(CRLF);

    // Headers: one "<name>: <value>\r\n" per entry, then the final CRLF that
    // terminates the header section.
    for (name, value) in req.headers.iter() {
        buf.put_slice(name.as_ref());
        buf.put_slice(HEADER_KV_DELIMITER);
        buf.put_slice(value.as_ref());
        buf.put_slice(CRLF);
    }
    buf.put_slice(CRLF);
    buf
}
/// Validate the proxy's response to a CONNECT request and turn it into a
/// [`ProxyDigest`] on success.
///
/// Fails with `ConnectProxyFailure` when the status is not 2xx or when a
/// `Transfer-Encoding` header is present.
#[inline]
fn validate_connect_response<P>(
    resp: Box<ResponseHeader>,
    peer: &P,
    req: &ReqHeader,
) -> Result<ProxyDigest>
where
    P: crate::upstreams::peer::Peer,
{
    // Anything other than a 2xx means the tunnel was refused.
    if !resp.status.is_success() {
        return Error::e_because(
            ConnectProxyFailure,
            "None 2xx code",
            ConnectProxyError::boxed_new(resp),
        );
    }

    // Checking Content-Length and Transfer-Encoding is optional because we already ignore them.
    // We choose to do so because we want to be strict for internal use of CONNECT.
    // Ignore Content-Length header because our internal CONNECT server is coded to send it.
    let has_transfer_encoding = resp.headers.get(http::header::TRANSFER_ENCODING).is_some();
    if has_transfer_encoding {
        return Error::e_because(
            ConnectProxyFailure,
            "Invalid Transfer-Encoding presents",
            ConnectProxyError::boxed_new(resp),
        );
    }

    // Let the peer derive arbitrary user data from the request/response pair.
    let user_data = peer
        .proxy_digest_user_data_hook()
        .and_then(|hook| hook(req, &resp));

    Ok(ProxyDigest::new(resp, user_data))
}
#[cfg(test)]
mod test_sync {
    use super::*;
    use std::collections::BTreeMap;
    use tokio_test::io::Builder;

    // Generated CONNECT requests for hostnames must carry the authority and a
    // matching Host header, plus any caller-supplied headers.
    #[test]
    fn test_generate_connect_header() {
        let mut headers = BTreeMap::new();
        headers.insert(String::from("foo"), b"bar".to_vec());
        let req = generate_connect_header("pingora.org", 123, headers.iter()).unwrap();
        assert_eq!(req.method, http::method::Method::CONNECT);
        assert_eq!(req.uri.authority().unwrap(), "pingora.org:123");
        assert_eq!(req.headers.get("Host").unwrap(), "pingora.org:123");
        assert_eq!(req.headers.get("foo").unwrap(), "bar");
    }

    // IPv6 literals must be bracketed in both the authority and the Host header.
    #[test]
    fn test_generate_connect_header_ipv6() {
        let mut headers = BTreeMap::new();
        headers.insert(String::from("foo"), b"bar".to_vec());
        let req = generate_connect_header("::1", 123, headers.iter()).unwrap();
        assert_eq!(req.method, http::method::Method::CONNECT);
        assert_eq!(req.uri.authority().unwrap(), "[::1]:123");
        assert_eq!(req.headers.get("Host").unwrap(), "[::1]:123");
        assert_eq!(req.headers.get("foo").unwrap(), "bar");
    }

    // Wire serialization uses authority-form (no path) and lowercased header
    // names, per the `http` crate's canonical header representation.
    #[test]
    fn test_request_to_wire_auth_form() {
        let new_request = http::Request::builder()
            .method("CONNECT")
            .uri("https://pingora.org:123/")
            .header("Foo", "Bar")
            .body(())
            .unwrap();
        let (new_request, _) = new_request.into_parts();
        let wire = http_req_header_to_wire_auth_form(&new_request);
        assert_eq!(
            &b"CONNECT pingora.org:123 HTTP/1.1\r\nfoo: Bar\r\n\r\n"[..],
            &wire
        );
    }

    // Covers: user-data hook invocation, no-hook peers, non-2xx rejection,
    // Content-Length tolerance, and Transfer-Encoding rejection.
    #[test]
    fn test_validate_connect_response() {
        use crate::upstreams::peer::BasicPeer;
        struct DummyUserData {
            some_num: i32,
            some_string: String,
        }
        let peer_no_data = BasicPeer::new("127.0.0.1:80");
        let mut peer_with_data = peer_no_data.clone();
        // Hook returns a fixed payload so we can verify it lands in the digest.
        peer_with_data.options.proxy_digest_user_data_hook = Some(std::sync::Arc::new(
            |_req: &http::request::Parts, _resp: &pingora_http::ResponseHeader| {
                Some(Box::new(DummyUserData {
                    some_num: 42,
                    some_string: "test".to_string(),
                }) as Box<dyn std::any::Any + Send + Sync>)
            },
        ));
        let request = http::Request::builder()
            .method("CONNECT")
            .uri("https://example.com:443/")
            .body(())
            .unwrap();
        let (req_header, _) = request.into_parts();
        // 200 + hook => digest carries the hook's user data.
        let resp = ResponseHeader::build(200, None).unwrap();
        let proxy_digest =
            validate_connect_response(Box::new(resp), &peer_with_data, &req_header).unwrap();
        assert!(proxy_digest.user_data.is_some());
        let user_data = proxy_digest
            .user_data
            .as_ref()
            .unwrap()
            .downcast_ref::<DummyUserData>()
            .unwrap();
        assert_eq!(user_data.some_num, 42);
        assert_eq!(user_data.some_string, "test");
        // 200 without a hook => no user data.
        let resp = ResponseHeader::build(200, None).unwrap();
        let proxy_digest =
            validate_connect_response(Box::new(resp), &peer_no_data, &req_header).unwrap();
        assert!(proxy_digest.user_data.is_none());
        // Non-2xx status is rejected.
        let resp = ResponseHeader::build(404, None).unwrap();
        assert!(validate_connect_response(Box::new(resp), &peer_with_data, &req_header).is_err());
        // Content-Length is tolerated (ignored by design).
        let mut resp = ResponseHeader::build(200, None).unwrap();
        resp.append_header("content-length", 0).unwrap();
        assert!(validate_connect_response(Box::new(resp), &peer_no_data, &req_header).is_ok());
        // Transfer-Encoding is invalid on a CONNECT response and is rejected.
        let mut resp = ResponseHeader::build(200, None).unwrap();
        resp.append_header("transfer-encoding", 0).unwrap();
        assert!(validate_connect_response(Box::new(resp), &peer_no_data, &req_header).is_err());
    }

    // End-to-end over a mock IO: write failure surfaces as an error; a 200
    // response completes the CONNECT handshake.
    #[tokio::test]
    async fn test_connect_write_request() {
        use crate::upstreams::peer::BasicPeer;
        let wire = b"CONNECT pingora.org:123 HTTP/1.1\r\nhost: pingora.org:123\r\n\r\n";
        let mock_io = Box::new(Builder::new().write(wire).build());
        let headers: BTreeMap<String, Vec<u8>> = BTreeMap::new();
        let req = generate_connect_header("pingora.org", 123, headers.iter()).unwrap();
        let peer = BasicPeer::new("127.0.0.1:123");
        // ConnectionClosed
        assert!(connect(mock_io, &req, &peer).await.is_err());
        let to_wire = b"CONNECT pingora.org:123 HTTP/1.1\r\nhost: pingora.org:123\r\n\r\n";
        let from_wire = b"HTTP/1.1 200 OK\r\n\r\n";
        let mock_io = Box::new(Builder::new().write(to_wire).read(from_wire).build());
        let req = generate_connect_header("pingora.org", 123, headers.iter()).unwrap();
        let result = connect(mock_io, &req, &peer).await;
        assert!(result.is_ok());
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/digest.rs | pingora-core/src/protocols/digest.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Extra information about the connection
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use once_cell::sync::OnceCell;
use super::l4::ext::{get_original_dest, get_recv_buf, get_snd_buf, get_tcp_info, TCP_INFO};
use super::l4::socket::SocketAddr;
use super::raw_connect::ProxyDigest;
use super::tls::digest::SslDigest;
/// The information can be extracted from a connection
#[derive(Clone, Debug, Default)]
pub struct Digest {
    /// Information regarding the TLS of this connection if any
    pub ssl_digest: Option<Arc<SslDigest>>,
    /// Timing information
    ///
    /// One entry per protocol layer, ordered from the lowest layer up;
    /// `None` when a layer has nothing to report.
    pub timing_digest: Vec<Option<TimingDigest>>,
    /// Information regarding the CONNECT proxy this connection uses.
    pub proxy_digest: Option<Arc<ProxyDigest>>,
    /// Information about underlying socket/fd of this connection
    pub socket_digest: Option<Arc<SocketDigest>>,
}
/// The interface to return protocol related information
pub trait ProtoDigest {
    /// Return the [`Digest`] of this connection, if one is available.
    ///
    /// The default implementation returns `None`.
    fn get_digest(&self) -> Option<&Digest> {
        None
    }
}
/// The timing information of the connection
#[derive(Clone, Debug)]
pub struct TimingDigest {
    /// When this connection was established
    ///
    /// Note: the `Default` value is `SystemTime::UNIX_EPOCH`, i.e. a
    /// placeholder, not "now".
    pub established_ts: SystemTime,
}
impl Default for TimingDigest {
fn default() -> Self {
TimingDigest {
established_ts: SystemTime::UNIX_EPOCH,
}
}
}
#[derive(Debug)]
/// The interface to return socket-related information
///
/// Address fields are lazily resolved from the raw fd/socket on first access
/// and then cached in `OnceCell`s.
pub struct SocketDigest {
    // Raw handle the lazy getters below query via OS socket APIs.
    #[cfg(unix)]
    raw_fd: std::os::unix::io::RawFd,
    #[cfg(windows)]
    raw_sock: std::os::windows::io::RawSocket,
    /// Remote socket address
    pub peer_addr: OnceCell<Option<SocketAddr>>,
    /// Local socket address
    pub local_addr: OnceCell<Option<SocketAddr>>,
    /// Original destination address
    pub original_dst: OnceCell<Option<SocketAddr>>,
}
impl SocketDigest {
    /// Build a digest from a raw unix file descriptor.
    ///
    /// All address fields start empty and are resolved lazily.
    #[cfg(unix)]
    pub fn from_raw_fd(raw_fd: std::os::unix::io::RawFd) -> SocketDigest {
        SocketDigest {
            raw_fd,
            peer_addr: OnceCell::new(),
            local_addr: OnceCell::new(),
            original_dst: OnceCell::new(),
        }
    }

    /// Build a digest from a raw Windows socket handle.
    ///
    /// All address fields start empty and are resolved lazily.
    #[cfg(windows)]
    pub fn from_raw_socket(raw_sock: std::os::windows::io::RawSocket) -> SocketDigest {
        SocketDigest {
            raw_sock,
            peer_addr: OnceCell::new(),
            local_addr: OnceCell::new(),
            original_dst: OnceCell::new(),
        }
    }

    /// Remote address of the socket, resolved once and cached.
    #[cfg(unix)]
    pub fn peer_addr(&self) -> Option<&SocketAddr> {
        self.peer_addr
            .get_or_init(|| SocketAddr::from_raw_fd(self.raw_fd, true))
            .as_ref()
    }

    /// Remote address of the socket, resolved once and cached.
    #[cfg(windows)]
    pub fn peer_addr(&self) -> Option<&SocketAddr> {
        self.peer_addr
            .get_or_init(|| SocketAddr::from_raw_socket(self.raw_sock, true))
            .as_ref()
    }

    /// Local address of the socket, resolved once and cached.
    #[cfg(unix)]
    pub fn local_addr(&self) -> Option<&SocketAddr> {
        self.local_addr
            .get_or_init(|| SocketAddr::from_raw_fd(self.raw_fd, false))
            .as_ref()
    }

    /// Local address of the socket, resolved once and cached.
    #[cfg(windows)]
    pub fn local_addr(&self) -> Option<&SocketAddr> {
        self.local_addr
            .get_or_init(|| SocketAddr::from_raw_socket(self.raw_sock, false))
            .as_ref()
    }

    // Whether this socket's local address is an IP (inet) address; used to
    // gate the TCP-only queries below.
    fn is_inet(&self) -> bool {
        self.local_addr().and_then(|p| p.as_inet()).is_some()
    }

    /// TCP_INFO of the socket; `None` for non-inet sockets or when the query fails.
    #[cfg(unix)]
    pub fn tcp_info(&self) -> Option<TCP_INFO> {
        if self.is_inet() {
            get_tcp_info(self.raw_fd).ok()
        } else {
            None
        }
    }

    /// TCP_INFO of the socket; `None` for non-inet sockets or when the query fails.
    #[cfg(windows)]
    pub fn tcp_info(&self) -> Option<TCP_INFO> {
        if self.is_inet() {
            get_tcp_info(self.raw_sock).ok()
        } else {
            None
        }
    }

    /// Receive buffer size; `None` for non-inet sockets or when the query fails.
    #[cfg(unix)]
    pub fn get_recv_buf(&self) -> Option<usize> {
        if self.is_inet() {
            get_recv_buf(self.raw_fd).ok()
        } else {
            None
        }
    }

    /// Receive buffer size; `None` for non-inet sockets or when the query fails.
    #[cfg(windows)]
    pub fn get_recv_buf(&self) -> Option<usize> {
        if self.is_inet() {
            get_recv_buf(self.raw_sock).ok()
        } else {
            None
        }
    }

    /// Send buffer size; `None` for non-inet sockets or when the query fails.
    #[cfg(unix)]
    pub fn get_snd_buf(&self) -> Option<usize> {
        if self.is_inet() {
            get_snd_buf(self.raw_fd).ok()
        } else {
            None
        }
    }

    /// Send buffer size; `None` for non-inet sockets or when the query fails.
    #[cfg(windows)]
    pub fn get_snd_buf(&self) -> Option<usize> {
        if self.is_inet() {
            get_snd_buf(self.raw_sock).ok()
        } else {
            None
        }
    }

    /// Original destination address (pre-redirect), resolved once and cached.
    /// `None` when unavailable or on query failure.
    #[cfg(unix)]
    pub fn original_dst(&self) -> Option<&SocketAddr> {
        self.original_dst
            .get_or_init(|| {
                get_original_dest(self.raw_fd)
                    .ok()
                    .flatten()
                    .map(SocketAddr::Inet)
            })
            .as_ref()
    }

    /// Original destination address (pre-redirect), resolved once and cached.
    /// `None` when unavailable or on query failure.
    #[cfg(windows)]
    pub fn original_dst(&self) -> Option<&SocketAddr> {
        self.original_dst
            .get_or_init(|| {
                get_original_dest(self.raw_sock)
                    .ok()
                    .flatten()
                    .map(SocketAddr::Inet)
            })
            .as_ref()
    }
}
/// The interface to return timing information
pub trait GetTimingDigest {
    /// Return the timing for each layer from the lowest layer to upper
    fn get_timing_digest(&self) -> Vec<Option<TimingDigest>>;
    /// Time attributed to pending reads; the default implementation reports zero.
    fn get_read_pending_time(&self) -> Duration {
        Duration::ZERO
    }
    /// Time attributed to pending writes; the default implementation reports zero.
    fn get_write_pending_time(&self) -> Duration {
        Duration::ZERO
    }
}
/// The interface to set or return proxy information
pub trait GetProxyDigest {
    /// Return the CONNECT proxy digest of this connection, if any.
    fn get_proxy_digest(&self) -> Option<Arc<ProxyDigest>>;
    /// Attach a proxy digest. The default implementation discards it.
    fn set_proxy_digest(&mut self, _digest: ProxyDigest) {}
}
/// The interface to set or return socket information
pub trait GetSocketDigest {
    /// Return the socket digest of this connection, if any.
    fn get_socket_digest(&self) -> Option<Arc<SocketDigest>>;
    /// Attach a socket digest. The default implementation discards it.
    fn set_socket_digest(&mut self, _socket_digest: SocketDigest) {}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/windows.rs | pingora-core/src/protocols/windows.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Windows specific functionality for calling the WinSock c api
//!
//! Implementations here are based on the implementation in the std library
//! https://github.com/rust-lang/rust/blob/84ac80f/library/std/src/sys_common/net.rs
//! https://github.com/rust-lang/rust/blob/84ac80f/library/std/src/sys/pal/windows/net.rs
use std::os::windows::io::RawSocket;
use std::{io, mem, net::SocketAddr};
use windows_sys::Win32::Networking::WinSock::{
getpeername, getsockname, AF_INET, AF_INET6, SOCKADDR_IN, SOCKADDR_IN6, SOCKADDR_STORAGE,
SOCKET,
};
/// Query the remote (peer) address of a raw Windows socket via WinSock's
/// `getpeername`.
pub(crate) fn peer_addr(raw_sock: RawSocket) -> io::Result<SocketAddr> {
    let mut storage: SOCKADDR_STORAGE = unsafe { mem::zeroed() };
    let mut addrlen = mem::size_of_val(&storage) as i32;
    // SAFETY: `storage` is large enough for any socket address and `addrlen`
    // tells WinSock its exact size; `getpeername` writes at most that many bytes.
    let res = unsafe {
        getpeername(
            raw_sock as SOCKET,
            core::ptr::addr_of_mut!(storage) as *mut _,
            &mut addrlen,
        )
    };
    if res != 0 {
        return Err(io::Error::last_os_error());
    }
    sockaddr_to_addr(&storage, addrlen as usize)
}
pub(crate) fn local_addr(raw_sock: RawSocket) -> io::Result<SocketAddr> {
let mut storage = unsafe { mem::zeroed::<SOCKADDR_STORAGE>() };
let mut addrlen = mem::size_of_val(&storage) as i32;
unsafe {
let res = getsockname(
raw_sock as libc::SOCKET,
core::ptr::addr_of_mut!(storage) as *mut _,
&mut addrlen,
);
if res != 0 {
return Err(io::Error::last_os_error());
}
}
sockaddr_to_addr(&storage, addrlen as usize)
}
/// Convert a WinSock `SOCKADDR_STORAGE` into a std `SocketAddr`.
///
/// `len` is the address length reported by the WinSock call that filled
/// `storage`; it is asserted to cover the claimed address family. Families
/// other than `AF_INET`/`AF_INET6` yield an `InvalidInput` error.
fn sockaddr_to_addr(storage: &SOCKADDR_STORAGE, len: usize) -> io::Result<SocketAddr> {
    match storage.ss_family {
        AF_INET => {
            assert!(len >= mem::size_of::<SOCKADDR_IN>());
            // SAFETY: family is AF_INET and the assert guarantees at least
            // SOCKADDR_IN bytes were written, so the reinterpret is valid.
            Ok(SocketAddr::from(unsafe {
                let sockaddr = *(storage as *const _ as *const SOCKADDR_IN);
                (
                    sockaddr.sin_addr.S_un.S_addr.to_ne_bytes(),
                    // NOTE(review): `to_be()` byte-swaps the network-order
                    // port; `u16::from_be` states the intent more directly
                    // and is numerically identical.
                    sockaddr.sin_port.to_be(),
                )
            }))
        }
        AF_INET6 => {
            assert!(len >= mem::size_of::<SOCKADDR_IN6>());
            // SAFETY: family is AF_INET6 and the assert guarantees at least
            // SOCKADDR_IN6 bytes were written, so the reinterpret is valid.
            Ok(SocketAddr::from(unsafe {
                let sockaddr = *(storage as *const _ as *const SOCKADDR_IN6);
                (sockaddr.sin6_addr.u.Byte, sockaddr.sin6_port.to_be())
            }))
        }
        _ => Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            "invalid argument",
        )),
    }
}
#[cfg(test)]
mod tests {
    use std::os::windows::io::AsRawSocket;

    use crate::protocols::l4::{listener::Listener, stream::Stream};

    use super::*;

    // Bind a listener, connect a stream to it, then check that our raw-socket
    // address queries agree with tokio's own `local_addr`/`peer_addr`.
    async fn assert_listener_and_stream(addr: &str) {
        let tokio_listener = tokio::net::TcpListener::bind(addr).await.unwrap();
        let listener_local_addr = tokio_listener.local_addr().unwrap();
        let tokio_stream = tokio::net::TcpStream::connect(listener_local_addr)
            .await
            .unwrap();
        let stream_local_addr = tokio_stream.local_addr().unwrap();
        let stream_peer_addr = tokio_stream.peer_addr().unwrap();
        // Wrap in our own types to exercise the raw-socket accessors.
        let stream: Stream = tokio_stream.into();
        let listener: Listener = tokio_listener.into();
        let raw_sock = listener.as_raw_socket();
        assert_eq!(listener_local_addr, local_addr(raw_sock).unwrap());
        let raw_sock = stream.as_raw_socket();
        assert_eq!(stream_peer_addr, peer_addr(raw_sock).unwrap());
        assert_eq!(stream_local_addr, local_addr(raw_sock).unwrap());
    }

    #[tokio::test]
    async fn get_v4_addrs_from_raw_socket() {
        assert_listener_and_stream("127.0.0.1:0").await
    }

    #[tokio::test]
    async fn get_v6_addrs_from_raw_socket() {
        assert_listener_and_stream("[::1]:0").await
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/mod.rs | pingora-core/src/protocols/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Abstractions and implementations for protocols including TCP, TLS and HTTP
mod digest;
pub mod http;
pub mod l4;
pub mod raw_connect;
pub mod tls;
#[cfg(windows)]
mod windows;
pub use digest::{
Digest, GetProxyDigest, GetSocketDigest, GetTimingDigest, ProtoDigest, SocketDigest,
TimingDigest,
};
pub use l4::ext::TcpKeepalive;
pub use tls::ALPN;
use async_trait::async_trait;
use std::fmt::Debug;
use std::net::{IpAddr, Ipv4Addr};
use std::sync::Arc;
#[cfg(unix)]
pub type UniqueIDType = i32;
#[cfg(windows)]
pub type UniqueIDType = usize;
/// Define how a protocol should shutdown its connection.
#[async_trait]
pub trait Shutdown {
    /// Gracefully shut down this connection.
    ///
    /// Returns nothing: failures during shutdown are not reported to the caller.
    async fn shutdown(&mut self) -> ();
}
/// Define how a given session/connection identifies itself.
pub trait UniqueID {
    /// The ID returned should be unique among all existing connections of the same type.
    /// But ID can be recycled after a connection is shutdown.
    fn id(&self) -> UniqueIDType;
}
/// Interface to get TLS info
///
/// All methods default to `None`, which is the correct answer for
/// connections that are not over TLS.
pub trait Ssl {
    /// Return the TLS info if the connection is over TLS
    fn get_ssl(&self) -> Option<&TlsRef> {
        None
    }

    /// Return the [`tls::SslDigest`] for logging
    fn get_ssl_digest(&self) -> Option<Arc<tls::SslDigest>> {
        None
    }

    /// Return selected ALPN if any
    fn selected_alpn_proto(&self) -> Option<ALPN> {
        None
    }
}
/// The ability peek data before consuming it
#[async_trait]
pub trait Peek {
    /// Peek data but not consuming it. This call should block until some data
    /// is sent.
    /// Return `false` if peeking is not supported/allowed.
    ///
    /// The default implementation does not read anything and returns
    /// `Ok(false)` (peeking unsupported).
    async fn try_peek(&mut self, _buf: &mut [u8]) -> std::io::Result<bool> {
        Ok(false)
    }
}
use std::any::Any;
use tokio::io::{AsyncRead, AsyncWrite};
/// The abstraction of transport layer IO
///
/// A single trait bundling async read/write with the shutdown, identity,
/// TLS, digest, and peek interfaces, so upper layers can hold any concrete
/// transport behind one object (see the `Stream` alias).
pub trait IO:
    AsyncRead
    + AsyncWrite
    + Shutdown
    + UniqueID
    + Ssl
    + GetTimingDigest
    + GetProxyDigest
    + GetSocketDigest
    + Peek
    + Unpin
    + Debug
    + Send
    + Sync
{
    /// helper to cast as the reference of the concrete type
    fn as_any(&self) -> &dyn Any;
    /// helper to cast back of the concrete type
    fn into_any(self: Box<Self>) -> Box<dyn Any>;
}
// Blanket implementation: any `'static` type that satisfies every component
// trait automatically implements `IO`; the two `Any`-casting helpers are the
// only methods that need a body.
impl<
        T: AsyncRead
            + AsyncWrite
            + Shutdown
            + UniqueID
            + Ssl
            + GetTimingDigest
            + GetProxyDigest
            + GetSocketDigest
            + Peek
            + Unpin
            + Debug
            + Send
            + Sync,
    > IO for T
where
    T: 'static,
{
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn into_any(self: Box<Self>) -> Box<dyn Any> {
        self
    }
}
/// The type of any established transport layer connection
pub type Stream = Box<dyn IO>;
// Implement IO trait for 3rd party types, mostly for testing
mod ext_io_impl {
    use super::*;

    // tokio-test's mock IO: no-op shutdown, fixed ID 0, and no TLS/proxy/
    // socket/timing information.
    use tokio_test::io::Mock;

    #[async_trait]
    impl Shutdown for Mock {
        async fn shutdown(&mut self) -> () {}
    }
    impl UniqueID for Mock {
        fn id(&self) -> UniqueIDType {
            0
        }
    }
    impl Ssl for Mock {}
    impl GetTimingDigest for Mock {
        fn get_timing_digest(&self) -> Vec<Option<TimingDigest>> {
            vec![]
        }
    }
    impl GetProxyDigest for Mock {
        fn get_proxy_digest(&self) -> Option<Arc<raw_connect::ProxyDigest>> {
            None
        }
    }
    impl GetSocketDigest for Mock {
        fn get_socket_digest(&self) -> Option<Arc<SocketDigest>> {
            None
        }
    }
    impl Peek for Mock {}

    // In-memory `Cursor`: same no-op/empty answers for any inner type.
    use std::io::Cursor;

    #[async_trait]
    impl<T: Send> Shutdown for Cursor<T> {
        async fn shutdown(&mut self) -> () {}
    }
    impl<T> UniqueID for Cursor<T> {
        fn id(&self) -> UniqueIDType {
            0
        }
    }
    impl<T> Ssl for Cursor<T> {}
    impl<T> GetTimingDigest for Cursor<T> {
        fn get_timing_digest(&self) -> Vec<Option<TimingDigest>> {
            vec![]
        }
    }
    impl<T> GetProxyDigest for Cursor<T> {
        fn get_proxy_digest(&self) -> Option<Arc<raw_connect::ProxyDigest>> {
            None
        }
    }
    impl<T> GetSocketDigest for Cursor<T> {
        fn get_socket_digest(&self) -> Option<Arc<SocketDigest>> {
            None
        }
    }
    impl<T> Peek for Cursor<T> {}

    // tokio's in-process `DuplexStream`: same no-op/empty answers.
    use tokio::io::DuplexStream;

    #[async_trait]
    impl Shutdown for DuplexStream {
        async fn shutdown(&mut self) -> () {}
    }
    impl UniqueID for DuplexStream {
        fn id(&self) -> UniqueIDType {
            0
        }
    }
    impl Ssl for DuplexStream {}
    impl GetTimingDigest for DuplexStream {
        fn get_timing_digest(&self) -> Vec<Option<TimingDigest>> {
            vec![]
        }
    }
    impl GetProxyDigest for DuplexStream {
        fn get_proxy_digest(&self) -> Option<Arc<raw_connect::ProxyDigest>> {
            None
        }
    }
    impl GetSocketDigest for DuplexStream {
        fn get_socket_digest(&self) -> Option<Arc<SocketDigest>> {
            None
        }
    }
    impl Peek for DuplexStream {}
}
// Implement the IO component traits for `tokio::net::UnixStream` (unix only):
// no-op shutdown, fixed ID 0, and no TLS/timing/proxy/socket information.
#[cfg(unix)]
pub mod ext_test {
    use std::sync::Arc;

    use async_trait::async_trait;

    use super::{
        raw_connect, GetProxyDigest, GetSocketDigest, GetTimingDigest, Peek, Shutdown,
        SocketDigest, Ssl, TimingDigest, UniqueID, UniqueIDType,
    };

    #[async_trait]
    impl Shutdown for tokio::net::UnixStream {
        async fn shutdown(&mut self) -> () {}
    }
    impl UniqueID for tokio::net::UnixStream {
        fn id(&self) -> UniqueIDType {
            0
        }
    }
    impl Ssl for tokio::net::UnixStream {}
    impl GetTimingDigest for tokio::net::UnixStream {
        fn get_timing_digest(&self) -> Vec<Option<TimingDigest>> {
            vec![]
        }
    }
    impl GetProxyDigest for tokio::net::UnixStream {
        fn get_proxy_digest(&self) -> Option<Arc<raw_connect::ProxyDigest>> {
            None
        }
    }
    impl GetSocketDigest for tokio::net::UnixStream {
        fn get_socket_digest(&self) -> Option<Arc<SocketDigest>> {
            None
        }
    }
    impl Peek for tokio::net::UnixStream {}
}
// Whether an idle connection's raw fd still points at `self` as its peer,
// i.e. whether the connection can be safely reused for this address.
#[cfg(unix)]
pub(crate) trait ConnFdReusable {
    fn check_fd_match<V: AsRawFd>(&self, fd: V) -> bool;
}

// Windows counterpart of `ConnFdReusable`, keyed on raw sockets.
#[cfg(windows)]
pub(crate) trait ConnSockReusable {
    fn check_sock_match<V: AsRawSocket>(&self, sock: V) -> bool;
}
use l4::socket::SocketAddr;
use log::{debug, error};
#[cfg(unix)]
use nix::sys::socket::{getpeername, SockaddrStorage, UnixAddr};
#[cfg(unix)]
use std::os::unix::prelude::AsRawFd;
#[cfg(windows)]
use std::os::windows::io::AsRawSocket;
use std::{net::SocketAddr as InetSocketAddr, path::Path};
use crate::protocols::tls::TlsRef;
#[cfg(unix)]
impl ConnFdReusable for SocketAddr {
    fn check_fd_match<V: AsRawFd>(&self, fd: V) -> bool {
        // Dispatch on the address family; unix sockets are compared by
        // pathname, so non-pathname peers are unsupported.
        match self {
            SocketAddr::Inet(inet) => inet.check_fd_match(fd),
            SocketAddr::Unix(unix) => {
                let path = unix
                    .as_pathname()
                    .expect("non-pathname unix sockets not supported as peer");
                path.check_fd_match(fd)
            }
        }
    }
}
#[cfg(windows)]
impl ConnSockReusable for SocketAddr {
    fn check_sock_match<V: AsRawSocket>(&self, sock: V) -> bool {
        // Single-arm match: on Windows the `SocketAddr` enum has only the
        // `Inet` variant (no unix sockets), so this match is exhaustive.
        match self {
            SocketAddr::Inet(addr) => addr.check_sock_match(sock),
        }
    }
}
#[cfg(unix)]
impl ConnFdReusable for Path {
    fn check_fd_match<V: AsRawFd>(&self, fd: V) -> bool {
        let fd = fd.as_raw_fd();
        // A broken fd means the idle connection is dead: not reusable.
        let peer = match getpeername::<UnixAddr>(fd) {
            Ok(peer) => peer,
            Err(e) => {
                error!("Idle unix connection is broken: {e:?}");
                return false;
            }
        };
        // Convert our own path into a unix address for comparison.
        let addr = match UnixAddr::new(self) {
            Ok(addr) => addr,
            Err(e) => {
                error!("Bad addr: {self:?}, error: {e:?}");
                return false;
            }
        };
        if addr == peer {
            debug!("Unix FD to: {peer} is reusable");
            true
        } else {
            error!("Crit: unix FD mismatch: fd: {fd:?}, peer: {peer}, addr: {addr}",);
            false
        }
    }
}
#[cfg(unix)]
impl ConnFdReusable for InetSocketAddr {
    fn check_fd_match<V: AsRawFd>(&self, fd: V) -> bool {
        let fd = fd.as_raw_fd();
        // A broken fd means the idle connection is dead: not reusable.
        let peer = match getpeername::<SockaddrStorage>(fd) {
            Ok(peer) => peer,
            Err(e) => {
                debug!("Idle connection is broken: {e:?}");
                return false;
            }
        };
        const ZERO: IpAddr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
        if self.ip() == ZERO {
            // https://www.rfc-editor.org/rfc/rfc1122.html#section-3.2.1.3
            // 0.0.0.0 should only be used as source IP not destination
            // However in some systems this destination IP is mapped to 127.0.0.1.
            // We just skip this check here to avoid false positive mismatch.
            return true;
        }
        let addr = SockaddrStorage::from(*self);
        if addr == peer {
            debug!("Inet FD to: {addr} is reusable");
            true
        } else {
            error!("Crit: FD mismatch: fd: {fd:?}, addr: {addr}, peer: {peer}",);
            false
        }
    }
}
#[cfg(windows)]
impl ConnSockReusable for InetSocketAddr {
    fn check_sock_match<V: AsRawSocket>(&self, sock: V) -> bool {
        let sock = sock.as_raw_socket();
        // `windows::peer_addr` returns a std `SocketAddr`, so `self` can be
        // compared against it directly (unlike the unix path, which converts
        // through `SockaddrStorage`).
        match windows::peer_addr(sock) {
            Ok(peer) => {
                const ZERO: IpAddr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
                if self.ip() == ZERO {
                    // https://www.rfc-editor.org/rfc/rfc1122.html#section-3.2.1.3
                    // 0.0.0.0 should only be used as source IP not destination
                    // However in some systems this destination IP is mapped to 127.0.0.1.
                    // We just skip this check here to avoid false positive mismatch.
                    return true;
                }
                if self == &peer {
                    debug!("Inet FD to: {self} is reusable");
                    true
                } else {
                    error!("Crit: FD mismatch: fd: {sock:?}, addr: {self}, peer: {peer}",);
                    false
                }
            }
            Err(e) => {
                // A broken socket means the idle connection is dead: not reusable.
                debug!("Idle connection is broken: {e:?}");
                false
            }
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.