repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/tls/digest.rs | pingora-core/src/protocols/tls/digest.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! TLS information from the TLS connection
use std::any::Any;
use std::borrow::Cow;
use std::sync::Arc;
/// The TLS connection information
#[derive(Clone, Debug)]
pub struct SslDigest {
    /// The cipher used
    pub cipher: Cow<'static, str>,
    /// The TLS version of this connection
    pub version: Cow<'static, str>,
    /// The organization of the peer's certificate
    pub organization: Option<String>,
    /// The serial number of the peer's certificate
    pub serial_number: Option<String>,
    /// The digest of the peer's certificate
    pub cert_digest: Vec<u8>,
    /// The user-defined TLS data
    pub extension: SslDigestExtension,
}

impl SslDigest {
    /// Build an `SslDigest` from the connection's cipher/version strings and
    /// the peer certificate details. The extension slot starts out empty.
    pub fn new<S>(
        cipher: S,
        version: S,
        organization: Option<String>,
        serial_number: Option<String>,
        cert_digest: Vec<u8>,
    ) -> Self
    where
        S: Into<Cow<'static, str>>,
    {
        Self {
            cipher: cipher.into(),
            version: version.into(),
            organization,
            serial_number,
            cert_digest,
            extension: Default::default(),
        }
    }
}

/// The user-defined TLS data
#[derive(Clone, Debug, Default)]
pub struct SslDigestExtension {
    // Type-erased, shareable payload set by user callbacks
    value: Option<Arc<dyn Any + Send + Sync>>,
}

impl SslDigestExtension {
    /// Retrieves a reference to the user-defined TLS data if it matches the specified type.
    ///
    /// Returns `None` if no data has been set or if the data is not of type `T`.
    pub fn get<T>(&self) -> Option<&T>
    where
        T: Send + Sync + 'static,
    {
        match self.value.as_ref() {
            Some(v) => v.downcast_ref::<T>(),
            None => None,
        }
    }
    /// Store the user-defined payload, replacing any previous value.
    #[allow(dead_code)]
    pub(crate) fn set(&mut self, value: Arc<dyn Any + Send + Sync>) {
        self.value = Some(value);
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/tls/mod.rs | pingora-core/src/protocols/tls/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The TLS layer implementations
pub mod digest;
pub use digest::*;
#[cfg(feature = "openssl_derived")]
mod boringssl_openssl;
#[cfg(feature = "openssl_derived")]
pub use boringssl_openssl::*;
#[cfg(feature = "rustls")]
mod rustls;
#[cfg(feature = "rustls")]
pub use rustls::*;
#[cfg(feature = "s2n")]
mod s2n;
#[cfg(feature = "s2n")]
pub use s2n::*;
#[cfg(not(feature = "any_tls"))]
pub mod noop_tls;
#[cfg(not(feature = "any_tls"))]
pub use noop_tls::*;
/// Containing type for a user callback to generate extensions for the `SslDigest` upon handshake
/// completion.
///
/// The callback is handed a reference to the TLS session and may return
/// arbitrary shared data, which is then stored as the digest's extension.
pub type HandshakeCompleteHook = std::sync::Arc<
    dyn Fn(&TlsRef) -> Option<std::sync::Arc<dyn std::any::Any + Send + Sync>> + Send + Sync,
>;
/// The protocol for Application-Layer Protocol Negotiation
#[derive(Hash, Clone, Debug, PartialEq, PartialOrd)]
pub enum ALPN {
    /// Prefer HTTP/1.1 only
    H1,
    /// Prefer HTTP/2 only
    H2,
    /// Prefer HTTP/2 over HTTP/1.1
    H2H1,
    /// Custom protocol, stored in wire format (length-prefixed).
    /// The wire format is precomputed at creation to avoid dangling references.
    Custom(CustomALPN),
}
/// Represents a Custom ALPN Protocol with a precomputed wire format and header offset.
#[derive(Hash, Clone, Debug, PartialEq, PartialOrd)]
pub struct CustomALPN {
    wire: Vec<u8>,
    header: usize,
}

impl CustomALPN {
    /// Create a new CustomALPN from a protocol byte vector.
    ///
    /// # Panics
    /// Panics when `proto` is empty, exceeds 255 bytes (the RFC 7301 limit),
    /// or matches a reserved protocol name (`http/1.1` or `h2`).
    pub fn new(proto: Vec<u8>) -> Self {
        // Reject invalid names up front, before building the wire format
        assert!(!proto.is_empty(), "Custom ALPN protocol must not be empty");
        // RFC-7301
        assert!(
            proto.len() <= 255,
            "ALPN protocol name must be 255 bytes or fewer"
        );
        if matches!(proto.as_slice(), b"http/1.1" | b"h2") {
            panic!("Custom ALPN cannot be a reserved protocol (http/1.1 or h2)")
        }
        // Wire format: a single length byte followed by the protocol bytes
        let mut wire = Vec::with_capacity(1 + proto.len());
        wire.push(proto.len() as u8);
        wire.extend_from_slice(&proto);
        // The protocol name always starts right after the one length byte
        Self { wire, header: 1 }
    }

    /// Get the custom protocol name as a slice
    pub fn protocol(&self) -> &[u8] {
        &self.wire[self.header..]
    }

    /// Get the wire format used for ALPN negotiation
    pub fn as_wire(&self) -> &[u8] {
        &self.wire
    }
}
impl std::fmt::Display for ALPN {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ALPN::H1 => write!(f, "H1"),
            ALPN::H2 => write!(f, "H2"),
            ALPN::H2H1 => write!(f, "H2H1"),
            ALPN::Custom(custom) => {
                // Print the protocol name as UTF-8 when possible, otherwise
                // fall back to its raw byte representation
                match std::str::from_utf8(custom.protocol()) {
                    Ok(s) => write!(f, "Custom({})", s),
                    Err(_) => write!(f, "Custom({:?})", custom.protocol()),
                }
            }
        }
    }
}
impl ALPN {
    /// Create a new ALPN according to the `max` and `min` version constraints
    pub fn new(max: u8, min: u8) -> Self {
        if max == 1 {
            ALPN::H1
        } else if min == 2 {
            ALPN::H2
        } else {
            ALPN::H2H1
        }
    }

    /// Return the max http version this [`ALPN`] allows
    ///
    /// Custom protocols return 0 because they do not map to a known HTTP version.
    pub fn get_max_http_version(&self) -> u8 {
        match self {
            ALPN::H1 => 1,
            ALPN::H2 | ALPN::H2H1 => 2,
            ALPN::Custom(_) => 0,
        }
    }

    /// Return the min http version this [`ALPN`] allows
    ///
    /// Custom protocols return 0 because they do not map to a known HTTP version.
    pub fn get_min_http_version(&self) -> u8 {
        match self {
            ALPN::H1 | ALPN::H2H1 => 1,
            ALPN::H2 => 2,
            ALPN::Custom(_) => 0,
        }
    }

    #[cfg(feature = "openssl_derived")]
    pub(crate) fn to_wire_preference(&self) -> &[u8] {
        // https://www.openssl.org/docs/manmaster/man3/SSL_CTX_set_alpn_select_cb.html
        // "vector of nonempty, 8-bit length-prefixed, byte strings"
        match self {
            Self::H1 => b"\x08http/1.1",
            Self::H2 => b"\x02h2",
            Self::H2H1 => b"\x02h2\x08http/1.1",
            Self::Custom(custom) => custom.as_wire(),
        }
    }

    /// Map the protocol name selected on the wire back to an [`ALPN`] value.
    #[cfg(feature = "any_tls")]
    pub(crate) fn from_wire_selected(raw: &[u8]) -> Option<Self> {
        match raw {
            b"http/1.1" => Some(Self::H1),
            b"h2" => Some(Self::H2),
            _ => Some(Self::Custom(CustomALPN::new(raw.to_vec()))),
        }
    }

    #[cfg(feature = "rustls")]
    pub(crate) fn to_wire_protocols(&self) -> Vec<Vec<u8>> {
        match self {
            ALPN::H1 => vec![b"http/1.1".to_vec()],
            ALPN::H2 => vec![b"h2".to_vec()],
            ALPN::H2H1 => vec![b"h2".to_vec(), b"http/1.1".to_vec()],
            ALPN::Custom(custom) => vec![custom.protocol().to_vec()],
        }
    }

    #[cfg(feature = "s2n")]
    pub(crate) fn to_wire_protocols(&self) -> Vec<Vec<u8>> {
        match self {
            ALPN::H1 => vec![b"http/1.1".to_vec()],
            ALPN::H2 => vec![b"h2".to_vec()],
            ALPN::H2H1 => vec![b"h2".to_vec(), b"http/1.1".to_vec()],
            // BUGFIX: this arm was missing, leaving the match non-exhaustive
            // and breaking compilation under the `s2n` feature. Mirror the
            // rustls variant: advertise the raw custom protocol name.
            ALPN::Custom(custom) => vec![custom.protocol().to_vec()],
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_valid_alpn_construction_and_versions() {
        // Standard Protocols
        assert_eq!(ALPN::H1.get_min_http_version(), 1);
        assert_eq!(ALPN::H1.get_max_http_version(), 1);
        assert_eq!(ALPN::H2.get_min_http_version(), 2);
        assert_eq!(ALPN::H2.get_max_http_version(), 2);
        assert_eq!(ALPN::H2H1.get_min_http_version(), 1);
        assert_eq!(ALPN::H2H1.get_max_http_version(), 2);
        // Custom Protocol: reports 0 for both bounds (no HTTP version mapping)
        let custom_protocol = ALPN::Custom(CustomALPN::new("custom/1.0".into()));
        assert_eq!(custom_protocol.get_min_http_version(), 0);
        assert_eq!(custom_protocol.get_max_http_version(), 0);
    }
    #[test]
    #[should_panic(expected = "Custom ALPN protocol must not be empty")]
    fn test_empty_custom_alpn() {
        let _ = ALPN::Custom(CustomALPN::new("".into()));
    }
    #[test]
    #[should_panic(expected = "ALPN protocol name must be 255 bytes or fewer")]
    fn test_large_custom_alpn() {
        // 256 bytes: one over the RFC 7301 limit
        let large_alpn = vec![b'a'; 256];
        let _ = ALPN::Custom(CustomALPN::new(large_alpn));
    }
    #[test]
    #[should_panic(expected = "Custom ALPN cannot be a reserved protocol (http/1.1 or h2)")]
    fn test_custom_h1_alpn() {
        let _ = ALPN::Custom(CustomALPN::new("http/1.1".into()));
    }
    #[test]
    #[should_panic(expected = "Custom ALPN cannot be a reserved protocol (http/1.1 or h2)")]
    fn test_custom_h2_alpn() {
        let _ = ALPN::Custom(CustomALPN::new("h2".into()));
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/tls/noop_tls/mod.rs | pingora-core/src/protocols/tls/noop_tls/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! This is a set of stubs that provides the minimum types to let pingora work
//! without any tls providers configured
/// Stub reference to a TLS session (no TLS provider configured).
pub struct TlsRef;

/// Stub CA list type: a slice of certificate wrappers.
pub type CaType = [CertWrapper];

/// Stub certificate wrapper carrying no certificate data.
#[derive(Debug)]
pub struct CertWrapper;

impl CertWrapper {
    /// Stub expiration accessor; always returns the empty string.
    pub fn not_after(&self) -> &str {
        ""
    }
}
pub mod connectors {
    use pingora_error::Result;
    use crate::{
        connectors::ConnectorOptions,
        protocols::{ALPN, IO},
        upstreams::peer::Peer,
    };
    use super::stream::SslStream;
    /// Stub TLS connector wrapper (holds no real TLS state).
    #[derive(Clone)]
    pub struct Connector {
        pub ctx: TlsConnector,
    }
    /// Stub TLS connector context; carries no configuration.
    #[derive(Clone)]
    pub struct TlsConnector;
    /// Stub TLS settings placeholder.
    pub struct TlsSettings;
    impl Connector {
        /// Build a stub connector; the options are ignored.
        pub fn new(_: Option<ConnectorOptions>) -> Self {
            Self { ctx: TlsConnector }
        }
    }
    /// Stub "TLS connect": performs no handshake and returns a default
    /// no-op stream immediately.
    pub async fn connect<T, P>(
        _: T,
        _: &P,
        _: Option<ALPN>,
        _: &TlsConnector,
    ) -> Result<SslStream<T>>
    where
        T: IO,
        P: Peer + Send + Sync,
    {
        Ok(SslStream::default())
    }
}
pub mod listeners {
    use pingora_error::Result;
    use tokio::io::{AsyncRead, AsyncWrite};
    use super::stream::SslStream;
    /// Stub TLS acceptor.
    pub struct Acceptor;
    /// Stub TLS settings placeholder.
    pub struct TlsSettings;
    impl TlsSettings {
        /// Build a stub acceptor.
        pub fn build(&self) -> Acceptor {
            Acceptor
        }
        /// Stub constructor; the cert/key path arguments are ignored.
        pub fn intermediate(_: &str, _: &str) -> Result<Self> {
            Ok(Self)
        }
        /// No-op: there is no TLS stack to enable h2 on.
        pub fn enable_h2(&mut self) {}
    }
    impl Acceptor {
        /// Always panics: accepting TLS requires a TLS provider feature.
        pub async fn tls_handshake<S: AsyncRead + AsyncWrite>(&self, _: S) -> Result<SslStream<S>> {
            unimplemented!("No tls feature was specified")
        }
    }
}
pub mod stream {
    use std::{
        pin::Pin,
        task::{Context, Poll},
    };
    use async_trait::async_trait;
    use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
    use crate::protocols::{
        GetProxyDigest, GetSocketDigest, GetTimingDigest, Peek, Shutdown, Ssl, UniqueID,
    };
    /// A TLS session over a stream.
    ///
    /// Stub: holds no real connection; all I/O completes immediately.
    #[derive(Debug)]
    pub struct SslStream<S> {
        // No stream is stored; PhantomData only carries the type parameter
        marker: std::marker::PhantomData<S>,
    }
    impl<S> Default for SslStream<S> {
        fn default() -> Self {
            Self {
                marker: Default::default(),
            }
        }
    }
    impl<S> AsyncRead for SslStream<S>
    where
        S: AsyncRead + AsyncWrite,
    {
        // Completes immediately without filling the buffer (callers see EOF)
        fn poll_read(
            self: Pin<&mut Self>,
            _ctx: &mut Context<'_>,
            _buf: &mut ReadBuf<'_>,
        ) -> Poll<std::io::Result<()>> {
            Poll::Ready(Ok(()))
        }
    }
    impl<S> AsyncWrite for SslStream<S>
    where
        S: AsyncRead + AsyncWrite,
    {
        // Pretends the entire buffer was written
        fn poll_write(
            self: Pin<&mut Self>,
            _ctx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<std::io::Result<usize>> {
            Poll::Ready(Ok(buf.len()))
        }
        fn poll_flush(self: Pin<&mut Self>, _ctx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
            Poll::Ready(Ok(()))
        }
        fn poll_shutdown(
            self: Pin<&mut Self>,
            _ctx: &mut Context<'_>,
        ) -> Poll<std::io::Result<()>> {
            Poll::Ready(Ok(()))
        }
    }
    #[async_trait]
    impl<S: Send> Shutdown for SslStream<S> {
        // No-op: there is nothing to close
        async fn shutdown(&mut self) {}
    }
    impl<S> UniqueID for SslStream<S> {
        // All stub streams share the same ID (0)
        fn id(&self) -> crate::protocols::UniqueIDType {
            0
        }
    }
    impl<S> Ssl for SslStream<S> {}
    impl<S> GetTimingDigest for SslStream<S> {
        fn get_timing_digest(&self) -> Vec<Option<crate::protocols::TimingDigest>> {
            vec![]
        }
    }
    impl<S> GetProxyDigest for SslStream<S> {
        fn get_proxy_digest(
            &self,
        ) -> Option<std::sync::Arc<crate::protocols::raw_connect::ProxyDigest>> {
            None
        }
    }
    impl<S> GetSocketDigest for SslStream<S> {
        fn get_socket_digest(&self) -> Option<std::sync::Arc<crate::protocols::SocketDigest>> {
            None
        }
    }
    impl<S> Peek for SslStream<S> {}
}
pub mod utils {
    use std::fmt::Display;
    use super::CertWrapper;
    /// Stub certificate+key holder.
    #[derive(Debug, Clone, Hash)]
    pub struct CertKey;
    impl Display for CertKey {
        // Stub display: writes nothing
        fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            Ok(())
        }
    }
    /// Stub: there is no certificate, hence no organization unit.
    pub fn get_organization_unit(_: &CertWrapper) -> Option<String> {
        None
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/tls/boringssl_openssl/stream.rs | pingora-core/src/protocols/tls/boringssl_openssl/stream.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::protocols::digest::TimingDigest;
use crate::protocols::tls::{SslDigest, ALPN};
use crate::protocols::{Peek, Ssl, UniqueID, UniqueIDType};
use crate::tls::{self, ssl, tokio_ssl::SslStream as InnerSsl};
use crate::utils::tls::{get_organization, get_serial};
use log::warn;
use pingora_error::{ErrorType::*, OrErr, Result};
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::SystemTime;
use tokio::io::{self, AsyncRead, AsyncWrite, ReadBuf};
#[cfg(feature = "boringssl")]
use pingora_boringssl as ssl_lib;
#[cfg(feature = "openssl")]
use pingora_openssl as ssl_lib;
use ssl_lib::{hash::MessageDigest, ssl::SslRef};
/// The TLS connection
#[derive(Debug)]
pub struct SslStream<T> {
    // Underlying async TLS stream (tokio_ssl wrapper around the transport)
    ssl: InnerSsl<T>,
    // Snapshot of connection info; populated once the handshake completes
    digest: Option<Arc<SslDigest>>,
    // Timing info; established_ts is set at handshake completion
    pub(super) timing: TimingDigest,
}
impl<T> SslStream<T>
where
    T: AsyncRead + AsyncWrite + std::marker::Unpin,
{
    /// Create a new TLS connection from the given `stream`
    ///
    /// The caller needs to perform [`Self::connect()`] or [`Self::accept()`] to perform TLS
    /// handshake after.
    pub fn new(ssl: ssl::Ssl, stream: T) -> Result<Self> {
        let ssl = InnerSsl::new(ssl, stream)
            .explain_err(TLSHandshakeFailure, |e| format!("ssl stream error: {e}"))?;
        Ok(SslStream {
            ssl,
            digest: None,
            timing: Default::default(),
        })
    }
    /// Connect to the remote TLS server as a client
    ///
    /// On success, records the handshake completion time and captures the
    /// connection's [`SslDigest`].
    pub async fn connect(&mut self) -> Result<(), ssl::Error> {
        Self::clear_error();
        Pin::new(&mut self.ssl).connect().await?;
        self.timing.established_ts = SystemTime::now();
        self.digest = Some(Arc::new(SslDigest::from_ssl(self.ssl())));
        Ok(())
    }
    /// Finish the TLS handshake from client as a server
    ///
    /// On success, records the handshake completion time and captures the
    /// connection's [`SslDigest`].
    pub async fn accept(&mut self) -> Result<(), ssl::Error> {
        Self::clear_error();
        Pin::new(&mut self.ssl).accept().await?;
        self.timing.established_ts = SystemTime::now();
        self.digest = Some(Arc::new(SslDigest::from_ssl(self.ssl())));
        Ok(())
    }
    // Warn if the thread-local TLS error stack still has errors left over from
    // a previous operation, so they are not misattributed to the next one
    // (ErrorStack::get() reads the pending stack).
    #[inline]
    fn clear_error() {
        let errs = tls::error::ErrorStack::get();
        if !errs.errors().is_empty() {
            warn!("Clearing dirty TLS error stack: {}", errs);
        }
    }
}
impl<T> SslStream<T> {
    /// Return a shared handle to the [`SslDigest`] captured at handshake
    /// completion, if any.
    pub fn ssl_digest(&self) -> Option<Arc<SslDigest>> {
        self.digest.clone()
    }
    /// Attempts to obtain a mutable reference to the SslDigest.
    /// This method returns `None` if the SslDigest is currently held by other references.
    pub(crate) fn ssl_digest_mut(&mut self) -> Option<&mut SslDigest> {
        Arc::get_mut(self.digest.as_mut()?)
    }
}
use std::ops::{Deref, DerefMut};
// Expose the inner tokio SSL stream's API directly on SslStream.
impl<T> Deref for SslStream<T> {
    type Target = InnerSsl<T>;
    fn deref(&self) -> &Self::Target {
        &self.ssl
    }
}
impl<T> DerefMut for SslStream<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.ssl
    }
}
impl<T> AsyncRead for SslStream<T>
where
    T: AsyncRead + AsyncWrite + Unpin,
{
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // Drain stale TLS errors first so they are not attributed to this read
        Self::clear_error();
        Pin::new(&mut self.ssl).poll_read(cx, buf)
    }
}
impl<T> AsyncWrite for SslStream<T>
where
    T: AsyncRead + AsyncWrite + Unpin,
{
    // Each operation clears the error stack, then forwards to the inner stream
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Self::clear_error();
        Pin::new(&mut self.ssl).poll_write(cx, buf)
    }
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
        Self::clear_error();
        Pin::new(&mut self.ssl).poll_flush(cx)
    }
    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
        Self::clear_error();
        Pin::new(&mut self.ssl).poll_shutdown(cx)
    }
    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        Self::clear_error();
        Pin::new(&mut self.ssl).poll_write_vectored(cx, bufs)
    }
    // Advertise vectored-write support; writes are forwarded to the inner
    // stream's poll_write_vectored above
    fn is_write_vectored(&self) -> bool {
        true
    }
}
impl<T> UniqueID for SslStream<T>
where
    T: UniqueID,
{
    // Delegate the connection ID to the wrapped transport stream
    fn id(&self) -> UniqueIDType {
        self.ssl.get_ref().id()
    }
}
impl<T> Ssl for SslStream<T> {
    fn get_ssl(&self) -> Option<&ssl::SslRef> {
        Some(self.ssl())
    }
    fn get_ssl_digest(&self) -> Option<Arc<SslDigest>> {
        self.ssl_digest()
    }
    /// Return selected ALPN if any
    fn selected_alpn_proto(&self) -> Option<ALPN> {
        let ssl = self.get_ssl()?;
        ALPN::from_wire_selected(ssl.selected_alpn_protocol()?)
    }
}
impl SslDigest {
    /// Build an [`SslDigest`] snapshot from a live TLS session.
    ///
    /// Missing pieces degrade gracefully: no current cipher yields an empty
    /// cipher string, and a missing peer certificate yields an empty digest
    /// with no organization or serial number.
    pub fn from_ssl(ssl: &SslRef) -> Self {
        let cipher = match ssl.current_cipher() {
            Some(c) => c.name(),
            None => "",
        };
        // SHA-256 digest of the peer certificate plus its organization and
        // serial number, when a peer certificate is present
        let (cert_digest, org, sn) = match ssl.peer_certificate() {
            Some(cert) => {
                let cert_digest = match cert.digest(MessageDigest::sha256()) {
                    Ok(c) => c.as_ref().to_vec(),
                    Err(_) => Vec::new(),
                };
                (cert_digest, get_organization(&cert), get_serial(&cert).ok())
            }
            None => (Vec::new(), None, None),
        };
        SslDigest::new(cipher, ssl.version_str(), org, sn, cert_digest)
    }
}
// TODO: implement Peek if needed
impl<T> Peek for SslStream<T> {}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/tls/boringssl_openssl/client.rs | pingora-core/src/protocols/tls/boringssl_openssl/client.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! TLS client specific implementation
use crate::protocols::raw_connect::ProxyDigest;
use crate::protocols::tls::SslStream;
use crate::protocols::{
GetProxyDigest, GetSocketDigest, GetTimingDigest, SocketDigest, TimingDigest, IO,
};
use crate::tls::{ssl, ssl::ConnectConfiguration, ssl::SslRef, ssl_sys::X509_V_ERR_INVALID_CALL};
use pingora_error::{Error, ErrorType::*, OrErr, Result};
use std::any::Any;
use std::sync::Arc;
use std::time::Duration;
/// Perform the TLS handshake for the given connection with the given configuration
///
/// `complete_hook`, when provided, runs after a successful handshake; any
/// extension data it returns is stored in the connection's `SslDigest`.
pub async fn handshake<S: IO>(
    conn_config: ConnectConfiguration,
    domain: &str,
    io: S,
    complete_hook: Option<Arc<dyn Fn(&SslRef) -> Option<Arc<dyn Any + Send + Sync>> + Send + Sync>>,
) -> Result<SslStream<S>> {
    let ssl = conn_config
        .into_ssl(domain)
        .explain_err(TLSHandshakeFailure, |e| format!("ssl config error: {e}"))?;
    let mut stream = SslStream::new(ssl, io)
        .explain_err(TLSHandshakeFailure, |e| format!("ssl stream error: {e}"))?;
    let handshake_result = stream.connect().await;
    match handshake_result {
        Ok(()) => {
            // Run the user hook and attach its extension to the digest.
            // Silently skipped if the digest Arc is already shared
            // (ssl_digest_mut returns None).
            if let Some(hook) = complete_hook {
                if let Some(extension) = hook(stream.ssl()) {
                    if let Some(digest_mut) = stream.ssl_digest_mut() {
                        digest_mut.extension.set(extension);
                    }
                }
            }
            Ok(stream)
        }
        Err(e) => {
            let context = format!("TLS connect() failed: {e}, SNI: {domain}");
            match e.code() {
                // Distinguish certificate verification failures (InvalidCert)
                // from other TLS protocol failures (TLSHandshakeFailure)
                ssl::ErrorCode::SSL => {
                    // Unify the return type of `verify_result` for openssl
                    #[cfg(not(feature = "boringssl"))]
                    fn verify_result<S>(stream: SslStream<S>) -> Result<(), i32> {
                        match stream.ssl().verify_result().as_raw() {
                            crate::tls::ssl_sys::X509_V_OK => Ok(()),
                            e => Err(e),
                        }
                    }
                    // Unify the return type of `verify_result` for boringssl
                    #[cfg(feature = "boringssl")]
                    fn verify_result<S>(stream: SslStream<S>) -> Result<(), i32> {
                        stream.ssl().verify_result().map_err(|e| e.as_raw())
                    }
                    match verify_result(stream) {
                        Ok(()) => Error::e_explain(TLSHandshakeFailure, context),
                        // X509_V_ERR_INVALID_CALL in case verify result was never set
                        Err(X509_V_ERR_INVALID_CALL) => {
                            Error::e_explain(TLSHandshakeFailure, context)
                        }
                        _ => Error::e_explain(InvalidCert, context),
                    }
                }
                /* likely network error, but still mark as TLS error */
                _ => Error::e_explain(TLSHandshakeFailure, context),
            }
        }
    }
}
impl<S> GetTimingDigest for SslStream<S>
where
    S: GetTimingDigest,
{
    /// Collect timing digests from the wrapped stream, appending this TLS
    /// layer's own timing at the end.
    fn get_timing_digest(&self) -> Vec<Option<TimingDigest>> {
        let mut ts_vec = self.get_ref().get_timing_digest();
        ts_vec.push(Some(self.timing.clone()));
        ts_vec
    }
    fn get_read_pending_time(&self) -> Duration {
        self.get_ref().get_read_pending_time()
    }
    fn get_write_pending_time(&self) -> Duration {
        self.get_ref().get_write_pending_time()
    }
}
// The proxy and socket digests belong to the transport layer; delegate to it.
impl<S> GetProxyDigest for SslStream<S>
where
    S: GetProxyDigest,
{
    fn get_proxy_digest(&self) -> Option<Arc<ProxyDigest>> {
        self.get_ref().get_proxy_digest()
    }
}
impl<S> GetSocketDigest for SslStream<S>
where
    S: GetSocketDigest,
{
    fn get_socket_digest(&self) -> Option<Arc<SocketDigest>> {
        self.get_ref().get_socket_digest()
    }
    fn set_socket_digest(&mut self, socket_digest: SocketDigest) {
        self.get_mut().set_socket_digest(socket_digest)
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/tls/boringssl_openssl/mod.rs | pingora-core/src/protocols/tls/boringssl_openssl/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod client;
pub mod server;
mod stream;
#[cfg(feature = "boringssl")]
use pingora_boringssl as ssl_lib;
#[cfg(feature = "openssl")]
use pingora_openssl as ssl_lib;
use ssl_lib::{ssl::SslRef, x509::X509};
pub use stream::*;
/// The TLS session reference type exposed to callbacks for this provider.
pub type TlsRef = SslRef;
/// Certificate list type for this provider (boxed slice of X509 certs).
pub type CaType = Box<[X509]>;
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/tls/boringssl_openssl/server.rs | pingora-core/src/protocols/tls/boringssl_openssl/server.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! TLS server specific implementation
use crate::listeners::TlsAcceptCallbacks;
use crate::protocols::tls::SslStream;
use crate::protocols::{Shutdown, IO};
use crate::tls::ext;
use crate::tls::ext::ssl_from_acceptor;
use crate::tls::ssl;
use crate::tls::ssl::SslAcceptor;
use async_trait::async_trait;
use log::warn;
use pingora_error::{ErrorType::*, OrErr, Result};
use std::pin::Pin;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
/// Prepare a TLS stream for handshake
///
/// Builds the server-side SSL state from the acceptor and wraps `io` in an
/// [`SslStream`]; no handshake I/O happens yet.
pub fn prepare_tls_stream<S: IO>(ssl_acceptor: &SslAcceptor, io: S) -> Result<SslStream<S>> {
    let ssl = ssl_from_acceptor(ssl_acceptor)
        .explain_err(TLSHandshakeFailure, |e| format!("ssl_acceptor error: {e}"))?;
    SslStream::new(ssl, io).explain_err(TLSHandshakeFailure, |e| format!("ssl stream error: {e}"))
}
/// Perform TLS handshake for the given connection with the given configuration
pub async fn handshake<S: IO>(ssl_acceptor: &SslAcceptor, io: S) -> Result<SslStream<S>> {
    let mut stream = prepare_tls_stream(ssl_acceptor, io)?;
    stream
        .accept()
        .await
        .explain_err(TLSHandshakeFailure, |e| format!("TLS accept() failed: {e}"))?;
    Ok(stream)
}
/// Perform TLS handshake for the given connection with the given configuration and callbacks
///
/// The accept may pause midway so that `certificate_callback` can install a
/// certificate; afterwards `handshake_complete_callback` may attach extension
/// data to the connection's `SslDigest`.
pub async fn handshake_with_callback<S: IO>(
    ssl_acceptor: &SslAcceptor,
    io: S,
    callbacks: &TlsAcceptCallbacks,
) -> Result<SslStream<S>> {
    let mut tls_stream = prepare_tls_stream(ssl_acceptor, io)?;
    let done = Pin::new(&mut tls_stream)
        .start_accept()
        .await
        .explain_err(TLSHandshakeFailure, |e| format!("TLS accept() failed: {e}"))?;
    if !done {
        // Handshake paused waiting for a certificate: let the callback
        // provide it, then resume.
        // safety: we do hold a mut ref of tls_stream
        let ssl_mut = unsafe { ext::ssl_mut(tls_stream.ssl()) };
        callbacks.certificate_callback(ssl_mut).await;
        Pin::new(&mut tls_stream)
            .resume_accept()
            .await
            .explain_err(TLSHandshakeFailure, |e| format!("TLS accept() failed: {e}"))?;
    }
    {
        // Attach any user-provided extension data to the digest; silently
        // skipped if the digest Arc is already shared.
        let ssl = tls_stream.ssl();
        if let Some(extension) = callbacks.handshake_complete_callback(ssl).await {
            if let Some(digest_mut) = tls_stream.ssl_digest_mut() {
                digest_mut.extension.set(extension);
            }
        }
    }
    Ok(tls_stream)
}
#[async_trait]
impl<S> Shutdown for SslStream<S>
where
    S: AsyncRead + AsyncWrite + Sync + Unpin + Send,
{
    /// Best-effort TLS shutdown: failures are logged at warn level, not returned.
    async fn shutdown(&mut self) {
        match <Self as AsyncWriteExt>::shutdown(self).await {
            Ok(()) => {}
            Err(e) => {
                warn!("TLS shutdown failed, {e}");
            }
        }
    }
}
/// Resumable TLS server side handshake.
#[async_trait]
pub trait ResumableAccept {
    /// Start a resumable TLS accept handshake.
    ///
    /// * `Ok(true)` when the handshake is finished
    /// * `Ok(false)` when the handshake is paused midway
    ///
    /// For now, the accept will only pause when a certificate is needed.
    async fn start_accept(self: Pin<&mut Self>) -> Result<bool, ssl::Error>;
    /// Continue the TLS handshake
    ///
    /// This function should be called after the certificate is provided.
    async fn resume_accept(self: Pin<&mut Self>) -> Result<(), ssl::Error>;
}
#[async_trait]
impl<S: AsyncRead + AsyncWrite + Send + Unpin> ResumableAccept for SslStream<S> {
    async fn start_accept(mut self: Pin<&mut Self>) -> Result<bool, ssl::Error> {
        // safety: &mut self
        let ssl_mut = unsafe { ext::ssl_mut(self.ssl()) };
        // Arrange for accept() to suspend instead of failing when a
        // certificate is needed
        ext::suspend_when_need_ssl_cert(ssl_mut);
        let res = self.accept().await;
        match res {
            Ok(()) => Ok(true),
            Err(e) => {
                // Suspension surfaces as an error; translate it to Ok(false)
                if ext::is_suspended_for_cert(&e) {
                    Ok(false)
                } else {
                    Err(e)
                }
            }
        }
    }
    async fn resume_accept(mut self: Pin<&mut Self>) -> Result<(), ssl::Error> {
        // safety: &mut ssl
        let ssl_mut = unsafe { ext::ssl_mut(self.ssl()) };
        ext::unblock_ssl_cert(ssl_mut);
        self.accept().await
    }
}
#[cfg(test)]
mod tests {
    use super::handshake_with_callback;
    use crate::listeners::{TlsAccept, TlsAcceptCallbacks};
    use crate::protocols::tls::SslStream;
    use crate::protocols::tls::TlsRef;
    use crate::tls::ext;
    use crate::tls::ssl;
    use async_trait::async_trait;
    use std::pin::Pin;
    use std::sync::Arc;
    use tokio::io::DuplexStream;
    // TLS client over one end of a duplex pipe; acts as the peer for the
    // server-side handshake under test
    async fn client_task(client: DuplexStream) {
        use tokio::io::AsyncReadExt;
        let ssl_context = ssl::SslContext::builder(ssl::SslMethod::tls())
            .unwrap()
            .build();
        let mut ssl = ssl::Ssl::new(&ssl_context).unwrap();
        ssl.set_hostname("pingora.org").unwrap();
        ssl.set_verify(ssl::SslVerifyMode::NONE); // we don't have a valid cert
        let mut stream = SslStream::new(ssl, client).unwrap();
        Pin::new(&mut stream).connect().await.unwrap();
        let mut buf = [0; 1];
        let _ = stream.read(&mut buf).await;
    }
    #[tokio::test]
    #[cfg(feature = "any_tls")]
    async fn test_async_cert() {
        let acceptor = ssl::SslAcceptor::mozilla_intermediate_v5(ssl::SslMethod::tls())
            .unwrap()
            .build();
        struct Callback;
        #[async_trait]
        impl TlsAccept for Callback {
            // Provides the certificate mid-handshake, exercising the
            // suspend/resume path
            async fn certificate_callback(&self, ssl: &mut TlsRef) -> () {
                assert_eq!(
                    ssl.servername(ssl::NameType::HOST_NAME).unwrap(),
                    "pingora.org"
                );
                let cert = format!("{}/tests/keys/server.crt", env!("CARGO_MANIFEST_DIR"));
                let key = format!("{}/tests/keys/key.pem", env!("CARGO_MANIFEST_DIR"));
                let cert_bytes = std::fs::read(cert).unwrap();
                let cert = crate::tls::x509::X509::from_pem(&cert_bytes).unwrap();
                let key_bytes = std::fs::read(key).unwrap();
                let key = crate::tls::pkey::PKey::private_key_from_pem(&key_bytes).unwrap();
                ext::ssl_use_certificate(ssl, &cert).unwrap();
                ext::ssl_use_private_key(ssl, &key).unwrap();
            }
        }
        let cb: TlsAcceptCallbacks = Box::new(Callback);
        let (client, server) = tokio::io::duplex(1024);
        tokio::spawn(client_task(client));
        handshake_with_callback(&acceptor, server, &cb)
            .await
            .unwrap();
    }
    #[tokio::test]
    #[cfg(feature = "openssl_derived")]
    async fn test_handshake_complete_callback() {
        use crate::tls::ssl::SslFiletype;
        let cert = format!("{}/tests/keys/server.crt", env!("CARGO_MANIFEST_DIR"));
        let key = format!("{}/tests/keys/key.pem", env!("CARGO_MANIFEST_DIR"));
        let acceptor = {
            let mut builder =
                ssl::SslAcceptor::mozilla_intermediate_v5(ssl::SslMethod::tls()).unwrap();
            builder.set_certificate_chain_file(cert).unwrap();
            builder.set_private_key_file(key, SslFiletype::PEM).unwrap();
            builder.build()
        };
        struct Sni(String);
        struct Callback;
        #[async_trait]
        impl TlsAccept for Callback {
            // Captures the SNI after the handshake and stores it as a
            // digest extension
            async fn handshake_complete_callback(
                &self,
                ssl: &TlsRef,
            ) -> Option<Arc<dyn std::any::Any + Send + Sync>> {
                let sni = ssl.servername(ssl::NameType::HOST_NAME)?.to_string();
                Some(Arc::new(Sni(sni)))
            }
        }
        let cb: TlsAcceptCallbacks = Box::new(Callback);
        let (client, server) = tokio::io::duplex(1024);
        tokio::spawn(client_task(client));
        let stream = handshake_with_callback(&acceptor, server, &cb)
            .await
            .unwrap();
        let ssl_digest = stream.ssl_digest().unwrap();
        let sni = ssl_digest.extension.get::<Sni>().unwrap();
        assert_eq!(sni.0, "pingora.org");
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/tls/rustls/stream.rs | pingora-core/src/protocols/tls/rustls/stream.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io::Result as IoResult;
use std::ops::{Deref, DerefMut};
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::{Duration, SystemTime};
use crate::listeners::tls::Acceptor;
use crate::protocols::raw_connect::ProxyDigest;
use crate::protocols::{tls::SslDigest, Peek, TimingDigest, UniqueIDType};
use crate::protocols::{
GetProxyDigest, GetSocketDigest, GetTimingDigest, SocketDigest, Ssl, UniqueID, ALPN,
};
use crate::utils::tls::get_organization_serial_bytes;
use pingora_error::ErrorType::{AcceptError, ConnectError, InternalError, TLSHandshakeFailure};
use pingora_error::{OkOrErr, OrErr, Result};
use pingora_rustls::TlsStream as RusTlsStream;
use pingora_rustls::{hash_certificate, NoDebug};
use pingora_rustls::{Accept, Connect, ServerName, TlsConnector};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use x509_parser::nom::AsBytes;
#[derive(Debug)]
pub struct InnerStream<T> {
pub(crate) stream: Option<RusTlsStream<T>>,
connect: NoDebug<Option<Connect<T>>>,
accept: NoDebug<Option<Accept<T>>>,
}
/// The TLS connection
#[derive(Debug)]
pub struct TlsStream<T> {
tls: InnerStream<T>,
digest: Option<Arc<SslDigest>>,
timing: TimingDigest,
}
impl<T> TlsStream<T>
where
T: AsyncRead + AsyncWrite + Unpin + Send,
{
/// Create a new TLS connection from the given `stream`
///
/// Using RustTLS the stream is only returned after the handshake.
/// The caller does therefor not need to perform [`Self::connect()`].
pub async fn from_connector(connector: &TlsConnector, domain: &str, stream: T) -> Result<Self> {
let server = ServerName::try_from(domain).or_err_with(InternalError, || {
format!("Invalid Input: Failed to parse domain: {domain}")
})?;
let tls = InnerStream::from_connector(connector, server, stream)
.await
.explain_err(TLSHandshakeFailure, |e| format!("tls stream error: {e}"))?;
Ok(TlsStream {
tls,
digest: None,
timing: Default::default(),
})
}
/// Create a new TLS connection from the given `stream`
///
/// Using RustTLS the stream is only returned after the handshake.
/// The caller does therefor not need to perform [`Self::accept()`].
pub(crate) async fn from_acceptor(acceptor: &Acceptor, stream: T) -> Result<Self> {
let tls = InnerStream::from_acceptor(acceptor, stream)
.await
.explain_err(TLSHandshakeFailure, |e| format!("tls stream error: {e}"))?;
Ok(TlsStream {
tls,
digest: None,
timing: Default::default(),
})
}
}
impl<S> GetSocketDigest for TlsStream<S>
where
S: GetSocketDigest,
{
fn get_socket_digest(&self) -> Option<Arc<SocketDigest>> {
self.tls.get_socket_digest()
}
fn set_socket_digest(&mut self, socket_digest: SocketDigest) {
self.tls.set_socket_digest(socket_digest)
}
}
impl<S> GetTimingDigest for TlsStream<S>
where
S: GetTimingDigest,
{
fn get_timing_digest(&self) -> Vec<Option<TimingDigest>> {
let mut ts_vec = self.tls.get_timing_digest();
ts_vec.push(Some(self.timing.clone()));
ts_vec
}
fn get_read_pending_time(&self) -> Duration {
self.tls.get_read_pending_time()
}
fn get_write_pending_time(&self) -> Duration {
self.tls.get_write_pending_time()
}
}
impl<S> GetProxyDigest for TlsStream<S>
where
S: GetProxyDigest,
{
fn get_proxy_digest(&self) -> Option<Arc<ProxyDigest>> {
self.tls.get_proxy_digest()
}
}
impl<T> TlsStream<T> {
pub fn ssl_digest(&self) -> Option<Arc<SslDigest>> {
self.digest.clone()
}
/// Attempts to obtain a mutable reference to the SslDigest.
/// This method returns `None` if the SslDigest is currently held by other references.
pub(crate) fn ssl_digest_mut(&mut self) -> Option<&mut SslDigest> {
Arc::get_mut(self.digest.as_mut()?)
}
}
impl<T> Deref for TlsStream<T> {
type Target = InnerStream<T>;
fn deref(&self) -> &Self::Target {
&self.tls
}
}
impl<T> DerefMut for TlsStream<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.tls
}
}
impl<T> TlsStream<T>
where
T: AsyncRead + AsyncWrite + Unpin + Send,
{
/// Connect to the remote TLS server as a client
pub(crate) async fn connect(&mut self) -> Result<()> {
self.tls.connect().await?;
self.timing.established_ts = SystemTime::now();
self.digest = self.tls.digest();
Ok(())
}
/// Finish the TLS handshake from client as a server
pub(crate) async fn accept(&mut self) -> Result<()> {
self.tls.accept().await?;
self.timing.established_ts = SystemTime::now();
self.digest = self.tls.digest();
Ok(())
}
}
impl<T> AsyncRead for TlsStream<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<IoResult<()>> {
Pin::new(&mut self.tls.stream.as_mut().unwrap()).poll_read(cx, buf)
}
}
impl<T> AsyncWrite for TlsStream<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll<IoResult<usize>> {
Pin::new(&mut self.tls.stream.as_mut().unwrap()).poll_write(cx, buf)
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<IoResult<()>> {
Pin::new(&mut self.tls.stream.as_mut().unwrap()).poll_flush(cx)
}
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<IoResult<()>> {
Pin::new(&mut self.tls.stream.as_mut().unwrap()).poll_shutdown(cx)
}
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[std::io::IoSlice<'_>],
) -> Poll<IoResult<usize>> {
Pin::new(&mut self.tls.stream.as_mut().unwrap()).poll_write_vectored(cx, bufs)
}
fn is_write_vectored(&self) -> bool {
true
}
}
impl<T> UniqueID for TlsStream<T>
where
T: UniqueID,
{
fn id(&self) -> UniqueIDType {
self.tls.stream.as_ref().unwrap().get_ref().0.id()
}
}
impl<T> Ssl for TlsStream<T> {
fn get_ssl_digest(&self) -> Option<Arc<SslDigest>> {
self.ssl_digest()
}
fn selected_alpn_proto(&self) -> Option<ALPN> {
let st = self.tls.stream.as_ref();
if let Some(stream) = st {
let proto = stream.get_ref().1.alpn_protocol();
match proto {
None => None,
Some(raw) => ALPN::from_wire_selected(raw),
}
} else {
None
}
}
}
/// Create a new TLS connection from the given `stream`
///
/// The caller needs to perform [`Self::connect()`] or [`Self::accept()`] to perform TLS
/// handshake after.
impl<T: AsyncRead + AsyncWrite + Unpin> InnerStream<T> {
pub(crate) async fn from_connector(
connector: &TlsConnector,
server: ServerName<'_>,
stream: T,
) -> Result<Self> {
let connect = connector.connect(server.to_owned(), stream);
Ok(InnerStream {
accept: None.into(),
connect: Some(connect).into(),
stream: None,
})
}
pub(crate) async fn from_acceptor(acceptor: &Acceptor, stream: T) -> Result<Self> {
let accept = acceptor.acceptor.accept(stream);
Ok(InnerStream {
accept: Some(accept).into(),
connect: None.into(),
stream: None,
})
}
}
impl<T: AsyncRead + AsyncWrite + Unpin + Send> InnerStream<T> {
/// Connect to the remote TLS server as a client
pub(crate) async fn connect(&mut self) -> Result<()> {
let connect = &mut (*self.connect);
let connect = connect.take().or_err(
ConnectError,
"TLS connect not available to perform handshake.",
)?;
let stream = connect
.await
.or_err(TLSHandshakeFailure, "tls connect error")?;
self.stream = Some(RusTlsStream::Client(stream));
Ok(())
}
/// Finish the TLS handshake from client as a server
/// no-op implementation within Rustls, handshake is performed during creation of stream.
pub(crate) async fn accept(&mut self) -> Result<()> {
let accept = &mut (*self.accept);
let accept = accept.take().or_err(
AcceptError,
"TLS accept not available to perform handshake.",
)?;
let stream = accept
.await
.explain_err(TLSHandshakeFailure, |e| format!("tls connect error: {e}"))?;
self.stream = Some(RusTlsStream::Server(stream));
Ok(())
}
pub(crate) fn digest(&mut self) -> Option<Arc<SslDigest>> {
Some(Arc::new(SslDigest::from_stream(&self.stream)))
}
}
impl<S> GetSocketDigest for InnerStream<S>
where
S: GetSocketDigest,
{
fn get_socket_digest(&self) -> Option<Arc<SocketDigest>> {
if let Some(stream) = self.stream.as_ref() {
stream.get_ref().0.get_socket_digest()
} else {
None
}
}
fn set_socket_digest(&mut self, socket_digest: SocketDigest) {
self.stream
.as_mut()
.unwrap()
.get_mut()
.0
.set_socket_digest(socket_digest)
}
}
impl<S> GetTimingDigest for InnerStream<S>
where
S: GetTimingDigest,
{
fn get_timing_digest(&self) -> Vec<Option<TimingDigest>> {
self.stream
.as_ref()
.unwrap()
.get_ref()
.0
.get_timing_digest()
}
}
impl<S> GetProxyDigest for InnerStream<S>
where
S: GetProxyDigest,
{
fn get_proxy_digest(&self) -> Option<Arc<ProxyDigest>> {
if let Some(stream) = self.stream.as_ref() {
stream.get_ref().0.get_proxy_digest()
} else {
None
}
}
}
impl SslDigest {
fn from_stream<T>(stream: &Option<RusTlsStream<T>>) -> Self {
let stream = stream.as_ref().unwrap();
let (_io, session) = stream.get_ref();
let protocol = session.protocol_version();
let cipher_suite = session.negotiated_cipher_suite();
let peer_certificates = session.peer_certificates();
let cipher = cipher_suite
.and_then(|suite| suite.suite().as_str())
.unwrap_or_default();
let version = protocol
.and_then(|proto| proto.as_str())
.unwrap_or_default();
let cert_digest = peer_certificates
.and_then(|certs| certs.first())
.map(|cert| hash_certificate(cert))
.unwrap_or_default();
let (organization, serial_number) = peer_certificates
.and_then(|certs| certs.first())
.map(|cert| get_organization_serial_bytes(cert.as_bytes()))
.transpose()
.ok()
.flatten()
.map(|(organization, serial)| (organization, Some(serial)))
.unwrap_or_default();
SslDigest::new(cipher, version, organization, serial_number, cert_digest)
}
}
impl<S> Peek for TlsStream<S> {}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/tls/rustls/client.rs | pingora-core/src/protocols/tls/rustls/client.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Rustls TLS client specific implementation
use crate::protocols::tls::rustls::TlsStream;
use crate::protocols::IO;
use pingora_error::ErrorType::TLSHandshakeFailure;
use pingora_error::{Error, OrErr, Result};
use pingora_rustls::TlsConnector;
// Perform the TLS handshake for the given connection with the given configuration
pub async fn handshake<S: IO>(
connector: &TlsConnector,
domain: &str,
io: S,
) -> Result<TlsStream<S>> {
let mut stream = TlsStream::from_connector(connector, domain, io)
.await
.or_err(TLSHandshakeFailure, "tls stream error")?;
let handshake_result = stream.connect().await;
match handshake_result {
Ok(()) => Ok(stream),
Err(e) => {
let context = format!("TLS connect() failed: {e}, SNI: {domain}");
Error::e_explain(TLSHandshakeFailure, context)
}
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/tls/rustls/mod.rs | pingora-core/src/protocols/tls/rustls/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod client;
pub mod server;
mod stream;
pub use stream::*;
use crate::utils::tls::WrappedX509;
pub type CaType = [WrappedX509];
pub struct TlsRef;
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/tls/rustls/server.rs | pingora-core/src/protocols/tls/rustls/server.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Rustls TLS server specific implementation
use crate::listeners::TlsAcceptCallbacks;
use crate::protocols::tls::rustls::TlsStream;
use crate::protocols::tls::TlsRef;
use crate::protocols::IO;
use crate::{listeners::tls::Acceptor, protocols::Shutdown};
use async_trait::async_trait;
use log::warn;
use pingora_error::{ErrorType::*, OrErr, Result};
use std::pin::Pin;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
impl<S: AsyncRead + AsyncWrite + Send + Unpin> TlsStream<S> {
async fn start_accept(mut self: Pin<&mut Self>) -> Result<bool> {
// TODO: suspend cert callback
let res = self.accept().await;
match res {
Ok(()) => Ok(true),
Err(e) => {
if e.etype == TLSWantX509Lookup {
Ok(false)
} else {
Err(e)
}
}
}
}
async fn resume_accept(mut self: Pin<&mut Self>) -> Result<()> {
// TODO: unblock cert callback
self.accept().await
}
}
async fn prepare_tls_stream<S: IO>(acceptor: &Acceptor, io: S) -> Result<TlsStream<S>> {
TlsStream::from_acceptor(acceptor, io)
.await
.explain_err(TLSHandshakeFailure, |e| format!("tls stream error: {e}"))
}
/// Perform TLS handshake for the given connection with the given configuration
pub async fn handshake<S: IO>(acceptor: &Acceptor, io: S) -> Result<TlsStream<S>> {
let mut stream = prepare_tls_stream(acceptor, io).await?;
stream
.accept()
.await
.explain_err(TLSHandshakeFailure, |e| format!("TLS accept() failed: {e}"))?;
Ok(stream)
}
/// Perform TLS handshake for the given connection with the given configuration and callbacks
/// callbacks are currently not supported within pingora Rustls and are ignored
pub async fn handshake_with_callback<S: IO>(
acceptor: &Acceptor,
io: S,
callbacks: &TlsAcceptCallbacks,
) -> Result<TlsStream<S>> {
let mut tls_stream = prepare_tls_stream(acceptor, io).await?;
let done = Pin::new(&mut tls_stream).start_accept().await?;
if !done {
// TODO: verify if/how callback in handshake can be done using Rustls
warn!("Callacks are not supported with feature \"rustls\".");
Pin::new(&mut tls_stream)
.resume_accept()
.await
.explain_err(TLSHandshakeFailure, |e| format!("TLS accept() failed: {e}"))?;
}
{
let tls_ref = TlsRef;
if let Some(extension) = callbacks.handshake_complete_callback(&tls_ref).await {
if let Some(digest_mut) = tls_stream.ssl_digest_mut() {
digest_mut.extension.set(extension);
}
}
}
Ok(tls_stream)
}
#[async_trait]
impl<S> Shutdown for TlsStream<S>
where
S: AsyncRead + AsyncWrite + Sync + Unpin + Send,
{
async fn shutdown(&mut self) {
match <Self as AsyncWriteExt>::shutdown(self).await {
Ok(()) => {}
Err(e) => {
warn!("TLS shutdown failed, {e}");
}
}
}
}
#[ignore]
#[tokio::test]
async fn test_async_cert() {
todo!("callback support and test for Rustls")
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/tls/s2n/stream.rs | pingora-core/src/protocols/tls/s2n/stream.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::protocols::digest::TimingDigest;
use crate::protocols::raw_connect::ProxyDigest;
use crate::protocols::tls::SslDigest;
use crate::protocols::{
GetProxyDigest, GetSocketDigest, GetTimingDigest, Peek, Shutdown, SocketDigest, Ssl, UniqueID,
UniqueIDType, ALPN,
};
use crate::tls::TlsStream as S2NTlsStream;
use crate::utils::tls::get_organization_serial_bytes;
use async_trait::async_trait;
use log::debug;
use pingora_s2n::hash_certificate;
use std::fmt::Debug;
use std::io::Result as IoResult;
use std::ops::{Deref, DerefMut};
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::{Duration, SystemTime};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
/// Stream wrapper that will automatically flush all writes depending on the value of
/// `auto_flush`. That is, it will always call `poll_flush` on every invocation of
/// `poll_write` or `poll_write_vectored`.
///
/// The underlying transport stream implementation (pingora_core::protocols::l4::stream::Stream)
/// used by Pingora buffers writes to the TCP connection. During the handshake process
/// s2n-tls does not flush writes to the TCP connection, which can lead to scenarios
/// where writes are never sent over the connection causing the handshake process to hang
/// and timeout. This wrapper ensures that all writes are flushed to the TCP connection
/// during the handshake process.
pub struct AutoFlushableStream<T: AsyncRead + AsyncWrite + Unpin> {
stream: T,
auto_flush: bool,
}
impl<T> AutoFlushableStream<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
pub fn new(stream: T, auto_flush: bool) -> Self {
AutoFlushableStream { stream, auto_flush }
}
pub fn set_auto_flush(&mut self, auto_flush: bool) {
self.auto_flush = auto_flush;
}
}
impl<T> AsyncRead for AutoFlushableStream<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<IoResult<()>> {
Pin::new(&mut self.stream).poll_read(cx, buf)
}
}
impl<T> AsyncWrite for AutoFlushableStream<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll<IoResult<usize>> {
let write = Pin::new(&mut self.stream).poll_write(cx, buf);
if self.auto_flush {
let _ = Pin::new(&mut self.stream).poll_flush(cx);
}
write
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<IoResult<()>> {
Pin::new(&mut self.stream).poll_flush(cx)
}
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<IoResult<()>> {
Pin::new(&mut self.stream).poll_shutdown(cx)
}
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[std::io::IoSlice<'_>],
) -> Poll<IoResult<usize>> {
let write = Pin::new(&mut self.stream).poll_write_vectored(cx, bufs);
if self.auto_flush {
let _ = Pin::new(&mut self.stream).poll_flush(cx);
}
write
}
fn is_write_vectored(&self) -> bool {
true
}
}
#[derive(Debug)]
pub struct TlsStream<T: AsyncRead + AsyncWrite + Unpin> {
stream: S2NTlsStream<AutoFlushableStream<T>>,
digest: Option<Arc<SslDigest>>,
pub(super) timing: TimingDigest,
}
impl<T> TlsStream<T>
where
T: AsyncRead + AsyncWrite + std::marker::Unpin,
{
pub fn from_s2n_stream(stream: S2NTlsStream<AutoFlushableStream<T>>) -> TlsStream<T> {
let mut timing: TimingDigest = Default::default();
timing.established_ts = SystemTime::now();
let digest = Some(Arc::new(SslDigest::from_stream(Some(&stream))));
TlsStream {
stream,
digest,
timing,
}
}
}
impl<T: AsyncRead + AsyncWrite + std::marker::Unpin> Deref for AutoFlushableStream<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.stream
}
}
impl<T: AsyncRead + AsyncWrite + std::marker::Unpin> DerefMut for AutoFlushableStream<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.stream
}
}
impl<T: AsyncRead + AsyncWrite + std::marker::Unpin> Deref for TlsStream<T> {
type Target = S2NTlsStream<AutoFlushableStream<T>>;
fn deref(&self) -> &Self::Target {
&self.stream
}
}
impl<T: AsyncRead + AsyncWrite + std::marker::Unpin> DerefMut for TlsStream<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.stream
}
}
impl<T: AsyncRead + AsyncWrite + std::marker::Unpin> Ssl for TlsStream<T> {
fn get_ssl_digest(&self) -> Option<Arc<SslDigest>> {
self.ssl_digest()
}
fn selected_alpn_proto(&self) -> Option<ALPN> {
let stream = self.stream.as_ref();
let proto = stream.application_protocol();
match proto {
None => None,
Some(raw) => ALPN::from_wire_selected(raw),
}
}
}
impl<T> TlsStream<T>
where
T: AsyncRead + AsyncWrite + std::marker::Unpin,
{
pub fn ssl_digest(&self) -> Option<Arc<SslDigest>> {
self.digest.clone()
}
}
impl<T> AsyncRead for TlsStream<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<IoResult<()>> {
debug!("poll_read");
Pin::new(&mut self.stream).poll_read(cx, buf)
}
}
impl<T> AsyncWrite for TlsStream<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll<IoResult<usize>> {
Pin::new(&mut self.stream).poll_write(cx, buf)
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<IoResult<()>> {
Pin::new(&mut self.stream).poll_flush(cx)
}
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<IoResult<()>> {
Pin::new(&mut self.stream).poll_shutdown(cx)
}
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[std::io::IoSlice<'_>],
) -> Poll<IoResult<usize>> {
Pin::new(&mut self.stream).poll_write_vectored(cx, bufs)
}
fn is_write_vectored(&self) -> bool {
true
}
}
impl<T> UniqueID for TlsStream<T>
where
T: UniqueID + AsyncRead + AsyncWrite + Unpin,
{
fn id(&self) -> UniqueIDType {
self.stream.get_ref().id()
}
}
impl<S> GetSocketDigest for TlsStream<S>
where
S: GetSocketDigest + AsyncRead + AsyncWrite + std::marker::Unpin,
{
fn get_socket_digest(&self) -> Option<Arc<SocketDigest>> {
self.stream.get_ref().get_socket_digest()
}
fn set_socket_digest(&mut self, socket_digest: SocketDigest) {
self.stream.get_mut().set_socket_digest(socket_digest)
}
}
impl<S> GetTimingDigest for TlsStream<S>
where
S: GetTimingDigest + AsyncRead + AsyncWrite + std::marker::Unpin,
{
fn get_timing_digest(&self) -> Vec<Option<TimingDigest>> {
let mut ts_vec = self.stream.get_ref().get_timing_digest();
ts_vec.push(Some(self.timing.clone()));
ts_vec
}
fn get_read_pending_time(&self) -> Duration {
self.stream.get_ref().get_read_pending_time()
}
fn get_write_pending_time(&self) -> Duration {
self.stream.get_ref().get_write_pending_time()
}
}
impl<S> GetProxyDigest for TlsStream<S>
where
S: GetProxyDigest + AsyncRead + AsyncWrite + std::marker::Unpin,
{
fn get_proxy_digest(&self) -> Option<Arc<ProxyDigest>> {
self.stream.get_ref().get_proxy_digest()
}
}
impl SslDigest {
fn from_stream<T: AsyncRead + AsyncWrite + Unpin>(stream: Option<&S2NTlsStream<T>>) -> Self {
let conn = stream.unwrap().as_ref();
let cipher = conn.cipher_suite().unwrap_or_default().to_string();
let version = conn
.actual_protocol_version()
.map(|v| format!("{:?}", v))
.unwrap_or_default()
.to_string();
let mut organization = None;
let mut serial_number = None;
let mut cert_digest = None;
if let Ok(cert_chain) = conn.peer_cert_chain() {
if let Some(Ok(cert)) = cert_chain.iter().next() {
if let Ok(raw_cert) = cert.der() {
if let Ok((org, serial)) = get_organization_serial_bytes(raw_cert) {
organization = org;
serial_number = Some(serial);
}
cert_digest = Some(hash_certificate(raw_cert));
}
}
}
SslDigest::new(
cipher,
version,
organization,
serial_number,
cert_digest.unwrap_or_default(),
)
}
}
impl<S: AsyncRead + AsyncWrite + std::marker::Unpin> Peek for TlsStream<S> {}
#[async_trait]
impl<S: Shutdown + AsyncRead + AsyncWrite + std::marker::Unpin + Send> Shutdown for TlsStream<S> {
async fn shutdown(&mut self) -> () {
self.get_mut().shutdown().await
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/tls/s2n/client.rs | pingora-core/src/protocols/tls/s2n/client.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! S2N client specific implementation
use crate::protocols::tls::{AutoFlushableStream, S2NConnectionBuilder, TlsStream};
use crate::protocols::IO;
use pingora_error::ErrorType::TLSHandshakeFailure;
use pingora_error::{Error, Result};
use pingora_s2n::TlsConnector;
// Perform the TLS handshake for the given connection with the given configuration
pub async fn handshake<S: IO>(
connector: &TlsConnector<S2NConnectionBuilder>,
domain: &str,
stream: S,
) -> Result<TlsStream<S>> {
// Wrap incoming stream in an auto flushable stream with auto flush enabled because
// s2n-tls doesn't invoke flush after writing to the connection. This would result in
// the handshake hanging and timing on streams with write buffering.
let auto_flushable_stream = AutoFlushableStream::new(stream, true);
let mut s2n_stream = connector
.connect(domain, auto_flushable_stream)
.await
.map_err(|e| {
let context = format!("TLS connect() failed: {e}, SNI: {domain}");
Error::explain(TLSHandshakeFailure, context)
})?;
// Disable auto-flush to not interfere with write buffering going forward.
s2n_stream.get_mut().set_auto_flush(false);
Ok(TlsStream::from_s2n_stream(s2n_stream))
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/tls/s2n/mod.rs | pingora-core/src/protocols/tls/s2n/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod client;
pub mod server;
mod stream;
use std::{
hash::{Hash, Hasher},
sync::Arc,
};
use pingora_s2n::{
Config, Connection, ConnectionBuilder, Mode, Psk as S2NPsk, PskHmac, S2NError, S2NPolicy,
};
pub use stream::*;
use crate::utils::tls::X509Pem;
pub type CaType = X509Pem;
pub type PskType = PskConfig;
#[derive(Debug)]
pub struct PskConfig {
pub keys: Vec<Psk>,
}
impl PskConfig {
pub fn new(keys: Vec<Psk>) -> Self {
Self { keys }
}
}
impl Hash for PskConfig {
fn hash<H: Hasher>(&self, state: &mut H) {
for psk in self.keys.iter() {
psk.identity.hash(state);
psk.secret.hash(state);
}
}
}
#[derive(Debug)]
pub struct Psk {
pub identity: Vec<u8>,
pub secret: Vec<u8>,
pub hmac: PskHmac,
}
impl Psk {
pub fn new(identity: String, secret: Vec<u8>, hmac: PskHmac) -> Self {
Self {
identity: identity.into_bytes(),
secret,
hmac,
}
}
}
pub struct TlsRef;
/// Custom s2n-tls connection builder. The s2n-tls-tokio crate doesn't expose
/// a higher level api to configure private shared keys on a TLS connection.
///
/// This builder will create a new connection and configure it with the appropriate
/// psk configurations based on the provided private shared keys.
/// ```
#[derive(Debug, Clone)]
pub struct S2NConnectionBuilder {
pub config: Config,
pub psk_config: Option<Arc<PskConfig>>,
pub security_policy: Option<S2NPolicy>,
}
impl ConnectionBuilder for S2NConnectionBuilder {
type Output = Connection;
fn build_connection(&self, mode: Mode) -> std::result::Result<Self::Output, S2NError> {
let mut conn = Connection::new(mode);
conn.set_config(self.config.clone())?;
if let Some(psk_config) = &self.psk_config {
for psk in psk_config.keys.iter() {
let mut psk_builder = S2NPsk::builder()?;
psk_builder.set_identity(&psk.identity)?;
psk_builder.set_hmac(PskHmac::SHA256)?;
psk_builder.set_secret(&psk.secret)?;
conn.append_psk(&psk_builder.build()?)?;
}
}
if let Some(policy) = &self.security_policy {
conn.set_security_policy(policy)?;
}
Ok(conn)
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/tls/s2n/server.rs | pingora-core/src/protocols/tls/s2n/server.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! S2N server specific implementation
use crate::listeners::tls::Acceptor;
use crate::protocols::tls::{AutoFlushableStream, TlsStream};
use crate::protocols::IO;
use pingora_error::ErrorType::TLSHandshakeFailure;
use pingora_error::{Error, Result};
pub async fn handshake<S: IO>(acceptor: &Acceptor, stream: S) -> Result<TlsStream<S>> {
// Wrap incoming stream in an auto flushable stream with auto flush enabled because
// s2n-tls doesn't invoke flush after writing to the connection. This would result in
// the handshake hanging and timing on streams with write buffering.
let auto_flushable_stream = AutoFlushableStream::new(stream, true);
let mut s2n_stream = acceptor
.acceptor
.accept(auto_flushable_stream)
.await
.map_err(|e| {
let context = format!("TLS accept() failed: {e}");
Error::explain(TLSHandshakeFailure, context)
})?;
// Disable auto-flush to not interfere with write buffering going forward.
s2n_stream.get_mut().set_auto_flush(false);
Ok(TlsStream::from_s2n_stream(s2n_stream))
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/l4/stream.rs | pingora-core/src/protocols/l4/stream.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Transport layer connection
use async_trait::async_trait;
use futures::FutureExt;
use log::{debug, error};
use pingora_error::{ErrorType::*, OrErr, Result};
#[cfg(target_os = "linux")]
use std::io::IoSliceMut;
#[cfg(unix)]
use std::os::unix::io::AsRawFd;
#[cfg(windows)]
use std::os::windows::io::AsRawSocket;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::{Duration, Instant, SystemTime};
#[cfg(target_os = "linux")]
use tokio::io::Interest;
use tokio::io::{self, AsyncRead, AsyncWrite, AsyncWriteExt, BufStream, ReadBuf};
use tokio::net::TcpStream;
#[cfg(unix)]
use tokio::net::UnixStream;
use crate::protocols::l4::ext::{set_tcp_keepalive, TcpKeepalive};
use crate::protocols::l4::virt;
use crate::protocols::raw_connect::ProxyDigest;
use crate::protocols::{
GetProxyDigest, GetSocketDigest, GetTimingDigest, Peek, Shutdown, SocketDigest, Ssl,
TimingDigest, UniqueID, UniqueIDType,
};
use crate::upstreams::peer::Tracer;
#[derive(Debug)]
enum RawStream {
Tcp(TcpStream),
#[cfg(unix)]
Unix(UnixStream),
Virtual(virt::VirtualSocketStream),
}
impl AsyncRead for RawStream {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
// Safety: Basic enum pin projection
unsafe {
match &mut Pin::get_unchecked_mut(self) {
RawStream::Tcp(s) => Pin::new_unchecked(s).poll_read(cx, buf),
#[cfg(unix)]
RawStream::Unix(s) => Pin::new_unchecked(s).poll_read(cx, buf),
RawStream::Virtual(s) => Pin::new_unchecked(s).poll_read(cx, buf),
}
}
}
}
impl AsyncWrite for RawStream {
fn poll_write(self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll<io::Result<usize>> {
// Safety: Basic enum pin projection
unsafe {
match &mut Pin::get_unchecked_mut(self) {
RawStream::Tcp(s) => Pin::new_unchecked(s).poll_write(cx, buf),
#[cfg(unix)]
RawStream::Unix(s) => Pin::new_unchecked(s).poll_write(cx, buf),
RawStream::Virtual(s) => Pin::new_unchecked(s).poll_write(cx, buf),
}
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
// Safety: Basic enum pin projection
unsafe {
match &mut Pin::get_unchecked_mut(self) {
RawStream::Tcp(s) => Pin::new_unchecked(s).poll_flush(cx),
#[cfg(unix)]
RawStream::Unix(s) => Pin::new_unchecked(s).poll_flush(cx),
RawStream::Virtual(s) => Pin::new_unchecked(s).poll_flush(cx),
}
}
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
// Safety: Basic enum pin projection
unsafe {
match &mut Pin::get_unchecked_mut(self) {
RawStream::Tcp(s) => Pin::new_unchecked(s).poll_shutdown(cx),
#[cfg(unix)]
RawStream::Unix(s) => Pin::new_unchecked(s).poll_shutdown(cx),
RawStream::Virtual(s) => Pin::new_unchecked(s).poll_shutdown(cx),
}
}
}
fn poll_write_vectored(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[std::io::IoSlice<'_>],
) -> Poll<io::Result<usize>> {
// Safety: Basic enum pin projection
unsafe {
match &mut Pin::get_unchecked_mut(self) {
RawStream::Tcp(s) => Pin::new_unchecked(s).poll_write_vectored(cx, bufs),
#[cfg(unix)]
RawStream::Unix(s) => Pin::new_unchecked(s).poll_write_vectored(cx, bufs),
RawStream::Virtual(s) => Pin::new_unchecked(s).poll_write_vectored(cx, bufs),
}
}
}
fn is_write_vectored(&self) -> bool {
match self {
RawStream::Tcp(s) => s.is_write_vectored(),
#[cfg(unix)]
RawStream::Unix(s) => s.is_write_vectored(),
RawStream::Virtual(s) => s.is_write_vectored(),
}
}
}
#[cfg(unix)]
impl AsRawFd for RawStream {
fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
match self {
RawStream::Tcp(s) => s.as_raw_fd(),
RawStream::Unix(s) => s.as_raw_fd(),
RawStream::Virtual(_) => -1, // Virtual stream does not have a real fd
}
}
}
#[cfg(windows)]
impl AsRawSocket for RawStream {
fn as_raw_socket(&self) -> std::os::windows::io::RawSocket {
match self {
RawStream::Tcp(s) => s.as_raw_socket(),
}
}
}
#[derive(Debug)]
struct RawStreamWrapper {
pub(crate) stream: RawStream,
/// store the last rx timestamp of the stream.
pub(crate) rx_ts: Option<SystemTime>,
/// enable reading rx timestamp
#[cfg(target_os = "linux")]
pub(crate) enable_rx_ts: bool,
#[cfg(target_os = "linux")]
/// This can be reused across multiple recvmsg calls. The cmsg buffer may
/// come from old sockets created by older version of pingora and so,
/// this vector can only grow.
reusable_cmsg_space: Vec<u8>,
}
impl RawStreamWrapper {
pub fn new(stream: RawStream) -> Self {
RawStreamWrapper {
stream,
rx_ts: None,
#[cfg(target_os = "linux")]
enable_rx_ts: false,
#[cfg(target_os = "linux")]
reusable_cmsg_space: nix::cmsg_space!(nix::sys::time::TimeSpec),
}
}
#[cfg(target_os = "linux")]
pub fn enable_rx_ts(&mut self, enable_rx_ts: bool) {
self.enable_rx_ts = enable_rx_ts;
}
}
impl AsyncRead for RawStreamWrapper {
#[cfg(not(target_os = "linux"))]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
// Safety: Basic enum pin projection
unsafe {
let rs_wrapper = Pin::get_unchecked_mut(self);
match &mut rs_wrapper.stream {
RawStream::Tcp(s) => Pin::new_unchecked(s).poll_read(cx, buf),
#[cfg(unix)]
RawStream::Unix(s) => Pin::new_unchecked(s).poll_read(cx, buf),
RawStream::Virtual(s) => return Pin::new_unchecked(s).poll_read(cx, buf),
}
}
}
#[cfg(target_os = "linux")]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
use futures::ready;
use nix::sys::socket::{recvmsg, ControlMessageOwned, MsgFlags, SockaddrStorage};
// if we do not need rx timestamp, then use the standard path
if !self.enable_rx_ts {
// Safety: Basic enum pin projection
unsafe {
let rs_wrapper = Pin::get_unchecked_mut(self);
match &mut rs_wrapper.stream {
RawStream::Tcp(s) => return Pin::new_unchecked(s).poll_read(cx, buf),
RawStream::Unix(s) => return Pin::new_unchecked(s).poll_read(cx, buf),
RawStream::Virtual(s) => return Pin::new_unchecked(s).poll_read(cx, buf),
}
}
}
// Safety: Basic pin projection to get mutable stream
let rs_wrapper = unsafe { Pin::get_unchecked_mut(self) };
match &mut rs_wrapper.stream {
RawStream::Tcp(s) => {
loop {
ready!(s.poll_read_ready(cx))?;
// Safety: maybe uninitialized bytes will only be passed to recvmsg
let b = unsafe {
&mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit<u8>]
as *mut [u8])
};
let mut iov = [IoSliceMut::new(b)];
rs_wrapper.reusable_cmsg_space.clear();
match s.try_io(Interest::READABLE, || {
recvmsg::<SockaddrStorage>(
s.as_raw_fd(),
&mut iov,
Some(&mut rs_wrapper.reusable_cmsg_space),
MsgFlags::empty(),
)
.map_err(|errno| errno.into())
}) {
Ok(r) => {
if let Some(ControlMessageOwned::ScmTimestampsns(rtime)) = r
.cmsgs()
.find(|i| matches!(i, ControlMessageOwned::ScmTimestampsns(_)))
{
// The returned timestamp is a real (i.e. not monotonic) timestamp
// https://docs.kernel.org/networking/timestamping.html
rs_wrapper.rx_ts =
SystemTime::UNIX_EPOCH.checked_add(rtime.system.into());
}
// Safety: We trust `recvmsg` to have filled up `r.bytes` bytes in the buffer.
unsafe {
buf.assume_init(r.bytes);
}
buf.advance(r.bytes);
return Poll::Ready(Ok(()));
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
Err(e) => return Poll::Ready(Err(e)),
}
}
}
// Unix RX timestamp only works with datagram for now, so we do not care about it
RawStream::Unix(s) => unsafe { Pin::new_unchecked(s).poll_read(cx, buf) },
RawStream::Virtual(s) => unsafe { Pin::new_unchecked(s).poll_read(cx, buf) },
}
}
}
impl AsyncWrite for RawStreamWrapper {
fn poll_write(self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll<io::Result<usize>> {
// Safety: Basic enum pin projection
unsafe {
match &mut Pin::get_unchecked_mut(self).stream {
RawStream::Tcp(s) => Pin::new_unchecked(s).poll_write(cx, buf),
#[cfg(unix)]
RawStream::Unix(s) => Pin::new_unchecked(s).poll_write(cx, buf),
RawStream::Virtual(s) => Pin::new_unchecked(s).poll_write(cx, buf),
}
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
// Safety: Basic enum pin projection
unsafe {
match &mut Pin::get_unchecked_mut(self).stream {
RawStream::Tcp(s) => Pin::new_unchecked(s).poll_flush(cx),
#[cfg(unix)]
RawStream::Unix(s) => Pin::new_unchecked(s).poll_flush(cx),
RawStream::Virtual(s) => Pin::new_unchecked(s).poll_flush(cx),
}
}
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
// Safety: Basic enum pin projection
unsafe {
match &mut Pin::get_unchecked_mut(self).stream {
RawStream::Tcp(s) => Pin::new_unchecked(s).poll_shutdown(cx),
#[cfg(unix)]
RawStream::Unix(s) => Pin::new_unchecked(s).poll_shutdown(cx),
RawStream::Virtual(s) => Pin::new_unchecked(s).poll_shutdown(cx),
}
}
}
fn poll_write_vectored(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[std::io::IoSlice<'_>],
) -> Poll<io::Result<usize>> {
// Safety: Basic enum pin projection
unsafe {
match &mut Pin::get_unchecked_mut(self).stream {
RawStream::Tcp(s) => Pin::new_unchecked(s).poll_write_vectored(cx, bufs),
#[cfg(unix)]
RawStream::Unix(s) => Pin::new_unchecked(s).poll_write_vectored(cx, bufs),
RawStream::Virtual(s) => Pin::new_unchecked(s).poll_write_vectored(cx, bufs),
}
}
}
fn is_write_vectored(&self) -> bool {
self.stream.is_write_vectored()
}
}
#[cfg(unix)]
impl AsRawFd for RawStreamWrapper {
fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
self.stream.as_raw_fd()
}
}
#[cfg(windows)]
impl AsRawSocket for RawStreamWrapper {
fn as_raw_socket(&self) -> std::os::windows::io::RawSocket {
self.stream.as_raw_socket()
}
}
// Large read buffering helps reducing syscalls with little trade-off
// Ssl layer always does "small" reads in 16k (TLS record size) so L4 read buffer helps a lot.
const BUF_READ_SIZE: usize = 64 * 1024;
// Small write buf to match MSS. Too large write buf delays real time communication.
// This buffering effectively implements something similar to Nagle's algorithm.
// The benefit is that user space can control when to flush, where Nagle's can't be controlled.
// And userspace buffering reduce both syscalls and small packets.
const BUF_WRITE_SIZE: usize = 1460;
// NOTE: with writer buffering, users need to call flush() to make sure the data is actually
// sent. Otherwise data could be stuck in the buffer forever or get lost when stream is closed.
/// A concrete type for transport layer connection + extra fields for logging
#[derive(Debug)]
pub struct Stream {
// Use `Option` to be able to swap to adjust the buffer size. Always safe to unwrap
stream: Option<BufStream<RawStreamWrapper>>,
// the data put back at the front of the read buffer, in order to replay the read
rewind_read_buf: Vec<Vec<u8>>,
buffer_write: bool,
proxy_digest: Option<Arc<ProxyDigest>>,
socket_digest: Option<Arc<SocketDigest>>,
/// When this connection is established
pub established_ts: SystemTime,
/// The distributed tracing object for this stream
pub tracer: Option<Tracer>,
read_pending_time: AccumulatedDuration,
write_pending_time: AccumulatedDuration,
/// Last rx timestamp associated with the last recvmsg call.
pub rx_ts: Option<SystemTime>,
}
impl Stream {
fn stream(&self) -> &BufStream<RawStreamWrapper> {
self.stream.as_ref().expect("stream should always be set")
}
fn stream_mut(&mut self) -> &mut BufStream<RawStreamWrapper> {
self.stream.as_mut().expect("stream should always be set")
}
/// set TCP nodelay for this connection if `self` is TCP
pub fn set_nodelay(&mut self) -> Result<()> {
match &self.stream_mut().get_mut().stream {
RawStream::Tcp(s) => {
s.set_nodelay(true)
.or_err(ConnectError, "failed to set_nodelay")?;
}
RawStream::Virtual(s) => {
s.set_socket_option(virt::VirtualSockOpt::NoDelay)
.or_err(ConnectError, "failed to set_nodelay on virtual socket")?;
}
_ => (),
}
Ok(())
}
/// set TCP keepalive settings for this connection if `self` is TCP
pub fn set_keepalive(&mut self, ka: &TcpKeepalive) -> Result<()> {
match &self.stream_mut().get_mut().stream {
RawStream::Tcp(s) => {
debug!("Setting tcp keepalive");
set_tcp_keepalive(s, ka)?;
}
RawStream::Virtual(s) => {
s.set_socket_option(virt::VirtualSockOpt::KeepAlive(ka.clone()))
.or_err(ConnectError, "failed to set_keepalive on virtual socket")?;
}
_ => (),
}
Ok(())
}
#[cfg(target_os = "linux")]
pub fn set_rx_timestamp(&mut self) -> Result<()> {
use nix::sys::socket::{setsockopt, sockopt, TimestampingFlag};
if let RawStream::Tcp(s) = &self.stream_mut().get_mut().stream {
let timestamp_options = TimestampingFlag::SOF_TIMESTAMPING_RX_SOFTWARE
| TimestampingFlag::SOF_TIMESTAMPING_SOFTWARE;
setsockopt(s.as_raw_fd(), sockopt::Timestamping, ×tamp_options)
.or_err(InternalError, "failed to set SOF_TIMESTAMPING_RX_SOFTWARE")?;
self.stream_mut().get_mut().enable_rx_ts(true);
}
Ok(())
}
#[cfg(not(target_os = "linux"))]
pub fn set_rx_timestamp(&mut self) -> io::Result<()> {
Ok(())
}
/// Put Some data back to the head of the stream to be read again
pub(crate) fn rewind(&mut self, data: &[u8]) {
if !data.is_empty() {
self.rewind_read_buf.push(data.to_vec());
}
}
/// Set the buffer of BufStream
/// It is only set later because of the malloc overhead in critical accept() path
pub(crate) fn set_buffer(&mut self) {
use std::mem;
// Since BufStream doesn't provide an API to adjust the buf directly,
// we take the raw stream out of it and put it in a new BufStream with the size we want
let stream = mem::take(&mut self.stream);
let stream =
stream.map(|s| BufStream::with_capacity(BUF_READ_SIZE, BUF_WRITE_SIZE, s.into_inner()));
let _ = mem::replace(&mut self.stream, stream);
}
}
impl From<TcpStream> for Stream {
fn from(s: TcpStream) -> Self {
Stream {
stream: Some(BufStream::with_capacity(
0,
0,
RawStreamWrapper::new(RawStream::Tcp(s)),
)),
rewind_read_buf: Vec::new(),
buffer_write: true,
established_ts: SystemTime::now(),
proxy_digest: None,
socket_digest: None,
tracer: None,
read_pending_time: AccumulatedDuration::new(),
write_pending_time: AccumulatedDuration::new(),
rx_ts: None,
}
}
}
impl From<virt::VirtualSocketStream> for Stream {
fn from(s: virt::VirtualSocketStream) -> Self {
Stream {
stream: Some(BufStream::with_capacity(
0,
0,
RawStreamWrapper::new(RawStream::Virtual(s)),
)),
rewind_read_buf: Vec::new(),
buffer_write: true,
established_ts: SystemTime::now(),
proxy_digest: None,
socket_digest: None,
tracer: None,
read_pending_time: AccumulatedDuration::new(),
write_pending_time: AccumulatedDuration::new(),
rx_ts: None,
}
}
}
#[cfg(unix)]
impl From<UnixStream> for Stream {
fn from(s: UnixStream) -> Self {
Stream {
stream: Some(BufStream::with_capacity(
0,
0,
RawStreamWrapper::new(RawStream::Unix(s)),
)),
rewind_read_buf: Vec::new(),
buffer_write: true,
established_ts: SystemTime::now(),
proxy_digest: None,
socket_digest: None,
tracer: None,
read_pending_time: AccumulatedDuration::new(),
write_pending_time: AccumulatedDuration::new(),
rx_ts: None,
}
}
}
#[cfg(unix)]
impl AsRawFd for Stream {
fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
self.stream().get_ref().as_raw_fd()
}
}
#[cfg(windows)]
impl AsRawSocket for Stream {
fn as_raw_socket(&self) -> std::os::windows::io::RawSocket {
self.stream().get_ref().as_raw_socket()
}
}
#[cfg(unix)]
impl UniqueID for Stream {
fn id(&self) -> UniqueIDType {
self.as_raw_fd()
}
}
#[cfg(windows)]
impl UniqueID for Stream {
fn id(&self) -> usize {
self.as_raw_socket() as usize
}
}
impl Ssl for Stream {}
#[async_trait]
impl Peek for Stream {
async fn try_peek(&mut self, buf: &mut [u8]) -> std::io::Result<bool> {
use tokio::io::AsyncReadExt;
self.read_exact(buf).await?;
// rewind regardless of what is read
self.rewind(buf);
Ok(true)
}
}
#[async_trait]
impl Shutdown for Stream {
async fn shutdown(&mut self) {
AsyncWriteExt::shutdown(self).await.unwrap_or_else(|e| {
debug!("Failed to shutdown connection: {:?}", e);
});
}
}
impl GetTimingDigest for Stream {
fn get_timing_digest(&self) -> Vec<Option<TimingDigest>> {
let mut digest = Vec::with_capacity(2); // expect to have both L4 stream and TLS layer
digest.push(Some(TimingDigest {
established_ts: self.established_ts,
}));
digest
}
fn get_read_pending_time(&self) -> Duration {
self.read_pending_time.total
}
fn get_write_pending_time(&self) -> Duration {
self.write_pending_time.total
}
}
impl GetProxyDigest for Stream {
fn get_proxy_digest(&self) -> Option<Arc<ProxyDigest>> {
self.proxy_digest.clone()
}
fn set_proxy_digest(&mut self, digest: ProxyDigest) {
self.proxy_digest = Some(Arc::new(digest));
}
}
impl GetSocketDigest for Stream {
fn get_socket_digest(&self) -> Option<Arc<SocketDigest>> {
self.socket_digest.clone()
}
fn set_socket_digest(&mut self, socket_digest: SocketDigest) {
self.socket_digest = Some(Arc::new(socket_digest))
}
}
impl Drop for Stream {
fn drop(&mut self) {
if let Some(t) = self.tracer.as_ref() {
t.0.on_disconnected();
}
/* use nodelay/local_addr function to detect socket status */
let ret = match &self.stream().get_ref().stream {
RawStream::Tcp(s) => s.nodelay().err(),
#[cfg(unix)]
RawStream::Unix(s) => s.local_addr().err(),
RawStream::Virtual(_) => {
// TODO: should this do something?
None
}
};
if let Some(e) = ret {
match e.kind() {
tokio::io::ErrorKind::Other => {
if let Some(ecode) = e.raw_os_error() {
if ecode == 9 {
// Or we could panic here
error!("Crit: socket {:?} is being double closed", self.stream);
}
}
}
_ => {
debug!("Socket is already broken {:?}", e);
}
}
} else {
// try flush the write buffer. We use now_or_never() because
// 1. Drop cannot be async
// 2. write should usually be ready, unless the buf is full.
let _ = self.flush().now_or_never();
}
debug!("Dropping socket {:?}", self.stream);
}
}
impl AsyncRead for Stream {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
let result = if !self.rewind_read_buf.is_empty() {
let data_to_read = self.rewind_read_buf.pop().unwrap(); // safe
let mut data_to_read = data_to_read.as_slice();
let result = Pin::new(&mut data_to_read).poll_read(cx, buf);
// return the remaining data back to the head of rewind_read_buf
if !data_to_read.is_empty() {
let remaining_buf = Vec::from(data_to_read);
self.rewind_read_buf.push(remaining_buf);
}
result
} else {
Pin::new(&mut self.stream_mut()).poll_read(cx, buf)
};
self.read_pending_time.poll_time(&result);
self.rx_ts = self.stream().get_ref().rx_ts;
result
}
}
impl AsyncWrite for Stream {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context,
buf: &[u8],
) -> Poll<io::Result<usize>> {
let result = if self.buffer_write {
Pin::new(&mut self.stream_mut()).poll_write(cx, buf)
} else {
Pin::new(&mut self.stream_mut().get_mut()).poll_write(cx, buf)
};
self.write_pending_time.poll_write_time(&result, buf.len());
result
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
let result = Pin::new(&mut self.stream_mut()).poll_flush(cx);
self.write_pending_time.poll_time(&result);
result
}
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
Pin::new(&mut self.stream_mut()).poll_shutdown(cx)
}
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[std::io::IoSlice<'_>],
) -> Poll<io::Result<usize>> {
let total_size = bufs.iter().fold(0, |acc, s| acc + s.len());
let result = if self.buffer_write {
Pin::new(&mut self.stream_mut()).poll_write_vectored(cx, bufs)
} else {
Pin::new(&mut self.stream_mut().get_mut()).poll_write_vectored(cx, bufs)
};
self.write_pending_time.poll_write_time(&result, total_size);
result
}
fn is_write_vectored(&self) -> bool {
if self.buffer_write {
self.stream().is_write_vectored() // it is true
} else {
self.stream().get_ref().is_write_vectored()
}
}
}
pub mod async_write_vec {
use bytes::Buf;
use futures::ready;
use std::future::Future;
use std::io::IoSlice;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io;
use tokio::io::AsyncWrite;
/*
the missing write_buf https://github.com/tokio-rs/tokio/pull/3156#issuecomment-738207409
https://github.com/tokio-rs/tokio/issues/2610
In general vectored write is lost when accessing the trait object: Box<S: AsyncWrite>
*/
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct WriteVec<'a, W, B> {
writer: &'a mut W,
buf: &'a mut B,
}
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct WriteVecAll<'a, W, B> {
writer: &'a mut W,
buf: &'a mut B,
}
pub trait AsyncWriteVec {
fn poll_write_vec<B: Buf>(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
_buf: &mut B,
) -> Poll<io::Result<usize>>;
fn write_vec<'a, B>(&'a mut self, src: &'a mut B) -> WriteVec<'a, Self, B>
where
Self: Sized,
B: Buf,
{
WriteVec {
writer: self,
buf: src,
}
}
fn write_vec_all<'a, B>(&'a mut self, src: &'a mut B) -> WriteVecAll<'a, Self, B>
where
Self: Sized,
B: Buf,
{
WriteVecAll {
writer: self,
buf: src,
}
}
}
impl<W, B> Future for WriteVec<'_, W, B>
where
W: AsyncWriteVec + Unpin,
B: Buf,
{
type Output = io::Result<usize>;
fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<io::Result<usize>> {
let me = &mut *self;
Pin::new(&mut *me.writer).poll_write_vec(ctx, me.buf)
}
}
impl<W, B> Future for WriteVecAll<'_, W, B>
where
W: AsyncWriteVec + Unpin,
B: Buf,
{
type Output = io::Result<()>;
fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<io::Result<()>> {
let me = &mut *self;
while me.buf.has_remaining() {
let n = ready!(Pin::new(&mut *me.writer).poll_write_vec(ctx, me.buf))?;
if n == 0 {
return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
}
}
Poll::Ready(Ok(()))
}
}
/* from https://github.com/tokio-rs/tokio/blob/master/tokio-util/src/lib.rs#L177 */
impl<T> AsyncWriteVec for T
where
T: AsyncWrite,
{
fn poll_write_vec<B: Buf>(
self: Pin<&mut Self>,
ctx: &mut Context,
buf: &mut B,
) -> Poll<io::Result<usize>> {
const MAX_BUFS: usize = 64;
if !buf.has_remaining() {
return Poll::Ready(Ok(0));
}
let n = if self.is_write_vectored() {
let mut slices = [IoSlice::new(&[]); MAX_BUFS];
let cnt = buf.chunks_vectored(&mut slices);
ready!(self.poll_write_vectored(ctx, &slices[..cnt]))?
} else {
ready!(self.poll_write(ctx, buf.chunk()))?
};
buf.advance(n);
Poll::Ready(Ok(n))
}
}
}
pub use async_write_vec::AsyncWriteVec;
#[derive(Debug)]
struct AccumulatedDuration {
total: Duration,
last_start: Option<Instant>,
}
impl AccumulatedDuration {
fn new() -> Self {
AccumulatedDuration {
total: Duration::ZERO,
last_start: None,
}
}
fn start(&mut self) {
if self.last_start.is_none() {
self.last_start = Some(Instant::now());
}
}
fn stop(&mut self) {
if let Some(start) = self.last_start.take() {
self.total += start.elapsed();
}
}
fn poll_write_time(&mut self, result: &Poll<io::Result<usize>>, buf_size: usize) {
match result {
Poll::Ready(Ok(n)) => {
if *n == buf_size {
self.stop();
} else {
// partial write
self.start();
}
}
Poll::Ready(Err(_)) => {
self.stop();
}
_ => self.start(),
}
}
fn poll_time(&mut self, result: &Poll<io::Result<()>>) {
match result {
Poll::Ready(_) => {
self.stop();
}
_ => self.start(),
}
}
}
#[cfg(test)]
#[cfg(target_os = "linux")]
mod tests {
use super::*;
use std::sync::Arc;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpListener;
use tokio::sync::Notify;
#[cfg(target_os = "linux")]
#[tokio::test]
async fn test_rx_timestamp() {
let message = "hello world".as_bytes();
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let addr = listener.local_addr().unwrap();
let notify = Arc::new(Notify::new());
let notify2 = notify.clone();
tokio::spawn(async move {
let (mut stream, _) = listener.accept().await.unwrap();
notify2.notified().await;
stream.write_all(message).await.unwrap();
});
let mut stream: Stream = TcpStream::connect(addr).await.unwrap().into();
stream.set_rx_timestamp().unwrap();
// Receive the message
// setsockopt for SO_TIMESTAMPING is asynchronous so sleep a little bit
// to let kernel do the work
std::thread::sleep(Duration::from_micros(100));
notify.notify_one();
let mut buffer = vec![0u8; message.len()];
let n = stream.read(buffer.as_mut_slice()).await.unwrap();
assert_eq!(n, message.len());
assert!(stream.rx_ts.is_some());
}
#[cfg(target_os = "linux")]
#[tokio::test]
async fn test_rx_timestamp_standard_path() {
let message = "hello world".as_bytes();
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let addr = listener.local_addr().unwrap();
let notify = Arc::new(Notify::new());
let notify2 = notify.clone();
tokio::spawn(async move {
let (mut stream, _) = listener.accept().await.unwrap();
notify2.notified().await;
stream.write_all(message).await.unwrap();
});
let mut stream: Stream = TcpStream::connect(addr).await.unwrap().into();
std::thread::sleep(Duration::from_micros(100));
notify.notify_one();
let mut buffer = vec![0u8; message.len()];
let n = stream.read(buffer.as_mut_slice()).await.unwrap();
assert_eq!(n, message.len());
assert!(stream.rx_ts.is_none());
}
#[tokio::test]
async fn test_stream_rewind() {
let message = b"hello world";
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | true |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/l4/virt.rs | pingora-core/src/protocols/l4/virt.rs | //! Provides [`VirtualSocketStream`].
use std::{
pin::Pin,
task::{Context, Poll},
};
use tokio::io::{AsyncRead, AsyncWrite};
use super::ext::TcpKeepalive;
/// A limited set of socket options that can be set on a [`VirtualSocket`].
#[non_exhaustive]
#[derive(Debug, Clone)]
pub enum VirtualSockOpt {
NoDelay,
KeepAlive(TcpKeepalive),
}
/// A "virtual" socket that supports async read and write operations.
pub trait VirtualSocket: AsyncRead + AsyncWrite + Unpin + Send + Sync + std::fmt::Debug {
/// Set a socket option.
fn set_socket_option(&self, opt: VirtualSockOpt) -> std::io::Result<()>;
}
/// Wrapper around any type implementing [`VirtualSocket`].
#[derive(Debug)]
pub struct VirtualSocketStream {
pub(crate) socket: Box<dyn VirtualSocket>,
}
impl VirtualSocketStream {
pub fn new(socket: Box<dyn VirtualSocket>) -> Self {
Self { socket }
}
#[inline]
pub fn set_socket_option(&self, opt: VirtualSockOpt) -> std::io::Result<()> {
self.socket.set_socket_option(opt)
}
}
impl AsyncRead for VirtualSocketStream {
#[inline]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<std::io::Result<()>> {
Pin::new(&mut *self.get_mut().socket).poll_read(cx, buf)
}
}
impl AsyncWrite for VirtualSocketStream {
#[inline]
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<std::io::Result<usize>> {
Pin::new(&mut *self.get_mut().socket).poll_write(cx, buf)
}
#[inline]
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
Pin::new(&mut *self.get_mut().socket).poll_flush(cx)
}
#[inline]
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
Pin::new(&mut *self.get_mut().socket).poll_shutdown(cx)
}
}
#[cfg(test)]
mod tests {
use std::sync::{Arc, Mutex};
use tokio::io::{AsyncReadExt, AsyncWriteExt as _};
use crate::protocols::l4::stream::Stream;
use super::*;
#[derive(Debug)]
struct StaticVirtualSocket {
content: Vec<u8>,
read_pos: usize,
write_buf: Arc<Mutex<Vec<u8>>>,
}
impl AsyncRead for StaticVirtualSocket {
fn poll_read(
mut self: Pin<&mut Self>,
_cx: &mut Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<std::io::Result<()>> {
debug_assert!(self.read_pos <= self.content.len());
let remaining = self.content.len() - self.read_pos;
if remaining == 0 {
return Poll::Ready(Ok(()));
}
let to_read = std::cmp::min(remaining, buf.remaining());
buf.put_slice(&self.content[self.read_pos..self.read_pos + to_read]);
self.read_pos += to_read;
Poll::Ready(Ok(()))
}
}
impl AsyncWrite for StaticVirtualSocket {
fn poll_write(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<std::io::Result<usize>> {
// write to internal buffer
let this = self.get_mut();
this.write_buf.lock().unwrap().extend_from_slice(buf);
Poll::Ready(Ok(buf.len()))
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
Poll::Ready(Ok(()))
}
fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
Poll::Ready(Ok(()))
}
}
impl VirtualSocket for StaticVirtualSocket {
fn set_socket_option(&self, _opt: VirtualSockOpt) -> std::io::Result<()> {
Ok(())
}
}
/// Basic test that ensures reading and writing works with a virtual socket.
//
/// Mostly just ensures that construction works and the plumbing is correct.
#[tokio::test]
async fn test_stream_virtual() {
let content = b"hello virtual world";
let write_buf = Arc::new(Mutex::new(Vec::new()));
let mut stream = Stream::from(VirtualSocketStream::new(Box::new(StaticVirtualSocket {
content: content.to_vec(),
read_pos: 0,
write_buf: write_buf.clone(),
})));
let mut buf = Vec::new();
let out = stream.read_to_end(&mut buf).await.unwrap();
assert_eq!(out, content.len());
assert_eq!(buf, content);
stream.write_all(content).await.unwrap();
assert_eq!(write_buf.lock().unwrap().as_slice(), content);
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/l4/ext.rs | pingora-core/src/protocols/l4/ext.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Extensions to the regular TCP APIs
#![allow(non_camel_case_types)]
#[cfg(unix)]
use libc::socklen_t;
#[cfg(target_os = "linux")]
use libc::{c_int, c_ulonglong, c_void};
use pingora_error::{Error, ErrorType::*, OrErr, Result};
use std::io::{self, ErrorKind};
use std::mem;
use std::net::SocketAddr;
#[cfg(unix)]
use std::os::unix::io::{AsRawFd, RawFd};
#[cfg(windows)]
use std::os::windows::io::{AsRawSocket, RawSocket};
use std::time::Duration;
#[cfg(unix)]
use tokio::net::UnixStream;
use tokio::net::{TcpSocket, TcpStream};
use crate::connectors::l4::BindTo;
/// A copy of the kernel `struct tcp_info` as filled in by `getsockopt(TCP_INFO)`.
///
/// The field order and widths must match include/linux/tcp.h exactly: the
/// kernel writes its struct byte-for-byte into this memory (hence `#[repr(C)]`).
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct TCP_INFO {
    pub tcpi_state: u8,
    pub tcpi_ca_state: u8,
    pub tcpi_retransmits: u8,
    pub tcpi_probes: u8,
    pub tcpi_backoff: u8,
    pub tcpi_options: u8,
    // two 4-bit window-scale fields packed into one byte
    pub tcpi_snd_wscale_4_rcv_wscale_4: u8,
    pub tcpi_delivery_rate_app_limited: u8,
    pub tcpi_rto: u32,
    pub tcpi_ato: u32,
    pub tcpi_snd_mss: u32,
    pub tcpi_rcv_mss: u32,
    pub tcpi_unacked: u32,
    pub tcpi_sacked: u32,
    pub tcpi_lost: u32,
    pub tcpi_retrans: u32,
    pub tcpi_fackets: u32,
    pub tcpi_last_data_sent: u32,
    pub tcpi_last_ack_sent: u32,
    pub tcpi_last_data_recv: u32,
    pub tcpi_last_ack_recv: u32,
    pub tcpi_pmtu: u32,
    pub tcpi_rcv_ssthresh: u32,
    pub tcpi_rtt: u32,
    pub tcpi_rttvar: u32,
    pub tcpi_snd_ssthresh: u32,
    pub tcpi_snd_cwnd: u32,
    pub tcpi_advmss: u32,
    pub tcpi_reordering: u32,
    pub tcpi_rcv_rtt: u32,
    pub tcpi_rcv_space: u32,
    pub tcpi_total_retrans: u32,
    pub tcpi_pacing_rate: u64,
    pub tcpi_max_pacing_rate: u64,
    pub tcpi_bytes_acked: u64,
    pub tcpi_bytes_received: u64,
    pub tcpi_segs_out: u32,
    pub tcpi_segs_in: u32,
    pub tcpi_notsent_bytes: u32,
    pub tcpi_min_rtt: u32,
    pub tcpi_data_segs_in: u32,
    pub tcpi_data_segs_out: u32,
    pub tcpi_delivery_rate: u64,
    pub tcpi_busy_time: u64,
    pub tcpi_rwnd_limited: u64,
    pub tcpi_sndbuf_limited: u64,
    pub tcpi_delivered: u32,
    pub tcpi_delivered_ce: u32,
    pub tcpi_bytes_sent: u64,
    pub tcpi_bytes_retrans: u64,
    pub tcpi_dsack_dups: u32,
    pub tcpi_reord_seen: u32,
    pub tcpi_rcv_ooopack: u32,
    pub tcpi_snd_wnd: u32,
    pub tcpi_rcv_wnd: u32,
    // and more, see include/linux/tcp.h
}
impl TCP_INFO {
    /// Create a new zeroed out [`TCP_INFO`]
    ///
    /// # Safety
    /// All-zero bytes are a valid bit pattern for every field here (plain
    /// integers), so `mem::zeroed` is sound; the caller is expected to have
    /// the kernel overwrite it via `getsockopt(TCP_INFO)`.
    pub unsafe fn new() -> Self {
        mem::zeroed()
    }

    /// Return the size of [`TCP_INFO`] in bytes, typed for `getsockopt(2)`.
    #[cfg(unix)]
    pub fn len() -> socklen_t {
        mem::size_of::<Self>() as socklen_t
    }

    /// Return the size of [`TCP_INFO`] in bytes.
    #[cfg(windows)]
    pub fn len() -> usize {
        mem::size_of::<Self>()
    }
}
/// Thin wrapper over `setsockopt(2)` for a plain-old-data payload `T`.
///
/// `opt`/`val` are the socket level / option name pair
/// (e.g. `SOL_SOCKET` / `SO_KEEPALIVE`).
#[cfg(target_os = "linux")]
fn set_opt<T: Copy>(sock: c_int, opt: c_int, val: c_int, payload: T) -> io::Result<()> {
    unsafe {
        // SAFETY: `payload` outlives the call and `size_of::<T>()` describes
        // exactly the bytes behind the pointer handed to the kernel.
        let payload = &payload as *const T as *const c_void;
        cvt_linux_error(libc::setsockopt(
            sock,
            opt,
            val,
            payload as *const _,
            mem::size_of::<T>() as socklen_t,
        ))?;
        Ok(())
    }
}
/// Thin wrapper over `getsockopt(2)`: the kernel writes into `payload` and
/// updates `size` with the number of bytes it actually produced.
#[cfg(target_os = "linux")]
fn get_opt<T>(
    sock: c_int,
    opt: c_int,
    val: c_int,
    payload: &mut T,
    size: &mut socklen_t,
) -> io::Result<()> {
    unsafe {
        // SAFETY: `payload` is a valid, writable buffer of at least `*size`
        // bytes for the duration of the call.
        let payload = payload as *mut T as *mut c_void;
        cvt_linux_error(libc::getsockopt(sock, opt, val, payload as *mut _, size))?;
        Ok(())
    }
}
/// Call `getsockopt(2)` expecting the kernel to fill exactly one `T`.
///
/// Errors if the kernel reports a payload size different from
/// `size_of::<T>()` (e.g. a struct layout mismatch across kernel versions),
/// in which case `assume_init` would be unsound to trust.
#[cfg(target_os = "linux")]
fn get_opt_sized<T>(sock: c_int, opt: c_int, val: c_int) -> io::Result<T> {
    // zeroed (not uninit) so short kernel writes still leave defined bytes
    let mut payload = mem::MaybeUninit::zeroed();
    let expected_size = mem::size_of::<T>() as socklen_t;
    let mut size = expected_size;
    get_opt(sock, opt, val, &mut payload, &mut size)?;
    if size != expected_size {
        return Err(std::io::Error::other("get_opt size mismatch"));
    }
    // Assume getsockopt() will set the value properly
    let payload = unsafe { payload.assume_init() };
    Ok(payload)
}
/// Translate the C return convention (`-1` means "inspect errno") into an
/// `io::Result`, capturing the current OS error on failure.
#[cfg(target_os = "linux")]
fn cvt_linux_error(t: i32) -> io::Result<i32> {
    match t {
        -1 => Err(io::Error::last_os_error()),
        ok => Ok(ok),
    }
}
/// Ask the kernel to defer choosing a source port until `connect()` time
/// (`IP_BIND_ADDRESS_NO_PORT`), which avoids ephemeral-port exhaustion when
/// binding to a specific source address before connecting.
#[cfg(target_os = "linux")]
fn ip_bind_addr_no_port(fd: RawFd, val: bool) -> io::Result<()> {
    set_opt(
        fd,
        libc::IPPROTO_IP,
        libc::IP_BIND_ADDRESS_NO_PORT,
        val as c_int,
    )
}

/// No-op: `IP_BIND_ADDRESS_NO_PORT` is Linux-specific.
#[cfg(all(unix, not(target_os = "linux")))]
fn ip_bind_addr_no_port(_fd: RawFd, _val: bool) -> io::Result<()> {
    Ok(())
}
/// IP_LOCAL_PORT_RANGE is only supported on Linux 6.3 and higher,
/// ip_local_port_range() is a no-op on unsupported versions.
/// See the [man page](https://man7.org/linux/man-pages/man7/ip.7.html) for more details.
#[cfg(target_os = "linux")]
fn ip_local_port_range(fd: RawFd, low: u16, high: u16) -> io::Result<()> {
    // not yet in the libc crate, value from include/uapi/linux/in.h
    const IP_LOCAL_PORT_RANGE: i32 = 51;
    // kernel encoding: low bound in the lower 16 bits, high bound in the upper 16
    let range: u32 = (low as u32) | ((high as u32) << 16);
    let result = set_opt(fd, libc::IPPROTO_IP, IP_LOCAL_PORT_RANGE, range as c_int);
    match result {
        // ENOPROTOOPT means the running kernel predates this option; treat as no-op
        Err(e) if e.raw_os_error() != Some(libc::ENOPROTOOPT) => Err(e),
        _ => Ok(()), // no error or ENOPROTOOPT
    }
}

/// No-op: `IP_LOCAL_PORT_RANGE` is Linux-specific.
#[cfg(all(unix, not(target_os = "linux")))]
fn ip_local_port_range(_fd: RawFd, _low: u16, _high: u16) -> io::Result<()> {
    Ok(())
}

/// No-op: `IP_LOCAL_PORT_RANGE` is Linux-specific.
#[cfg(windows)]
fn ip_local_port_range(_fd: RawSocket, _low: u16, _high: u16) -> io::Result<()> {
    Ok(())
}
/// Enable or disable TCP keepalive probing (`SO_KEEPALIVE`).
#[cfg(target_os = "linux")]
fn set_so_keepalive(fd: RawFd, val: bool) -> io::Result<()> {
    set_opt(fd, libc::SOL_SOCKET, libc::SO_KEEPALIVE, val as c_int)
}

/// Set how long the connection must be idle before probing starts (`TCP_KEEPIDLE`).
#[cfg(target_os = "linux")]
fn set_so_keepalive_idle(fd: RawFd, val: Duration) -> io::Result<()> {
    set_opt(
        fd,
        libc::IPPROTO_TCP,
        libc::TCP_KEEPIDLE,
        val.as_secs() as c_int, // only the seconds part of val is used
    )
}

/// Set the max time data may stay unacknowledged before the kernel kills the
/// connection with ETIMEDOUT (`TCP_USER_TIMEOUT`).
#[cfg(target_os = "linux")]
fn set_so_keepalive_user_timeout(fd: RawFd, val: Duration) -> io::Result<()> {
    set_opt(
        fd,
        libc::IPPROTO_TCP,
        libc::TCP_USER_TIMEOUT,
        val.as_millis() as c_int, // only the ms part of val is used
    )
}

/// Set the interval between successive keepalive probes (`TCP_KEEPINTVL`).
#[cfg(target_os = "linux")]
fn set_so_keepalive_interval(fd: RawFd, val: Duration) -> io::Result<()> {
    set_opt(
        fd,
        libc::IPPROTO_TCP,
        libc::TCP_KEEPINTVL,
        val.as_secs() as c_int, // only the seconds part of val is used
    )
}

/// Set how many unanswered probes close the connection (`TCP_KEEPCNT`).
#[cfg(target_os = "linux")]
fn set_so_keepalive_count(fd: RawFd, val: usize) -> io::Result<()> {
    set_opt(fd, libc::IPPROTO_TCP, libc::TCP_KEEPCNT, val as c_int)
}

/// Apply all fields of [`TcpKeepalive`] to the socket; fails on the first
/// option that cannot be set.
#[cfg(target_os = "linux")]
fn set_keepalive(fd: RawFd, ka: &TcpKeepalive) -> io::Result<()> {
    set_so_keepalive(fd, true)?;
    set_so_keepalive_idle(fd, ka.idle)?;
    set_so_keepalive_interval(fd, ka.interval)?;
    set_so_keepalive_count(fd, ka.count)?;
    set_so_keepalive_user_timeout(fd, ka.user_timeout)
}

/// No-op: keepalive tuning is only implemented for Linux.
#[cfg(all(unix, not(target_os = "linux")))]
fn set_keepalive(_fd: RawFd, _ka: &TcpKeepalive) -> io::Result<()> {
    Ok(())
}

/// No-op: keepalive tuning is only implemented for Linux.
#[cfg(windows)]
fn set_keepalive(_sock: RawSocket, _ka: &TcpKeepalive) -> io::Result<()> {
    Ok(())
}
/// Get the kernel TCP_INFO for the given FD.
#[cfg(target_os = "linux")]
pub fn get_tcp_info(fd: RawFd) -> io::Result<TCP_INFO> {
    get_opt_sized(fd, libc::IPPROTO_TCP, libc::TCP_INFO)
}

/// Non-Linux unix fallback: returns an all-zero [`TCP_INFO`].
#[cfg(all(unix, not(target_os = "linux")))]
pub fn get_tcp_info(_fd: RawFd) -> io::Result<TCP_INFO> {
    Ok(unsafe { TCP_INFO::new() })
}

/// Windows fallback: returns an all-zero [`TCP_INFO`].
#[cfg(windows)]
pub fn get_tcp_info(_fd: RawSocket) -> io::Result<TCP_INFO> {
    Ok(unsafe { TCP_INFO::new() })
}
/// Set the TCP receive buffer size. See SO_RCVBUF.
#[cfg(target_os = "linux")]
pub fn set_recv_buf(fd: RawFd, val: usize) -> Result<()> {
    set_opt(fd, libc::SOL_SOCKET, libc::SO_RCVBUF, val as c_int)
        .or_err(ConnectError, "failed to set SO_RCVBUF")
}

/// No-op on non-Linux unix platforms.
#[cfg(all(unix, not(target_os = "linux")))]
pub fn set_recv_buf(_fd: RawFd, _: usize) -> Result<()> {
    Ok(())
}

/// No-op on Windows.
#[cfg(windows)]
pub fn set_recv_buf(_sock: RawSocket, _: usize) -> Result<()> {
    Ok(())
}

/// Get the current receive buffer size (SO_RCVBUF).
/// Note: the kernel reports double the value passed to set (see socket(7)).
#[cfg(target_os = "linux")]
pub fn get_recv_buf(fd: RawFd) -> io::Result<usize> {
    get_opt_sized::<c_int>(fd, libc::SOL_SOCKET, libc::SO_RCVBUF).map(|v| v as usize)
}

/// Fallback: always reports 0 on non-Linux unix platforms.
#[cfg(all(unix, not(target_os = "linux")))]
pub fn get_recv_buf(_fd: RawFd) -> io::Result<usize> {
    Ok(0)
}

/// Fallback: always reports 0 on Windows.
#[cfg(windows)]
pub fn get_recv_buf(_sock: RawSocket) -> io::Result<usize> {
    Ok(0)
}

/// Get the current send buffer size (SO_SNDBUF).
#[cfg(target_os = "linux")]
pub fn get_snd_buf(fd: RawFd) -> io::Result<usize> {
    get_opt_sized::<c_int>(fd, libc::SOL_SOCKET, libc::SO_SNDBUF).map(|v| v as usize)
}

/// Fallback: always reports 0 on non-Linux unix platforms.
#[cfg(all(unix, not(target_os = "linux")))]
pub fn get_snd_buf(_fd: RawFd) -> io::Result<usize> {
    Ok(0)
}

/// Fallback: always reports 0 on Windows.
#[cfg(windows)]
pub fn get_snd_buf(_sock: RawSocket) -> io::Result<usize> {
    Ok(0)
}
/// Enable client side TCP fast open.
///
/// With `TCP_FASTOPEN_CONNECT`, connect() returns immediately and the SYN
/// (carrying data) goes out on the first write.
#[cfg(target_os = "linux")]
pub fn set_tcp_fastopen_connect(fd: RawFd) -> Result<()> {
    set_opt(
        fd,
        libc::IPPROTO_TCP,
        libc::TCP_FASTOPEN_CONNECT,
        1 as c_int,
    )
    .or_err(ConnectError, "failed to set TCP_FASTOPEN_CONNECT")
}

/// No-op on non-Linux unix platforms.
#[cfg(all(unix, not(target_os = "linux")))]
pub fn set_tcp_fastopen_connect(_fd: RawFd) -> Result<()> {
    Ok(())
}

/// No-op on Windows.
#[cfg(windows)]
pub fn set_tcp_fastopen_connect(_sock: RawSocket) -> Result<()> {
    Ok(())
}

/// Enable server side TCP fast open.
///
/// `backlog` caps how many not-yet-accepted TFO connections may be pending.
#[cfg(target_os = "linux")]
pub fn set_tcp_fastopen_backlog(fd: RawFd, backlog: usize) -> Result<()> {
    set_opt(fd, libc::IPPROTO_TCP, libc::TCP_FASTOPEN, backlog as c_int)
        .or_err(ConnectError, "failed to set TCP_FASTOPEN")
}

/// No-op on non-Linux unix platforms.
#[cfg(all(unix, not(target_os = "linux")))]
pub fn set_tcp_fastopen_backlog(_fd: RawFd, _backlog: usize) -> Result<()> {
    Ok(())
}

/// No-op on Windows.
#[cfg(windows)]
pub fn set_tcp_fastopen_backlog(_sock: RawSocket, _backlog: usize) -> Result<()> {
    Ok(())
}
/// Set the DSCP/traffic-class byte on the socket.
///
/// Uses `IPV6_TCLASS` for IPv6 sockets and `IP_TOS` for IPv4; errors if the
/// fd is not an IP socket.
#[cfg(target_os = "linux")]
pub fn set_dscp(fd: RawFd, value: u8) -> Result<()> {
    use super::socket::SocketAddr;
    use pingora_error::OkOrErr;

    // inspect the local address only to learn the address family
    let sock = SocketAddr::from_raw_fd(fd, false);
    let addr = sock
        .as_ref()
        .and_then(|s| s.as_inet())
        .or_err(SocketError, "failed to set dscp, invalid IP socket")?;

    if addr.is_ipv6() {
        set_opt(fd, libc::IPPROTO_IPV6, libc::IPV6_TCLASS, value as c_int)
            .or_err(SocketError, "failed to set dscp (IPV6_TCLASS)")
    } else {
        set_opt(fd, libc::IPPROTO_IP, libc::IP_TOS, value as c_int)
            .or_err(SocketError, "failed to set dscp (IP_TOS)")
    }
}

/// No-op on non-Linux unix platforms.
#[cfg(all(unix, not(target_os = "linux")))]
pub fn set_dscp(_fd: RawFd, _value: u8) -> Result<()> {
    Ok(())
}

/// No-op on Windows.
#[cfg(windows)]
pub fn set_dscp(_sock: RawSocket, _value: u8) -> Result<()> {
    Ok(())
}
/// Return the kernel-assigned socket cookie (`SO_COOKIE`), a stable unique id
/// for the socket's lifetime.
#[cfg(target_os = "linux")]
pub fn get_socket_cookie(fd: RawFd) -> io::Result<u64> {
    get_opt_sized::<c_ulonglong>(fd, libc::SOL_SOCKET, libc::SO_COOKIE)
}

/// Fallback: always 0.
#[cfg(all(unix, not(target_os = "linux")))]
pub fn get_socket_cookie(_fd: RawFd) -> io::Result<u64> {
    Ok(0) // SO_COOKIE is a Linux concept
}
/// Look up the pre-NAT destination of a redirected connection via netfilter's
/// `SO_ORIGINAL_DST` / `IP6T_SO_ORIGINAL_DST` (used for transparent proxying).
#[cfg(target_os = "linux")]
pub fn get_original_dest(fd: RawFd) -> Result<Option<SocketAddr>> {
    use super::socket;
    use pingora_error::OkOrErr;
    use std::net::{SocketAddrV4, SocketAddrV6};

    // local address only used to determine the address family
    let sock = socket::SocketAddr::from_raw_fd(fd, false);
    let addr = sock
        .as_ref()
        .and_then(|s| s.as_inet())
        .or_err(SocketError, "failed get original dest, invalid IP socket")?;

    let dest = if addr.is_ipv4() {
        // fields of sockaddr_in come back in network byte order
        get_opt_sized::<libc::sockaddr_in>(fd, libc::SOL_IP, libc::SO_ORIGINAL_DST).map(|addr| {
            SocketAddr::V4(SocketAddrV4::new(
                u32::from_be(addr.sin_addr.s_addr).into(),
                u16::from_be(addr.sin_port),
            ))
        })
    } else {
        get_opt_sized::<libc::sockaddr_in6>(fd, libc::SOL_IPV6, libc::IP6T_SO_ORIGINAL_DST).map(
            |addr| {
                SocketAddr::V6(SocketAddrV6::new(
                    addr.sin6_addr.s6_addr.into(),
                    u16::from_be(addr.sin6_port),
                    addr.sin6_flowinfo,
                    addr.sin6_scope_id,
                ))
            },
        )
    };
    dest.or_err(SocketError, "failed to get original dest")
        .map(Some)
}

/// Fallback: original destination lookup is a Linux/netfilter concept.
#[cfg(all(unix, not(target_os = "linux")))]
pub fn get_original_dest(_fd: RawFd) -> Result<Option<SocketAddr>> {
    Ok(None)
}

/// Fallback: original destination lookup is a Linux/netfilter concept.
#[cfg(windows)]
pub fn get_original_dest(_sock: RawSocket) -> Result<Option<SocketAddr>> {
    Ok(None)
}
/// connect() to the given address while optionally binding to the specific source address and port range.
///
/// The `set_socket` callback can be used to tune the socket before `connect()` is called.
///
/// If a [`BindTo`] is set with a port range and fallback setting enabled this function will retry
/// on EADDRNOTAVAIL ignoring the port range.
///
/// `IP_BIND_ADDRESS_NO_PORT` is used.
/// `IP_LOCAL_PORT_RANGE` is used if a port range is set on [`BindTo`].
pub(crate) async fn connect_with<F: FnOnce(&TcpSocket) -> Result<()> + Clone>(
    addr: &SocketAddr,
    bind_to: Option<&BindTo>,
    set_socket: F,
) -> Result<TcpStream> {
    if bind_to.as_ref().is_some_and(|b| b.will_fallback()) {
        // if we see an EADDRNOTAVAIL error clear the port range and try again
        let connect_result = inner_connect_with(addr, bind_to, set_socket.clone()).await;
        if let Err(e) = connect_result.as_ref() {
            // EADDRNOTAVAIL surfaces as BindError via wrap_os_connect_error()
            if matches!(e.etype(), BindError) {
                // keep the bind address but drop the port range for the retry
                let mut new_bind_to = BindTo::default();
                new_bind_to.addr = bind_to.as_ref().and_then(|b| b.addr);
                // reset the port range
                new_bind_to.set_port_range(None).unwrap();
                return inner_connect_with(addr, Some(&new_bind_to), set_socket).await;
            }
        }
        connect_result
    } else {
        // not retryable
        inner_connect_with(addr, bind_to, set_socket).await
    }
}
/// Single connection attempt: create the socket, apply bind options and the
/// caller's `set_socket` tuning, then connect.
async fn inner_connect_with<F: FnOnce(&TcpSocket) -> Result<()>>(
    addr: &SocketAddr,
    bind_to: Option<&BindTo>,
    set_socket: F,
) -> Result<TcpStream> {
    // match the socket family to the destination address
    let socket = if addr.is_ipv4() {
        TcpSocket::new_v4()
    } else {
        TcpSocket::new_v6()
    }
    .or_err(SocketError, "failed to create socket")?;

    #[cfg(unix)]
    {
        // defer source-port selection to connect() to avoid port exhaustion
        ip_bind_addr_no_port(socket.as_raw_fd(), true).or_err(
            SocketError,
            "failed to set socket opts IP_BIND_ADDRESS_NO_PORT",
        )?;

        if let Some(bind_to) = bind_to {
            // port range must be set before bind()
            if let Some((low, high)) = bind_to.port_range() {
                ip_local_port_range(socket.as_raw_fd(), low, high)
                    .or_err(SocketError, "failed to set socket opts IP_LOCAL_PORT_RANGE")?;
            }

            if let Some(baddr) = bind_to.addr {
                socket
                    .bind(baddr)
                    .or_err_with(BindError, || format!("failed to bind to socket {}", baddr))?;
            }
        }
    }

    #[cfg(windows)]
    if let Some(bind_to) = bind_to {
        if let Some(baddr) = bind_to.addr {
            socket
                .bind(baddr)
                .or_err_with(BindError, || format!("failed to bind to socket {}", baddr))?;
        };
    };
    // TODO: add support for bind on other platforms

    // caller tuning hook runs after bind, right before connect()
    set_socket(&socket)?;

    socket
        .connect(*addr)
        .await
        .map_err(|e| wrap_os_connect_error(e, format!("Fail to connect to {}", *addr)))
}
/// connect() to the given address while optionally binding to the specific source address.
///
/// Convenience wrapper around [`connect_with`] with no socket tuning callback.
///
/// `IP_BIND_ADDRESS_NO_PORT` is used
/// `IP_LOCAL_PORT_RANGE` is used if a port range is set on [`BindTo`].
pub async fn connect(addr: &SocketAddr, bind_to: Option<&BindTo>) -> Result<TcpStream> {
    connect_with(addr, bind_to, |_| Ok(())).await
}
/// connect() to the given Unix domain socket
///
/// OS-level connect errors are translated into pingora error types via
/// [`wrap_os_connect_error`].
#[cfg(unix)]
pub async fn connect_uds(path: &std::path::Path) -> Result<UnixStream> {
    UnixStream::connect(path)
        .await
        .map_err(|e| wrap_os_connect_error(e, format!("Fail to connect to {}", path.display())))
}
/// Map an OS connect() error onto the pingora [`ErrorType`] taxonomy so that
/// callers (e.g. retry logic in `connect_with`) can react by error class.
fn wrap_os_connect_error(e: std::io::Error, context: String) -> Box<Error> {
    match e.kind() {
        ErrorKind::ConnectionRefused => Error::because(ConnectRefused, context, e),
        ErrorKind::TimedOut => Error::because(ConnectTimedout, context, e),
        // EADDRNOTAVAIL: local bind/port-range problem, not a remote failure
        ErrorKind::AddrNotAvailable => Error::because(BindError, context, e),
        // these indicate local misconfiguration rather than peer behavior
        ErrorKind::PermissionDenied | ErrorKind::AddrInUse => {
            Error::because(InternalError, context, e)
        }
        _ => match e.raw_os_error() {
            // no io::ErrorKind for these; fall back to the raw errno
            Some(libc::ENETUNREACH | libc::EHOSTUNREACH) => {
                Error::because(ConnectNoRoute, context, e)
            }
            _ => Error::because(ConnectError, context, e),
        },
    }
}
/// The configuration for TCP keepalive
#[derive(Clone, Debug)]
pub struct TcpKeepalive {
    /// The time a connection needs to be idle before TCP begins sending out keep-alive probes.
    pub idle: Duration,
    /// The duration between successive TCP keep-alive probes.
    /// Note: only whole seconds are used when applied (TCP_KEEPINTVL).
    pub interval: Duration,
    /// The maximum number of TCP keep-alive probes to send before giving up and killing the connection
    pub count: usize,
    /// the maximum amount of time in milliseconds that transmitted data may
    /// remain unacknowledged, or buffered data may remain untransmitted (due to
    /// zero window size) before TCP will forcibly close the corresponding
    /// connection and return ETIMEDOUT. If the value is specified as 0 (the
    /// default), TCP will use the system default.
    #[cfg(target_os = "linux")]
    pub user_timeout: Duration,
}
impl std::fmt::Display for TcpKeepalive {
    /// Format as `idle/interval/count/user_timeout` (Linux includes the
    /// user timeout field, other platforms omit it).
    #[cfg(target_os = "linux")]
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{:?}/{:?}/{}/{:?}",
            self.idle, self.interval, self.count, self.user_timeout
        )
    }

    /// Format as `idle/interval/count`.
    #[cfg(not(target_os = "linux"))]
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?}/{:?}/{}", self.idle, self.interval, self.count)
    }
}
/// Apply the given TCP keepalive settings to the given connection
///
/// Currently only effective on Linux; a no-op elsewhere (see `set_keepalive`).
pub fn set_tcp_keepalive(stream: &TcpStream, ka: &TcpKeepalive) -> Result<()> {
    #[cfg(unix)]
    let raw = stream.as_raw_fd();
    #[cfg(windows)]
    let raw = stream.as_raw_socket();
    // TODO: check localhost or if keepalive is already set
    set_keepalive(raw, ka).or_err(ConnectError, "failed to set keepalive")
}
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_set_recv_buf() {
        use tokio::net::TcpSocket;
        let socket = TcpSocket::new_v4().unwrap();
        #[cfg(unix)]
        set_recv_buf(socket.as_raw_fd(), 102400).unwrap();
        #[cfg(windows)]
        set_recv_buf(socket.as_raw_socket(), 102400).unwrap();

        #[cfg(target_os = "linux")]
        {
            // kernel doubles whatever is set
            assert_eq!(get_recv_buf(socket.as_raw_fd()).unwrap(), 102400 * 2);
        }
    }

    #[cfg(target_os = "linux")]
    #[ignore] // this test requires the Linux system to have net.ipv4.tcp_fastopen set
    #[tokio::test]
    async fn test_set_fast_open() {
        use std::time::Instant;

        // connect once to make sure there is a SYN cookie to use for TFO
        connect_with(&"1.1.1.1:80".parse().unwrap(), None, |socket| {
            set_tcp_fastopen_connect(socket.as_raw_fd())
        })
        .await
        .unwrap();

        let start = Instant::now();
        connect_with(&"1.1.1.1:80".parse().unwrap(), None, |socket| {
            set_tcp_fastopen_connect(socket.as_raw_fd())
        })
        .await
        .unwrap();
        let connection_time = start.elapsed();

        // connect() returns right away as the SYN goes out only when the first write() is called.
        assert!(connection_time.as_millis() < 4);
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/l4/listener.rs | pingora-core/src/protocols/l4/listener.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Listeners
use std::io;
#[cfg(unix)]
use std::os::unix::io::AsRawFd;
#[cfg(windows)]
use std::os::windows::io::AsRawSocket;
use tokio::net::TcpListener;
#[cfg(unix)]
use tokio::net::UnixListener;
use crate::protocols::digest::{GetSocketDigest, SocketDigest};
use crate::protocols::l4::stream::Stream;
/// The type for generic listener for both TCP and Unix domain socket
#[derive(Debug)]
pub enum Listener {
    /// A TCP listening socket.
    Tcp(TcpListener),
    /// A Unix domain listening socket (unix platforms only).
    #[cfg(unix)]
    Unix(UnixListener),
}
// Allow a tokio TcpListener to be used anywhere a generic Listener is expected.
impl From<TcpListener> for Listener {
    fn from(s: TcpListener) -> Self {
        Self::Tcp(s)
    }
}

// Same convenience conversion for Unix domain sockets.
#[cfg(unix)]
impl From<UnixListener> for Listener {
    fn from(s: UnixListener) -> Self {
        Self::Unix(s)
    }
}
// Expose the underlying raw fd regardless of listener flavor.
#[cfg(unix)]
impl AsRawFd for Listener {
    fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
        match &self {
            Self::Tcp(l) => l.as_raw_fd(),
            Self::Unix(l) => l.as_raw_fd(),
        }
    }
}

// On Windows only the TCP variant exists, so the match is exhaustive.
#[cfg(windows)]
impl AsRawSocket for Listener {
    fn as_raw_socket(&self) -> std::os::windows::io::RawSocket {
        match &self {
            Self::Tcp(l) => l.as_raw_socket(),
        }
    }
}
impl Listener {
    /// Accept a connection from the listening endpoint
    ///
    /// On success the returned [`Stream`] carries a [`SocketDigest`] whose
    /// peer address is pre-populated from accept(), saving a later syscall.
    pub async fn accept(&self) -> io::Result<Stream> {
        match &self {
            Self::Tcp(l) => l.accept().await.map(|(stream, peer_addr)| {
                let mut s: Stream = stream.into();
                #[cfg(unix)]
                let digest = SocketDigest::from_raw_fd(s.as_raw_fd());
                #[cfg(windows)]
                let digest = SocketDigest::from_raw_socket(s.as_raw_socket());
                digest
                    .peer_addr
                    .set(Some(peer_addr.into()))
                    .expect("newly created OnceCell must be empty");
                s.set_socket_digest(digest);
                // TODO: if listening on a specific bind address, we could save
                // an extra syscall looking up the local_addr later if we can pass
                // and init it in the socket digest here
                s
            }),
            #[cfg(unix)]
            Self::Unix(l) => l.accept().await.map(|(stream, peer_addr)| {
                let mut s: Stream = stream.into();
                let digest = SocketDigest::from_raw_fd(s.as_raw_fd());
                // note: if unnamed/abstract UDS, it will be `None`
                // (see TryFrom<tokio::net::unix::SocketAddr>)
                let addr = peer_addr.try_into().ok();
                digest
                    .peer_addr
                    .set(addr)
                    .expect("newly created OnceCell must be empty");
                s.set_socket_digest(digest);
                s
            }),
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/l4/mod.rs | pingora-core/src/protocols/l4/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Transport layer protocol implementation
pub mod ext; // low-level socket options (setsockopt/getsockopt helpers)
pub mod listener; // generic TCP / Unix domain socket listener
pub mod socket; // generic SocketAddr covering inet and UDS
pub mod stream; // the L4 connection stream type
pub mod virt; // virtual (in-memory) stream support
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/l4/socket.rs | pingora-core/src/protocols/l4/socket.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generic socket type
use crate::{Error, OrErr};
use log::warn;
#[cfg(unix)]
use nix::sys::socket::{getpeername, getsockname, SockaddrStorage};
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use std::net::SocketAddr as StdSockAddr;
#[cfg(unix)]
use std::os::unix::net::SocketAddr as StdUnixSockAddr;
#[cfg(unix)]
use tokio::net::unix::SocketAddr as TokioUnixSockAddr;
/// [`SocketAddr`] is a storage type that contains either an Internet (IP address)
/// socket address or a Unix domain socket address.
#[derive(Debug, Clone)]
pub enum SocketAddr {
    /// An IPv4 or IPv6 socket address.
    Inet(StdSockAddr),
    /// A Unix domain socket address (unix platforms only).
    #[cfg(unix)]
    Unix(StdUnixSockAddr),
}
impl SocketAddr {
    /// Get a reference to the IP socket if it is one
    pub fn as_inet(&self) -> Option<&StdSockAddr> {
        if let SocketAddr::Inet(addr) = self {
            Some(addr)
        } else {
            None
        }
    }

    /// Get a reference to the Unix domain socket if it is one
    #[cfg(unix)]
    pub fn as_unix(&self) -> Option<&StdUnixSockAddr> {
        if let SocketAddr::Unix(addr) = self {
            Some(addr)
        } else {
            None
        }
    }

    /// Set the port if the address is an IP socket.
    /// Silently does nothing for Unix domain sockets.
    pub fn set_port(&mut self, port: u16) {
        if let SocketAddr::Inet(addr) = self {
            addr.set_port(port)
        }
    }

    /// Convert a nix `SockaddrStorage` into our [`SocketAddr`].
    /// Returns `None` for families we do not model (and for unnamed /
    /// abstract UDS, see the note below).
    #[cfg(unix)]
    fn from_sockaddr_storage(sock: &SockaddrStorage) -> Option<SocketAddr> {
        if let Some(v4) = sock.as_sockaddr_in() {
            return Some(SocketAddr::Inet(StdSockAddr::V4(
                std::net::SocketAddrV4::new(v4.ip().into(), v4.port()),
            )));
        } else if let Some(v6) = sock.as_sockaddr_in6() {
            return Some(SocketAddr::Inet(StdSockAddr::V6(
                std::net::SocketAddrV6::new(v6.ip(), v6.port(), v6.flowinfo(), v6.scope_id()),
            )));
        }
        // TODO: don't set abstract / unnamed for now,
        // for parity with how we treat these types in TryFrom<TokioUnixSockAddr>
        // (the `??` flattens: not-a-unix-addr and path-less unix addr both -> None)
        Some(SocketAddr::Unix(
            sock.as_unix_addr()
                .map(|addr| addr.path().map(StdUnixSockAddr::from_pathname))??
                .ok()?,
        ))
    }

    /// Read the socket's local (`peer_addr == false`) or peer
    /// (`peer_addr == true`) address from the kernel.
    #[cfg(unix)]
    pub fn from_raw_fd(fd: std::os::unix::io::RawFd, peer_addr: bool) -> Option<SocketAddr> {
        let sockaddr_storage = if peer_addr {
            getpeername(fd)
        } else {
            getsockname(fd)
        };

        match sockaddr_storage {
            Ok(sockaddr) => Self::from_sockaddr_storage(&sockaddr),
            // could be errors such as EBADF, i.e. fd is no longer a valid socket
            // fail open in this case
            Err(_e) => None,
        }
    }

    /// Windows counterpart of [`from_raw_fd`]: read the local or peer address
    /// of the raw socket.
    #[cfg(windows)]
    pub fn from_raw_socket(
        sock: std::os::windows::io::RawSocket,
        is_peer_addr: bool,
    ) -> Option<SocketAddr> {
        use crate::protocols::windows::{local_addr, peer_addr};
        if is_peer_addr {
            peer_addr(sock)
        } else {
            local_addr(sock)
        }
        .map(|s| s.into())
        .ok()
    }
}
impl std::fmt::Display for SocketAddr {
    /// Inet addresses print as `ip:port`; UDS print their filesystem path,
    /// falling back to the Debug form for unnamed/abstract sockets.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            SocketAddr::Inet(addr) => write!(f, "{addr}"),
            #[cfg(unix)]
            SocketAddr::Unix(addr) => {
                if let Some(path) = addr.as_pathname() {
                    write!(f, "{}", path.display())
                } else {
                    write!(f, "{addr:?}")
                }
            }
        }
    }
}
impl Hash for SocketAddr {
    /// Hash inet addresses directly and path-backed UDS by their path.
    ///
    /// # Panics
    /// Panics on unnamed or abstract UDS — deliberately, until a stable
    /// hashing scheme for them is decided.
    fn hash<H: Hasher>(&self, state: &mut H) {
        match self {
            Self::Inet(sockaddr) => sockaddr.hash(state),
            #[cfg(unix)]
            Self::Unix(sockaddr) => {
                if let Some(path) = sockaddr.as_pathname() {
                    // use the underlying path as the hash
                    path.hash(state);
                } else {
                    // unnamed or abstract UDS
                    // abstract UDS name not yet exposed by std API
                    // panic for now, we can decide on the right way to hash them later
                    panic!("Unnamed and abstract UDS types not yet supported for hashing")
                }
            }
        }
    }
}
impl PartialEq for SocketAddr {
    /// Cross-variant comparisons are always unequal; unnamed/path-less UDS
    /// never compare equal (consistent with the path-based Hash impl).
    fn eq(&self, other: &Self) -> bool {
        match self {
            Self::Inet(addr) => Some(addr) == other.as_inet(),
            #[cfg(unix)]
            Self::Unix(addr) => {
                let path = addr.as_pathname();
                // can only compare UDS with path, assume false on all unnamed UDS
                path.is_some() && path == other.as_unix().and_then(|addr| addr.as_pathname())
            }
        }
    }
}
impl PartialOrd for SocketAddr {
    /// Delegates to the total order defined by `Ord`.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for SocketAddr {
    /// Total order: all Inet addresses sort before all Unix addresses;
    /// within a variant, the natural order of the inner address (UDS are
    /// compared by path, so all unnamed UDS compare equal).
    fn cmp(&self, other: &Self) -> Ordering {
        match self {
            Self::Inet(addr) => {
                if let Some(o) = other.as_inet() {
                    addr.cmp(o)
                } else {
                    // always make Inet < Unix "smallest for variants at the top"
                    Ordering::Less
                }
            }
            #[cfg(unix)]
            Self::Unix(addr) => {
                if let Some(o) = other.as_unix() {
                    // NOTE: unnamed UDS are consider the same
                    addr.as_pathname().cmp(&o.as_pathname())
                } else {
                    // always make Inet < Unix "smallest for variants at the top"
                    Ordering::Greater
                }
            }
        }
    }
}

impl Eq for SocketAddr {}
impl std::str::FromStr for SocketAddr {
    type Err = Box<Error>;

    /// Parse either an `ip:port` pair or a Unix domain socket path.
    ///
    /// The explicit `unix:/path/to.sock` form is preferred; a bare string
    /// that fails to parse as an IP socket address is accepted as a raw UDS
    /// path for backward compatibility (deprecated, logs a warning).
    // This is very basic parsing logic, it might treat invalid IP:PORT str as UDS path
    #[cfg(unix)]
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // strip_prefix removes the "unix:" marker exactly once; the previous
        // trim_start_matches stripped it repeatedly, mangling a socket path
        // that itself begins with "unix:" (e.g. "unix:unix:foo").
        if let Some(path) = s.strip_prefix("unix:") {
            // format unix:/tmp/server.socket
            let uds_socket = StdUnixSockAddr::from_pathname(path)
                .or_err(crate::BindError, "invalid UDS path")?;
            Ok(SocketAddr::Unix(uds_socket))
        } else {
            match StdSockAddr::from_str(s) {
                Ok(addr) => Ok(SocketAddr::Inet(addr)),
                Err(_) => {
                    // Try to parse as UDS for backward compatibility
                    let uds_socket = StdUnixSockAddr::from_pathname(s)
                        .or_err(crate::BindError, "invalid UDS path")?;
                    warn!("Raw Unix domain socket path support will be deprecated, add 'unix:' prefix instead");
                    Ok(SocketAddr::Unix(uds_socket))
                }
            }
        }
    }

    /// Windows: only IP socket addresses are supported.
    #[cfg(windows)]
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let addr = StdSockAddr::from_str(s).or_err(crate::BindError, "invalid socket addr")?;
        Ok(SocketAddr::Inet(addr))
    }
}
impl std::net::ToSocketAddrs for SocketAddr {
    type Iter = std::iter::Once<StdSockAddr>;

    // Error if UDS addr
    fn to_socket_addrs(&self) -> std::io::Result<Self::Iter> {
        if let Some(inet) = self.as_inet() {
            Ok(std::iter::once(*inet))
        } else {
            Err(std::io::Error::other(
                "UDS socket cannot be used as inet socket",
            ))
        }
    }
}
// Infallible wrapping of a std inet socket address.
impl From<StdSockAddr> for SocketAddr {
    fn from(sockaddr: StdSockAddr) -> Self {
        SocketAddr::Inet(sockaddr)
    }
}

// Infallible wrapping of a std Unix domain socket address.
#[cfg(unix)]
impl From<StdUnixSockAddr> for SocketAddr {
    fn from(sockaddr: StdUnixSockAddr) -> Self {
        SocketAddr::Unix(sockaddr)
    }
}
// TODO: ideally mio/tokio will start using the std version of the unix `SocketAddr`
// so we can avoid a fallible conversion
// https://github.com/tokio-rs/mio/issues/1527
#[cfg(unix)]
impl TryFrom<TokioUnixSockAddr> for SocketAddr {
    type Error = String;

    /// Succeeds only for path-backed UDS addresses; unnamed/abstract sockets
    /// cannot be represented and yield an error.
    fn try_from(value: TokioUnixSockAddr) -> Result<Self, Self::Error> {
        if let Some(Ok(addr)) = value.as_pathname().map(StdUnixSockAddr::from_pathname) {
            Ok(addr.into())
        } else {
            // may be unnamed/abstract UDS
            Err(format!("could not convert {value:?} to SocketAddr"))
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;

    // ip:port strings parse to the Inet variant
    #[test]
    fn parse_ip() {
        let ip: SocketAddr = "127.0.0.1:80".parse().unwrap();
        assert!(ip.as_inet().is_some());
    }

    // bare paths still parse to UDS (deprecated fallback)
    #[cfg(unix)]
    #[test]
    fn parse_uds() {
        let uds: SocketAddr = "/tmp/my.sock".parse().unwrap();
        assert!(uds.as_unix().is_some());
    }

    // the preferred "unix:" prefixed form
    #[cfg(unix)]
    #[test]
    fn parse_uds_with_prefix() {
        let uds: SocketAddr = "unix:/tmp/my.sock".parse().unwrap();
        assert!(uds.as_unix().is_some());
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/date.rs | pingora-core/src/protocols/http/date.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use chrono::DateTime;
use http::header::HeaderValue;
use std::cell::RefCell;
use std::time::{Duration, SystemTime};
/// Render seconds-since-epoch as an RFC 7231 HTTP-date
/// (e.g. "Thu, 01 Jan 1970 00:00:01 GMT").
fn to_date_string(epoch_sec: i64) -> String {
    DateTime::from_timestamp(epoch_sec, 0)
        .unwrap()
        .format("%a, %d %b %Y %H:%M:%S GMT")
        .to_string()
}
/// A per-thread cache of the formatted HTTP `Date` header value,
/// regenerated at most once per second.
struct CacheableDate {
    // the pre-formatted header value for `epoch`'s second
    h1_date: HeaderValue,
    // the timestamp `h1_date` was rendered for
    epoch: Duration,
}
impl CacheableDate {
    /// Initialize the cache with the current wall-clock time.
    pub fn new() -> Self {
        let d = SystemTime::now()
            .duration_since(SystemTime::UNIX_EPOCH)
            .unwrap();
        CacheableDate {
            h1_date: HeaderValue::from_str(&to_date_string(d.as_secs() as i64)).unwrap(),
            epoch: d,
        }
    }

    /// Re-render the header value, but only if the whole second has changed
    /// since the cached render (HTTP dates have one-second resolution).
    pub fn update(&mut self, d_now: Duration) {
        if d_now.as_secs() != self.epoch.as_secs() {
            self.epoch = d_now;
            self.h1_date = HeaderValue::from_str(&to_date_string(d_now.as_secs() as i64)).unwrap();
        }
    }

    /// Return the `Date` header value for "now", refreshing the cache first
    /// if a new second has started.
    pub fn get_date(&mut self) -> HeaderValue {
        let d = SystemTime::now()
            .duration_since(SystemTime::UNIX_EPOCH)
            .unwrap();
        self.update(d);
        self.h1_date.clone()
    }
}
// one cache per thread: no locking needed, at worst one render per second per thread
thread_local! {
    static CACHED_DATE: RefCell<CacheableDate>
        = RefCell::new(CacheableDate::new());
}

/// Get the current HTTP `Date` header value from the thread-local cache.
pub fn get_cached_date() -> HeaderValue {
    CACHED_DATE.with(|cache_date| (*cache_date.borrow_mut()).get_date())
}
#[cfg(test)]
mod test {
    use super::*;

    // render "now" without the cache, for comparison
    fn now_date_header() -> HeaderValue {
        HeaderValue::from_str(&to_date_string(
            SystemTime::now()
                .duration_since(SystemTime::UNIX_EPOCH)
                .unwrap()
                .as_secs() as i64,
        ))
        .unwrap()
    }

    #[test]
    fn test_date_string() {
        let date_str = to_date_string(1);
        assert_eq!("Thu, 01 Jan 1970 00:00:01 GMT", date_str);
    }

    // NOTE: can flake if the wall clock ticks over a second between the two calls
    #[test]
    fn test_date_cached() {
        assert_eq!(get_cached_date(), now_date_header());
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/body_buffer.rs | pingora-core/src/protocols/http/body_buffer.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use bytes::{Bytes, BytesMut};
/// A buffer with size limit. When the total amount of data written to the buffer is below the limit
/// all the data will be held in the buffer. Otherwise, the buffer will report to be truncated.
pub struct FixedBuffer {
    // the bytes accepted so far (never exceeds `capacity`)
    buffer: BytesMut,
    // the write limit in bytes
    capacity: usize,
    // set once a write would have exceeded `capacity`; sticky until clear()
    truncated: bool,
}
impl FixedBuffer {
    /// Create an empty buffer that accepts at most `capacity` bytes in total.
    pub fn new(capacity: usize) -> Self {
        FixedBuffer {
            buffer: BytesMut::new(),
            capacity,
            truncated: false,
        }
    }

    /// Append `data` if it fits within the remaining capacity; otherwise mark
    /// the buffer truncated (previously buffered data is kept as-is).
    // TODO: maybe store a Vec of Bytes for zero-copy
    pub fn write_to_buffer(&mut self, data: &Bytes) {
        if !self.truncated && (self.buffer.len() + data.len() <= self.capacity) {
            self.buffer.extend_from_slice(data);
        } else {
            // TODO: clear data because the data held here is useless anyway?
            self.truncated = true;
        }
    }

    /// Discard all buffered data and reset the truncated flag.
    pub fn clear(&mut self) {
        self.truncated = false;
        self.buffer.clear();
    }

    /// Whether no data is currently buffered.
    pub fn is_empty(&self) -> bool {
        // idiomatic is_empty() instead of `len() == 0` (clippy::len_zero)
        self.buffer.is_empty()
    }

    /// Whether a write was ever rejected for exceeding the capacity.
    pub fn is_truncated(&self) -> bool {
        self.truncated
    }

    /// A frozen copy of the buffered data, or `None` when empty.
    /// Note: may return partial data even when truncated.
    pub fn get_buffer(&self) -> Option<Bytes> {
        // TODO: return None if truncated?
        if !self.is_empty() {
            Some(self.buffer.clone().freeze())
        } else {
            None
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/client.rs | pingora-core/src/protocols/http/client.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use bytes::Bytes;
use pingora_error::Result;
use pingora_http::{RequestHeader, ResponseHeader};
use std::time::Duration;
use super::v2::client::Http2Session;
use super::{custom::client::Session, v1::client::HttpSession as Http1Session};
use crate::protocols::{Digest, SocketAddr, Stream};
/// A type for Http client session. It can be either an Http1 connection or an Http2 stream.
pub enum HttpSession<S = ()> {
    /// An HTTP/1.x client session
    H1(Http1Session),
    /// An HTTP/2 client stream
    H2(Http2Session),
    /// A user-provided session implementing the custom client [`Session`] trait
    Custom(S),
}
impl<S: Session> HttpSession<S> {
    /// Return a reference to the inner HTTP/1 session, or `None` for other session types.
    pub fn as_http1(&self) -> Option<&Http1Session> {
        match self {
            Self::H1(s) => Some(s),
            Self::H2(_) => None,
            Self::Custom(_) => None,
        }
    }
    /// Return a reference to the inner HTTP/2 session, or `None` for other session types.
    pub fn as_http2(&self) -> Option<&Http2Session> {
        match self {
            Self::H1(_) => None,
            Self::H2(s) => Some(s),
            Self::Custom(_) => None,
        }
    }
    /// Return a reference to the inner custom session, or `None` for H1/H2 sessions.
    pub fn as_custom(&self) -> Option<&S> {
        match self {
            Self::H1(_) => None,
            Self::H2(_) => None,
            Self::Custom(c) => Some(c),
        }
    }
    /// Return a mutable reference to the inner custom session, or `None` for H1/H2 sessions.
    pub fn as_custom_mut(&mut self) -> Option<&mut S> {
        match self {
            Self::H1(_) => None,
            Self::H2(_) => None,
            Self::Custom(c) => Some(c),
        }
    }
    /// Write the request header to the server.
    /// After the request header is sent, the caller can either start reading the response or
    /// sending the request body, if any.
    pub async fn write_request_header(&mut self, req: Box<RequestHeader>) -> Result<()> {
        match self {
            HttpSession::H1(h1) => {
                h1.write_request_header(req).await?;
                Ok(())
            }
            // `false`: the request is not ended yet — body and/or finish_request_body() may follow
            HttpSession::H2(h2) => h2.write_request_header(req, false),
            HttpSession::Custom(c) => c.write_request_header(req, false).await,
        }
    }
    /// Write a chunk of the request body.
    pub async fn write_request_body(&mut self, data: Bytes, end: bool) -> Result<()> {
        match self {
            HttpSession::H1(h1) => {
                // TODO: maybe h1 should also have the concept of `end`
                h1.write_body(&data).await?;
                Ok(())
            }
            HttpSession::H2(h2) => h2.write_request_body(data, end).await,
            HttpSession::Custom(c) => c.write_request_body(data, end).await,
        }
    }
    /// Signal that the request body has ended
    pub async fn finish_request_body(&mut self) -> Result<()> {
        match self {
            HttpSession::H1(h1) => {
                h1.finish_body().await?;
                Ok(())
            }
            HttpSession::H2(h2) => h2.finish_request_body(),
            HttpSession::Custom(c) => c.finish_request_body().await,
        }
    }
    /// Set the read timeout for reading header and body.
    ///
    /// The timeout is per read operation, not on the overall time reading the entire response
    pub fn set_read_timeout(&mut self, timeout: Option<Duration>) {
        match self {
            HttpSession::H1(h1) => h1.read_timeout = timeout,
            HttpSession::H2(h2) => h2.read_timeout = timeout,
            HttpSession::Custom(c) => c.set_read_timeout(timeout),
        }
    }
    /// Set the write timeout for writing header and body.
    ///
    /// The timeout is per write operation, not on the overall time writing the entire request.
    pub fn set_write_timeout(&mut self, timeout: Option<Duration>) {
        match self {
            HttpSession::H1(h1) => h1.write_timeout = timeout,
            HttpSession::H2(h2) => h2.write_timeout = timeout,
            HttpSession::Custom(c) => c.set_write_timeout(timeout),
        }
    }
    /// Read the response header from the server
    /// For http1, this function can be called multiple times, if the headers received are just
    /// informational headers.
    pub async fn read_response_header(&mut self) -> Result<()> {
        match self {
            HttpSession::H1(h1) => {
                h1.read_response().await?;
                Ok(())
            }
            HttpSession::H2(h2) => h2.read_response_header().await,
            HttpSession::Custom(c) => c.read_response_header().await,
        }
    }
    /// Read response body
    ///
    /// `None` when no more body to read.
    pub async fn read_response_body(&mut self) -> Result<Option<Bytes>> {
        match self {
            HttpSession::H1(h1) => h1.read_body_bytes().await,
            HttpSession::H2(h2) => h2.read_response_body().await,
            HttpSession::Custom(c) => c.read_response_body().await,
        }
    }
    /// No (more) body to read
    pub fn response_done(&mut self) -> bool {
        match self {
            HttpSession::H1(h1) => h1.is_body_done(),
            HttpSession::H2(h2) => h2.response_finished(),
            HttpSession::Custom(c) => c.response_finished(),
        }
    }
    /// Give up the http session abruptly.
    /// For H1 this will close the underlying connection
    /// For H2 this will send RST_STREAM frame to end this stream if the stream has not ended at all
    pub async fn shutdown(&mut self) {
        match self {
            Self::H1(s) => s.shutdown().await,
            Self::H2(s) => s.shutdown(),
            // NOTE(review): error code 1 and reason "shutdown" are hardcoded for custom sessions
            Self::Custom(c) => c.shutdown(1, "shutdown").await,
        }
    }
    /// Get the response header of the server
    ///
    /// `None` if the response header is not read yet.
    pub fn response_header(&self) -> Option<&ResponseHeader> {
        match self {
            Self::H1(s) => s.resp_header(),
            Self::H2(s) => s.response_header(),
            Self::Custom(c) => c.response_header(),
        }
    }
    /// Return the [Digest] of the connection
    ///
    /// For reused connection, the timing in the digest will reflect its initial handshakes
    /// The caller should check if the connection is reused to avoid misuse of the timing field.
    pub fn digest(&self) -> Option<&Digest> {
        match self {
            Self::H1(s) => Some(s.digest()),
            Self::H2(s) => s.digest(),
            Self::Custom(c) => c.digest(),
        }
    }
    /// Return a mutable [Digest] reference for the connection.
    ///
    /// Will return `None` if this is an H2 session and multiple streams are open.
    pub fn digest_mut(&mut self) -> Option<&mut Digest> {
        match self {
            Self::H1(s) => Some(s.digest_mut()),
            Self::H2(s) => s.digest_mut(),
            Self::Custom(s) => s.digest_mut(),
        }
    }
    /// Return the server (peer) address of the connection.
    pub fn server_addr(&self) -> Option<&SocketAddr> {
        match self {
            Self::H1(s) => s.server_addr(),
            Self::H2(s) => s.server_addr(),
            Self::Custom(s) => s.server_addr(),
        }
    }
    /// Return the client (local) address of the connection.
    pub fn client_addr(&self) -> Option<&SocketAddr> {
        match self {
            Self::H1(s) => s.client_addr(),
            Self::H2(s) => s.client_addr(),
            Self::Custom(s) => s.client_addr(),
        }
    }
    /// Get the reference of the [Stream] that this HTTP/1 session is operating upon.
    /// None if the HTTP session is over H2
    pub fn stream(&self) -> Option<&Stream> {
        match self {
            Self::H1(s) => Some(s.stream()),
            Self::H2(_) => None,
            Self::Custom(_) => None,
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/mod.rs | pingora-core/src/protocols/http/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! HTTP/1.x and HTTP/2 implementation APIs
pub mod body_buffer;
pub mod bridge;
pub mod client;
pub mod compression;
pub mod conditional_filter;
pub mod custom;
pub mod date;
pub mod error_resp;
pub mod server;
pub mod subrequest;
pub mod v1;
pub mod v2;
pub use server::Session as ServerSession;
/// The Pingora server name string ("Pingora", as a fixed-size byte array)
pub const SERVER_NAME: &[u8; 7] = b"Pingora";
/// An enum to hold all possible HTTP response events.
///
/// `Trailer`, `Done` and `Failed` always mark the end of the response (see
/// [`HttpTask::is_end`]); `Header` and `Body` carry an explicit end flag.
#[derive(Debug)]
pub enum HttpTask {
    /// the response header and the boolean end of response flag
    Header(Box<pingora_http::ResponseHeader>, bool),
    /// A piece of response body and the end of response boolean flag
    Body(Option<bytes::Bytes>, bool),
    /// HTTP response trailer
    Trailer(Option<Box<http::HeaderMap>>),
    /// Signal that the response is already finished
    Done,
    /// Signal that the reading of the response encountered errors.
    Failed(pingora_error::BError),
}
impl HttpTask {
/// Whether this [`HttpTask`] means the end of the response.
pub fn is_end(&self) -> bool {
match self {
HttpTask::Header(_, end) => *end,
HttpTask::Body(_, end) => *end,
HttpTask::Trailer(_) => true,
HttpTask::Done => true,
HttpTask::Failed(_) => true,
}
}
/// The [`HttpTask`] type as string.
pub fn type_str(&self) -> &'static str {
match self {
HttpTask::Header(..) => "Header",
HttpTask::Body(..) => "Body",
HttpTask::Trailer(_) => "Trailer",
HttpTask::Done => "Done",
HttpTask::Failed(_) => "Failed",
}
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/server.rs | pingora-core/src/protocols/http/server.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! HTTP server session APIs
use super::custom::server::Session as SessionCustom;
use super::error_resp;
use super::subrequest::server::HttpSession as SessionSubrequest;
use super::v1::server::HttpSession as SessionV1;
use super::v2::server::HttpSession as SessionV2;
use super::HttpTask;
use crate::custom_session;
use crate::protocols::{Digest, SocketAddr, Stream};
use bytes::Bytes;
use http::HeaderValue;
use http::{header::AsHeaderName, HeaderMap};
use pingora_error::Result;
use pingora_http::{RequestHeader, ResponseHeader};
use std::time::Duration;
/// HTTP server session object for both HTTP/1.x and HTTP/2
pub enum Session {
    /// An HTTP/1.x session
    H1(SessionV1),
    /// An HTTP/2 stream
    H2(SessionV2),
    /// An internally generated subrequest session (not backed by a real downstream stream)
    Subrequest(SessionSubrequest),
    /// A user-provided session implementing the custom server [`SessionCustom`] trait
    Custom(Box<dyn SessionCustom>),
}
impl Session {
    /// Create a new [`Session`] from an established connection for HTTP/1.x
    pub fn new_http1(stream: Stream) -> Self {
        Self::H1(SessionV1::new(stream))
    }
    /// Create a new [`Session`] from an established HTTP/2 stream
    pub fn new_http2(session: SessionV2) -> Self {
        Self::H2(session)
    }
    /// Create a new [`Session`] from a subrequest session
    pub fn new_subrequest(session: SessionSubrequest) -> Self {
        Self::Subrequest(session)
    }
    /// Create a new [`Session`] from a custom session
    pub fn new_custom(session: Box<dyn SessionCustom>) -> Self {
        Self::Custom(session)
    }
    /// Whether the session is HTTP/2. If not it is HTTP/1.x
    pub fn is_http2(&self) -> bool {
        matches!(self, Self::H2(_))
    }
    /// Whether the session is for a subrequest.
    pub fn is_subrequest(&self) -> bool {
        matches!(self, Self::Subrequest(_))
    }
    /// Whether the session is Custom
    pub fn is_custom(&self) -> bool {
        matches!(self, Self::Custom(_))
    }
    /// Read the request header. This method is required to be called first before doing anything
    /// else with the session.
    /// - `Ok(true)`: successful
    /// - `Ok(false)`: client exit without sending any bytes. This is normal on reused connection.
    ///   In this case the user should give up this session.
    pub async fn read_request(&mut self) -> Result<bool> {
        match self {
            Self::H1(s) => {
                let read = s.read_request().await?;
                Ok(read.is_some())
            }
            // This call will always return `Ok(true)` for Http2 because the request is already read
            Self::H2(_) => Ok(true),
            Self::Subrequest(s) => {
                let read = s.read_request().await?;
                Ok(read.is_some())
            }
            // NOTE(review): custom sessions are treated like H2 (request assumed already read) —
            // confirm against the custom session contract
            Self::Custom(_) => Ok(true),
        }
    }
    /// Return the request header it just read.
    /// # Panic
    /// This function will panic if [`Self::read_request()`] is not called.
    pub fn req_header(&self) -> &RequestHeader {
        match self {
            Self::H1(s) => s.req_header(),
            Self::H2(s) => s.req_header(),
            Self::Subrequest(s) => s.req_header(),
            Self::Custom(s) => s.req_header(),
        }
    }
    /// Return a mutable reference to request header it just read.
    /// # Panic
    /// This function will panic if [`Self::read_request()`] is not called.
    pub fn req_header_mut(&mut self) -> &mut RequestHeader {
        match self {
            Self::H1(s) => s.req_header_mut(),
            Self::H2(s) => s.req_header_mut(),
            Self::Subrequest(s) => s.req_header_mut(),
            Self::Custom(s) => s.req_header_mut(),
        }
    }
    /// Return the header by name. None if the header doesn't exist.
    ///
    /// In case there are multiple headers under the same name, the first one will be returned. To
    /// get all the headers: use `self.req_header().headers.get_all()`.
    pub fn get_header<K: AsHeaderName>(&self, key: K) -> Option<&HeaderValue> {
        self.req_header().headers.get(key)
    }
    /// Get the header value in its raw format.
    /// If the header doesn't exist, return an empty slice.
    pub fn get_header_bytes<K: AsHeaderName>(&self, key: K) -> &[u8] {
        self.get_header(key).map_or(b"", |v| v.as_bytes())
    }
    /// Read the request body. Ok(None) if no (more) body to read
    pub async fn read_request_body(&mut self) -> Result<Option<Bytes>> {
        match self {
            Self::H1(s) => s.read_body_bytes().await,
            Self::H2(s) => s.read_body_bytes().await,
            Self::Subrequest(s) => s.read_body_bytes().await,
            Self::Custom(s) => s.read_body_bytes().await,
        }
    }
    /// Discard the request body by reading it until completion.
    ///
    /// This is useful for making streams reusable (in particular for HTTP/1.1) after returning an
    /// error before the whole body has been read.
    pub async fn drain_request_body(&mut self) -> Result<()> {
        match self {
            Self::H1(s) => s.drain_request_body().await,
            Self::H2(s) => s.drain_request_body().await,
            Self::Subrequest(s) => s.drain_request_body().await,
            Self::Custom(s) => s.drain_request_body().await,
        }
    }
    /// Write the response header to client
    /// Informational headers (status code 100-199, excluding 101) can be written multiple times
    /// before the final response header (status code 200+ or 101) is written.
    pub async fn write_response_header(&mut self, resp: Box<ResponseHeader>) -> Result<()> {
        match self {
            Self::H1(s) => {
                s.write_response_header(resp).await?;
                Ok(())
            }
            // `false`: the response is not ended by this call; body/trailers may follow
            Self::H2(s) => s.write_response_header(resp, false),
            Self::Subrequest(s) => {
                s.write_response_header(resp).await?;
                Ok(())
            }
            Self::Custom(s) => s.write_response_header(resp, false).await,
        }
    }
    /// Similar to `write_response_header()`, this fn will clone the `resp` internally
    pub async fn write_response_header_ref(&mut self, resp: &ResponseHeader) -> Result<()> {
        match self {
            Self::H1(s) => {
                s.write_response_header_ref(resp).await?;
                Ok(())
            }
            Self::H2(s) => s.write_response_header_ref(resp, false),
            Self::Subrequest(s) => {
                s.write_response_header_ref(resp).await?;
                Ok(())
            }
            Self::Custom(s) => s.write_response_header_ref(resp, false).await,
        }
    }
    /// Write the response body to client
    pub async fn write_response_body(&mut self, data: Bytes, end: bool) -> Result<()> {
        if data.is_empty() && !end {
            // writing 0 byte to a chunked encoding h1 would finish the stream
            // writing 0 bytes to h2 is noop
            // we don't want to actually write in either cases
            return Ok(());
        }
        match self {
            Self::H1(s) => {
                if !data.is_empty() {
                    s.write_body(&data).await?;
                }
                if end {
                    s.finish_body().await?;
                }
                Ok(())
            }
            Self::H2(s) => s.write_body(data, end).await,
            Self::Subrequest(s) => {
                // NOTE(review): subrequest write_body takes no `end` flag; end of body is
                // signaled separately via finish()/finish_body()
                s.write_body(data).await?;
                Ok(())
            }
            Self::Custom(s) => s.write_body(data, end).await,
        }
    }
    /// Write the response trailers to client
    pub async fn write_response_trailers(&mut self, trailers: HeaderMap) -> Result<()> {
        match self {
            Self::H1(_) => Ok(()), // TODO: support trailers for h1
            Self::H2(s) => s.write_trailers(trailers),
            Self::Subrequest(s) => s.write_trailers(Some(Box::new(trailers))).await,
            Self::Custom(s) => s.write_trailers(trailers).await,
        }
    }
    /// Finish the life of this request.
    /// For H1, if connection reuse is supported, a Some(Stream) will be returned, otherwise None.
    /// For H2, always return None because H2 stream is not reusable.
    /// For subrequests, there is no true underlying stream to return.
    pub async fn finish(self) -> Result<Option<Stream>> {
        match self {
            Self::H1(mut s) => {
                // need to flush body due to buffering
                s.finish_body().await?;
                s.reuse().await
            }
            Self::H2(mut s) => {
                s.finish()?;
                Ok(None)
            }
            Self::Subrequest(mut s) => {
                s.finish().await?;
                Ok(None)
            }
            Self::Custom(mut s) => {
                s.finish().await?;
                Ok(None)
            }
        }
    }
    /// Forward a batch of [`HttpTask`]s to the downstream session in order.
    ///
    /// The returned bool is whatever the underlying session implementation reports
    /// (presumably whether the response has ended — TODO confirm per implementation).
    pub async fn response_duplex_vec(&mut self, tasks: Vec<HttpTask>) -> Result<bool> {
        match self {
            Self::H1(s) => s.response_duplex_vec(tasks).await,
            Self::H2(s) => s.response_duplex_vec(tasks).await,
            Self::Subrequest(s) => s.response_duplex_vec(tasks).await,
            Self::Custom(s) => s.response_duplex_vec(tasks).await,
        }
    }
    /// Set connection reuse. `duration` defines how long the connection is kept open for the next
    /// request to reuse. Noop for h2 and subrequest
    pub fn set_keepalive(&mut self, duration: Option<u64>) {
        match self {
            Self::H1(s) => s.set_server_keepalive(duration),
            Self::H2(_) => {}
            Self::Subrequest(_) => {}
            Self::Custom(_) => {}
        }
    }
    /// Get the keepalive timeout. None if keepalive is disabled. Not applicable for h2 or
    /// subrequest
    pub fn get_keepalive(&self) -> Option<u64> {
        match self {
            Self::H1(s) => s.get_keepalive_timeout(),
            Self::H2(_) => None,
            Self::Subrequest(_) => None,
            Self::Custom(_) => None,
        }
    }
    /// Sets the downstream read timeout. This will trigger if we're unable
    /// to read from the stream after `timeout`.
    ///
    /// This is a noop for h2.
    pub fn set_read_timeout(&mut self, timeout: Option<Duration>) {
        match self {
            Self::H1(s) => s.set_read_timeout(timeout),
            Self::H2(_) => {}
            Self::Subrequest(s) => s.set_read_timeout(timeout),
            Self::Custom(c) => c.set_read_timeout(timeout),
        }
    }
    /// Gets the downstream read timeout if set.
    pub fn get_read_timeout(&self) -> Option<Duration> {
        match self {
            Self::H1(s) => s.get_read_timeout(),
            Self::H2(_) => None,
            Self::Subrequest(s) => s.get_read_timeout(),
            Self::Custom(s) => s.get_read_timeout(),
        }
    }
    /// Sets the downstream write timeout. This will trigger if we're unable
    /// to write to the stream after `timeout`. If a `min_send_rate` is
    /// configured then the `min_send_rate` calculated timeout has higher priority.
    pub fn set_write_timeout(&mut self, timeout: Option<Duration>) {
        match self {
            Self::H1(s) => s.set_write_timeout(timeout),
            Self::H2(s) => s.set_write_timeout(timeout),
            Self::Subrequest(s) => s.set_write_timeout(timeout),
            Self::Custom(c) => c.set_write_timeout(timeout),
        }
    }
    /// Gets the downstream write timeout if set.
    pub fn get_write_timeout(&self) -> Option<Duration> {
        match self {
            Self::H1(s) => s.get_write_timeout(),
            Self::H2(s) => s.get_write_timeout(),
            Self::Subrequest(s) => s.get_write_timeout(),
            Self::Custom(s) => s.get_write_timeout(),
        }
    }
    /// Sets the total drain timeout, which will be applied while discarding the
    /// request body using `drain_request_body`.
    ///
    /// For HTTP/1.1, reusing a session requires ensuring that the request body
    /// is consumed. If the timeout is exceeded, the caller should give up on
    /// trying to reuse the session.
    pub fn set_total_drain_timeout(&mut self, timeout: Option<Duration>) {
        match self {
            Self::H1(s) => s.set_total_drain_timeout(timeout),
            Self::H2(s) => s.set_total_drain_timeout(timeout),
            Self::Subrequest(s) => s.set_total_drain_timeout(timeout),
            Self::Custom(c) => c.set_total_drain_timeout(timeout),
        }
    }
    /// Gets the total drain timeout if set.
    pub fn get_total_drain_timeout(&self) -> Option<Duration> {
        match self {
            Self::H1(s) => s.get_total_drain_timeout(),
            Self::H2(s) => s.get_total_drain_timeout(),
            Self::Subrequest(s) => s.get_total_drain_timeout(),
            Self::Custom(s) => s.get_total_drain_timeout(),
        }
    }
    /// Sets the minimum downstream send rate in bytes per second. This
    /// is used to calculate a write timeout in seconds based on the size
    /// of the buffer being written. If a `min_send_rate` is configured it
    /// has higher priority over a set `write_timeout`. The minimum send
    /// rate must be greater than zero.
    ///
    /// Calculated write timeout is guaranteed to be at least 1s if `min_send_rate`
    /// is greater than zero, a send rate of zero is equivalent to disabling.
    ///
    /// This is a noop for h2.
    pub fn set_min_send_rate(&mut self, rate: Option<usize>) {
        match self {
            Self::H1(s) => s.set_min_send_rate(rate),
            Self::H2(_) => {}
            Self::Subrequest(_) => {}
            Self::Custom(_) => {}
        }
    }
    /// Sets whether we ignore writing informational responses downstream.
    ///
    /// For HTTP/1.1 this is a noop if the response is Upgrade or Continue and
    /// Expect: 100-continue was set on the request.
    ///
    /// This is a noop for h2 because informational responses are always ignored.
    /// Subrequests will always proxy the info response and let the true downstream
    /// decide to ignore or not.
    pub fn set_ignore_info_resp(&mut self, ignore: bool) {
        match self {
            Self::H1(s) => s.set_ignore_info_resp(ignore),
            Self::H2(_) => {} // always ignored
            Self::Subrequest(_) => {}
            Self::Custom(_) => {} // always ignored
        }
    }
    /// Sets whether keepalive should be disabled if response is written prior to
    /// downstream body finishing.
    ///
    /// This is a noop for h2.
    pub fn set_close_on_response_before_downstream_finish(&mut self, close: bool) {
        match self {
            Self::H1(s) => s.set_close_on_response_before_downstream_finish(close),
            Self::H2(_) => {} // always ignored
            Self::Subrequest(_) => {} // always ignored
            Self::Custom(_) => {} // always ignored
        }
    }
    /// Return a digest of the request including the method, path and Host header
    // TODO: make this use a `Formatter`
    pub fn request_summary(&self) -> String {
        match self {
            Self::H1(s) => s.request_summary(),
            Self::H2(s) => s.request_summary(),
            Self::Subrequest(s) => s.request_summary(),
            Self::Custom(s) => s.request_summary(),
        }
    }
    /// Return the written response header. `None` if it is not written yet.
    /// Only the final (status code >= 200 or 101) response header will be returned
    pub fn response_written(&self) -> Option<&ResponseHeader> {
        match self {
            Self::H1(s) => s.response_written(),
            Self::H2(s) => s.response_written(),
            Self::Subrequest(s) => s.response_written(),
            Self::Custom(s) => s.response_written(),
        }
    }
    /// Give up the http session abruptly.
    /// For H1 this will close the underlying connection
    /// For H2 this will send RESET frame to end this stream without impacting the connection
    /// For subrequests, this will drop task senders and receivers.
    pub async fn shutdown(&mut self) {
        match self {
            Self::H1(s) => s.shutdown().await,
            Self::H2(s) => s.shutdown(),
            Self::Subrequest(s) => s.shutdown(),
            // NOTE(review): error code 1 and reason "shutdown" are hardcoded for custom sessions
            Self::Custom(s) => s.shutdown(1, "shutdown").await,
        }
    }
    /// Return the raw request header bytes.
    /// For H2 and custom sessions a pseudo HTTP/1-style request header is synthesized.
    pub fn to_h1_raw(&self) -> Bytes {
        match self {
            Self::H1(s) => s.get_headers_raw_bytes(),
            Self::H2(s) => s.pseudo_raw_h1_request_header(),
            Self::Subrequest(s) => s.get_headers_raw_bytes(),
            Self::Custom(c) => c.pseudo_raw_h1_request_header(),
        }
    }
    /// Whether the whole request body is sent
    pub fn is_body_done(&mut self) -> bool {
        match self {
            Self::H1(s) => s.is_body_done(),
            Self::H2(s) => s.is_body_done(),
            Self::Subrequest(s) => s.is_body_done(),
            Self::Custom(s) => s.is_body_done(),
        }
    }
    /// Notify the client that the entire body is sent
    /// for H1 chunked encoding, this will end the last empty chunk
    /// for H1 content-length, this has no effect.
    /// for H2, this will send an empty DATA frame with END_STREAM flag
    /// for subrequest, this will send a Done http task
    pub async fn finish_body(&mut self) -> Result<()> {
        match self {
            Self::H1(s) => s.finish_body().await.map(|_| ()),
            Self::H2(s) => s.finish(),
            Self::Subrequest(s) => s.finish().await.map(|_| ()),
            Self::Custom(s) => s.finish().await,
        }
    }
    /// Generate an error [`ResponseHeader`] for the given status code.
    pub fn generate_error(error: u16) -> ResponseHeader {
        match error {
            /* common error responses are pre-generated */
            502 => error_resp::HTTP_502_RESPONSE.clone(),
            400 => error_resp::HTTP_400_RESPONSE.clone(),
            _ => error_resp::gen_error_response(error),
        }
    }
    /// Send error response to client using a pre-generated error message.
    pub async fn respond_error(&mut self, error: u16) -> Result<()> {
        self.respond_error_with_body(error, Bytes::default()).await
    }
    /// Send error response to client using a pre-generated error message and custom body.
    pub async fn respond_error_with_body(&mut self, error: u16, body: Bytes) -> Result<()> {
        let mut resp = Self::generate_error(error);
        if !body.is_empty() {
            // error responses have a default content-length of zero
            resp.set_content_length(body.len())?
        }
        self.write_error_response(resp, body).await
    }
    /// Send an error response to a client with a response header and body.
    pub async fn write_error_response(&mut self, resp: ResponseHeader, body: Bytes) -> Result<()> {
        // TODO: we shouldn't be closing downstream connections on internally generated errors
        // and possibly other upstream connect() errors (connection refused, timeout, etc)
        //
        // This change is only here because we DO NOT re-use downstream connections
        // today on these errors and we should signal to the client that pingora is dropping it
        // rather than a misleading the client with 'keep-alive'
        self.set_keepalive(None);
        // If a response was already written and it's not informational 1xx, return.
        // The only exception is an informational 101 Switching Protocols, which is treated
        // as final response https://www.rfc-editor.org/rfc/rfc9110#section-15.2.2.
        if let Some(resp_written) = self.response_written().as_ref() {
            if !resp_written.status.is_informational() || resp_written.status == 101 {
                return Ok(());
            }
        }
        self.write_response_header(Box::new(resp)).await?;
        if !body.is_empty() {
            self.write_response_body(body, true).await?;
        } else {
            self.finish_body().await?;
        }
        custom_session!(self.finish_custom().await?);
        Ok(())
    }
    /// Whether there is no request body
    pub fn is_body_empty(&mut self) -> bool {
        match self {
            Self::H1(s) => s.is_body_empty(),
            Self::H2(s) => s.is_body_empty(),
            Self::Subrequest(s) => s.is_body_empty(),
            Self::Custom(s) => s.is_body_empty(),
        }
    }
    /// Whether the request body retry buffer has been truncated (grew past its size limit).
    pub fn retry_buffer_truncated(&self) -> bool {
        match self {
            Self::H1(s) => s.retry_buffer_truncated(),
            Self::H2(s) => s.retry_buffer_truncated(),
            Self::Subrequest(s) => s.retry_buffer_truncated(),
            Self::Custom(s) => s.retry_buffer_truncated(),
        }
    }
    /// Enable buffering the request body so it can be replayed later (e.g. for retries).
    pub fn enable_retry_buffering(&mut self) {
        match self {
            Self::H1(s) => s.enable_retry_buffering(),
            Self::H2(s) => s.enable_retry_buffering(),
            Self::Subrequest(s) => s.enable_retry_buffering(),
            Self::Custom(s) => s.enable_retry_buffering(),
        }
    }
    /// Return the buffered request body, if any was buffered.
    pub fn get_retry_buffer(&self) -> Option<Bytes> {
        match self {
            Self::H1(s) => s.get_retry_buffer(),
            Self::H2(s) => s.get_retry_buffer(),
            Self::Subrequest(s) => s.get_retry_buffer(),
            Self::Custom(s) => s.get_retry_buffer(),
        }
    }
    /// Read body (same as `read_request_body()`) or pending forever until downstream
    /// terminates the session.
    pub async fn read_body_or_idle(&mut self, no_body_expected: bool) -> Result<Option<Bytes>> {
        match self {
            Self::H1(s) => s.read_body_or_idle(no_body_expected).await,
            Self::H2(s) => s.read_body_or_idle(no_body_expected).await,
            Self::Subrequest(s) => s.read_body_or_idle(no_body_expected).await,
            Self::Custom(s) => s.read_body_or_idle(no_body_expected).await,
        }
    }
    /// Return a reference to the inner HTTP/1 session, or `None` for other session types.
    pub fn as_http1(&self) -> Option<&SessionV1> {
        match self {
            Self::H1(s) => Some(s),
            Self::H2(_) => None,
            Self::Subrequest(_) => None,
            Self::Custom(_) => None,
        }
    }
    /// Return a reference to the inner HTTP/2 session, or `None` for other session types.
    pub fn as_http2(&self) -> Option<&SessionV2> {
        match self {
            Self::H1(_) => None,
            Self::H2(s) => Some(s),
            Self::Subrequest(_) => None,
            Self::Custom(_) => None,
        }
    }
    /// Return a reference to the inner subrequest session, or `None` for other session types.
    pub fn as_subrequest(&self) -> Option<&SessionSubrequest> {
        match self {
            Self::H1(_) => None,
            Self::H2(_) => None,
            Self::Subrequest(s) => Some(s),
            Self::Custom(_) => None,
        }
    }
    /// Return a mutable reference to the inner subrequest session, or `None` otherwise.
    pub fn as_subrequest_mut(&mut self) -> Option<&mut SessionSubrequest> {
        match self {
            Self::H1(_) => None,
            Self::H2(_) => None,
            Self::Subrequest(s) => Some(s),
            Self::Custom(_) => None,
        }
    }
    /// Return a reference to the inner custom session, or `None` for other session types.
    pub fn as_custom(&self) -> Option<&dyn SessionCustom> {
        match self {
            Self::H1(_) => None,
            Self::H2(_) => None,
            Self::Subrequest(_) => None,
            Self::Custom(c) => Some(c.as_ref()),
        }
    }
    /// Return a mutable reference to the inner custom session box, or `None` otherwise.
    pub fn as_custom_mut(&mut self) -> Option<&mut Box<dyn SessionCustom>> {
        match self {
            Self::H1(_) => None,
            Self::H2(_) => None,
            Self::Subrequest(_) => None,
            Self::Custom(c) => Some(c),
        }
    }
    /// Write a 100 Continue response to the client.
    pub async fn write_continue_response(&mut self) -> Result<()> {
        match self {
            Self::H1(s) => s.write_continue_response().await,
            Self::H2(s) => s.write_response_header(
                Box::new(ResponseHeader::build(100, Some(0)).unwrap()),
                false,
            ),
            Self::Subrequest(s) => s.write_continue_response().await,
            // TODO(slava): is there any write_continue_response calls?
            Self::Custom(s) => {
                s.write_response_header(
                    Box::new(ResponseHeader::build(100, Some(0)).unwrap()),
                    false,
                )
                .await
            }
        }
    }
    /// Whether this request is for upgrade (e.g., websocket)
    pub fn is_upgrade_req(&self) -> bool {
        match self {
            Self::H1(s) => s.is_upgrade_req(),
            Self::H2(_) => false,
            Self::Subrequest(s) => s.is_upgrade_req(),
            Self::Custom(_) => false,
        }
    }
    /// Return how many response body bytes (application, not wire) already sent downstream
    pub fn body_bytes_sent(&self) -> usize {
        match self {
            Self::H1(s) => s.body_bytes_sent(),
            Self::H2(s) => s.body_bytes_sent(),
            Self::Subrequest(s) => s.body_bytes_sent(),
            Self::Custom(s) => s.body_bytes_sent(),
        }
    }
    /// Return how many request body bytes (application, not wire) already read from downstream
    pub fn body_bytes_read(&self) -> usize {
        match self {
            Self::H1(s) => s.body_bytes_read(),
            Self::H2(s) => s.body_bytes_read(),
            Self::Subrequest(s) => s.body_bytes_read(),
            Self::Custom(s) => s.body_bytes_read(),
        }
    }
    /// Return the [Digest] for the connection.
    pub fn digest(&self) -> Option<&Digest> {
        match self {
            Self::H1(s) => Some(s.digest()),
            Self::H2(s) => s.digest(),
            Self::Subrequest(s) => s.digest(),
            Self::Custom(s) => s.digest(),
        }
    }
    /// Return a mutable [Digest] reference for the connection.
    ///
    /// Will return `None` if multiple H2 streams are open.
    pub fn digest_mut(&mut self) -> Option<&mut Digest> {
        match self {
            Self::H1(s) => Some(s.digest_mut()),
            Self::H2(s) => s.digest_mut(),
            Self::Subrequest(s) => s.digest_mut(),
            Self::Custom(s) => s.digest_mut(),
        }
    }
    /// Return the client (peer) address of the connection.
    pub fn client_addr(&self) -> Option<&SocketAddr> {
        match self {
            Self::H1(s) => s.client_addr(),
            Self::H2(s) => s.client_addr(),
            Self::Subrequest(s) => s.client_addr(),
            Self::Custom(s) => s.client_addr(),
        }
    }
    /// Return the server (local) address of the connection.
    pub fn server_addr(&self) -> Option<&SocketAddr> {
        match self {
            Self::H1(s) => s.server_addr(),
            Self::H2(s) => s.server_addr(),
            Self::Subrequest(s) => s.server_addr(),
            Self::Custom(s) => s.server_addr(),
        }
    }
    /// Get the reference of the [Stream] that this HTTP/1 session is operating upon.
    /// None if the HTTP session is over H2, or a subrequest
    pub fn stream(&self) -> Option<&Stream> {
        match self {
            Self::H1(s) => Some(s.stream()),
            Self::H2(_) => None,
            Self::Subrequest(_) => None,
            Self::Custom(_) => None,
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/conditional_filter.rs | pingora-core/src/protocols/http/conditional_filter.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Conditional filter (not modified) utilities
use http::{header::*, StatusCode};
use httpdate::{parse_http_date, HttpDate};
use pingora_error::{ErrorType::InvalidHTTPHeader, OrErr, Result};
use pingora_http::{RequestHeader, ResponseHeader};
/// Evaluates conditional headers according to the [RFC](https://datatracker.ietf.org/doc/html/rfc9111#name-handling-a-received-validat).
///
/// Returns true if the request should receive 304 Not Modified.
pub fn not_modified_filter(req: &RequestHeader, resp: &ResponseHeader) -> bool {
    // https://datatracker.ietf.org/doc/html/rfc9110#name-304-not-modified
    // 304 can only validate 200
    if resp.status != StatusCode::OK {
        return false;
    }
    // Evaluation of conditional headers, based on RFC:
    // https://datatracker.ietf.org/doc/html/rfc9111#name-handling-a-received-validat
    // TODO: If-Match and If-Unmodified-Since, and returning 412 Precondition Failed
    // Note that this function is currently used only for proxy cache,
    // and the current RFCs have some conflicting opinions as to whether
    // If-Match and If-Unmodified-Since can be used. https://github.com/httpwg/http-core/issues/1111
    // Conditional request precedence:
    // https://datatracker.ietf.org/doc/html/rfc9110#name-precedence-of-preconditions
    // If-None-Match should be handled before If-Modified-Since.
    // XXX: In nginx, IMS is actually checked first, which may cause compatibility issues
    // for certain origins/clients.
    if req.headers.contains_key(IF_NONE_MATCH) {
        if let Some(etag) = resp.headers.get(ETAG) {
            // any matching entity tag (weak comparison) means "not modified"
            for inm in req.headers.get_all(IF_NONE_MATCH) {
                if weak_validate_etag(inm.as_bytes(), etag.as_bytes()) {
                    return true;
                }
            }
        }
        // https://datatracker.ietf.org/doc/html/rfc9110#field.if-modified-since
        // "MUST ignore If-Modified-Since if the request contains an If-None-Match header"
        return false;
    }
    // GET/HEAD only https://datatracker.ietf.org/doc/html/rfc9110#field.if-modified-since
    if matches!(req.method, http::Method::GET | http::Method::HEAD) {
        // unparseable date headers are treated as absent (Ok(None)/Err both fall through)
        if let Ok(Some(if_modified_since)) = req_header_as_http_date(req, &IF_MODIFIED_SINCE) {
            if let Ok(Some(last_modified)) = resp_header_as_http_date(resp, &LAST_MODIFIED) {
                if if_modified_since >= last_modified {
                    return true;
                }
            }
        }
    }
    false
}
// Trim ASCII whitespace bytes from the start of the slice.
// This is pretty much copied from the nightly API.
// TODO: use `trim_ascii_start` when it stabilizes https://doc.rust-lang.org/std/primitive.slice.html#method.trim_ascii_start
fn trim_ascii_start(mut bytes: &[u8]) -> &[u8] {
while let [first, rest @ ..] = bytes {
if first.is_ascii_whitespace() {
bytes = rest;
} else {
break;
}
}
bytes
}
/// Search for an ETag matching `target_etag` from the input header, using
/// [weak comparison](https://datatracker.ietf.org/doc/html/rfc9110#section-8.8.3.2).
/// Multiple ETags can exist in the header as a comma-separated list.
///
/// Returns true if a matching ETag exists.
pub fn weak_validate_etag(input_etag_header: &[u8], target_etag: &[u8]) -> bool {
// ETag comparison: https://datatracker.ietf.org/doc/html/rfc9110#section-8.8.3.2
fn strip_weak_prefix(etag: &[u8]) -> &[u8] {
// Weak ETags are prefaced with `W/`
etag.strip_prefix(b"W/").unwrap_or(etag)
}
// https://datatracker.ietf.org/doc/html/rfc9110#section-13.1.2 unsafe method only
if input_etag_header == b"*" {
return true;
}
// The RFC defines ETags here: https://datatracker.ietf.org/doc/html/rfc9110#section-8.8.3
// The RFC requires ETags to be wrapped in double quotes, though some legacy origins or clients
// don't adhere to this.
// Unfortunately by allowing non-quoted etags, parsing becomes a little more complicated.
//
// This implementation uses nginx's algorithm for parsing ETags, which can handle both quoted
// and non-quoted ETags. It essentially does a substring comparison at each comma divider,
// searching for an exact match of the ETag (optional double quotes included) followed by
// either EOF or another comma.
//
// Clients and upstreams should still ideally adhere to quoted ETags to disambiguate
// situations where commas are contained within the ETag (allowed by the RFC).
// XXX: This nginx algorithm will handle matching against ETags with commas correctly, but only
// if the target ETag is a quoted RFC-compliant ETag.
//
// For example, consider an if-none-match header: `"xyzzy,xyz,x,y", "xyzzy"`.
// If the target ETag is double quoted as mandated by the RFC like `"xyz,x"`, this algorithm
// will correctly report no matching ETag.
// But if the target ETag is not double quoted like `xyz,x`, it will "incorrectly" match
// against the substring after the first comma inside the first quoted ETag.
// Search for the target at each comma delimiter
let target_etag = strip_weak_prefix(target_etag);
let mut remaining = strip_weak_prefix(input_etag_header);
while let Some(search_slice) = remaining.get(0..target_etag.len()) {
if search_slice == target_etag {
remaining = &remaining[target_etag.len()..];
// check if there's any content after the matched substring
// skip any whitespace
remaining = trim_ascii_start(remaining);
if matches!(remaining.first(), None | Some(b',')) {
// we are either at the end of the header, or at a comma delimiter
// which means this is a match
return true;
}
}
// find the next delimiter (ignore any remaining part of the non-matching etag)
let Some(next_delimiter_pos) = remaining.iter().position(|&b| b == b',') else {
break;
};
remaining = &remaining[next_delimiter_pos..];
// find the next etag slice to compare
// ignore extraneous delimiters and whitespace
let Some(next_etag_pos) = remaining
.iter()
.position(|&b| !b.is_ascii_whitespace() && b != b',')
else {
break;
};
remaining = &remaining[next_etag_pos..];
remaining = strip_weak_prefix(remaining);
}
// remaining length < target etag length
false
}
/// Utility function to parse an HTTP request header as an [HTTP-date](https://datatracker.ietf.org/doc/html/rfc9110#name-date-time-formats).
pub fn req_header_as_http_date<H>(req: &RequestHeader, header_name: H) -> Result<Option<HttpDate>>
where
H: AsHeaderName,
{
let Some(header_value) = req.headers.get(header_name) else {
return Ok(None);
};
Ok(Some(parse_bytes_as_http_date(header_value.as_bytes())?))
}
/// Utility function to parse an HTTP response header as an [HTTP-date](https://datatracker.ietf.org/doc/html/rfc9110#name-date-time-formats).
pub fn resp_header_as_http_date<H>(
resp: &ResponseHeader,
header_name: H,
) -> Result<Option<HttpDate>>
where
H: AsHeaderName,
{
let Some(header_value) = resp.headers.get(header_name) else {
return Ok(None);
};
Ok(Some(parse_bytes_as_http_date(header_value.as_bytes())?))
}
fn parse_bytes_as_http_date(bytes: &[u8]) -> Result<HttpDate> {
let input_time = std::str::from_utf8(bytes).explain_err(InvalidHTTPHeader, |_| {
"HTTP date has unsupported characters (bytes outside of UTF-8)"
})?;
Ok(parse_http_date(input_time)
.or_err(InvalidHTTPHeader, "Invalid HTTP date")?
.into())
}
/// Utility function to convert the input response header to a 304 Not Modified response.
pub fn to_304(resp: &mut ResponseHeader) {
// https://datatracker.ietf.org/doc/html/rfc9110#name-304-not-modified
// XXX: https://datatracker.ietf.org/doc/html/rfc9110#name-content-length
// "A server may send content-length in 304", but no common web server does it
// So we drop both content-length and content-type for consistency/less surprise
resp.set_status(StatusCode::NOT_MODIFIED).unwrap();
resp.remove_header(&CONTENT_LENGTH);
resp.remove_header(&CONTENT_TYPE);
// https://datatracker.ietf.org/doc/html/rfc9110#section-15.4.5-4
// "SHOULD NOT generate representation metadata other than the above listed fields
// unless said metadata exists for the purpose of guiding cache updates"
// Remove some more representation metadata headers
resp.remove_header(&TRANSFER_ENCODING);
// note that the following are also stripped by nginx
resp.remove_header(&CONTENT_ENCODING);
resp.remove_header(&ACCEPT_RANGES);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_if_modified_since() {
fn build_req(if_modified_since: &[u8]) -> RequestHeader {
let mut req = RequestHeader::build("GET", b"/", None).unwrap();
req.insert_header("If-Modified-Since", if_modified_since)
.unwrap();
req
}
fn build_resp(last_modified: &[u8]) -> ResponseHeader {
let mut resp = ResponseHeader::build(200, None).unwrap();
resp.insert_header("Last-Modified", last_modified).unwrap();
resp
}
// same date
let last_modified = b"Fri, 26 Mar 2010 00:05:00 GMT";
let req = build_req(b"Fri, 26 Mar 2010 00:05:00 GMT");
let resp = build_resp(last_modified);
assert!(not_modified_filter(&req, &resp));
// before
let req = build_req(b"Fri, 26 Mar 2010 00:03:00 GMT");
let resp = build_resp(last_modified);
assert!(!not_modified_filter(&req, &resp));
// after
let req = build_req(b"Sun, 28 Mar 2010 01:07:00 GMT");
let resp = build_resp(last_modified);
assert!(not_modified_filter(&req, &resp));
}
#[test]
fn test_weak_validate_etag() {
let target_weak_etag = br#"W/"xyzzy""#;
let target_etag = br#""xyzzy""#;
assert!(weak_validate_etag(b"*", target_weak_etag));
assert!(weak_validate_etag(b"*", target_etag));
assert!(weak_validate_etag(target_etag, target_etag));
assert!(weak_validate_etag(target_etag, target_weak_etag));
assert!(weak_validate_etag(target_weak_etag, target_etag));
assert!(weak_validate_etag(target_weak_etag, target_weak_etag));
let mismatch_weak_etag = br#"W/"abc""#;
let mismatch_etag = br#""abc""#;
assert!(!weak_validate_etag(mismatch_etag, target_etag));
assert!(!weak_validate_etag(mismatch_etag, target_weak_etag));
assert!(!weak_validate_etag(mismatch_weak_etag, target_etag));
assert!(!weak_validate_etag(mismatch_weak_etag, target_weak_etag));
let multiple_etags = br#"a, "xyzzy","r2d2xxxx", "c3piozzzz",zzzfoo"#;
assert!(weak_validate_etag(multiple_etags, target_etag));
assert!(weak_validate_etag(multiple_etags, target_weak_etag));
let multiple_mismatch_etags = br#"foobar", "r2d2xxxx", "c3piozzzz",zzzfoo"#;
assert!(!weak_validate_etag(multiple_mismatch_etags, target_etag));
assert!(!weak_validate_etag(
multiple_mismatch_etags,
target_weak_etag
));
let multiple_mismatch_etags =
br#"foobar", "r2d2xxxxyzzy", "c3piozzzz",zzzfoo, "xyzzy,xyzzy""#;
assert!(!weak_validate_etag(multiple_mismatch_etags, target_etag));
assert!(!weak_validate_etag(
multiple_mismatch_etags,
target_weak_etag
));
let target_comma_etag = br#"",,,""#;
let multiple_mismatch_etags = br#",", ",,,,", ,,,,,,,,",,",",,,,,,""#;
assert!(!weak_validate_etag(
multiple_mismatch_etags,
target_comma_etag
));
let multiple_etags = br#",", ",,,,", ,,,,,,,,",,,",",,,,,,""#;
assert!(weak_validate_etag(multiple_etags, target_comma_etag));
}
#[test]
fn test_weak_validate_etag_unquoted() {
// legacy unquoted etag
let target_unquoted = b"xyzzy";
assert!(weak_validate_etag(b"*", target_unquoted));
let strong_etag = br#""xyzzy""#;
assert!(!weak_validate_etag(strong_etag, target_unquoted));
assert!(!weak_validate_etag(target_unquoted, strong_etag));
let multiple_etags = br#"a, "r2d2xxxx", "c3piozzzz", xyzzy"#;
assert!(weak_validate_etag(multiple_etags, target_unquoted));
let multiple_mismatch_etags =
br#"foobar", "r2d2xxxxyzzy", "c3piozzzz",zzzfoo, "xyzzy,xyzzy""#;
assert!(!weak_validate_etag(
multiple_mismatch_etags,
target_unquoted
));
// in certain edge cases where commas are used alongside quoted ETags,
// the test can fail if target is unquoted (the last ETag is intended to be one ETag)
let multiple_mismatch_etags =
br#"foobar", "r2d2xxxxyzzy", "c3piozzzz",zzzfoo, "xyzzy,xyzzy,xy""#;
assert!(weak_validate_etag(multiple_mismatch_etags, target_unquoted));
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/error_resp.rs | pingora-core/src/protocols/http/error_resp.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Error response generating utilities.
use http::header;
use once_cell::sync::Lazy;
use pingora_http::ResponseHeader;
use super::SERVER_NAME;
/// Generate an error response with the given status code.
///
/// This error response has a zero `Content-Length` and `Cache-Control: private, no-store`.
pub fn gen_error_response(code: u16) -> ResponseHeader {
let mut resp = ResponseHeader::build(code, Some(4)).unwrap();
resp.insert_header(header::SERVER, &SERVER_NAME[..])
.unwrap();
resp.insert_header(header::DATE, "Sun, 06 Nov 1994 08:49:37 GMT")
.unwrap(); // placeholder
resp.insert_header(header::CONTENT_LENGTH, "0").unwrap();
resp.insert_header(header::CACHE_CONTROL, "private, no-store")
.unwrap();
resp
}
/// Pre-generated 502 response
pub static HTTP_502_RESPONSE: Lazy<ResponseHeader> = Lazy::new(|| gen_error_response(502));
/// Pre-generated 400 response
pub static HTTP_400_RESPONSE: Lazy<ResponseHeader> = Lazy::new(|| gen_error_response(400));
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/bridge/grpc_web.rs | pingora-core/src/protocols/http/bridge/grpc_web.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use bytes::{BufMut, Bytes, BytesMut};
use http::{
header::{CONTENT_LENGTH, CONTENT_TYPE, TRANSFER_ENCODING},
HeaderMap,
};
use pingora_error::{ErrorType::ReadError, OrErr, Result};
use pingora_http::{RequestHeader, ResponseHeader};
/// Used for bridging gRPC to gRPC-web and vice-versa.
/// See gRPC-web [spec](https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md) and
/// gRPC h2 [spec](https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md) for more details.
#[derive(Default, PartialEq, Debug)]
pub enum GrpcWebCtx {
#[default]
Disabled,
Init,
Upgrade,
Trailers,
Done,
}
const GRPC: &str = "application/grpc";
const GRPC_WEB: &str = "application/grpc-web";
impl GrpcWebCtx {
pub fn init(&mut self) {
*self = Self::Init;
}
/// gRPC-web request is fed into this filter, if the module is initialized
/// we attempt to convert it to a gRPC request
pub fn request_header_filter(&mut self, req: &mut RequestHeader) {
if *self != Self::Init {
// not enabled
return;
}
let content_type = req
.headers
.get(CONTENT_TYPE)
.and_then(|v| v.to_str().ok())
.unwrap_or_default();
// check we have a valid grpc-web prefix
if !(content_type.len() >= GRPC_WEB.len()
&& content_type[..GRPC_WEB.len()].eq_ignore_ascii_case(GRPC_WEB))
{
// not gRPC-web
return;
}
// change content type to grpc
let ct = content_type.to_lowercase().replace(GRPC_WEB, GRPC);
req.insert_header(CONTENT_TYPE, ct).expect("insert header");
// The 'te' request header is used to detect incompatible proxies
// which are supposed to remove 'te' if it is unsupported.
// This header is required by gRPC over h2 protocol.
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
req.insert_header("te", "trailers").expect("insert header");
// For gRPC requests, EOS (end-of-stream) is indicated by the presence of the
// END_STREAM flag on the last received DATA frame.
// In scenarios where the Request stream needs to be closed
// but no data remains to be sent implementations
// MUST send an empty DATA frame with this flag set.
req.set_send_end_stream(false);
*self = Self::Upgrade
}
/// gRPC response is fed into this filter, if the module is in the bridge state
/// attempt to convert the response it to a gRPC-web response
pub fn response_header_filter(&mut self, resp: &mut ResponseHeader) {
if *self != Self::Upgrade {
// not an upgrade
return;
}
if resp.status.is_informational() {
// proxy informational statuses through
return;
}
let content_type = resp
.headers
.get(CONTENT_TYPE)
.and_then(|v| v.to_str().ok())
.unwrap_or_default();
// upstream h2, no reason to normalize case
if !content_type.starts_with(GRPC) {
// not gRPC
*self = Self::Disabled;
return;
}
// change content type to gRPC-web
let ct = content_type.replace(GRPC, GRPC_WEB);
resp.insert_header(CONTENT_TYPE, ct).expect("insert header");
// always use chunked for gRPC-web
resp.remove_header(&CONTENT_LENGTH);
resp.insert_header(TRANSFER_ENCODING, "chunked")
.expect("insert header");
*self = Self::Trailers
}
/// Used to convert gRPC trailers into gRPC-web trailers, note
/// gRPC-web trailers are encoded into the response body so we return
/// the encoded bytes here.
pub fn response_trailer_filter(
&mut self,
resp_trailers: &mut HeaderMap,
) -> Result<Option<Bytes>> {
/* Trailer header frame and trailer headers
0 - - 1 - - 2 - - 3 - - 4 - - 5 - - 6 - - 7 - - 8
| Ind | Length | Headers | <- trailer header indicator, length of headers
| Headers | <- rest is headers
| Headers |
*/
// TODO compressed trailer?
// grpc-web trailers frame head
const GRPC_WEB_TRAILER: u8 = 0x80;
// number of bytes in trailer header
const GRPC_TRAILER_HEADER_LEN: usize = 5;
// just some estimate
const DEFAULT_TRAILER_BUFFER_SIZE: usize = 256;
if *self != Self::Trailers {
// not an upgrade
*self = Self::Disabled;
return Ok(None);
}
// trailers are expected to arrive all at once encoded into a single trailers frame
// trailers in frame are separated by CRLFs
let mut buf = BytesMut::with_capacity(DEFAULT_TRAILER_BUFFER_SIZE);
let mut trailers = buf.split_off(GRPC_TRAILER_HEADER_LEN);
// iterate the key/value pairs and encode them into the tmp buffer
for (key, value) in resp_trailers.iter() {
// encode header
trailers.put_slice(key.as_ref());
trailers.put_slice(b":");
// encode value
trailers.put_slice(value.as_ref());
// encode header separator
trailers.put_slice(b"\r\n");
}
// ensure trailer length within u32
let len = trailers.len().try_into().or_err_with(ReadError, || {
format!("invalid gRPC trailer length: {}", trailers.len())
})?;
buf.put_u8(GRPC_WEB_TRAILER);
buf.put_u32(len);
buf.unsplit(trailers);
*self = Self::Done;
Ok(Some(buf.freeze()))
}
}
#[cfg(test)]
mod tests {
use super::*;
use http::{request::Request, response::Response, Version};
#[test]
fn non_grpc_web_request_ignored() {
let request = Request::get("https://pingora.dev/")
.header(CONTENT_TYPE, "application/grpc-we")
.version(Version::HTTP_2) // only set this to verify send_end_stream is configured
.body(())
.unwrap();
let mut request = request.into_parts().0.into();
let mut filter = GrpcWebCtx::default();
filter.init();
filter.request_header_filter(&mut request);
assert_eq!(filter, GrpcWebCtx::Init);
let headers = &request.headers;
assert_eq!(headers.get("te"), None);
assert_eq!(headers.get("application/grpc"), None);
assert_eq!(request.send_end_stream(), Some(true));
}
#[test]
fn grpc_web_request_module_disabled_ignored() {
let request = Request::get("https://pingora.dev/")
.header(CONTENT_TYPE, "application/grpc-web")
.version(Version::HTTP_2) // only set this to verify send_end_stream is configured
.body(())
.unwrap();
let mut request = request.into_parts().0.into();
// do not init
let mut filter = GrpcWebCtx::default();
filter.request_header_filter(&mut request);
assert_eq!(filter, GrpcWebCtx::Disabled);
let headers = &request.headers;
assert_eq!(headers.get("te"), None);
assert_eq!(headers.get(CONTENT_TYPE).unwrap(), "application/grpc-web");
assert_eq!(request.send_end_stream(), Some(true));
}
#[test]
fn grpc_web_request_upgrade() {
let request = Request::get("https://pingora.org/")
.header(CONTENT_TYPE, "application/gRPC-web+thrift")
.version(Version::HTTP_2) // only set this to verify send_end_stream is configured
.body(())
.unwrap();
let mut request = request.into_parts().0.into();
let mut filter = GrpcWebCtx::default();
filter.init();
filter.request_header_filter(&mut request);
assert_eq!(filter, GrpcWebCtx::Upgrade);
let headers = &request.headers;
assert_eq!(headers.get("te").unwrap(), "trailers");
assert_eq!(
headers.get(CONTENT_TYPE).unwrap(),
"application/grpc+thrift"
);
assert_eq!(request.send_end_stream(), Some(false));
}
#[test]
fn non_grpc_response_ignored() {
let response = Response::builder()
.header(CONTENT_TYPE, "text/html")
.header(CONTENT_LENGTH, "10")
.body(())
.unwrap();
let mut response = response.into_parts().0.into();
let mut filter = GrpcWebCtx::Upgrade;
filter.response_header_filter(&mut response);
assert_eq!(filter, GrpcWebCtx::Disabled);
let headers = &response.headers;
assert_eq!(headers.get(CONTENT_TYPE).unwrap(), "text/html");
assert_eq!(headers.get(CONTENT_LENGTH).unwrap(), "10");
}
#[test]
fn grpc_response_module_disabled_ignored() {
let response = Response::builder()
.header(CONTENT_TYPE, "application/grpc")
.body(())
.unwrap();
let mut response = response.into_parts().0.into();
let mut filter = GrpcWebCtx::default();
filter.response_header_filter(&mut response);
assert_eq!(filter, GrpcWebCtx::Disabled);
let headers = &response.headers;
assert_eq!(headers.get(CONTENT_TYPE).unwrap(), "application/grpc");
}
#[test]
fn grpc_response_upgrade() {
let response = Response::builder()
.header(CONTENT_TYPE, "application/grpc+proto")
.header(CONTENT_LENGTH, "0")
.body(())
.unwrap();
let mut response = response.into_parts().0.into();
let mut filter = GrpcWebCtx::Upgrade;
filter.response_header_filter(&mut response);
assert_eq!(filter, GrpcWebCtx::Trailers);
let headers = &response.headers;
assert_eq!(
headers.get(CONTENT_TYPE).unwrap(),
"application/grpc-web+proto"
);
assert_eq!(headers.get(TRANSFER_ENCODING).unwrap(), "chunked");
assert!(headers.get(CONTENT_LENGTH).is_none());
}
#[test]
fn grpc_response_informational_proxied() {
let response = Response::builder().status(100).body(()).unwrap();
let mut response = response.into_parts().0.into();
let mut filter = GrpcWebCtx::Upgrade;
filter.response_header_filter(&mut response);
assert_eq!(filter, GrpcWebCtx::Upgrade); // still upgrade
}
#[test]
fn grpc_response_trailer_headers_convert_to_byte_buf() {
let mut response = Response::builder()
.header("grpc-status", "0")
.header("grpc-message", "OK")
.body(())
.unwrap();
let response = response.headers_mut();
let mut filter = GrpcWebCtx::Trailers;
let buf = filter.response_trailer_filter(response).unwrap().unwrap();
assert_eq!(filter, GrpcWebCtx::Done);
let expected = b"grpc-status:0\r\ngrpc-message:OK\r\n";
let expected_len: u32 = expected.len() as u32; // 32 bytes
// assert the length prefix message frame
// [1 byte (header)| 4 byte (length) | 15 byte (grpc-status:0\r\n) | 17 bytes (grpc-message:OK\r\n)]
assert_eq!(0x80, buf[0]); // frame should start with trailer header
assert_eq!(expected_len.to_be_bytes(), buf[1..5]); // next 4 bytes length of trailer
assert_eq!(expected[..15], buf[5..20]); // grpc-status:0\r\n (15 bytes)
assert_eq!(expected[15..], buf[20..]); // grpc-message:OK\r\n (17 bytes)
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/bridge/mod.rs | pingora-core/src/protocols/http/bridge/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod grpc_web;
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/custom/client.rs | pingora-core/src/protocols/http/custom/client.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::Duration;
use async_trait::async_trait;
use bytes::Bytes;
use futures::Stream;
use http::HeaderMap;
use pingora_error::Result;
use pingora_http::{RequestHeader, ResponseHeader};
use crate::protocols::{l4::socket::SocketAddr, Digest, UniqueIDType};
use super::{BodyWrite, CustomMessageWrite};
#[doc(hidden)]
#[async_trait]
pub trait Session: Send + Sync + Unpin + 'static {
async fn write_request_header(&mut self, req: Box<RequestHeader>, end: bool) -> Result<()>;
async fn write_request_body(&mut self, data: Bytes, end: bool) -> Result<()>;
async fn finish_request_body(&mut self) -> Result<()>;
fn set_read_timeout(&mut self, timeout: Option<Duration>);
fn set_write_timeout(&mut self, timeout: Option<Duration>);
async fn read_response_header(&mut self) -> Result<()>;
async fn read_response_body(&mut self) -> Result<Option<Bytes>>;
fn response_finished(&self) -> bool;
async fn shutdown(&mut self, code: u32, ctx: &str);
fn response_header(&self) -> Option<&ResponseHeader>;
fn digest(&self) -> Option<&Digest>;
fn digest_mut(&mut self) -> Option<&mut Digest>;
fn server_addr(&self) -> Option<&SocketAddr>;
fn client_addr(&self) -> Option<&SocketAddr>;
async fn read_trailers(&mut self) -> Result<Option<HeaderMap>>;
fn fd(&self) -> UniqueIDType;
async fn check_response_end_or_error(&mut self, headers: bool) -> Result<bool>;
fn take_request_body_writer(&mut self) -> Option<Box<dyn BodyWrite>>;
async fn finish_custom(&mut self) -> Result<()>;
fn take_custom_message_reader(
&mut self,
) -> Option<Box<dyn Stream<Item = Result<Bytes>> + Unpin + Send + Sync + 'static>>;
async fn drain_custom_messages(&mut self) -> Result<()>;
fn take_custom_message_writer(&mut self) -> Option<Box<dyn CustomMessageWrite>>;
}
#[doc(hidden)]
#[async_trait]
impl Session for () {
async fn write_request_header(&mut self, _req: Box<RequestHeader>, _end: bool) -> Result<()> {
unreachable!("client session: write_request_header")
}
async fn write_request_body(&mut self, _data: Bytes, _end: bool) -> Result<()> {
unreachable!("client session: write_request_body")
}
async fn finish_request_body(&mut self) -> Result<()> {
unreachable!("client session: finish_request_body")
}
fn set_read_timeout(&mut self, _timeout: Option<Duration>) {
unreachable!("client session: set_read_timeout")
}
fn set_write_timeout(&mut self, _timeout: Option<Duration>) {
unreachable!("client session: set_write_timeout")
}
async fn read_response_header(&mut self) -> Result<()> {
unreachable!("client session: read_response_header")
}
async fn read_response_body(&mut self) -> Result<Option<Bytes>> {
unreachable!("client session: read_response_body")
}
fn response_finished(&self) -> bool {
unreachable!("client session: response_finished")
}
async fn shutdown(&mut self, _code: u32, _ctx: &str) {
unreachable!("client session: shutdown")
}
fn response_header(&self) -> Option<&ResponseHeader> {
unreachable!("client session: response_header")
}
fn digest(&self) -> Option<&Digest> {
unreachable!("client session: digest")
}
fn digest_mut(&mut self) -> Option<&mut Digest> {
unreachable!("client session: digest_mut")
}
fn server_addr(&self) -> Option<&SocketAddr> {
unreachable!("client session: server_addr")
}
fn client_addr(&self) -> Option<&SocketAddr> {
unreachable!("client session: client_addr")
}
async fn finish_custom(&mut self) -> Result<()> {
unreachable!("client session: finish_custom")
}
async fn read_trailers(&mut self) -> Result<Option<HeaderMap>> {
unreachable!("client session: read_trailers")
}
fn fd(&self) -> UniqueIDType {
unreachable!("client session: fd")
}
async fn check_response_end_or_error(&mut self, _headers: bool) -> Result<bool> {
unreachable!("client session: check_response_end_or_error")
}
fn take_custom_message_reader(
&mut self,
) -> Option<Box<dyn Stream<Item = Result<Bytes>> + Unpin + Send + Sync + 'static>> {
unreachable!("client session: get_custom_message_reader")
}
async fn drain_custom_messages(&mut self) -> Result<()> {
unreachable!("client session: drain_custom_messages")
}
fn take_custom_message_writer(&mut self) -> Option<Box<dyn CustomMessageWrite>> {
unreachable!("client session: get_custom_message_writer")
}
fn take_request_body_writer(&mut self) -> Option<Box<dyn BodyWrite>> {
unreachable!("client session: take_request_body_writer")
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/custom/mod.rs | pingora-core/src/protocols/http/custom/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::Duration;
use async_trait::async_trait;
use bytes::Bytes;
use futures::Stream;
use log::debug;
use pingora_error::Result;
use tokio_stream::StreamExt;
pub mod client;
pub mod server;
pub const CUSTOM_MESSAGE_QUEUE_SIZE: usize = 128;
pub fn is_informational_except_101<T: PartialOrd<u32>>(code: T) -> bool {
// excluding `101 Switching Protocols`, because it's not followed by any other
// response and it's a final
// The WebSocket Protocol https://datatracker.ietf.org/doc/html/rfc6455
code > 99 && code < 200 && code != 101
}
#[async_trait]
pub trait CustomMessageWrite: Send + Sync + Unpin + 'static {
fn set_write_timeout(&mut self, timeout: Option<Duration>);
async fn write_custom_message(&mut self, msg: Bytes) -> Result<()>;
async fn finish_custom(&mut self) -> Result<()>;
}
#[doc(hidden)]
#[async_trait]
impl CustomMessageWrite for () {
fn set_write_timeout(&mut self, _timeout: Option<Duration>) {}
async fn write_custom_message(&mut self, msg: Bytes) -> Result<()> {
debug!("write_custom_message: {:?}", msg);
Ok(())
}
async fn finish_custom(&mut self) -> Result<()> {
debug!("finish_custom");
Ok(())
}
}
#[async_trait]
pub trait BodyWrite: Send + Sync + Unpin + 'static {
async fn write_all_buf(&mut self, data: &mut Bytes) -> Result<()>;
async fn finish(&mut self) -> Result<()>;
}
pub async fn drain_custom_messages(
reader: Option<Box<dyn Stream<Item = Result<Bytes>> + Unpin + Send + Sync + 'static>>,
) -> Result<()> {
let Some(mut reader) = reader else {
return Ok(());
};
while let Some(res) = reader.next().await {
let msg = res?;
debug!("consume_custom_messages: {msg:?}");
}
Ok(())
}
#[macro_export]
macro_rules! custom_session {
($base_obj:ident . $($method_tokens:tt)+) => {
if let Some(custom_session) = $base_obj.as_custom_mut() {
#[allow(clippy::semicolon_if_nothing_returned)]
custom_session.$($method_tokens)+;
}
};
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/custom/server.rs | pingora-core/src/protocols/http/custom/server.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::Duration;
use async_trait::async_trait;
use bytes::Bytes;
use futures::Stream;
use http::HeaderMap;
use pingora_error::Result;
use pingora_http::{RequestHeader, ResponseHeader};
use crate::protocols::{http::HttpTask, l4::socket::SocketAddr, Digest};
use super::CustomMessageWrite;
// The interface a custom server session must provide. It mirrors the surface
// of the regular HTTP server session so proxy logic can drive either one, with
// the custom-message reader/writer accessors added at the end.
#[doc(hidden)]
#[async_trait]
pub trait Session: Send + Sync + Unpin + 'static {
    // --- request header access ---
    fn req_header(&self) -> &RequestHeader;
    fn req_header_mut(&mut self) -> &mut RequestHeader;
    // --- request body reading ---
    async fn read_body_bytes(&mut self) -> Result<Option<Bytes>>;
    async fn drain_request_body(&mut self) -> Result<()>;
    // --- response writing ---
    async fn write_response_header(&mut self, resp: Box<ResponseHeader>, end: bool) -> Result<()>;
    async fn write_response_header_ref(&mut self, resp: &ResponseHeader, end: bool) -> Result<()>;
    async fn write_body(&mut self, data: Bytes, end: bool) -> Result<()>;
    async fn write_trailers(&mut self, trailers: HeaderMap) -> Result<()>;
    async fn response_duplex_vec(&mut self, tasks: Vec<HttpTask>) -> Result<bool>;
    // --- timeout configuration ---
    fn set_read_timeout(&mut self, timeout: Option<Duration>);
    fn get_read_timeout(&self) -> Option<Duration>;
    fn set_write_timeout(&mut self, timeout: Option<Duration>);
    fn get_write_timeout(&self) -> Option<Duration>;
    fn set_total_drain_timeout(&mut self, timeout: Option<Duration>);
    fn get_total_drain_timeout(&self) -> Option<Duration>;
    // --- introspection / lifecycle ---
    fn request_summary(&self) -> String;
    fn response_written(&self) -> Option<&ResponseHeader>;
    async fn shutdown(&mut self, code: u32, ctx: &str);
    fn is_body_done(&mut self) -> bool;
    async fn finish(&mut self) -> Result<()>;
    fn is_body_empty(&mut self) -> bool;
    async fn read_body_or_idle(&mut self, no_body_expected: bool) -> Result<Option<Bytes>>;
    fn body_bytes_sent(&self) -> usize;
    fn body_bytes_read(&self) -> usize;
    fn digest(&self) -> Option<&Digest>;
    fn digest_mut(&mut self) -> Option<&mut Digest>;
    fn client_addr(&self) -> Option<&SocketAddr>;
    fn server_addr(&self) -> Option<&SocketAddr>;
    fn pseudo_raw_h1_request_header(&self) -> Bytes;
    // --- retry buffering ---
    fn enable_retry_buffering(&mut self);
    fn retry_buffer_truncated(&self) -> bool;
    fn get_retry_buffer(&self) -> Option<Bytes>;
    // --- custom message IO ---
    async fn finish_custom(&mut self) -> Result<()>;
    fn take_custom_message_reader(
        &mut self,
    ) -> Option<Box<dyn Stream<Item = Result<Bytes>> + Unpin + Send + Sync + 'static>>;
    fn take_custom_message_writer(&mut self) -> Option<Box<dyn CustomMessageWrite>>;
    fn restore_custom_message_writer(&mut self, writer: Box<dyn CustomMessageWrite>) -> Result<()>;
}
// Placeholder implementation used where a `Session` object is required but no
// real session exists. Every method panics via `unreachable!` with a message
// naming the method, since none of them is expected to ever be called on this
// stub. Messages are normalized to the "server session: <method>" form and to
// the actual method names (a few previously said "server_session:",
// "enable_retry_bufferings", or "get_custom_message_*").
#[doc(hidden)]
#[async_trait]
impl Session for () {
    fn req_header(&self) -> &RequestHeader {
        unreachable!("server session: req_header")
    }
    fn req_header_mut(&mut self) -> &mut RequestHeader {
        unreachable!("server session: req_header_mut")
    }
    async fn read_body_bytes(&mut self) -> Result<Option<Bytes>> {
        unreachable!("server session: read_body_bytes")
    }
    async fn drain_request_body(&mut self) -> Result<()> {
        unreachable!("server session: drain_request_body")
    }
    async fn write_response_header(
        &mut self,
        _resp: Box<ResponseHeader>,
        _end: bool,
    ) -> Result<()> {
        unreachable!("server session: write_response_header")
    }
    async fn write_response_header_ref(
        &mut self,
        _resp: &ResponseHeader,
        _end: bool,
    ) -> Result<()> {
        unreachable!("server session: write_response_header_ref")
    }
    async fn write_body(&mut self, _data: Bytes, _end: bool) -> Result<()> {
        unreachable!("server session: write_body")
    }
    async fn write_trailers(&mut self, _trailers: HeaderMap) -> Result<()> {
        unreachable!("server session: write_trailers")
    }
    async fn response_duplex_vec(&mut self, _tasks: Vec<HttpTask>) -> Result<bool> {
        unreachable!("server session: response_duplex_vec")
    }
    fn set_read_timeout(&mut self, _timeout: Option<Duration>) {
        unreachable!("server session: set_read_timeout")
    }
    fn get_read_timeout(&self) -> Option<Duration> {
        unreachable!("server session: get_read_timeout")
    }
    fn set_write_timeout(&mut self, _timeout: Option<Duration>) {
        unreachable!("server session: set_write_timeout")
    }
    fn get_write_timeout(&self) -> Option<Duration> {
        unreachable!("server session: get_write_timeout")
    }
    fn set_total_drain_timeout(&mut self, _timeout: Option<Duration>) {
        unreachable!("server session: set_total_drain_timeout")
    }
    fn get_total_drain_timeout(&self) -> Option<Duration> {
        unreachable!("server session: get_total_drain_timeout")
    }
    fn request_summary(&self) -> String {
        unreachable!("server session: request_summary")
    }
    fn response_written(&self) -> Option<&ResponseHeader> {
        unreachable!("server session: response_written")
    }
    async fn shutdown(&mut self, _code: u32, _ctx: &str) {
        unreachable!("server session: shutdown")
    }
    fn is_body_done(&mut self) -> bool {
        unreachable!("server session: is_body_done")
    }
    async fn finish(&mut self) -> Result<()> {
        unreachable!("server session: finish")
    }
    fn is_body_empty(&mut self) -> bool {
        unreachable!("server session: is_body_empty")
    }
    async fn read_body_or_idle(&mut self, _no_body_expected: bool) -> Result<Option<Bytes>> {
        unreachable!("server session: read_body_or_idle")
    }
    fn body_bytes_sent(&self) -> usize {
        unreachable!("server session: body_bytes_sent")
    }
    fn body_bytes_read(&self) -> usize {
        unreachable!("server session: body_bytes_read")
    }
    fn digest(&self) -> Option<&Digest> {
        unreachable!("server session: digest")
    }
    fn digest_mut(&mut self) -> Option<&mut Digest> {
        unreachable!("server session: digest_mut")
    }
    fn client_addr(&self) -> Option<&SocketAddr> {
        unreachable!("server session: client_addr")
    }
    fn server_addr(&self) -> Option<&SocketAddr> {
        unreachable!("server session: server_addr")
    }
    fn pseudo_raw_h1_request_header(&self) -> Bytes {
        unreachable!("server session: pseudo_raw_h1_request_header")
    }
    fn enable_retry_buffering(&mut self) {
        unreachable!("server session: enable_retry_buffering")
    }
    fn retry_buffer_truncated(&self) -> bool {
        unreachable!("server session: retry_buffer_truncated")
    }
    fn get_retry_buffer(&self) -> Option<Bytes> {
        unreachable!("server session: get_retry_buffer")
    }
    async fn finish_custom(&mut self) -> Result<()> {
        unreachable!("server session: finish_custom")
    }
    fn take_custom_message_reader(
        &mut self,
    ) -> Option<Box<dyn Stream<Item = Result<Bytes>> + Unpin + Send + Sync + 'static>> {
        unreachable!("server session: take_custom_message_reader")
    }
    fn take_custom_message_writer(&mut self) -> Option<Box<dyn CustomMessageWrite>> {
        unreachable!("server session: take_custom_message_writer")
    }
    fn restore_custom_message_writer(
        &mut self,
        _writer: Box<dyn CustomMessageWrite>,
    ) -> Result<()> {
        unreachable!("server session: restore_custom_message_writer")
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/subrequest/mod.rs | pingora-core/src/protocols/http/subrequest/mod.rs | pub(crate) mod body;
// In-memory stand-in IO stream used to back subrequest sessions.
pub(crate) mod dummy;
// HTTP server session implementation for subrequests.
pub mod server;
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/subrequest/dummy.rs | pingora-core/src/protocols/http/subrequest/dummy.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::protocols::raw_connect::ProxyDigest;
use crate::protocols::{
GetProxyDigest, GetSocketDigest, GetTimingDigest, Peek, SocketDigest, Ssl, TimingDigest,
UniqueID, UniqueIDType,
};
use async_trait::async_trait;
use core::pin::Pin;
use core::task::{Context, Poll};
use std::io::Cursor;
use std::sync::Arc;
use tokio::io::{AsyncRead, AsyncWrite, Error, ReadBuf};
// An async IO stream that yields the preloaded request bytes when read from
// and discards any data written to it.
#[derive(Debug)]
pub(crate) struct DummyIO(Cursor<Vec<u8>>);
impl DummyIO {
    /// Build a `DummyIO` whose read side will yield a copy of `read_bytes`.
    pub fn new(read_bytes: &[u8]) -> Self {
        DummyIO(Cursor::new(read_bytes.to_vec()))
    }
}
// Reads serve the preloaded bytes through the inner cursor. Once exhausted,
// the stream stays Pending forever rather than signaling EOF.
impl AsyncRead for DummyIO {
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<Result<(), Error>> {
        if self.0.position() < self.0.get_ref().len() as u64 {
            Pin::new(&mut self.0).poll_read(cx, buf)
        } else {
            // all data is read, pending forever otherwise the stream is considered closed
            // (the waker is deliberately not stored, so this read is never woken again)
            Poll::Pending
        }
    }
}
// Writes are a sink: every byte is reported as written and dropped; flush and
// shutdown complete immediately.
impl AsyncWrite for DummyIO {
    fn poll_write(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, Error>> {
        Poll::Ready(Ok(buf.len()))
    }
    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
        Poll::Ready(Ok(()))
    }
    fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
        Poll::Ready(Ok(()))
    }
}
// Minimal/empty implementations of the stream traits required to use DummyIO
// where a real connection is expected: no TLS, no digests, no-op shutdown.
impl UniqueID for DummyIO {
    fn id(&self) -> UniqueIDType {
        0 // placeholder
    }
}
impl Ssl for DummyIO {}
impl GetTimingDigest for DummyIO {
    fn get_timing_digest(&self) -> Vec<Option<TimingDigest>> {
        vec![]
    }
}
impl GetProxyDigest for DummyIO {
    fn get_proxy_digest(&self) -> Option<Arc<ProxyDigest>> {
        None
    }
}
impl GetSocketDigest for DummyIO {
    fn get_socket_digest(&self) -> Option<Arc<SocketDigest>> {
        None
    }
}
impl Peek for DummyIO {}
#[async_trait]
impl crate::protocols::Shutdown for DummyIO {
    async fn shutdown(&mut self) -> () {}
}
// Verify DummyIO semantics: reads yield the preloaded bytes, then stay
// Pending forever; writes always succeed.
#[tokio::test]
async fn test_dummy_io() {
    use futures::FutureExt;
    use tokio::io::{AsyncReadExt, AsyncWriteExt};
    let mut dummy = DummyIO::new(&[1, 2]);
    let res = dummy.read_u8().await;
    assert_eq!(res.unwrap(), 1);
    let res = dummy.read_u8().await;
    assert_eq!(res.unwrap(), 2);
    let res = dummy.read_u8().now_or_never();
    assert!(res.is_none()); // pending forever
    let res = dummy.write_u8(0).await;
    assert!(res.is_ok());
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/subrequest/body.rs | pingora-core/src/protocols/http/subrequest/body.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Subrequest body reader and writer.
//!
//! This implementation is very similar to v1 if not identical in many cases.
//! However it is generally much simpler because it does not have to handle
//! wire format bytes, simply basic checks such as content-length and when the
//! underlying channel (sender or receiver) is closed.
use bytes::Bytes;
use log::{debug, trace, warn};
use pingora_error::{
Error,
ErrorType::{self, *},
OrErr, Result,
};
use std::fmt::Debug;
use tokio::sync::{mpsc, oneshot};
use crate::protocols::http::HttpTask;
use http::HeaderMap;
/// Error emitted when the peer ends the body before the expected number of
/// bytes (per content-length) has been transferred.
pub const PREMATURE_BODY_END: ErrorType = ErrorType::new("PrematureBodyEnd");
/// Read-side state machine for the subrequest body reader.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ParseState {
    /// Not initialized yet; an `init_*` call is required before reading.
    ToStart,
    Complete(usize), // total size
    Partial(usize, usize), // size read, remaining size
    Done(usize), // done but there is error, size read
    UntilClose(usize), // read until connection closed, size read
}
// Short alias used throughout this module.
type PS = ParseState;
/// The reader side of a subrequest body, consuming `HttpTask`s from a channel.
pub struct BodyReader {
    /// Current parse state and byte counters.
    pub body_state: ParseState,
    // fired at most once, the first time the reader actually asks for body data
    notify_wants_body: Option<oneshot::Sender<()>>,
}
impl BodyReader {
    /// Create a new [`BodyReader`].
    ///
    /// `notify_wants_body`, if provided, is fired the first time this reader
    /// asks for body data so the sending side knows reading has started.
    pub fn new(notify_wants_body: Option<oneshot::Sender<()>>) -> Self {
        BodyReader {
            body_state: PS::ToStart,
            notify_wants_body,
        }
        // TODO: if wants body signal is None, init empty
    }
    /// Whether the reader still needs one of the `init_*` calls before reading.
    pub fn need_init(&self) -> bool {
        matches!(self.body_state, PS::ToStart)
    }
    /// Initialize the reader to expect exactly `cl` bytes of body.
    pub fn init_content_length(&mut self, cl: usize) {
        match cl {
            // a zero-length body is complete before any read happens
            0 => self.body_state = PS::Complete(0),
            _ => {
                self.body_state = PS::Partial(0, cl);
            }
        }
    }
    /// Initialize the reader to consume body data until the channel is closed.
    pub fn init_until_close(&mut self) {
        self.body_state = PS::UntilClose(0);
    }
    /// Whether the body is finished, either cleanly or after an error.
    pub fn body_done(&self) -> bool {
        matches!(self.body_state, PS::Complete(_) | PS::Done(_))
    }
    /// Whether the body completed with zero bytes.
    pub fn body_empty(&self) -> bool {
        self.body_state == PS::Complete(0)
    }
    /// Read the next chunk of body data from `rx`.
    ///
    /// Returns `Ok(None)` once the body is already complete (or done with
    /// error).
    ///
    /// # Panics
    /// Panics if the reader has not been initialized via an `init_*` call.
    pub async fn read_body(&mut self, rx: &mut mpsc::Receiver<HttpTask>) -> Result<Option<Bytes>> {
        match self.body_state {
            PS::Complete(_) => Ok(None),
            PS::Done(_) => Ok(None),
            PS::Partial(_, _) => self.do_read_body(rx).await,
            PS::UntilClose(_) => self.do_read_body_until_closed(rx).await,
            PS::ToStart => panic!("need to init BodyReader first"),
        }
    }
    /// Read body data in content-length mode.
    ///
    /// # Errors
    /// - if a non-body task is received on the channel
    /// - if the channel closes, or the body ends, before all expected bytes
    ///   have been read
    pub async fn do_read_body(
        &mut self,
        rx: &mut mpsc::Receiver<HttpTask>,
    ) -> Result<Option<Bytes>> {
        if let Some(notify) = self.notify_wants_body.take() {
            // fine if downstream isn't actively being read
            let _ = notify.send(());
        }
        let (bytes, end) = match rx.recv().await {
            Some(HttpTask::Body(bytes, end)) => (bytes, end),
            Some(task) => {
                // TODO: return an error into_down for Failed?
                return Error::e_explain(
                    InternalError,
                    format!("Unexpected HttpTask {task:?} while reading body (subrequest)"),
                );
            }
            None => (None, true), // downstream ended
        };
        match self.body_state {
            PS::Partial(read, to_read) => {
                let n = bytes.as_ref().map_or(0, |b| b.len());
                debug!(
                    "BodyReader body_state: {:?}, read data from IO: {n} (subrequest)",
                    self.body_state,
                );
                if bytes.is_none() {
                    self.body_state = PS::Done(read);
                    return Error::e_explain(ConnectionClosed, format!(
                        "Peer prematurely closed connection with {to_read} bytes of body remaining to read (subrequest)",
                    ));
                }
                if end && n < to_read {
                    // TODO: this doesn't flush the bytes we did receive to upstream
                    self.body_state = PS::Done(read + n);
                    return Error::e_explain(PREMATURE_BODY_END, format!(
                        "Peer prematurely ended body with {} bytes of body remaining to read (subrequest)",
                        to_read - n
                    ));
                }
                if n >= to_read {
                    if n > to_read {
                        // message fixed: used to read "more data then expected: extra {}bytes"
                        // ("then" typo; the `\` line continuation swallowed the space)
                        warn!(
                            "Peer sent more data than expected: extra {} \
                            bytes, discarding them (subrequest)",
                            n - to_read
                        );
                    }
                    self.body_state = PS::Complete(read + to_read);
                    // truncate to the declared length; extra bytes are dropped
                    Ok(bytes.map(|b| b.slice(0..to_read)))
                } else {
                    self.body_state = PS::Partial(read + n, to_read - n);
                    Ok(bytes)
                }
            }
            _ => panic!("wrong body state: {:?} (subrequest)", self.body_state),
        }
    }
    /// Read body data in read-until-close mode.
    ///
    /// # Errors
    /// - if a non-body task is received on the channel
    pub async fn do_read_body_until_closed(
        &mut self,
        rx: &mut mpsc::Receiver<HttpTask>,
    ) -> Result<Option<Bytes>> {
        if let Some(notify) = self.notify_wants_body.take() {
            // fine if downstream isn't active, receiver will indicate this
            let _ = notify.send(());
        }
        let (bytes, end) = match rx.recv().await {
            Some(HttpTask::Body(bytes, end)) => (bytes, end),
            Some(task) => {
                return Error::e_explain(
                    InternalError,
                    format!("Unexpected HttpTask {task:?} while reading body (subrequest)"),
                );
            }
            None => (None, true), // downstream ended
        };
        let n = bytes.as_ref().map_or(0, |b| b.len());
        match self.body_state {
            PS::UntilClose(read) => {
                if bytes.is_none() {
                    self.body_state = PS::Complete(read);
                    Ok(None)
                } else if end {
                    // explicit end also signifies completion
                    self.body_state = PS::Complete(read + n);
                    Ok(bytes)
                } else {
                    self.body_state = PS::UntilClose(read + n);
                    Ok(bytes)
                }
            }
            _ => panic!("wrong body state: {:?} (subrequest)", self.body_state),
        }
    }
}
/// Write-side state machine for the subrequest body writer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BodyMode {
    /// Not initialized yet; an `init_*` call selects the mode.
    ToSelect,
    ContentLength(usize, usize), // total length to write, bytes already written
    UntilClose(usize), //bytes written
    Complete(usize), //bytes written
}
// Short alias used throughout this module.
type BM = BodyMode;
/// The writer side of a subrequest body, sending `HttpTask`s over a channel.
pub struct BodyWriter {
    /// Current write mode and byte counters.
    pub body_mode: BodyMode,
}
impl BodyWriter {
    /// Create a new [`BodyWriter`] in the uninitialized `ToSelect` mode.
    pub fn new() -> Self {
        BodyWriter {
            body_mode: BM::ToSelect,
        }
    }
    /// Initialize the writer to write body data until finished.
    pub fn init_until_close(&mut self) {
        self.body_mode = BM::UntilClose(0);
    }
    /// Initialize the writer to write exactly `cl` bytes of body.
    pub fn init_content_length(&mut self, cl: usize) {
        self.body_mode = BM::ContentLength(cl, 0);
    }
    /// Write a chunk of body data to `sender`.
    ///
    /// Returns `Ok(None)` when the body is already complete, otherwise
    /// `Ok(Some(n))` with the number of bytes accounted for.
    ///
    /// # Panics
    /// Panics if the writer has not been initialized via an `init_*` call.
    pub async fn write_body(
        &mut self,
        sender: &mut mpsc::Sender<HttpTask>,
        bytes: Bytes,
    ) -> Result<Option<usize>> {
        trace!("Writing Body, size: {} (subrequest)", bytes.len());
        match self.body_mode {
            BM::Complete(_) => Ok(None),
            BM::ContentLength(_, _) => self.do_write_body(sender, bytes).await,
            BM::UntilClose(_) => self.do_write_until_close_body(sender, bytes).await,
            BM::ToSelect => panic!("wrong body phase: ToSelect (subrequest)"),
        }
    }
    /// Whether the body has been fully written.
    pub fn finished(&self) -> bool {
        match self.body_mode {
            BM::Complete(_) => true,
            BM::ContentLength(total, written) => written >= total,
            _ => false,
        }
    }
    // Content-length mode write: accounting is capped at the remaining
    // declared length.
    // NOTE(review): when `bytes` exceeds the remaining content-length the full
    // payload is still forwarded downstream and only the counter is capped,
    // unlike the reader which truncates — confirm this asymmetry is intended.
    async fn do_write_body(
        &mut self,
        tx: &mut mpsc::Sender<HttpTask>,
        bytes: Bytes,
    ) -> Result<Option<usize>> {
        match self.body_mode {
            BM::ContentLength(total, written) => {
                if written >= total {
                    // already written full length
                    return Ok(None);
                }
                let mut to_write = total - written;
                if to_write < bytes.len() {
                    warn!("Trying to write data over content-length (subrequest): {total}");
                } else {
                    to_write = bytes.len();
                }
                let res = tx.send(HttpTask::Body(Some(bytes), false)).await;
                match res {
                    Ok(()) => {
                        self.body_mode = BM::ContentLength(total, written + to_write);
                        Ok(Some(to_write))
                    }
                    Err(e) => Error::e_because(WriteError, "while writing body (subrequest)", e),
                }
            }
            _ => panic!("wrong body mode: {:?} (subrequest)", self.body_mode),
        }
    }
    // Until-close mode write: forward the chunk and count its length.
    async fn do_write_until_close_body(
        &mut self,
        tx: &mut mpsc::Sender<HttpTask>,
        bytes: Bytes,
    ) -> Result<Option<usize>> {
        match self.body_mode {
            BM::UntilClose(written) => {
                let res = tx.send(HttpTask::Body(Some(bytes.clone()), false)).await;
                match res {
                    Ok(()) => {
                        self.body_mode = BM::UntilClose(written + bytes.len());
                        Ok(Some(bytes.len()))
                    }
                    Err(e) => Error::e_because(WriteError, "while writing body (subrequest)", e),
                }
            }
            _ => panic!("wrong body mode: {:?} (subrequest)", self.body_mode),
        }
    }
    /// Finish the body, sending the final `Done` task downstream.
    ///
    /// Returns the total number of bytes written, or `Ok(None)` if there was
    /// nothing to finish.
    ///
    /// # Errors
    /// In content-length mode, errors with [`PREMATURE_BODY_END`] if fewer
    /// bytes than declared were written.
    pub async fn finish(&mut self, sender: &mut mpsc::Sender<HttpTask>) -> Result<Option<usize>> {
        match self.body_mode {
            BM::Complete(_) => Ok(None),
            BM::ContentLength(_, _) => self.do_finish_body(sender).await,
            BM::UntilClose(_) => self.do_finish_until_close_body(sender).await,
            BM::ToSelect => Ok(None),
        }
    }
    // Content-length mode finish: mark complete first, then validate the count.
    async fn do_finish_body(&mut self, tx: &mut mpsc::Sender<HttpTask>) -> Result<Option<usize>> {
        match self.body_mode {
            BM::ContentLength(total, written) => {
                self.body_mode = BM::Complete(written);
                if written < total {
                    return Error::e_explain(
                        PREMATURE_BODY_END,
                        format!("Content-length: {total} bytes written: {written} (subrequest)"),
                    );
                }
                tx.send(HttpTask::Done).await.or_err(
                    WriteError,
                    "while sending done task to downstream (subrequest)",
                )?;
                Ok(Some(written))
            }
            _ => panic!("wrong body mode: {:?} (subrequest)", self.body_mode),
        }
    }
    // Until-close mode finish: no length check is possible, just signal Done.
    async fn do_finish_until_close_body(
        &mut self,
        tx: &mut mpsc::Sender<HttpTask>,
    ) -> Result<Option<usize>> {
        match self.body_mode {
            BM::UntilClose(written) => {
                self.body_mode = BM::Complete(written);
                tx.send(HttpTask::Done).await.or_err(
                    WriteError,
                    "while sending done task to downstream (subrequest)",
                )?;
                Ok(Some(written))
            }
            _ => panic!("wrong body mode: {:?} (subrequest)", self.body_mode),
        }
    }
    /// Send response trailers downstream.
    pub async fn write_trailers(
        &mut self,
        tx: &mut mpsc::Sender<HttpTask>,
        trailers: Option<Box<HeaderMap>>,
    ) -> Result<()> {
        // TODO more safeguards e.g. trailers after end of stream
        tx.send(HttpTask::Trailer(trailers)).await.or_err(
            WriteError,
            "while writing response trailers to downstream (subrequest)",
        )?;
        Ok(())
    }
}
// Unit tests for the subrequest BodyReader/BodyWriter state machines, driven
// through in-process mpsc channels of HttpTasks.
#[cfg(test)]
mod tests {
    use super::*;
    fn init_log() {
        let _ = env_logger::builder().is_test(true).try_init();
    }
    const TASK_BUFFER_SIZE: usize = 4;
    // content-length body delivered in a single exact-size chunk
    #[tokio::test]
    async fn read_with_body_content_length() {
        init_log();
        let input = b"abc";
        let (tx, mut rx) = mpsc::channel::<HttpTask>(TASK_BUFFER_SIZE);
        let mut body_reader = BodyReader::new(None);
        body_reader.init_content_length(3);
        tx.send(HttpTask::Body(Some(Bytes::from(&input[..])), false))
            .await
            .unwrap();
        let res = body_reader.read_body(&mut rx).await.unwrap().unwrap();
        assert_eq!(res, &input[..]);
        assert_eq!(body_reader.body_state, ParseState::Complete(3));
    }
    // content-length body split across two chunks
    #[tokio::test]
    async fn read_with_body_content_length_2() {
        init_log();
        let input1 = b"a";
        let input2 = b"bc";
        let (tx, mut rx) = mpsc::channel::<HttpTask>(TASK_BUFFER_SIZE);
        let mut body_reader = BodyReader::new(None);
        body_reader.init_content_length(3);
        tx.send(HttpTask::Body(Some(Bytes::from(&input1[..])), false))
            .await
            .unwrap();
        let res = body_reader.read_body(&mut rx).await.unwrap().unwrap();
        assert_eq!(res, &input1[..]);
        assert_eq!(body_reader.body_state, ParseState::Partial(1, 2));
        tx.send(HttpTask::Body(Some(Bytes::from(&input2[..])), true))
            .await
            .unwrap();
        let res = body_reader.read_body(&mut rx).await.unwrap().unwrap();
        assert_eq!(res, &input2[..]);
        assert_eq!(body_reader.body_state, ParseState::Complete(3));
    }
    // empty (zero-length) body tasks are tolerated; an early `end` still errors
    #[tokio::test]
    async fn read_with_body_content_length_empty_task() {
        init_log();
        let input1 = b"a";
        let input2 = b""; // zero length body task
        let (tx, mut rx) = mpsc::channel::<HttpTask>(TASK_BUFFER_SIZE);
        let mut body_reader = BodyReader::new(None);
        body_reader.init_content_length(3);
        tx.send(HttpTask::Body(Some(Bytes::from(&input1[..])), false))
            .await
            .unwrap();
        let res = body_reader.read_body(&mut rx).await.unwrap().unwrap();
        assert_eq!(res, &input1[..]);
        assert_eq!(body_reader.body_state, ParseState::Partial(1, 2));
        // subrequest can allow empty body tasks
        tx.send(HttpTask::Body(Some(Bytes::from(&input2[..])), false))
            .await
            .unwrap();
        let res = body_reader.read_body(&mut rx).await.unwrap().unwrap();
        assert_eq!(res, &input2[..]);
        assert_eq!(body_reader.body_state, ParseState::Partial(1, 2));
        // premature end of stream still errors
        tx.send(HttpTask::Body(Some(Bytes::from(&input2[..])), true))
            .await
            .unwrap();
        let res = body_reader.read_body(&mut rx).await.unwrap_err();
        assert_eq!(&PREMATURE_BODY_END, res.etype());
        assert_eq!(body_reader.body_state, ParseState::Done(1));
    }
    // sender dropped before the declared length arrives -> ConnectionClosed
    #[tokio::test]
    async fn read_with_body_content_length_less() {
        init_log();
        let input1 = b"a";
        let (tx, mut rx) = mpsc::channel::<HttpTask>(TASK_BUFFER_SIZE);
        let mut body_reader = BodyReader::new(None);
        body_reader.init_content_length(3);
        tx.send(HttpTask::Body(Some(Bytes::from(&input1[..])), false))
            .await
            .unwrap();
        let res = body_reader.read_body(&mut rx).await.unwrap().unwrap();
        assert_eq!(res, &input1[..]);
        assert_eq!(body_reader.body_state, ParseState::Partial(1, 2));
        drop(tx);
        let res = body_reader.read_body(&mut rx).await.unwrap_err();
        assert_eq!(&ConnectionClosed, res.etype());
        assert_eq!(body_reader.body_state, ParseState::Done(1));
    }
    // extra bytes beyond content-length are truncated from the returned chunk
    #[tokio::test]
    async fn read_with_body_content_length_more() {
        init_log();
        let input1 = b"a";
        let input2 = b"bcd";
        let (tx, mut rx) = mpsc::channel::<HttpTask>(TASK_BUFFER_SIZE);
        let mut body_reader = BodyReader::new(None);
        body_reader.init_content_length(3);
        tx.send(HttpTask::Body(Some(Bytes::from(&input1[..])), false))
            .await
            .unwrap();
        let res = body_reader.read_body(&mut rx).await.unwrap().unwrap();
        assert_eq!(res, &input1[..]);
        assert_eq!(body_reader.body_state, ParseState::Partial(1, 2));
        tx.send(HttpTask::Body(Some(Bytes::from(&input2[..])), false))
            .await
            .unwrap();
        let res = body_reader.read_body(&mut rx).await.unwrap().unwrap();
        assert_eq!(res, &input2[0..2]);
        assert_eq!(body_reader.body_state, ParseState::Complete(3));
    }
    // until-close mode completes only when the sender side is dropped
    #[tokio::test]
    async fn read_with_body_until_close() {
        init_log();
        let input1 = b"a";
        let input2 = b""; // zero length body but not actually close
        let (tx, mut rx) = mpsc::channel::<HttpTask>(TASK_BUFFER_SIZE);
        let mut body_reader = BodyReader::new(None);
        body_reader.init_until_close();
        tx.send(HttpTask::Body(Some(Bytes::from(&input1[..])), false))
            .await
            .unwrap();
        let res = body_reader.read_body(&mut rx).await.unwrap().unwrap();
        assert_eq!(res, &input1[..]);
        assert_eq!(body_reader.body_state, ParseState::UntilClose(1));
        tx.send(HttpTask::Body(Some(Bytes::from(&input2[..])), false))
            .await
            .unwrap();
        let res = body_reader.read_body(&mut rx).await.unwrap().unwrap();
        assert_eq!(res, &input2[..]);
        assert_eq!(body_reader.body_state, ParseState::UntilClose(1));
        // sending end closed
        drop(tx);
        let res = body_reader.read_body(&mut rx).await.unwrap();
        assert_eq!(res, None);
        assert_eq!(body_reader.body_state, ParseState::Complete(1));
    }
    // content-length writer: caps at the declared length, then emits Done
    #[tokio::test]
    async fn write_body_cl() {
        init_log();
        let output = b"a";
        let (mut tx, mut rx) = mpsc::channel::<HttpTask>(TASK_BUFFER_SIZE);
        let mut body_writer = BodyWriter::new();
        body_writer.init_content_length(1);
        assert_eq!(body_writer.body_mode, BodyMode::ContentLength(1, 0));
        let res = body_writer
            .write_body(&mut tx, Bytes::from(&output[..]))
            .await
            .unwrap()
            .unwrap();
        assert_eq!(res, 1);
        assert_eq!(body_writer.body_mode, BodyMode::ContentLength(1, 1));
        // write again, over the limit
        let res = body_writer
            .write_body(&mut tx, Bytes::from(&output[..]))
            .await
            .unwrap();
        assert_eq!(res, None);
        assert_eq!(body_writer.body_mode, BodyMode::ContentLength(1, 1));
        let res = body_writer.finish(&mut tx).await.unwrap().unwrap();
        assert_eq!(res, 1);
        assert_eq!(body_writer.body_mode, BodyMode::Complete(1));
        // only one body task written
        match rx.try_recv().unwrap() {
            HttpTask::Body(b, end) => {
                assert_eq!(b.unwrap(), &output[..]);
                assert!(!end);
            }
            task => panic!("unexpected task {task:?}"),
        }
        assert!(matches!(rx.try_recv().unwrap(), HttpTask::Done));
        drop(tx);
        assert_eq!(
            rx.try_recv().unwrap_err(),
            mpsc::error::TryRecvError::Disconnected
        );
    }
    // until-close writer: every chunk is forwarded and counted, Done on finish
    #[tokio::test]
    async fn write_body_until_close() {
        init_log();
        let data = b"a";
        let (mut tx, mut rx) = mpsc::channel::<HttpTask>(TASK_BUFFER_SIZE);
        let mut body_writer = BodyWriter::new();
        body_writer.init_until_close();
        assert_eq!(body_writer.body_mode, BodyMode::UntilClose(0));
        let res = body_writer
            .write_body(&mut tx, Bytes::from(&data[..]))
            .await
            .unwrap()
            .unwrap();
        assert_eq!(res, 1);
        assert_eq!(body_writer.body_mode, BodyMode::UntilClose(1));
        match rx.try_recv().unwrap() {
            HttpTask::Body(b, end) => {
                assert_eq!(b.unwrap().as_ref(), data);
                assert!(!end);
            }
            task => panic!("unexpected task {task:?}"),
        }
        let res = body_writer
            .write_body(&mut tx, Bytes::from(&data[..]))
            .await
            .unwrap()
            .unwrap();
        assert_eq!(res, 1);
        assert_eq!(body_writer.body_mode, BodyMode::UntilClose(2));
        let res = body_writer.finish(&mut tx).await.unwrap().unwrap();
        assert_eq!(res, 2);
        assert_eq!(body_writer.body_mode, BodyMode::Complete(2));
        match rx.try_recv().unwrap() {
            HttpTask::Body(b, end) => {
                assert_eq!(b.unwrap().as_ref(), data);
                assert!(!end);
            }
            task => panic!("unexpected task {task:?}"),
        }
        assert!(matches!(rx.try_recv().unwrap(), HttpTask::Done));
        assert_eq!(rx.try_recv().unwrap_err(), mpsc::error::TryRecvError::Empty);
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/subrequest/server.rs | pingora-core/src/protocols/http/subrequest/server.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # HTTP server session for subrequests
//!
//! This server session is _very_ similar to the implementation for v1, if not
//! identical in many cases. Though in theory subrequests are HTTP version
//! agnostic in reality this means that they must interpret any version-specific
//! idiosyncrasies such as Connection: upgrade headers in H1 because they
//! "stand-in" for the actual main Session when running proxy logic. As much as
//! possible they should defer downstream-specific logic to the actual downstream
//! session and act more or less as a pipe.
//!
//! The session also instantiates a [`SubrequestHandle`] that contains necessary
//! communication channels with the subrequest, to make it possible to send
//! and receive data.
//!
//! Its write calls will send `HttpTask`s to the handle channels, instead of
//! flushing to an actual underlying stream.
//!
//! Connection reuse and keep-alive are not supported because there is no
//! actual underlying stream, only transient channels per request.
use bytes::Bytes;
use http::HeaderValue;
use http::{header, header::AsHeaderName, HeaderMap, Method, Version};
use log::{debug, trace, warn};
use pingora_error::{Error, ErrorType::*, OkOrErr, Result};
use pingora_http::{RequestHeader, ResponseHeader};
use pingora_timeout::timeout;
use std::time::Duration;
use tokio::sync::{mpsc, oneshot};
use super::body::{BodyReader, BodyWriter};
use crate::protocols::http::{
body_buffer::FixedBuffer,
server::Session as GenericHttpSession,
subrequest::dummy::DummyIO,
v1::common::{header_value_content_length, is_header_value_chunked_encoding, BODY_BUF_LIMIT},
v1::server::HttpSession as SessionV1,
HttpTask,
};
use crate::protocols::{Digest, SocketAddr};
/// The HTTP server session
pub struct HttpSession {
    // these are only options because we allow dropping them separately on shutdown
    tx: Option<mpsc::Sender<HttpTask>>,
    rx: Option<mpsc::Receiver<HttpTask>>,
    // Currently subrequest session is initialized via a dummy SessionV1 only
    // TODO: need to be able to indicate H2 / other HTTP versions here
    v1_inner: Box<SessionV1>,
    // set to true once read_request() has successfully parsed the header
    read_req_header: bool,
    // the response header that was written, if any
    response_written: Option<ResponseHeader>,
    read_timeout: Option<Duration>,
    write_timeout: Option<Duration>,
    total_drain_timeout: Option<Duration>,
    // byte counters for the body written / read so far
    body_bytes_sent: usize,
    body_bytes_read: usize,
    // buffered request body for retries, when buffering is enabled
    retry_buffer: Option<FixedBuffer>,
    body_reader: BodyReader,
    body_writer: BodyWriter,
    // NOTE(review): presumably tracks a protocol upgrade — confirm against usage
    upgraded: bool,
    // TODO: likely doesn't need to be a separate bool when/if moving away from dummy SessionV1
    clear_request_body_headers: bool,
    digest: Option<Box<Digest>>,
}
/// A handle to the subrequest session itself to interact or read from it.
///
/// Dropping the handle closes the channels connected to the subrequest.
pub struct SubrequestHandle {
    /// Channel sender (for subrequest input)
    pub tx: mpsc::Sender<HttpTask>,
    /// Channel receiver (for subrequest output)
    pub rx: mpsc::Receiver<HttpTask>,
    /// Indicates when subrequest wants to start reading body input
    // TODO: use when piping subrequest input/output
    pub subreq_wants_body: oneshot::Receiver<()>,
}
impl SubrequestHandle {
    /// Spawn a task to drain received HttpTasks.
    ///
    /// The sender half is moved into the spawned task so the subrequest's
    /// input channel stays open until all of its output has been consumed.
    pub fn drain_tasks(mut self) -> tokio::task::JoinHandle<()> {
        tokio::spawn(async move {
            let _tx = self.tx; // keep handle to sender alive
            while self.rx.recv().await.is_some() {}
            trace!("subrequest dropped");
        })
    }
}
impl HttpSession {
    /// Create a new http server session for a subrequest.
    /// The created session needs to call [`Self::read_request()`] first before performing
    /// any other operations.
    ///
    /// Also returns the [`SubrequestHandle`] holding the other ends of the
    /// task channels plus the "wants body" notification receiver.
    pub fn new_from_session(session: &GenericHttpSession) -> (Self, SubrequestHandle) {
        // replay the original request bytes through a DummyIO so the inner v1
        // session can parse them
        let v1_inner = SessionV1::new(Box::new(DummyIO::new(&session.to_h1_raw())));
        let digest = session.digest().cloned();
        // allow buffering a small number of tasks, otherwise exert backpressure
        const CHANNEL_BUFFER_SIZE: usize = 4;
        // downstream_*: input into the subrequest; upstream_*: its output
        let (downstream_tx, downstream_rx) = mpsc::channel(CHANNEL_BUFFER_SIZE);
        let (upstream_tx, upstream_rx) = mpsc::channel(CHANNEL_BUFFER_SIZE);
        let (wants_body_tx, wants_body_rx) = oneshot::channel();
        (
            HttpSession {
                v1_inner: Box::new(v1_inner),
                tx: Some(upstream_tx),
                rx: Some(downstream_rx),
                body_reader: BodyReader::new(Some(wants_body_tx)),
                body_writer: BodyWriter::new(),
                read_req_header: false,
                response_written: None,
                read_timeout: None,
                write_timeout: None,
                total_drain_timeout: None,
                body_bytes_sent: 0,
                body_bytes_read: 0,
                retry_buffer: None,
                upgraded: false,
                clear_request_body_headers: false,
                digest: digest.map(Box::new),
            },
            SubrequestHandle {
                tx: downstream_tx,
                rx: upstream_rx,
                subreq_wants_body: wants_body_rx,
            },
        )
    }
/// Read the request header. Return `Ok(Some(n))` where the read and parsing are successful.
pub async fn read_request(&mut self) -> Result<Option<usize>> {
let res = self.v1_inner.read_request().await?;
if res.is_none() {
// this is when h1 client closes the connection without sending data,
// which shouldn't be the case for a subrequest session just created
return Error::e_explain(InternalError, "no session request header provided");
}
self.read_req_header = true;
if self.clear_request_body_headers {
// indicated that we wanted to clear these headers in the past, do so now
self.clear_request_body_headers();
}
Ok(res)
}
// The following accessors are thin delegations to the inner h1 session parser.
/// Validate the request header read. This function must be called after the request header
/// read.
/// # Panics
/// this function and most other functions will panic if called before [`Self::read_request()`]
pub fn validate_request(&self) -> Result<()> {
    self.v1_inner.validate_request()
}
/// Return a reference of the `RequestHeader` this session read
/// # Panics
/// this function and most other functions will panic if called before [`Self::read_request()`]
pub fn req_header(&self) -> &RequestHeader {
    self.v1_inner.req_header()
}
/// Return a mutable reference of the `RequestHeader` this session read
/// # Panics
/// this function and most other functions will panic if called before [`Self::read_request()`]
pub fn req_header_mut(&mut self) -> &mut RequestHeader {
    self.v1_inner.req_header_mut()
}
/// Get the header value for the given header name
/// If there are multiple headers under the same name, the first one will be returned
/// Use `self.req_header().header.get_all(name)` to get all the headers under the same name
pub fn get_header(&self, name: impl AsHeaderName) -> Option<&HeaderValue> {
    self.v1_inner.get_header(name)
}
/// Return the method of this request. None if the request is not read yet.
pub(super) fn get_method(&self) -> Option<&http::Method> {
    self.v1_inner.get_method()
}
/// Return the path of the request (i.e., the `/hello?1` of `GET /hello?1 HTTP1.1`)
/// An empty slice will be used if there is no path or the request is not read yet
pub(super) fn get_path(&self) -> &[u8] {
    self.v1_inner.get_path()
}
/// Return the host header of the request. An empty slice will be used if there is no host header
pub(super) fn get_host(&self) -> &[u8] {
    self.v1_inner.get_host()
}
/// Return a string `$METHOD $PATH, Host: $HOST`. Mostly for logging and debug purpose
pub fn request_summary(&self) -> String {
    format!(
        "{} {}, Host: {} (subrequest)",
        self.get_method().map_or("-", |r| r.as_str()),
        String::from_utf8_lossy(self.get_path()),
        String::from_utf8_lossy(self.get_host())
    )
}
/// Is the request a upgrade request
pub fn is_upgrade_req(&self) -> bool {
    self.v1_inner.is_upgrade_req()
}
/// Get the request header as raw bytes, `b""` when the header doesn't exist
pub fn get_header_bytes(&self, name: impl AsHeaderName) -> &[u8] {
    self.v1_inner.get_header_bytes(name)
}
/// Read the request body. `Ok(None)` when there is no (more) body to read.
pub async fn read_body_bytes(&mut self) -> Result<Option<Bytes>> {
    let read = self.read_body().await?;
    Ok(read.inspect(|b| {
        // account for the application body bytes and mirror them into the
        // retry buffer when buffering is enabled
        self.body_bytes_read += b.len();
        if let Some(buffer) = self.retry_buffer.as_mut() {
            buffer.write_to_buffer(b);
        }
    }))
}
// Read one body chunk from the downstream channel (no timeout applied here).
async fn do_read_body(&mut self) -> Result<Option<Bytes>> {
    self.init_body_reader();
    self.body_reader
        .read_body(self.rx.as_mut().expect("rx valid before shutdown"))
        .await
}
/// Read the body bytes with timeout.
async fn read_body(&mut self) -> Result<Option<Bytes>> {
    match self.read_timeout {
        Some(t) => match timeout(t, self.do_read_body()).await {
            Ok(res) => res,
            Err(_) => Error::e_explain(
                ReadTimedout,
                format!("reading body, timeout: {t:?} (subrequest)"),
            ),
        },
        None => self.do_read_body().await,
    }
}
// Keep reading (and discarding) body chunks until the reader reports
// completion; any read error propagates to the caller.
async fn do_drain_request_body(&mut self) -> Result<()> {
    while self.read_body_bytes().await?.is_some() {}
    Ok(())
}
/// Drain the request body. `Ok(())` when there is no (more) body to read.
pub async fn drain_request_body(&mut self) -> Result<()> {
    if self.is_body_done() {
        return Ok(());
    }
    // the total drain timeout caps the entire drain; the per-read timeout
    // (if any) still applies between individual chunks
    match self.total_drain_timeout {
        Some(t) => match timeout(t, self.do_drain_request_body()).await {
            Ok(res) => res,
            Err(_) => Error::e_explain(
                ReadTimedout,
                format!("draining body, timeout: {t:?} (subrequest)"),
            ),
        },
        None => self.do_drain_request_body().await,
    }
}
/// Whether there is no (more) body to be read.
pub fn is_body_done(&mut self) -> bool {
    self.init_body_reader();
    self.body_reader.body_done()
}
/// Whether the request has an empty body
/// Because HTTP 1.1 clients have to send either `Content-Length` or `Transfer-Encoding` in order
/// to signal the server that it will send the body, this function returns accurate results even
/// only when the request header is just read.
pub fn is_body_empty(&mut self) -> bool {
    self.init_body_reader();
    self.body_reader.body_empty()
}
/// Write the response header to the client.
/// This function can be called more than once to send 1xx informational headers excluding 101.
pub async fn write_response_header(&mut self, header: Box<ResponseHeader>) -> Result<()> {
    if let Some(resp) = self.response_written.as_ref() {
        if !resp.status.is_informational() || self.upgraded {
            // a final header (or an upgrade) was already sent; ignore duplicates
            // (typo fix: was "Respond header")
            warn!("Response header is already sent, cannot send again (subrequest)");
            return Ok(());
        }
    }
    // XXX: don't add additional downstream headers, unlike h1, subreq is mostly treated as a pipe
    // Allow informational header (excluding 101) to pass through without affecting the state
    // of the request
    if header.status == 101 || !header.status.is_informational() {
        // reset request body to done for incomplete upgrade handshakes
        if let Some(upgrade_ok) = self.is_upgrade(&header) {
            if upgrade_ok {
                debug!("ok upgrade handshake");
                // For ws we use HTTP1_0 do_read_body_until_closed
                //
                // On ws close the initiator sends a close frame and
                // then waits for a response from the peer, once it receives
                // a response it closes the conn. After receiving a
                // control frame indicating the connection should be closed,
                // a peer discards any further data received.
                // https://www.rfc-editor.org/rfc/rfc6455#section-1.4
                self.upgraded = true;
            } else {
                debug!("bad upgrade handshake!");
                // reset request body buf and mark as done
                // safe to reset an upgrade because it doesn't have body
                self.body_reader.init_content_length(0);
            }
        }
        self.init_body_writer(&header);
    }
    // TODO propagate h2 end
    debug!("send response header (subrequest)");
    match self
        .tx
        .as_mut()
        .expect("tx valid before shutdown")
        .send(HttpTask::Header(header.clone(), false))
        .await
    {
        Ok(()) => {
            // record the header only after it was successfully queued
            self.response_written = Some(*header);
            Ok(())
        }
        Err(e) => Error::e_because(WriteError, "writing response header", e),
    }
}
/// Return the response header if it is already sent.
pub fn response_written(&self) -> Option<&ResponseHeader> {
    self.response_written.as_ref()
}
/// `Some(true)` if this is a successful upgrade
/// `Some(false)` if the request is an upgrade but the response refuses it
/// `None` if the request is not an upgrade.
pub fn is_upgrade(&self, header: &ResponseHeader) -> Option<bool> {
    self.v1_inner.is_upgrade(header)
}
// Choose the response body write mode based on the response header.
fn init_body_writer(&mut self, header: &ResponseHeader) {
    use http::StatusCode;
    /* the following responses don't have body 204, 304, and HEAD */
    if matches!(
        header.status,
        StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED
    ) || self.get_method() == Some(&Method::HEAD)
    {
        self.body_writer.init_content_length(0);
        return;
    }
    if header.status.is_informational() && header.status != StatusCode::SWITCHING_PROTOCOLS {
        // 1xx response, not enough to init body
        return;
    }
    if self.is_upgrade(header) == Some(true) {
        self.body_writer.init_until_close();
    } else {
        let te_value = header.headers.get(http::header::TRANSFER_ENCODING);
        if is_header_value_chunked_encoding(te_value) {
            // transfer-encoding takes priority over content-length
            self.body_writer.init_until_close();
        } else {
            let content_length =
                header_value_content_length(header.headers.get(http::header::CONTENT_LENGTH));
            match content_length {
                Some(length) => {
                    self.body_writer.init_content_length(length);
                }
                None => {
                    /* TODO: 1. connection: keepalive cannot be used,
                    2. mark connection must be closed */
                    self.body_writer.init_until_close();
                }
            }
        }
    }
}
/// Same as [`Self::write_response_header()`] but takes a reference.
pub async fn write_response_header_ref(&mut self, resp: &ResponseHeader) -> Result<()> {
    self.write_response_header(Box::new(resp.clone())).await
}
// Send one body chunk over the channel and account for the bytes written.
async fn do_write_body(&mut self, buf: Bytes) -> Result<Option<usize>> {
    let written = self
        .body_writer
        .write_body(self.tx.as_mut().expect("tx valid before shutdown"), buf)
        .await;
    if let Ok(Some(num_bytes)) = written {
        self.body_bytes_sent += num_bytes;
    }
    written
}
/// Write response body to the client. Return `Ok(None)` when there shouldn't be more body
/// to be written, e.g., writing more bytes than what the `Content-Length` header suggests
pub async fn write_body(&mut self, buf: Bytes) -> Result<Option<usize>> {
    // TODO: check if the response header is written
    match self.write_timeout {
        Some(t) => match timeout(t, self.do_write_body(buf)).await {
            Ok(res) => res,
            Err(_) => Error::e_explain(WriteTimedout, format!("writing body, timeout: {t:?}")),
        },
        None => self.do_write_body(buf).await,
    }
}
// For upgraded sessions: once the response side ends, force the request body
// reader to be considered done as well.
fn maybe_force_close_body_reader(&mut self) {
    if self.upgraded && !self.body_reader.body_done() {
        // response is done, reset the request body to close
        self.body_reader.init_content_length(0);
    }
}
/// Signal that there is no more body to write.
/// This call will try to flush the buffer if there is any un-flushed data.
/// For chunked encoding response, this call will also send the last chunk.
/// For upgraded sessions, this call will also close the reading of the client body.
pub async fn finish(&mut self) -> Result<Option<usize>> {
    let res = self
        .body_writer
        .finish(self.tx.as_mut().expect("tx valid before shutdown"))
        .await?;
    self.maybe_force_close_body_reader();
    Ok(res)
}
/// Return how many response body bytes (application, not wire) already sent downstream
pub fn body_bytes_sent(&self) -> usize {
    self.body_bytes_sent
}
/// Return how many request body bytes (application, not wire) already read from downstream
pub fn body_bytes_read(&self) -> usize {
    self.body_bytes_read
}
// Whether the request declares chunked transfer encoding.
fn is_chunked_encoding(&self) -> bool {
    is_header_value_chunked_encoding(self.get_header(header::TRANSFER_ENCODING))
}
/// Clear body-related subrequest headers.
///
/// This is ok to call before the request is read; the headers will then be cleared after
/// reading the request header.
pub fn clear_request_body_headers(&mut self) {
    // remember the intent so `read_request()` can re-apply it after parsing
    self.clear_request_body_headers = true;
    if self.read_req_header {
        let req = self.v1_inner.req_header_mut();
        req.remove_header(&header::CONTENT_LENGTH);
        req.remove_header(&header::TRANSFER_ENCODING);
        req.remove_header(&header::CONTENT_TYPE);
        req.remove_header(&header::CONTENT_ENCODING);
    }
}
// Lazily choose how the request body should be read, based on the request
// header. Safe to call repeatedly; only the first call after (re)init acts.
fn init_body_reader(&mut self) {
    if self.body_reader.need_init() {
        // reset retry buffer
        if let Some(buffer) = self.retry_buffer.as_mut() {
            buffer.clear();
        }
        if self.req_header().version == Version::HTTP_11 && self.is_upgrade_req() {
            self.body_reader.init_until_close();
            return;
        }
        if self.is_chunked_encoding() {
            // if chunked encoding, content-length should be ignored
            // TE is not visible at subrequest HttpTask level
            // so this means read until request closure
            self.body_reader.init_until_close();
        } else {
            let cl = header_value_content_length(self.get_header(header::CONTENT_LENGTH));
            match cl {
                Some(i) => {
                    self.body_reader.init_content_length(i);
                }
                None => {
                    match self.req_header().version {
                        Version::HTTP_11 => {
                            // Per RFC assume no body by default in HTTP 1.1
                            self.body_reader.init_content_length(0);
                        }
                        _ => {
                            self.body_reader.init_until_close();
                        }
                    }
                }
            }
        }
    }
}
/// Whether the retry buffer overflowed, i.e. the buffered request body is incomplete.
pub fn retry_buffer_truncated(&self) -> bool {
    // `is_some_and` replaces the needlessly lazy `map_or_else(|| false, ...)`
    self.retry_buffer
        .as_ref()
        .is_some_and(|r| r.is_truncated())
}
/// Enable request body retry buffering if not already enabled.
pub fn enable_retry_buffering(&mut self) {
    if self.retry_buffer.is_none() {
        self.retry_buffer = Some(FixedBuffer::new(BODY_BUF_LIMIT))
    }
}
/// Return the buffered request body; `None` if buffering is off or the buffer was truncated.
pub fn get_retry_buffer(&self) -> Option<Bytes> {
    self.retry_buffer.as_ref().and_then(|b| {
        if b.is_truncated() {
            // a partial body is useless for retries
            None
        } else {
            b.get_buffer()
        }
    })
}
/// This function will (async) block forever until the client closes the connection.
pub async fn idle(&mut self) -> Result<HttpTask> {
    let rx = self.rx.as_mut().expect("rx valid before shutdown");
    let mut task = rx
        .recv()
        .await
        .or_err(ReadError, "during HTTP idle state")?;
    // just consume empty body or done messages, the downstream channel is not a real
    // connection and only used for this one request
    while matches!(&task, HttpTask::Done)
        || matches!(&task, HttpTask::Body(b, _) if b.as_ref().is_none_or(|b| b.is_empty()))
    {
        task = rx
            .recv()
            .await
            .or_err(ReadError, "during HTTP idle state")?;
    }
    Ok(task)
}
/// This function will return body bytes (same as [`Self::read_body_bytes()`]), but after
/// the client body finishes (`Ok(None)` is returned), calling this function again will block
/// forever, same as [`Self::idle()`].
pub async fn read_body_or_idle(&mut self, no_body_expected: bool) -> Result<Option<Bytes>> {
    if no_body_expected || self.is_body_done() {
        // any task received while idling here is a protocol violation
        let read_task = self.idle().await?;
        Error::e_explain(
            ConnectError,
            format!("Sent unexpected task {read_task:?} after end of body (subrequest)"),
        )
    } else {
        self.read_body_bytes().await
    }
}
/// Return the raw bytes of the request header.
pub fn get_headers_raw_bytes(&self) -> Bytes {
    self.v1_inner.get_headers_raw_bytes()
}
/// Close the subrequest channels, indicating that no more data will be sent
/// or received. This is expected to be called before dropping the `Session` itself.
pub fn shutdown(&mut self) {
    drop(self.tx.take());
    drop(self.rx.take());
}
/// Sets the downstream read timeout. This will trigger if we're unable
/// to read from the subrequest channels after `timeout`.
pub fn set_read_timeout(&mut self, timeout: Option<Duration>) {
    self.read_timeout = timeout;
}
/// Get the downstream read timeout.
pub fn get_read_timeout(&self) -> Option<Duration> {
    self.read_timeout
}
/// Sets the downstream write timeout. This will trigger if we're unable
/// to write to the subrequest channel after `timeout`.
pub fn set_write_timeout(&mut self, timeout: Option<Duration>) {
    self.write_timeout = timeout;
}
/// Get the downstream write timeout.
pub fn get_write_timeout(&self) -> Option<Duration> {
    self.write_timeout
}
/// Sets the total drain timeout.
/// Note that the downstream read timeout still applies between body byte reads.
pub fn set_total_drain_timeout(&mut self, timeout: Option<Duration>) {
    self.total_drain_timeout = timeout;
}
/// Get the downstream total drain timeout.
pub fn get_total_drain_timeout(&self) -> Option<Duration> {
    self.total_drain_timeout
}
/// Return the [Digest], this is originally from the main request.
pub fn digest(&self) -> Option<&Digest> {
    self.digest.as_deref()
}
/// Return a mutable [Digest] reference.
pub fn digest_mut(&mut self) -> Option<&mut Digest> {
    self.digest.as_deref_mut()
}
/// Return the client (peer) address of the main request.
pub fn client_addr(&self) -> Option<&SocketAddr> {
    self.digest()
        .and_then(|d| d.socket_digest.as_ref())
        .map(|d| d.peer_addr())?
}
/// Return the server (local) address of the main request.
pub fn server_addr(&self) -> Option<&SocketAddr> {
    self.digest()
        .and_then(|d| d.socket_digest.as_ref())
        .map(|d| d.local_addr())?
}
/// Write a `100 Continue` response to the client.
pub async fn write_continue_response(&mut self) -> Result<()> {
    // only send if we haven't already
    if self.response_written.is_none() {
        // size hint Some(0) because default is 8
        return self
            .write_response_header(Box::new(ResponseHeader::build(100, Some(0)).unwrap()))
            .await;
    }
    Ok(())
}
// Forward a single `HttpTask` to the subrequest output.
// Returns `Ok(true)` once the response stream has ended.
async fn response_duplex(&mut self, task: HttpTask) -> Result<bool> {
    let end_stream = match task {
        HttpTask::Header(header, end_stream) => {
            self.write_response_header(header)
                .await
                .map_err(|e| e.into_down())?;
            end_stream
        }
        HttpTask::Body(data, end_stream) => match data {
            Some(d) => {
                if !d.is_empty() {
                    self.write_body(d).await.map_err(|e| e.into_down())?;
                }
                end_stream
            }
            None => end_stream,
        },
        HttpTask::Trailer(trailers) => {
            self.write_trailers(trailers).await?;
            true
        }
        HttpTask::Done => true,
        HttpTask::Failed(e) => return Err(e),
    };
    if end_stream {
        // no-op if body wasn't initialized or is finished already
        self.finish().await.map_err(|e| e.into_down())?;
    }
    Ok(end_stream || self.body_writer.finished())
}
// TODO: use vectored write to avoid copying
/// Forward a batch of `HttpTask`s downstream. Returns `Ok(true)` once the
/// response stream has ended.
pub async fn response_duplex_vec(&mut self, mut tasks: Vec<HttpTask>) -> Result<bool> {
    // TODO: send httptask failed on each error?
    let n_tasks = tasks.len();
    if n_tasks == 1 {
        // fallback to single operation to avoid copy
        return self.response_duplex(tasks.pop().unwrap()).await;
    }
    let mut end_stream = false;
    for task in tasks.into_iter() {
        // NOTE: the `end_stream` bound inside each match arm shadows the outer
        // accumulator; the trailing `|| end_stream` below reads the outer one
        end_stream = match task {
            HttpTask::Header(header, end_stream) => {
                self.write_response_header(header)
                    .await
                    .map_err(|e| e.into_down())?;
                end_stream
            }
            HttpTask::Body(data, end_stream) => match data {
                Some(d) => {
                    if !d.is_empty() {
                        self.write_body(d).await.map_err(|e| e.into_down())?;
                    }
                    end_stream
                }
                None => end_stream,
            },
            HttpTask::Done => {
                // write done
                // we'll send HttpTask::Done at the end of this loop in finish
                true
            }
            HttpTask::Trailer(trailers) => {
                self.write_trailers(trailers).await?;
                true
            }
            HttpTask::Failed(e) => {
                // write failed
                // error should also be returned when sender drops
                return Err(e);
            }
        } || end_stream; // safe guard in case `end` in tasks flips from true to false
    }
    if end_stream {
        // no-op if body wasn't initialized or is finished already
        self.finish().await.map_err(|e| e.into_down())?;
    }
    Ok(end_stream || self.body_writer.finished())
}
/// Write response trailers to the client, this also closes the stream.
pub async fn write_trailers(&mut self, trailers: Option<Box<HeaderMap>>) -> Result<()> {
    self.body_writer
        .write_trailers(
            self.tx.as_mut().expect("tx valid before shutdown"),
            trailers,
        )
        .await
}
}
#[cfg(test)]
mod tests_stream {
use super::*;
use crate::protocols::http::subrequest::body::{BodyMode, ParseState};
use http::StatusCode;
use std::str;
use tokio_test::io::Builder;
// Initialize env_logger once per test binary; later calls are no-ops.
fn init_log() {
    let _ = env_logger::builder().is_test(true).try_init();
}
// Build a subrequest session (plus its handle) from raw h1 request bytes.
async fn session_from_input(input: &[u8]) -> (HttpSession, SubrequestHandle) {
    let mock_io = Builder::new().read(input).build();
    let mut http_stream = GenericHttpSession::new_http1(Box::new(mock_io));
    http_stream.read_request().await.unwrap();
    let (mut http_stream, handle) = HttpSession::new_from_session(&http_stream);
    http_stream.read_request().await.unwrap();
    (http_stream, handle)
}
// Build an upgrade request with the given `Upgrade` and `Connection` values.
async fn build_upgrade_req(upgrade: &str, conn: &str) -> (HttpSession, SubrequestHandle) {
    let input = format!("GET / HTTP/1.1\r\nHost: pingora.org\r\nUpgrade: {upgrade}\r\nConnection: {conn}\r\n\r\n");
    session_from_input(input.as_bytes()).await
}
// Build a plain (non-upgrade) GET request.
async fn build_req() -> (HttpSession, SubrequestHandle) {
    let input = "GET / HTTP/1.1\r\nHost: pingora.org\r\n\r\n".to_string();
    session_from_input(input.as_bytes()).await
}
#[tokio::test]
async fn read_basic() {
    init_log();
    let input = b"GET / HTTP/1.1\r\n\r\n";
    let (http_stream, _handle) = session_from_input(input).await;
    assert_eq!(0, http_stream.req_header().headers.len());
    assert_eq!(Method::GET, http_stream.req_header().method);
    assert_eq!(b"/", http_stream.req_header().uri.path().as_bytes());
}
#[tokio::test]
async fn read_upgrade_req() {
    // http 1.0
    let input = b"GET / HTTP/1.0\r\nHost: pingora.org\r\nUpgrade: websocket\r\nConnection: upgrade\r\n\r\n";
    let (http_stream, _handle) = session_from_input(input).await;
    assert!(!http_stream.is_upgrade_req());
    // different method
    let input = b"POST / HTTP/1.1\r\nHost: pingora.org\r\nUpgrade: websocket\r\nConnection: upgrade\r\n\r\n";
    let (http_stream, _handle) = session_from_input(input).await;
    assert!(http_stream.is_upgrade_req());
    // missing upgrade header
    let input = b"GET / HTTP/1.1\r\nHost: pingora.org\r\nConnection: upgrade\r\n\r\n";
    let (http_stream, _handle) = session_from_input(input).await;
    assert!(!http_stream.is_upgrade_req());
    // no connection header
    let input = b"GET / HTTP/1.1\r\nHost: pingora.org\r\nUpgrade: WebSocket\r\n\r\n";
    let (http_stream, _handle) = session_from_input(input).await;
    assert!(http_stream.is_upgrade_req());
    let (http_stream, _handle) = build_upgrade_req("websocket", "Upgrade").await;
    assert!(http_stream.is_upgrade_req());
    // mixed case
    let (http_stream, _handle) = build_upgrade_req("WebSocket", "Upgrade").await;
    assert!(http_stream.is_upgrade_req());
}
#[tokio::test]
async fn read_upgrade_req_with_1xx_response() {
let (mut http_stream, _handle) = build_upgrade_req("websocket", "upgrade").await;
assert!(http_stream.is_upgrade_req());
let mut response = ResponseHeader::build(StatusCode::CONTINUE, None).unwrap();
response.set_version(http::Version::HTTP_11);
http_stream
.write_response_header(Box::new(response))
.await
.unwrap();
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | true |
// Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! HTTP/1.x client session
use bytes::{BufMut, Bytes, BytesMut};
use http::{header, header::AsHeaderName, HeaderValue, StatusCode, Version};
use log::{debug, trace};
use pingora_error::{Error, ErrorType::*, OrErr, Result, RetryType};
use pingora_http::{HMap, IntoCaseHeaderName, RequestHeader, ResponseHeader};
use pingora_timeout::timeout;
use std::io::ErrorKind;
use std::str;
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use super::body::{BodyReader, BodyWriter};
use super::common::*;
use crate::protocols::http::HttpTask;
use crate::protocols::{Digest, SocketAddr, Stream, UniqueID, UniqueIDType};
use crate::utils::{BufRef, KVRef};
/// The HTTP 1.x client session
pub struct HttpSession {
    // raw bytes read for the response header (replaced once parsed)
    buf: Bytes,
    pub(crate) underlying_stream: Stream,
    // range of `buf` that contains the response header
    raw_header: Option<BufRef>,
    // range of `buf` holding body bytes read alongside the header
    preread_body: Option<BufRef>,
    body_reader: BodyReader,
    body_writer: BodyWriter,
    // timeouts:
    /// The read timeout, which will be applied to both reading the header and the body.
    /// The timeout is reset on every read. This is not a timeout on the overall duration of the
    /// response.
    pub read_timeout: Option<Duration>,
    /// The write timeout which will be applied to both writing request header and body.
    /// The timeout is reset on every write. This is not a timeout on the overall duration of the
    /// request.
    pub write_timeout: Option<Duration>,
    keepalive_timeout: KeepaliveStatus,
    pub(crate) digest: Box<Digest>,
    // the response header, once read
    response_header: Option<Box<ResponseHeader>>,
    // the request header, once successfully written
    request_written: Option<Box<RequestHeader>>,
    // request body bytes written so far (application bytes)
    bytes_sent: usize,
    /// Total response body payload bytes received from upstream
    body_recv: usize,
    // whether the response indicated a successful protocol upgrade
    upgraded: bool,
}
/// HTTP 1.x client session
impl HttpSession {
/// Create a new http client session from an established (TCP or TLS) [`Stream`].
pub fn new(stream: Stream) -> Self {
    // TODO: maybe we should put digest in the connection itself
    // snapshot connection-level metadata from the stream up front
    let digest = Box::new(Digest {
        ssl_digest: stream.get_ssl_digest(),
        timing_digest: stream.get_timing_digest(),
        proxy_digest: stream.get_proxy_digest(),
        socket_digest: stream.get_socket_digest(),
    });
    HttpSession {
        underlying_stream: stream,
        buf: Bytes::new(), // zero size, will be replaced by parsed header later
        raw_header: None,
        preread_body: None,
        body_reader: BodyReader::new(true),
        body_writer: BodyWriter::new(),
        keepalive_timeout: KeepaliveStatus::Off,
        response_header: None,
        request_written: None,
        read_timeout: None,
        write_timeout: None,
        digest,
        bytes_sent: 0,
        body_recv: 0,
        upgraded: false,
    }
}
/// Write the request header to the server
/// After the request header is sent. The caller can either start reading the response or
/// sending request body if any.
pub async fn write_request_header(&mut self, req: Box<RequestHeader>) -> Result<usize> {
    // TODO: make sure this can only be called once
    // init body writer
    self.init_req_body_writer(&req);
    // NOTE(review): unwrap assumes header serialization cannot fail — confirm
    let to_wire = http_req_header_to_wire(&req).unwrap();
    trace!("Writing request header: {to_wire:?}");
    let write_fut = self.underlying_stream.write_all(to_wire.as_ref());
    // a timeout is mapped to an io::Error so both paths share the error
    // classification below
    match self.write_timeout {
        Some(t) => match timeout(t, write_fut).await {
            Ok(res) => res,
            Err(_) => Err(std::io::Error::from(ErrorKind::TimedOut)),
        },
        None => write_fut.await,
    }
    .map_err(|e| match e.kind() {
        ErrorKind::TimedOut => {
            Error::because(WriteTimedout, "while writing request headers (timeout)", e)
        }
        _ => Error::because(WriteError, "while writing request headers", e),
    })?;
    self.underlying_stream
        .flush()
        .await
        .or_err(WriteError, "flushing request header")?;
    // write was successful
    self.request_written = Some(req);
    Ok(to_wire.len())
}
// Write one request body chunk and account for the bytes sent.
async fn do_write_body(&mut self, buf: &[u8]) -> Result<Option<usize>> {
    let written = self
        .body_writer
        .write_body(&mut self.underlying_stream, buf)
        .await;
    if let Ok(Some(num_bytes)) = written {
        self.bytes_sent += num_bytes;
    }
    written
}
/// Write request body. Return Ok(None) if no more body should be written, either due to
/// Content-Length or the last chunk is already sent
pub async fn write_body(&mut self, buf: &[u8]) -> Result<Option<usize>> {
    // TODO: verify that request header is sent already
    match self.write_timeout {
        Some(t) => match timeout(t, self.do_write_body(buf)).await {
            Ok(res) => res,
            Err(_) => Error::e_explain(WriteTimedout, format!("writing body, timeout: {t:?}")),
        },
        None => self.do_write_body(buf).await,
    }
}
// For upgraded sessions: once the request side ends, force the response body
// reader to be considered done as well.
fn maybe_force_close_body_reader(&mut self) {
    if self.upgraded && !self.body_reader.body_done() {
        // request is done, reset the response body to close
        self.body_reader.init_content_length(0, b"");
    }
}
/// Flush local buffer and notify the server by sending the last chunk if chunked encoding is
/// used.
pub async fn finish_body(&mut self) -> Result<Option<usize>> {
    let res = self.body_writer.finish(&mut self.underlying_stream).await?;
    self.underlying_stream
        .flush()
        .await
        .or_err(WriteError, "flushing body")?;
    self.maybe_force_close_body_reader();
    Ok(res)
}
// Validate the response header read. This function must be called after the response header
// read.
fn validate_response(&self) -> Result<()> {
    // the caller guarantees a header was read before validating
    let header = self
        .response_header
        .as_deref()
        .expect("response header must be read");
    // ad-hoc checks
    super::common::check_dup_content_length(&header.headers)?;
    Ok(())
}
/// Read the response header from the server
/// This function can be called multiple times, if the headers received are just informational
/// headers.
pub async fn read_response(&mut self) -> Result<usize> {
    self.buf.clear();
    let mut buf = BytesMut::with_capacity(INIT_HEADER_BUF_SIZE);
    let mut already_read: usize = 0;
    loop {
        if already_read > MAX_HEADER_SIZE {
            /* NOTE: this check only blocks second read. The first large read is allowed
            since the buf is already allocated. The goal is to avoid slowly bloating
            this buffer */
            return Error::e_explain(
                InvalidHTTPHeader,
                format!("Response header larger than {MAX_HEADER_SIZE}"),
            );
        }
        let read_fut = self.underlying_stream.read_buf(&mut buf);
        let read_result = match self.read_timeout {
            Some(t) => timeout(t, read_fut)
                .await
                .map_err(|_| Error::explain(ReadTimedout, "while reading response headers"))?,
            None => read_fut.await,
        };
        let n = match read_result {
            Ok(n) => match n {
                0 => {
                    // EOF before a complete header: only safe to retry on a
                    // reused connection
                    let mut e = Error::explain(
                        ConnectionClosed,
                        format!(
                            "while reading response headers, bytes already read: {already_read}",
                        ),
                    );
                    e.retry = RetryType::ReusedOnly;
                    return Err(e);
                }
                _ => {
                    n /* read n bytes, continue */
                }
            },
            Err(e) => {
                let true_io_error = e.raw_os_error().is_some();
                let mut e = Error::because(
                    ReadError,
                    format!(
                        "while reading response headers, bytes already read: {already_read}",
                    ),
                    e,
                );
                // Likely OSError, typical if a previously reused connection drops it
                if true_io_error {
                    e.retry = RetryType::ReusedOnly;
                } // else: not safe to retry TLS error
                return Err(e);
            }
        };
        already_read += n;
        let mut headers = [httparse::EMPTY_HEADER; MAX_HEADERS];
        let mut resp = httparse::Response::new(&mut headers);
        let parsed = parse_resp_buffer(&mut resp, &buf);
        match parsed {
            HeaderParseState::Complete(s) => {
                // bytes [0, s) are the header; bytes [s, already_read) were
                // body bytes read together with the header
                self.raw_header = Some(BufRef(0, s));
                self.preread_body = Some(BufRef(s, already_read));
                let base = buf.as_ptr() as usize;
                let mut header_refs = Vec::<KVRef>::with_capacity(resp.headers.len());
                // Note: resp.headers has the correct number of headers
                // while header_refs doesn't as it is still empty
                let _num_headers = populate_headers(base, &mut header_refs, resp.headers);
                let mut response_header = Box::new(ResponseHeader::build(
                    resp.code.unwrap(),
                    Some(resp.headers.len()),
                )?);
                response_header.set_version(match resp.version {
                    Some(1) => Version::HTTP_11,
                    Some(0) => Version::HTTP_10,
                    _ => Version::HTTP_09,
                });
                response_header.set_reason_phrase(resp.reason)?;
                let buf = buf.freeze();
                for header in header_refs {
                    let header_name = header.get_name_bytes(&buf);
                    let header_name = header_name.into_case_header_name();
                    let value_bytes = header.get_value_bytes(&buf);
                    let header_value = if cfg!(debug_assertions) {
                        // from_maybe_shared_unchecked() in debug mode still checks whether
                        // the header value is valid, which breaks the _obsolete_multiline
                        // support. To work around this, in debug mode, we replace CRLF with
                        // whitespace
                        if let Some(p) = value_bytes.windows(CRLF.len()).position(|w| w == CRLF)
                        {
                            let mut new_header = Vec::from_iter(value_bytes);
                            new_header[p] = b' ';
                            new_header[p + 1] = b' ';
                            unsafe {
                                http::HeaderValue::from_maybe_shared_unchecked(new_header)
                            }
                        } else {
                            unsafe {
                                http::HeaderValue::from_maybe_shared_unchecked(value_bytes)
                            }
                        }
                    } else {
                        // safe because this is from what we parsed
                        unsafe { http::HeaderValue::from_maybe_shared_unchecked(value_bytes) }
                    };
                    response_header
                        .append_header(header_name, header_value)
                        .or_err(InvalidHTTPHeader, "while parsing request header")?;
                }
                self.buf = buf;
                self.upgraded = self.is_upgrade(&response_header).unwrap_or(false);
                self.response_header = Some(response_header);
                self.validate_response()?;
                return Ok(s);
            }
            HeaderParseState::Partial => { /* continue the loop */ }
            HeaderParseState::Invalid(e) => {
                return Error::e_because(
                    InvalidHTTPHeader,
                    format!("buf: {}", buf.escape_ascii()),
                    e,
                );
            }
        }
    }
}
/// Similar to [`Self::read_response()`], read the response header and then return a copy of it.
pub async fn read_resp_header_parts(&mut self) -> Result<Box<ResponseHeader>> {
self.read_response().await?;
// safe to unwrap because it is just read
Ok(Box::new(self.resp_header().unwrap().clone()))
}
/// Return a reference of the [`ResponseHeader`] if the response is read
pub fn resp_header(&self) -> Option<&ResponseHeader> {
self.response_header.as_deref()
}
/// Get the header value for the given header name from the response header.
/// If there are multiple headers under the same name, the first one will be returned.
/// Use `self.resp_header().header.get_all(name)` to get all the headers under the same name.
/// Always returns `None` if the response is not read yet.
pub fn get_header(&self, name: impl AsHeaderName) -> Option<&HeaderValue> {
    // bail out early when no response header has been parsed yet
    let resp = self.response_header.as_ref()?;
    resp.headers.get(name)
}
/// Get the response header value as raw bytes.
/// Returns `b""` when the header doesn't exist or the response is not read yet.
pub fn get_header_bytes(&self, name: impl AsHeaderName) -> &[u8] {
    self.get_header(name).map_or(b"", |v| v.as_bytes())
}
/// Return the status code of the response, if one has been read.
pub fn get_status(&self) -> Option<StatusCode> {
    // `?` short-circuits to None when no response header exists yet
    Some(self.response_header.as_ref()?.status)
}
/// Read the next piece of the response body from the underlying stream,
/// lazily initializing the body reader on first use.
async fn do_read_body(&mut self) -> Result<Option<BufRef>> {
    self.init_body_reader();
    self.body_reader
        .read_body(&mut self.underlying_stream)
        .await
}
/// Read the response body into the internal buffer.
/// Return `Ok(Some(ref))` after a successful read.
/// Return `Ok(None)` if there is no more body to read.
///
/// Honors `self.read_timeout` if set; a timeout surfaces as a `ReadTimedout` error.
pub async fn read_body_ref(&mut self) -> Result<Option<&[u8]>> {
    let result = match self.read_timeout {
        Some(t) => match timeout(t, self.do_read_body()).await {
            Ok(res) => res,
            Err(_) => Error::e_explain(ReadTimedout, format!("reading body, timeout: {t:?}")),
        },
        None => self.do_read_body().await,
    };
    result.map(|maybe_body| {
        maybe_body.map(|body_ref| {
            let slice = self.body_reader.get_body(&body_ref);
            // account for payload bytes only (saturating to avoid overflow)
            self.body_recv = self.body_recv.saturating_add(slice.len());
            slice
        })
    })
}
/// Similar to [`Self::read_body_ref`] but returns an owned `Bytes` copy of the
/// body slice instead of a reference into the internal buffer.
pub async fn read_body_bytes(&mut self) -> Result<Option<Bytes>> {
    match self.read_body_ref().await? {
        Some(slice) => Ok(Some(Bytes::copy_from_slice(slice))),
        None => Ok(None),
    }
}
/// Upstream response body bytes received (payload only; excludes headers/framing).
///
/// This counter is accumulated by [`Self::read_body_ref`].
pub fn body_bytes_received(&self) -> usize {
    self.body_recv
}
/// Whether there is no more body to read.
///
/// Takes `&mut self` because it may need to initialize the body reader first.
pub fn is_body_done(&mut self) -> bool {
    self.init_body_reader();
    self.body_reader.body_done()
}
// Return the raw response header bytes as a slice into the read buffer.
pub(super) fn get_headers_raw(&self) -> &[u8] {
    // TODO: these get_*() could panic. handle them better
    self.raw_header.as_ref().unwrap().get(&self.buf[..])
}
/// Get the raw response header bytes.
///
/// Panics if no response header has been read yet (same caveat as
/// `get_headers_raw`).
pub fn get_headers_raw_bytes(&self) -> Bytes {
    self.raw_header.as_ref().unwrap().get_bytes(&self.buf)
}
/// Translate a keepalive duration into the session's keepalive state:
/// `None` disables keepalive, `Some(0)` means keepalive with no timeout,
/// any other value sets a timed keepalive.
fn set_keepalive(&mut self, seconds: Option<u64>) {
    self.keepalive_timeout = match seconds {
        Some(0) => KeepaliveStatus::Infinite,
        Some(sec) => KeepaliveStatus::Timeout(Duration::from_secs(sec)),
        None => KeepaliveStatus::Off,
    };
}
/// Apply keepalive settings according to the server's response
/// For HTTP 1.1, assume keepalive as long as there is no `Connection: Close` request header.
/// For HTTP 1.0, only keepalive if there is an explicit header `Connection: keep-alive`.
pub fn respect_keepalive(&mut self) {
    if self.get_status() == Some(StatusCode::SWITCHING_PROTOCOLS) {
        // make sure the connection is closed at the end when 101/upgrade is used
        self.set_keepalive(None);
        return;
    }
    if self.body_reader.has_bytes_overread() {
        // if more bytes sent than expected, there are likely more bytes coming
        // so don't reuse this connection
        self.set_keepalive(None);
        return;
    }
    // explicit Connection headers (request first, then response) take priority
    if let Some(keepalive) = self.is_connection_keepalive() {
        if keepalive {
            let (timeout, _max_use) = self.get_keepalive_values();
            // TODO: respect max_use
            match timeout {
                Some(d) => self.set_keepalive(Some(d)),
                None => self.set_keepalive(Some(0)), // infinite
            }
        } else {
            self.set_keepalive(None);
        }
    } else if self.resp_header().map(|h| h.version) == Some(Version::HTTP_11) {
        self.set_keepalive(Some(0)); // on by default for http 1.1
    } else {
        self.set_keepalive(None); // off by default for http 1.0
    }
}
// Whether this session will be kept alive after the current exchange.
// TODO: check self.body_writer. If it is http1.0 type then keepalive
// cannot be used because the connection close is the signal of end body
pub fn will_keepalive(&self) -> bool {
    match self.keepalive_timeout {
        KeepaliveStatus::Off => false,
        _ => true,
    }
}
/// Combine the request's and response's `Connection` headers into a keepalive decision.
/// `Some(false)` from the request side wins unconditionally; otherwise the
/// response's `Connection` header decides (or `None` if neither side states one).
fn is_connection_keepalive(&self) -> Option<bool> {
    let request_keepalive = self
        .request_written
        .as_ref()
        .and_then(|req| is_buf_keepalive(req.headers.get(header::CONNECTION)));
    match request_keepalive {
        // ignore what the server sends if request disables keepalive explicitly
        Some(false) => Some(false),
        _ => is_buf_keepalive(self.get_header(header::CONNECTION)),
    }
}
/// `Keep-Alive: timeout=5, max=1000` => 5, 1000
/// This is defined in the spec below; it is not part of any RFC, so
/// its behavior differs across platforms.
/// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Keep-Alive
fn get_keepalive_values(&self) -> (Option<u64>, Option<usize>) {
    let Some(keep_alive_header) = self.get_header("Keep-Alive") else {
        return (None, None);
    };
    let Ok(header_value) = str::from_utf8(keep_alive_header.as_bytes()) else {
        return (None, None);
    };
    let mut timeout = None;
    let mut max = None;
    // parameters are comma-separated `key=value` pairs; unknown keys ignored
    for param in header_value.split(',') {
        let parts = param.split_once('=').map(|(k, v)| (k.trim(), v));
        match parts {
            Some(("timeout", timeout_value)) => timeout = timeout_value.trim().parse().ok(),
            Some(("max", max_value)) => max = max_value.trim().parse().ok(),
            _ => {}
        }
    }
    (timeout, max)
}
/// Close the connection abruptly. This allows to signal the server that the connection is closed
/// before dropping [`HttpSession`]
///
/// Errors from the underlying shutdown are intentionally ignored (best effort).
pub async fn shutdown(&mut self) {
    let _ = self.underlying_stream.shutdown().await;
}
/// Consume `self`; if the connection can be reused, the underlying stream is returned
/// so it may be kept in a connection pool for the next time the same server is
/// contacted (a new client session can be created via [`Self::new()`]).
/// If the connection cannot be reused, the underlying stream is shut down and
/// `None` is returned.
pub async fn reuse(mut self) -> Option<Stream> {
    // TODO: this function is unnecessarily slow for keepalive case
    // because that case does not need async
    if let KeepaliveStatus::Off = self.keepalive_timeout {
        debug!("HTTP shutdown connection");
        self.shutdown().await;
        None
    } else {
        Some(self.underlying_stream)
    }
}
/// Initialize the body reader once, choosing the framing per
/// RFC 7230 section 3.3.3 (HEAD, 1xx/204/304, upgrade, chunked,
/// content-length, or read-until-close).
fn init_body_reader(&mut self) {
    if self.body_reader.need_init() {
        /* follow https://tools.ietf.org/html/rfc7230#section-3.3.3 */
        let preread_body = self.preread_body.as_ref().unwrap().get(&self.buf[..]);
        // responses to HEAD never carry a body regardless of headers
        if let Some(req) = self.request_written.as_ref() {
            if req.method == http::method::Method::HEAD {
                self.body_reader.init_content_length(0, preread_body);
                return;
            }
        }
        let upgraded = if let Some(code) = self.get_status() {
            match code.as_u16() {
                101 => self.is_upgrade_req(),
                100..=199 => {
                    // informational headers, not enough to init body reader
                    return;
                }
                204 | 304 => {
                    // no body by definition
                    self.body_reader.init_content_length(0, preread_body);
                    return;
                }
                _ => false,
            }
        } else {
            false
        };
        if upgraded {
            // upgraded connections stream until close, like HTTP/1.0 bodies
            self.body_reader.init_http10(preread_body);
        } else if self.is_chunked_encoding() {
            // if chunked encoding, content-length should be ignored
            self.body_reader.init_chunked(preread_body);
        } else if let Some(cl) = self.get_content_length() {
            self.body_reader.init_content_length(cl, preread_body);
        } else {
            self.body_reader.init_http10(preread_body);
        }
    }
}
/// Whether the request written on this session asked for a protocol upgrade.
/// `false` when no request has been written yet.
pub fn is_upgrade_req(&self) -> bool {
    self.request_written.as_deref().is_some_and(is_upgrade_req)
}
/// `Some(true)` if this is a successful upgrade,
/// `Some(false)` if the request is an upgrade but the response refuses it,
/// `None` if the request is not an upgrade.
fn is_upgrade(&self, header: &ResponseHeader) -> Option<bool> {
    // only inspect the response when the request actually asked for an upgrade
    self.is_upgrade_req().then(|| is_upgrade_resp(header))
}
/// Parse the response's `Content-Length` header, `None` when absent or unparsable.
fn get_content_length(&self) -> Option<usize> {
    buf_to_content_length(
        self.get_header(header::CONTENT_LENGTH)
            .map(|v| v.as_bytes()),
    )
}
/// Whether the response declares `Transfer-Encoding: chunked`.
fn is_chunked_encoding(&self) -> bool {
    is_header_value_chunked_encoding(self.get_header(header::TRANSFER_ENCODING))
}
/// Choose the request body writer framing: upgrade requests stream raw
/// (HTTP/1.0 style), everything else follows the request headers.
fn init_req_body_writer(&mut self, header: &RequestHeader) {
    if is_upgrade_req(header) {
        self.body_writer.init_http10();
    } else {
        self.init_body_writer_comm(&header.headers)
    }
}
/// Shared body-writer initialization from a header map:
/// chunked transfer-encoding wins over content-length; with neither,
/// fall back to HTTP/1.0 style (write until close).
fn init_body_writer_comm(&mut self, headers: &HMap) {
    let te_value = headers.get(http::header::TRANSFER_ENCODING);
    if is_header_value_chunked_encoding(te_value) {
        // transfer-encoding takes priority over content-length
        self.body_writer.init_chunked();
    } else {
        let content_length =
            header_value_content_length(headers.get(http::header::CONTENT_LENGTH));
        match content_length {
            Some(length) => {
                self.body_writer.init_content_length(length);
            }
            None => {
                /* TODO: 1. connection: keepalive cannot be used,
                2. mark connection must be closed */
                self.body_writer.init_http10();
            }
        }
    }
}
// should (continue to) try to read response header or start reading response body
fn should_read_resp_header(&self) -> bool {
    match self.get_status().map(|s| s.as_u16()) {
        // no response code yet: nothing read, keep reading headers
        None => true,
        // switching protocol successful, no more header to read
        Some(101) => false,
        // other informational responses: the real header is still coming
        Some(code) => (100..=199).contains(&code),
    }
}
/// Drive the response one step and return the next [`HttpTask`]:
/// a header (possibly more than once for 1xx), a body chunk, or `Done`.
pub async fn read_response_task(&mut self) -> Result<HttpTask> {
    if self.should_read_resp_header() {
        let resp_header = self.read_resp_header_parts().await?;
        let end_of_body = self.is_body_done();
        debug!("Response header: {resp_header:?}");
        trace!(
            "Raw Response header: {:?}",
            str::from_utf8(self.get_headers_raw()).unwrap()
        );
        Ok(HttpTask::Header(resp_header, end_of_body))
    } else if self.is_body_done() {
        // no body
        debug!("Response is done");
        Ok(HttpTask::Done)
    } else {
        /* need to read body */
        let body = self.read_body_bytes().await?;
        let end_of_body = self.is_body_done();
        debug!(
            "Response body: {} bytes, end: {end_of_body}",
            body.as_ref().map_or(0, |b| b.len())
        );
        trace!("Response body: {body:?}");
        Ok(HttpTask::Body(body, end_of_body))
    }
    // TODO: support h1 trailer
}
/// Return the [Digest] of the connection
///
/// For reused connection, the timing in the digest will reflect its initial handshakes
/// The caller should check if the connection is reused to avoid misuse the timing field.
pub fn digest(&self) -> &Digest {
    &self.digest
}
/// Return a mutable [Digest] reference for the connection.
pub fn digest_mut(&mut self) -> &mut Digest {
    &mut self.digest
}
/// Return the server (peer) address recorded in the connection digest.
pub fn server_addr(&self) -> Option<&SocketAddr> {
    self.digest()
        .socket_digest
        .as_ref()
        .and_then(|d| d.peer_addr())
}
/// Return the client (local) address recorded in the connection digest.
pub fn client_addr(&self) -> Option<&SocketAddr> {
    self.digest()
        .socket_digest
        .as_ref()
        .and_then(|d| d.local_addr())
}
/// Get the reference of the [Stream] that this HTTP session is operating upon.
pub fn stream(&self) -> &Stream {
    &self.underlying_stream
}
/// Consume `self`, the underlying [Stream] will be returned and can be used
/// directly, for example, in the case of HTTP upgrade. It is not flushed
/// prior to being returned.
pub fn into_inner(self) -> Stream {
    self.underlying_stream
}
}
// Parse `buf` as an HTTP/1.x response head into `resp`, with lenient options
// enabled (spaces after header names, obsolete multiline headers).
#[inline]
fn parse_resp_buffer<'buf>(
    resp: &mut httparse::Response<'_, 'buf>,
    buf: &'buf [u8],
) -> HeaderParseState {
    let mut parser = httparse::ParserConfig::default();
    parser.allow_spaces_after_header_name_in_responses(true);
    parser.allow_obsolete_multiline_headers_in_responses(true);
    match parser.parse_response(resp, buf) {
        Ok(httparse::Status::Complete(len)) => HeaderParseState::Complete(len),
        Ok(httparse::Status::Partial) => HeaderParseState::Partial,
        Err(e) => HeaderParseState::Invalid(e),
    }
}
// TODO: change it to to_buf
/// Serialize a [`RequestHeader`] to HTTP/1.x wire format:
/// request line (method SP raw-path SP version CRLF), headers, final CRLF.
/// Returns `None` for versions that cannot be written (non 0.9/1.0/1.1/2).
#[inline]
pub fn http_req_header_to_wire(req: &RequestHeader) -> Option<BytesMut> {
    let mut buf = BytesMut::with_capacity(512);
    // Request-Line
    let method = req.method.as_str().as_bytes();
    buf.put_slice(method);
    buf.put_u8(b' ');
    buf.put_slice(req.raw_path());
    buf.put_u8(b' ');
    let version = match req.version {
        Version::HTTP_09 => "HTTP/0.9",
        Version::HTTP_10 => "HTTP/1.0",
        Version::HTTP_11 => "HTTP/1.1",
        Version::HTTP_2 => "HTTP/2",
        _ => {
            return None; /*TODO: unsupported version */
        }
    };
    buf.put_slice(version.as_bytes());
    buf.put_slice(CRLF);
    // headers
    req.header_to_h1_wire(&mut buf);
    buf.put_slice(CRLF);
    Some(buf)
}
impl UniqueID for HttpSession {
    // Delegate to the underlying stream so the session and its connection
    // share one identity (useful for connection pooling/tracing).
    fn id(&self) -> UniqueIDType {
        self.underlying_stream.id()
    }
}
#[cfg(test)]
mod tests_stream {
use super::*;
use crate::protocols::http::v1::body::ParseState;
use crate::ErrorType;
use tokio_test::io::Builder;
// Initialize env_logger for tests; `try_init` tolerates repeated calls.
fn init_log() {
    let _ = env_logger::builder().is_test(true).try_init();
}
// A minimal 200 response with no headers parses fully in one read.
#[tokio::test]
async fn read_basic_response() {
    init_log();
    let input = b"HTTP/1.1 200 OK\r\n\r\n";
    let mock_io = Builder::new().read(&input[..]).build();
    let mut http_stream = HttpSession::new(Box::new(mock_io));
    let res = http_stream.read_response().await;
    assert_eq!(input.len(), res.unwrap());
    assert_eq!(0, http_stream.resp_header().unwrap().headers.len());
}
// A non-canonical reason phrase must be preserved on the parsed header.
#[tokio::test]
async fn read_response_custom_reason() {
    init_log();
    let input = b"HTTP/1.1 200 Just Fine\r\n\r\n";
    let mock_io = Builder::new().read(&input[..]).build();
    let mut http_stream = HttpSession::new(Box::new(mock_io));
    let res = http_stream.read_response().await;
    assert_eq!(input.len(), res.unwrap());
    assert_eq!(
        http_stream.resp_header().unwrap().get_reason_phrase(),
        Some("Just Fine")
    );
}
// Without content-length or chunked encoding, the body is framed HTTP/1.0
// style: read until the peer closes the connection.
#[tokio::test]
async fn read_response_default() {
    init_log();
    let input_header = b"HTTP/1.1 200 OK\r\n\r\n";
    let input_body = b"abc";
    let input_close = b""; // simulating close
    let mock_io = Builder::new()
        .read(&input_header[..])
        .read(&input_body[..])
        .read(&input_close[..])
        .build();
    let mut http_stream = HttpSession::new(Box::new(mock_io));
    let res = http_stream.read_response().await;
    assert_eq!(input_header.len(), res.unwrap());
    let res = http_stream.read_body_ref().await.unwrap();
    assert_eq!(res.unwrap(), input_body);
    assert_eq!(http_stream.body_reader.body_state, ParseState::HTTP1_0(3));
    let res = http_stream.read_body_ref().await.unwrap();
    assert_eq!(res, None);
    assert_eq!(http_stream.body_reader.body_state, ParseState::Complete(3));
}
// body_bytes_received() counts payload bytes only (3 here), not header bytes.
#[tokio::test]
async fn body_bytes_received_content_length() {
    init_log();
    let input_header = b"HTTP/1.1 200 OK\r\nContent-Length: 3\r\n\r\n";
    let input_body = b"abc";
    let input_close = b""; // simulating close
    let mock_io = Builder::new()
        .read(&input_header[..])
        .read(&input_body[..])
        .read(&input_close[..])
        .build();
    let mut http = HttpSession::new(Box::new(mock_io));
    http.read_response().await.unwrap();
    let _ = http.read_body_ref().await.unwrap();
    let _ = http.read_body_ref().await.unwrap();
    assert_eq!(http.body_bytes_received(), 3);
}
#[tokio::test]
async fn body_bytes_received_chunked() {
init_log();
let input_header = b"HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n";
let input_body = b"3\r\nabc\r\n0\r\n\r\n";
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | true |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/v1/mod.rs | pingora-core/src/protocols/http/v1/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! HTTP/1.x implementation
pub(crate) mod body; // body reading/writing state machines shared by client and server
pub mod client; // HTTP/1.x client session
pub mod common; // helpers shared between client and server
pub mod server; // HTTP/1.x server session
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/v1/body.rs | pingora-core/src/protocols/http/v1/body.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use bytes::{Buf, BufMut, Bytes, BytesMut};
use log::{debug, trace, warn};
use pingora_error::{
Error,
ErrorType::{self, *},
OrErr, Result,
};
use std::fmt::Debug;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use crate::protocols::l4::stream::AsyncWriteVec;
use crate::utils::BufRef;
// TODO: make this dynamically adjusted
// default size of the reusable body read buffer
const BODY_BUFFER_SIZE: usize = 1024 * 64;
// limit how much incomplete chunk-size and chunk-ext to buffer
const PARTIAL_CHUNK_HEAD_LIMIT: usize = 1024 * 8;
// Trailers: https://datatracker.ietf.org/doc/html/rfc9112#section-7.1.2
// TODO: proper trailer handling and parsing
// generally trailers are an uncommonly used HTTP/1.1 feature, this is a somewhat
// arbitrary cap on trailer size after the 0 chunk size (like header buf)
const TRAILER_SIZE_LIMIT: usize = 1024 * 64;
// the full terminating sequence of a chunked body: "0\r\n\r\n"
const LAST_CHUNK: &[u8; 5] = &[b'0', CR, LF, CR, LF];
const CR: u8 = b'\r';
const LF: u8 = b'\n';
const CRLF: &[u8; 2] = &[CR, LF];
// This is really the CRLF end of the last trailer (or 0 chunk), + the last CRLF.
const TRAILERS_END: &[u8; 4] = &[CR, LF, CR, LF];
pub const INVALID_CHUNK: ErrorType = ErrorType::new("InvalidChunk");
pub const INVALID_TRAILER_END: ErrorType = ErrorType::new("InvalidTrailerEnd");
pub const PREMATURE_BODY_END: ErrorType = ErrorType::new("PrematureBodyEnd");
/// State machine for reading an HTTP/1.x message body.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ParseState {
    // body reader not yet initialized
    ToStart,
    // Complete: total size (content-length)
    Complete(usize),
    // Partial: size read, remaining size (content-length)
    Partial(usize, usize),
    // Chunked: Chunked encoding, prior to the final 0\r\n chunk.
    // size read, next to read in current buf start, read in current buf start, remaining chunked size to read from IO
    Chunked(usize, usize, usize, usize),
    // ChunkedFinal: Final section once the 0\r\n chunk is read.
    // size read, trailer sizes parsed so far, use existing buf end, trailers end read
    ChunkedFinal(usize, usize, usize, u8),
    // Done: done but there is error, size read
    Done(usize),
    // HTTP1_0: read until connection closed, size read
    HTTP1_0(usize),
}
// short alias used throughout this module
type PS = ParseState;
impl ParseState {
    /// Transition to `Complete`, adding `additional_bytes` to the running total
    /// (ignored for `Partial`, which completes with its known remainder).
    pub fn finish(&self, additional_bytes: usize) -> Self {
        match self {
            PS::Partial(read, to_read) => PS::Complete(read + to_read),
            PS::Chunked(read, _, _, _) => PS::Complete(read + additional_bytes),
            PS::ChunkedFinal(read, _, _, _) => PS::Complete(read + additional_bytes),
            PS::HTTP1_0(read) => PS::Complete(read + additional_bytes),
            _ => self.clone(), /* invalid transaction */
        }
    }
    /// Transition to `Done` (body ended, possibly with error) with the final byte count.
    pub fn done(&self, additional_bytes: usize) -> Self {
        match self {
            PS::Partial(read, _) => PS::Done(read + additional_bytes),
            PS::Chunked(read, _, _, _) => PS::Done(read + additional_bytes),
            PS::ChunkedFinal(read, _, _, _) => PS::Done(read + additional_bytes),
            PS::HTTP1_0(read) => PS::Done(read + additional_bytes),
            _ => self.clone(), /* invalid transaction */
        }
    }
    /// Transition from `Chunked` to `ChunkedFinal` after seeing the `0\r\n` chunk.
    /// Panics if already past the final chunk.
    pub fn read_final_chunk(&self, remaining_buf_size: usize) -> Self {
        match self {
            PS::Chunked(read, _, _, _) => {
                // The BodyReader is currently expected to copy the remaining buf
                // into self.body_buf.
                //
                // the 2 == the CRLF from the last chunk-size, 0 + CRLF
                // because ChunkedFinal is looking for CRLF + CRLF to end
                // the whole message.
                // This extra 2 bytes technically ends up cutting into the max trailers size,
                // which we consider fine for now until full trailers support.
                PS::ChunkedFinal(*read, 0, remaining_buf_size, 2)
            }
            PS::ChunkedFinal(..) => panic!("already read final chunk"),
            _ => self.clone(), /* invalid transaction */
        }
    }
    /// Record a partially-read chunk: `bytes_read` consumed now, `bytes_to_read`
    /// still expected from IO for this chunk.
    pub fn partial_chunk(&self, bytes_read: usize, bytes_to_read: usize) -> Self {
        match self {
            PS::Chunked(read, _, _, _) => PS::Chunked(read + bytes_read, 0, 0, bytes_to_read),
            PS::ChunkedFinal(..) => panic!("chunked transactions not applicable after final chunk"),
            _ => self.clone(), /* invalid transaction */
        }
    }
    /// Record one complete chunk consumed, with more chunks still present in the
    /// current buffer starting at `buf_start_index`.
    pub fn multi_chunk(&self, bytes_read: usize, buf_start_index: usize) -> Self {
        match self {
            PS::Chunked(read, _, buf_end, _) => {
                PS::Chunked(read + bytes_read, buf_start_index, *buf_end, 0)
            }
            PS::ChunkedFinal(..) => panic!("chunked transactions not applicable after final chunk"),
            _ => self.clone(), /* invalid transaction */
        }
    }
    /// Record an incomplete chunk head (size/ext line) so the reader fetches more.
    pub fn partial_chunk_head(&self, head_end: usize, head_size: usize) -> Self {
        match self {
            /* inform reader to read more to form a legal chunk */
            PS::Chunked(read, _, _, _) => PS::Chunked(*read, 0, head_end, head_size),
            PS::ChunkedFinal(..) => panic!("chunked transactions not applicable after final chunk"),
            _ => self.clone(), /* invalid transaction */
        }
    }
    /// Reset buffer bookkeeping after a fresh read of `buf_end` bytes.
    pub fn new_buf(&self, buf_end: usize) -> Self {
        match self {
            PS::Chunked(read, _, _, _) => PS::Chunked(*read, 0, buf_end, 0),
            PS::ChunkedFinal(..) => panic!("chunked transactions not applicable after final chunk"),
            _ => self.clone(), /* invalid transaction */
        }
    }
}
/// Reads an HTTP/1.x message body from a stream according to its framing.
pub struct BodyReader {
    // current position in the body framing state machine
    pub body_state: ParseState,
    // read buffer; returned `BufRef`s index into it
    pub body_buf: Option<BytesMut>,
    // capacity used when (re)allocating `body_buf`
    pub body_buf_size: usize,
    // bytes pre-read alongside the headers, replayed before hitting IO
    rewind_buf_len: usize,
    // whether this reader faces an upstream (server) connection
    upstream: bool,
    // bytes the peer sent beyond the declared body end, kept for inspection
    body_buf_overread: Option<BytesMut>,
}
impl BodyReader {
pub fn new(upstream: bool) -> Self {
BodyReader {
body_state: PS::ToStart,
body_buf: None,
body_buf_size: BODY_BUFFER_SIZE,
rewind_buf_len: 0,
upstream,
body_buf_overread: None,
}
}
/// Whether this reader still awaits one of the `init_*` calls.
pub fn need_init(&self) -> bool {
    match self.body_state {
        PS::ToStart => true,
        _ => false,
    }
}
pub fn reinit(&mut self) {
self.body_state = PS::ToStart;
}
/// Allocate the body read buffer, seeding it with any bytes that were
/// pre-read along with the headers (`buf_to_rewind`).
fn prepare_buf(&mut self, buf_to_rewind: &[u8]) {
    let mut body_buf = BytesMut::with_capacity(self.body_buf_size);
    if !buf_to_rewind.is_empty() {
        self.rewind_buf_len = buf_to_rewind.len();
        // TODO: this is still 1 copy. Make it zero
        body_buf.put_slice(buf_to_rewind);
    }
    if self.body_buf_size > buf_to_rewind.len() {
        //body_buf.resize(self.body_buf_size, 0);
        // SAFETY: extends len over uninitialized capacity so reads can fill it
        // in place. NOTE(review): readers must never read bytes beyond what IO
        // returned — confirm all consumers respect returned lengths.
        unsafe {
            body_buf.set_len(self.body_buf_size);
        }
    }
    self.body_buf = Some(body_buf);
}
/// Initialize for a chunked-encoded body, rewinding any pre-read bytes.
pub fn init_chunked(&mut self, buf_to_rewind: &[u8]) {
    self.body_state = PS::Chunked(0, 0, 0, 0);
    self.prepare_buf(buf_to_rewind);
}
/// Initialize for a content-length delimited body of `cl` bytes, rewinding any
/// pre-read bytes. A zero length completes immediately without allocating.
pub fn init_content_length(&mut self, cl: usize, buf_to_rewind: &[u8]) {
    if cl == 0 {
        self.body_state = PS::Complete(0);
    } else {
        self.prepare_buf(buf_to_rewind);
        self.body_state = PS::Partial(0, cl);
    }
}
/// Initialize for an HTTP/1.0 style body: read until the connection closes.
pub fn init_http10(&mut self, buf_to_rewind: &[u8]) {
    self.prepare_buf(buf_to_rewind);
    self.body_state = PS::HTTP1_0(0);
}
/// Resolve a `BufRef` returned by `read_body` into the actual byte slice.
pub fn get_body(&self, buf_ref: &BufRef) -> &[u8] {
    // TODO: these get_*() could panic. handle them better
    buf_ref.get(self.body_buf.as_ref().unwrap())
}
/// Bytes the peer sent beyond the declared end of body, if any were captured.
#[allow(dead_code)]
pub fn get_body_overread(&self) -> Option<&[u8]> {
    self.body_buf_overread.as_ref().map(|b| &b[..])
}
/// Whether the peer sent any bytes past the declared end of the body.
pub fn has_bytes_overread(&self) -> bool {
    self.get_body_overread().map_or(false, |b| !b.is_empty())
}
/// Whether the body has finished, either cleanly (`Complete`) or not (`Done`).
pub fn body_done(&self) -> bool {
    match self.body_state {
        PS::Complete(_) | PS::Done(_) => true,
        _ => false,
    }
}
/// Whether the body completed with zero bytes.
pub fn body_empty(&self) -> bool {
    matches!(self.body_state, PS::Complete(0))
}
/// Trim the read buffer at the end of body and stash any bytes the peer sent
/// beyond it into `body_buf_overread`.
fn finish_body_buf(&mut self, end_of_body: usize, total_read: usize) {
    let body_buf_mut = self.body_buf.as_mut().expect("must have read body buf");
    // remove unused buffer
    body_buf_mut.truncate(total_read);
    let overread_bytes = body_buf_mut.split_off(end_of_body);
    self.body_buf_overread = (!overread_bytes.is_empty()).then_some(overread_bytes);
}
/// Read the next piece of the body from `stream`, dispatching on the current
/// framing state. Returns `Ok(None)` once the body is done.
///
/// # Panics
/// Panics if the reader has not been initialized via an `init_*` method.
pub async fn read_body<S>(&mut self, stream: &mut S) -> Result<Option<BufRef>>
where
    S: AsyncRead + Unpin + Send,
{
    match self.body_state {
        PS::ToStart => panic!("need to init BodyReader first"),
        PS::Complete(_) | PS::Done(_) => Ok(None),
        PS::Partial(_, _) => self.do_read_body(stream).await,
        PS::Chunked(..) => self.do_read_chunked_body(stream).await,
        PS::ChunkedFinal(..) => self.do_read_chunked_body_final(stream).await,
        PS::HTTP1_0(_) => self.do_read_body_until_closed(stream).await,
    }
}
/// Read a piece of a content-length delimited body.
///
/// Replays rewound (pre-read) bytes first; otherwise reads from `stream`.
/// Downstream readers cap the read at the remaining length so extra bytes are
/// not consumed; upstream readers may overread and the surplus is discarded
/// with a warning. Errors if the peer closes before the declared length.
///
/// Fix: the warning message previously read "more data then expected: extra
/// {}bytes" — wrong word ("then") and a missing space swallowed by the string
/// line continuation.
pub async fn do_read_body<S>(&mut self, stream: &mut S) -> Result<Option<BufRef>>
where
    S: AsyncRead + Unpin + Send,
{
    let mut body_buf = self.body_buf.as_deref_mut().unwrap();
    let mut n = self.rewind_buf_len;
    self.rewind_buf_len = 0; // we only need to read rewind data once
    if n == 0 {
        // downstream should not discard remaining data if peer sent more.
        if !self.upstream {
            if let PS::Partial(_, to_read) = self.body_state {
                if to_read < body_buf.len() {
                    body_buf = &mut body_buf[..to_read];
                }
            }
        }
        /* Need to actually read */
        n = stream
            .read(body_buf)
            .await
            .or_err(ReadError, "when reading body")?;
    }
    match self.body_state {
        PS::Partial(read, to_read) => {
            debug!(
                "BodyReader body_state: {:?}, read data from IO: {n}",
                self.body_state
            );
            if n == 0 {
                self.body_state = PS::Done(read);
                Error::e_explain(ConnectionClosed, format!(
                    "Peer prematurely closed connection with {} bytes of body remaining to read",
                    to_read
                ))
            } else if n >= to_read {
                if n > to_read {
                    warn!(
                        "Peer sent more data than expected: extra {} bytes, discarding them",
                        n - to_read
                    )
                }
                self.body_state = PS::Complete(read + to_read);
                self.finish_body_buf(to_read, n);
                Ok(Some(BufRef::new(0, to_read)))
            } else {
                self.body_state = PS::Partial(read + n, to_read - n);
                Ok(Some(BufRef::new(0, n)))
            }
        }
        _ => panic!("wrong body state: {:?}", self.body_state),
    }
}
/// Read a piece of an HTTP/1.0 style body: a zero-length read (connection
/// closed) marks the body complete; otherwise bytes keep accumulating.
pub async fn do_read_body_until_closed<S>(&mut self, stream: &mut S) -> Result<Option<BufRef>>
where
    S: AsyncRead + Unpin + Send,
{
    let body_buf = self.body_buf.as_deref_mut().unwrap();
    let mut n = self.rewind_buf_len;
    self.rewind_buf_len = 0; // we only need to read rewind data once
    if n == 0 {
        /* Need to actually read */
        n = stream
            .read(body_buf)
            .await
            .or_err(ReadError, "when reading body")?;
    }
    match self.body_state {
        PS::HTTP1_0(read) => {
            if n == 0 {
                // peer closed the connection: that is the end-of-body signal
                self.body_state = PS::Complete(read);
                Ok(None)
            } else {
                self.body_state = PS::HTTP1_0(read + n);
                Ok(Some(BufRef::new(0, n)))
            }
        }
        _ => panic!("wrong body state: {:?}", self.body_state),
    }
}
/// Read a piece of a chunked body prior to the terminating `0\r\n` chunk.
///
/// Handles three buffer situations: a fresh read (or rewound bytes), a
/// partial chunk head carried over from the previous buffer, and a partial
/// chunk payload whose remainder (`expecting_from_io`) is still pending.
pub async fn do_read_chunked_body<S>(&mut self, stream: &mut S) -> Result<Option<BufRef>>
where
    S: AsyncRead + Unpin + Send,
{
    match self.body_state {
        PS::Chunked(
            total_read,
            existing_buf_start,
            mut existing_buf_end,
            mut expecting_from_io,
        ) => {
            if existing_buf_start == 0 {
                // read a new buf from IO
                let body_buf = self.body_buf.as_deref_mut().unwrap();
                if existing_buf_end == 0 {
                    existing_buf_end = self.rewind_buf_len;
                    self.rewind_buf_len = 0; // we only need to read rewind data once
                    if existing_buf_end == 0 {
                        existing_buf_end = stream
                            .read(body_buf)
                            .await
                            .or_err(ReadError, "when reading body")?;
                    }
                } else {
                    /* existing_buf_end != 0 this is partial chunk head */
                    /* copy the #expecting_from_io bytes until index existing_buf_end
                     * to the front and read more to form a valid chunk head.
                     * existing_buf_end is the end of the partial head and
                     * expecting_from_io is the len of it */
                    body_buf
                        .copy_within(existing_buf_end - expecting_from_io..existing_buf_end, 0);
                    let new_bytes = stream
                        .read(&mut body_buf[expecting_from_io..])
                        .await
                        .or_err(ReadError, "when reading body")?;
                    if new_bytes == 0 {
                        self.body_state = self.body_state.done(0);
                        return Error::e_explain(
                            ConnectionClosed,
                            format!(
                                "Connection prematurely closed without the termination chunk \
                                (partial chunk head), read {total_read} bytes"
                            ),
                        );
                    }
                    /* more data is read, extend the buffer */
                    existing_buf_end = expecting_from_io + new_bytes;
                    expecting_from_io = 0;
                }
                self.body_state = self.body_state.new_buf(existing_buf_end);
            }
            if existing_buf_end == 0 {
                // EOF before the terminating chunk: protocol violation
                self.body_state = self.body_state.done(0);
                Error::e_explain(
                    ConnectionClosed,
                    format!(
                        "Connection prematurely closed without the termination chunk, \
                        read {total_read} bytes"
                    ),
                )
            } else {
                if expecting_from_io > 0 {
                    let body_buf = self.body_buf.as_ref().unwrap();
                    trace!(
                        "partial chunk payload, expecting_from_io: {}, \
                        existing_buf_end {}, buf: {:?}",
                        expecting_from_io,
                        existing_buf_end,
                        self.body_buf.as_ref().unwrap()[..existing_buf_end].escape_ascii()
                    );
                    // partial chunk payload, will read more
                    if expecting_from_io >= existing_buf_end + 2 {
                        // not enough (doesn't contain CRLF end)
                        self.body_state = self.body_state.partial_chunk(
                            existing_buf_end,
                            expecting_from_io - existing_buf_end,
                        );
                        return Ok(Some(BufRef::new(0, existing_buf_end)));
                    }
                    /* could be expecting DATA + CRLF or just CRLF */
                    let payload_size = expecting_from_io.saturating_sub(2);
                    /* expecting_from_io < existing_buf_end + 2 */
                    let need_lf_only = expecting_from_io == 1; // otherwise we need the whole CRLF
                    if expecting_from_io > existing_buf_end {
                        // the buffer may end between the CR and the LF of the
                        // chunk-terminating CRLF; validate what is present
                        if payload_size < existing_buf_end {
                            Self::validate_crlf(
                                &mut self.body_state,
                                &body_buf[payload_size..existing_buf_end],
                                need_lf_only,
                                false,
                            )?;
                        }
                    } else {
                        // expecting_from_io <= existing_buf_end
                        // chunk CRLF end should end here
                        assert!(Self::validate_crlf(
                            &mut self.body_state,
                            &body_buf[payload_size..expecting_from_io],
                            need_lf_only,
                            false,
                        )?);
                    }
                    if expecting_from_io >= existing_buf_end {
                        self.body_state = self
                            .body_state
                            .partial_chunk(payload_size, expecting_from_io - existing_buf_end);
                        return Ok(Some(BufRef::new(0, payload_size)));
                    }
                    /* expecting_from_io < existing_buf_end */
                    self.body_state =
                        self.body_state.multi_chunk(payload_size, expecting_from_io);
                    return Ok(Some(BufRef::new(0, payload_size)));
                }
                let (buf_res, last_chunk_size_end) =
                    self.parse_chunked_buf(existing_buf_start, existing_buf_end)?;
                if buf_res.is_some() {
                    if let Some(idx) = last_chunk_size_end {
                        // just read the last 0 + CRLF, but not final end CRLF
                        // copy the rest of the buffer to the start of the body_buf
                        // so we can parse the remaining bytes as trailers / end
                        let body_buf = self.body_buf.as_deref_mut().unwrap();
                        trace!(
                            "last chunk size end buf {:?}",
                            &body_buf[..existing_buf_end].escape_ascii(),
                        );
                        body_buf.copy_within(idx..existing_buf_end, 0);
                    }
                }
                Ok(buf_res)
            }
        }
        _ => panic!("wrong body state: {:?}", self.body_state),
    }
}
// Parse one chunk (head + payload) out of body_buf[buf_index_start..buf_index_end].
// Returns: BufRef of next body chunk,
// terminating chunk-size index end if read completely (0 + CRLF).
// Note input indices are absolute (to body_buf).
fn parse_chunked_buf(
    &mut self,
    buf_index_start: usize,
    buf_index_end: usize,
) -> Result<(Option<BufRef>, Option<usize>)> {
    let buf = &self.body_buf.as_ref().unwrap()[buf_index_start..buf_index_end];
    let chunk_status = httparse::parse_chunk_size(buf);
    match chunk_status {
        Ok(status) => {
            match status {
                httparse::Status::Complete((payload_index, chunk_size)) => {
                    // TODO: Check chunk_size overflow
                    trace!(
                        "Got size {chunk_size}, payload_index: {payload_index}, chunk: {:?}",
                        String::from_utf8_lossy(buf).escape_default(),
                    );
                    let chunk_size = chunk_size as usize;
                    // https://github.com/seanmonstar/httparse/issues/149
                    // httparse does not treat zero-size chunk differently, it does not check
                    // that terminating chunk is 0 + double CRLF
                    if chunk_size == 0 {
                        /* terminating chunk, also need to handle trailer. */
                        let chunk_end_index = payload_index + 2;
                        return if chunk_end_index <= buf.len()
                            && buf[payload_index..chunk_end_index] == CRLF[..]
                        {
                            // full terminating CRLF MAY exist in current buf
                            // Skip ChunkedFinal state and go directly to Complete
                            // as optimization.
                            self.body_state = self.body_state.finish(0);
                            self.finish_body_buf(
                                buf_index_start + chunk_end_index,
                                buf_index_end,
                            );
                            Ok((None, Some(buf_index_start + payload_index)))
                        } else {
                            // Indicate start of parsing final chunked trailers,
                            // with remaining buf to read
                            self.body_state = self.body_state.read_final_chunk(
                                buf_index_end - (buf_index_start + payload_index),
                            );
                            Ok((
                                Some(BufRef::new(0, 0)),
                                Some(buf_index_start + payload_index),
                            ))
                        };
                    }
                    // chunk-size CRLF [payload_index] byte*[chunk_size] CRLF
                    let data_end_index = payload_index + chunk_size;
                    let chunk_end_index = data_end_index + 2;
                    if chunk_end_index >= buf.len() {
                        // no multi chunk in this buf
                        let actual_size = if data_end_index > buf.len() {
                            buf.len() - payload_index
                        } else {
                            chunk_size
                        };
                        let crlf_start = chunk_end_index.saturating_sub(2);
                        if crlf_start < buf.len() {
                            Self::validate_crlf(
                                &mut self.body_state,
                                &buf[crlf_start..],
                                false,
                                false,
                            )?;
                        }
                        // else need to read more to get to CRLF
                        self.body_state = self
                            .body_state
                            .partial_chunk(actual_size, chunk_end_index - buf.len());
                        return Ok((
                            Some(BufRef::new(buf_index_start + payload_index, actual_size)),
                            None,
                        ));
                    }
                    /* got multiple chunks, return the first */
                    assert!(Self::validate_crlf(
                        &mut self.body_state,
                        &buf[data_end_index..chunk_end_index],
                        false,
                        false,
                    )?);
                    self.body_state = self
                        .body_state
                        .multi_chunk(chunk_size, buf_index_start + chunk_end_index);
                    Ok((
                        Some(BufRef::new(buf_index_start + payload_index, chunk_size)),
                        None,
                    ))
                }
                httparse::Status::Partial => {
                    if buf.len() > PARTIAL_CHUNK_HEAD_LIMIT {
                        // https://datatracker.ietf.org/doc/html/rfc9112#name-chunk-extensions
                        // "A server ought to limit the total length of chunk extensions received"
                        // The buf.len() here is the total length of chunk-size + chunk-ext seen
                        // so far. This check applies to both server and client
                        self.body_state = self.body_state.done(0);
                        Error::e_explain(INVALID_CHUNK, "Chunk ext over limit")
                    } else {
                        // incomplete chunk head: remember it and ask for more bytes
                        self.body_state =
                            self.body_state.partial_chunk_head(buf_index_end, buf.len());
                        Ok((Some(BufRef::new(0, 0)), None))
                    }
                }
            }
        }
        Err(e) => {
            let context = format!("Invalid chunked encoding: {e:?}");
            debug!(
                "{context}, {:?}",
                String::from_utf8_lossy(buf).escape_default()
            );
            self.body_state = self.body_state.done(0);
            Error::e_explain(INVALID_CHUNK, context)
        }
    }
}
pub async fn do_read_chunked_body_final<S>(&mut self, stream: &mut S) -> Result<Option<BufRef>>
where
S: AsyncRead + Unpin + Send,
{
// parse section after last-chunk: https://datatracker.ietf.org/doc/html/rfc9112#section-7.1
// This is the section after the final chunk we're trying to read, which can include
// HTTP1 trailers (currently we just discard them).
// Really we are just waiting for a consecutive CRLF + CRLF to end the body.
match self.body_state {
PS::ChunkedFinal(read, trailers_read, existing_buf_end, end_read) => {
let body_buf = self.body_buf.as_deref_mut().unwrap();
let (buf, n) = if existing_buf_end != 0 {
// finish rest of buf that was read with Chunked state
// existing_buf_end is non-zero only once
self.body_state = PS::ChunkedFinal(read, trailers_read, 0, end_read);
(&body_buf[..existing_buf_end], existing_buf_end)
} else {
let n = stream
.read(body_buf)
.await
.or_err(ReadError, "when reading trailers end")?;
(&body_buf[..n], n)
};
if n == 0 {
self.body_state = PS::Done(read);
return Error::e_explain(
ConnectionClosed,
format!(
"Connection prematurely closed without the termination chunk, \
read {read} bytes, {trailers_read} trailer bytes"
),
);
}
let mut start = 0;
// try to find end within the current IO buffer
while start < n {
// Adjusts body state through each iteration to add trailers read
// Each iteration finds the next CR or LF to advance the buf
let (trailers_read, end_read) = match self.body_state {
PS::ChunkedFinal(_, new_trailers_read, _, new_end_read) => {
(new_trailers_read, new_end_read)
}
_ => unreachable!(),
};
let mut buf = &buf[start..n];
trace!(
"Parsing chunk end for buf {:?}",
String::from_utf8_lossy(buf).escape_default(),
);
if end_read == 0 {
// find the next CRLF sequence / potential end
let (trailers_read, no_crlf) =
if let Some(p) = buf.iter().position(|b| *b == CR || *b == LF) {
buf = &buf[p..];
start += p;
(trailers_read + p, false)
} else {
// consider this all trailer bytes
(trailers_read + (n - start), true)
};
if trailers_read > TRAILER_SIZE_LIMIT {
self.body_state = self.body_state.done(0);
return Error::e_explain(
INVALID_TRAILER_END,
"Trailer size over limit",
);
}
self.body_state = PS::ChunkedFinal(read, trailers_read, 0, 0);
if no_crlf {
// break and allow polling read body again
break;
}
}
match Self::parse_trailers_end(&mut self.body_state, buf)? {
TrailersEndParseState::NotEnd(next_parse_index) => {
trace!(
"Parsing chunk end for buf {:?}, resume at {next_parse_index}",
String::from_utf8_lossy(buf).escape_default(),
);
start += next_parse_index;
}
TrailersEndParseState::Complete(end_idx) => {
trace!(
"Parsing chunk end for buf {:?}, finished at {end_idx}",
String::from_utf8_lossy(buf).escape_default(),
);
self.finish_body_buf(start + end_idx, n);
return Ok(None);
}
}
}
}
_ => panic!("wrong body state: {:?}", self.body_state),
}
// indicate final section is not done
Ok(Some(BufRef(0, 0)))
}
// Parses up to one CRLF at a time to determine if, given the body state,
// we've parsed a full trailer end.
// Panics if empty buffer is given.
fn parse_trailers_end(
body_state: &mut ParseState,
buf: &[u8],
) -> Result<TrailersEndParseState> {
assert!(!buf.is_empty(), "parse_trailers_end given empty buffer");
match body_state.clone() {
PS::ChunkedFinal(read, trailers_read, _, end_read) => {
// Look at the body buf we just read and see if it matches
// the ending CRLF + CRLF sequence.
let end_read = end_read as usize;
assert!(end_read < TRAILERS_END.len());
let to_read = std::cmp::min(buf.len(), TRAILERS_END.len() - end_read);
let buf = &buf[..to_read];
// If the start of the buf is not CRLF and we are not in the middle of reading a
// valid CRLF sequence, return to let caller seek for next CRLF
if end_read % 2 == 0 && buf[0] != CR && buf[0] != LF {
trace!(
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | true |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/v1/server.rs | pingora-core/src/protocols/http/v1/server.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! HTTP/1.x server session
use bstr::ByteSlice;
use bytes::Bytes;
use bytes::{BufMut, BytesMut};
use http::header::{CONTENT_LENGTH, TRANSFER_ENCODING};
use http::HeaderValue;
use http::{header, header::AsHeaderName, Method, Version};
use log::{debug, warn};
use once_cell::sync::Lazy;
use percent_encoding::{percent_encode, AsciiSet, CONTROLS};
use pingora_error::{Error, ErrorType::*, OrErr, Result};
use pingora_http::{IntoCaseHeaderName, RequestHeader, ResponseHeader};
use pingora_timeout::timeout;
use regex::bytes::Regex;
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use super::body::{BodyReader, BodyWriter};
use super::common::*;
use crate::protocols::http::{body_buffer::FixedBuffer, date, HttpTask};
use crate::protocols::{Digest, SocketAddr, Stream};
use crate::utils::{BufRef, KVRef};
/// The HTTP 1.x server session
pub struct HttpSession {
underlying_stream: Stream,
/// The buf that holds the raw request header + possibly a portion of request body
/// Request body can appear here because they could arrive with the same read() that
/// sends the request header.
buf: Bytes,
/// A slice reference to `buf` which points to the exact range of request header
raw_header: Option<BufRef>,
/// A slice reference to `buf` which points to the range of a portion of request body if any
preread_body: Option<BufRef>,
/// A state machine to track how to read the request body
body_reader: BodyReader,
/// A state machine to track how to write the response body
body_writer: BodyWriter,
/// An internal buffer to buf multiple body writes to reduce the underlying syscalls
body_write_buf: BytesMut,
/// Track how many application (not on the wire) body bytes already sent
body_bytes_sent: usize,
/// Track how many application (not on the wire) body bytes already read
body_bytes_read: usize,
/// Whether to update headers like connection, Date
update_resp_headers: bool,
/// timeouts:
keepalive_timeout: KeepaliveStatus,
read_timeout: Option<Duration>,
write_timeout: Option<Duration>,
/// How long to wait to make downstream session reusable, if body needs to be drained.
total_drain_timeout: Option<Duration>,
/// A copy of the response that is already written to the client
response_written: Option<Box<ResponseHeader>>,
/// The parsed request header
request_header: Option<Box<RequestHeader>>,
/// An internal buffer that holds a copy of the request body up to a certain size
retry_buffer: Option<FixedBuffer>,
/// Whether this session is an upgraded session. This flag is calculated when sending the
/// response header to the client.
upgraded: bool,
/// Digest to track underlying connection metrics
digest: Box<Digest>,
/// Minimum send rate to the client
min_send_rate: Option<usize>,
/// When this is enabled informational response headers will not be proxied downstream
ignore_info_resp: bool,
/// Disable keepalive if response is sent before downstream body is finished
close_on_response_before_downstream_finish: bool,
}
impl HttpSession {
/// Create a new http server session from an established (TCP or TLS) [`Stream`].
/// The created session needs to call [`Self::read_request()`] first before performing
/// any other operations.
pub fn new(underlying_stream: Stream) -> Self {
// TODO: maybe we should put digest in the connection itself
let digest = Box::new(Digest {
ssl_digest: underlying_stream.get_ssl_digest(),
timing_digest: underlying_stream.get_timing_digest(),
proxy_digest: underlying_stream.get_proxy_digest(),
socket_digest: underlying_stream.get_socket_digest(),
});
HttpSession {
underlying_stream,
buf: Bytes::new(), // zero size, with be replaced by parsed header later
raw_header: None,
preread_body: None,
body_reader: BodyReader::new(false),
body_writer: BodyWriter::new(),
body_write_buf: BytesMut::new(),
keepalive_timeout: KeepaliveStatus::Off,
update_resp_headers: true,
response_written: None,
request_header: None,
read_timeout: Some(Duration::from_secs(60)),
write_timeout: None,
total_drain_timeout: None,
body_bytes_sent: 0,
body_bytes_read: 0,
retry_buffer: None,
upgraded: false,
digest,
min_send_rate: None,
ignore_info_resp: false,
close_on_response_before_downstream_finish: false,
}
}
/// Read the request header. Return `Ok(Some(n))` where the read and parsing are successful.
/// Return `Ok(None)` when the client closed the connection without sending any data, which
/// is common on a reused connection.
pub async fn read_request(&mut self) -> Result<Option<usize>> {
const MAX_ERR_BUF_LEN: usize = 2048;
self.buf.clear();
let mut buf = BytesMut::with_capacity(INIT_HEADER_BUF_SIZE);
let mut already_read: usize = 0;
loop {
if already_read > MAX_HEADER_SIZE {
/* NOTE: this check only blocks second read. The first large read is allowed
since the buf is already allocated. The goal is to avoid slowly bloating
this buffer */
return Error::e_explain(
InvalidHTTPHeader,
format!("Request header larger than {MAX_HEADER_SIZE}"),
);
}
let read_result = {
let read_event = self.underlying_stream.read_buf(&mut buf);
match self.keepalive_timeout {
KeepaliveStatus::Timeout(d) => match timeout(d, read_event).await {
Ok(res) => res,
Err(e) => {
debug!("keepalive timeout {d:?} reached, {e}");
return Ok(None);
}
},
KeepaliveStatus::Infinite => {
// FIXME: this should only apply to reads between requests
read_event.await
}
KeepaliveStatus::Off => match self.read_timeout {
Some(t) => match timeout(t, read_event).await {
Ok(res) => res,
Err(e) => {
debug!("read timeout {t:?} reached, {e}");
return Error::e_explain(ReadTimedout, format!("timeout: {t:?}"));
}
},
None => read_event.await,
},
}
};
let n = match read_result {
Ok(n_read) => {
if n_read == 0 {
if already_read > 0 {
return Error::e_explain(
ConnectionClosed,
format!(
"while reading request headers, bytes already read: {}",
already_read
),
);
} else {
/* common when client decides to close a keepalived session */
debug!("Client prematurely closed connection with 0 byte sent");
return Ok(None);
}
}
n_read
}
Err(e) => {
if already_read > 0 {
return Error::e_because(ReadError, "while reading request headers", e);
}
/* nothing harmful since we have not ready any thing yet */
return Ok(None);
}
};
already_read += n;
// Use loop as GOTO to retry escaped request buffer, not a real loop
loop {
let mut headers = [httparse::EMPTY_HEADER; MAX_HEADERS];
let mut req = httparse::Request::new(&mut headers);
let parsed = parse_req_buffer(&mut req, &buf);
match parsed {
HeaderParseState::Complete(s) => {
self.raw_header = Some(BufRef(0, s));
self.preread_body = Some(BufRef(s, already_read));
// We have the header name and values we parsed to be just 0 copy Bytes
// referencing the original buf. That requires we convert the buf from
// BytesMut to Bytes. But `req` holds a reference to `buf`. So we use the
// `KVRef`s to record the offset of each piece of data, drop `req`, convert
// buf, the do the 0 copy update
let base = buf.as_ptr() as usize;
let mut header_refs = Vec::<KVRef>::with_capacity(req.headers.len());
// Note: req.headers has the correct number of headers
// while header_refs doesn't as it is still empty
let _num_headers = populate_headers(base, &mut header_refs, req.headers);
let mut request_header = Box::new(RequestHeader::build(
req.method.unwrap_or(""),
// we path httparse to allow unsafe bytes in the str
req.path.unwrap_or("").as_bytes(),
Some(req.headers.len()),
)?);
request_header.set_version(match req.version {
Some(1) => Version::HTTP_11,
Some(0) => Version::HTTP_10,
_ => Version::HTTP_09,
});
let buf = buf.freeze();
for header in header_refs {
let header_name = header.get_name_bytes(&buf);
let header_name = header_name.into_case_header_name();
let value_bytes = header.get_value_bytes(&buf);
// safe because this is from what we parsed
let header_value = unsafe {
http::HeaderValue::from_maybe_shared_unchecked(value_bytes)
};
request_header
.append_header(header_name, header_value)
.or_err(InvalidHTTPHeader, "while parsing request header")?;
}
let contains_transfer_encoding =
request_header.headers.contains_key(TRANSFER_ENCODING);
let contains_content_length =
request_header.headers.contains_key(CONTENT_LENGTH);
// Transfer encoding overrides content length, so when
// both are present, we can remove content length. This
// is per https://datatracker.ietf.org/doc/html/rfc9112#section-6.3
if contains_content_length && contains_transfer_encoding {
request_header.remove_header(&CONTENT_LENGTH);
}
self.buf = buf;
self.request_header = Some(request_header);
self.body_reader.reinit();
self.response_written = None;
self.respect_keepalive();
self.validate_request()?;
return Ok(Some(s));
}
HeaderParseState::Partial => {
break; /* continue the read loop */
}
HeaderParseState::Invalid(e) => match e {
httparse::Error::Token | httparse::Error::Version => {
// try to escape URI
if let Some(new_buf) = escape_illegal_request_line(&buf) {
buf = new_buf;
already_read = buf.len();
} else {
debug!("Invalid request header from {:?}", self.underlying_stream);
buf.truncate(MAX_ERR_BUF_LEN);
return Error::e_because(
InvalidHTTPHeader,
format!("buf: {}", buf.escape_ascii()),
e,
);
}
}
_ => {
debug!("Invalid request header from {:?}", self.underlying_stream);
buf.truncate(MAX_ERR_BUF_LEN);
return Error::e_because(
InvalidHTTPHeader,
format!("buf: {:?}", buf.as_bstr()),
e,
);
}
},
}
}
}
}
/// Validate the request header read. This function must be called after the request header
/// read.
/// # Panics
/// this function and most other functions will panic if called before [`Self::read_request()`]
pub fn validate_request(&self) -> Result<()> {
let req_header = self.req_header();
// ad-hoc checks
super::common::check_dup_content_length(&req_header.headers)?;
Ok(())
}
/// Return a reference of the `RequestHeader` this session read
/// # Panics
/// this function and most other functions will panic if called before [`Self::read_request()`]
pub fn req_header(&self) -> &RequestHeader {
self.request_header
.as_ref()
.expect("Request header is not read yet")
}
/// Return a mutable reference of the `RequestHeader` this session read
/// # Panics
/// this function and most other functions will panic if called before [`Self::read_request()`]
pub fn req_header_mut(&mut self) -> &mut RequestHeader {
self.request_header
.as_mut()
.expect("Request header is not read yet")
}
/// Get the header value for the given header name
/// If there are multiple headers under the same name, the first one will be returned
/// Use `self.req_header().header.get_all(name)` to get all the headers under the same name
pub fn get_header(&self, name: impl AsHeaderName) -> Option<&HeaderValue> {
self.request_header
.as_ref()
.and_then(|h| h.headers.get(name))
}
/// Return the method of this request. None if the request is not read yet.
pub(crate) fn get_method(&self) -> Option<&http::Method> {
self.request_header.as_ref().map(|r| &r.method)
}
/// Return the path of the request (i.e., the `/hello?1` of `GET /hello?1 HTTP1.1`)
/// An empty slice will be used if there is no path or the request is not read yet
pub(crate) fn get_path(&self) -> &[u8] {
self.request_header.as_ref().map_or(b"", |r| r.raw_path())
}
/// Return the host header of the request. An empty slice will be used if there is no host header
pub(crate) fn get_host(&self) -> &[u8] {
self.request_header
.as_ref()
.and_then(|h| h.headers.get(header::HOST))
.map_or(b"", |h| h.as_bytes())
}
/// Return a string `$METHOD $PATH, Host: $HOST`. Mostly for logging and debug purpose
pub fn request_summary(&self) -> String {
format!(
"{} {}, Host: {}",
self.get_method().map_or("-", |r| r.as_str()),
String::from_utf8_lossy(self.get_path()),
String::from_utf8_lossy(self.get_host())
)
}
/// Is the request a upgrade request
pub fn is_upgrade_req(&self) -> bool {
match self.request_header.as_deref() {
Some(req) => is_upgrade_req(req),
None => false,
}
}
/// Get the request header as raw bytes, `b""` when the header doesn't exist
pub fn get_header_bytes(&self, name: impl AsHeaderName) -> &[u8] {
self.get_header(name).map_or(b"", |v| v.as_bytes())
}
/// Read the request body. `Ok(None)` when there is no (more) body to read.
pub async fn read_body_bytes(&mut self) -> Result<Option<Bytes>> {
let read = self.read_body().await?;
Ok(read.map(|b| {
let bytes = Bytes::copy_from_slice(self.get_body(&b));
self.body_bytes_read += bytes.len();
if let Some(buffer) = self.retry_buffer.as_mut() {
buffer.write_to_buffer(&bytes);
}
bytes
}))
}
async fn do_read_body(&mut self) -> Result<Option<BufRef>> {
self.init_body_reader();
self.body_reader
.read_body(&mut self.underlying_stream)
.await
}
/// Read the body into the internal buffer
async fn read_body(&mut self) -> Result<Option<BufRef>> {
match self.read_timeout {
Some(t) => match timeout(t, self.do_read_body()).await {
Ok(res) => res,
Err(_) => Error::e_explain(ReadTimedout, format!("reading body, timeout: {t:?}")),
},
None => self.do_read_body().await,
}
}
async fn do_drain_request_body(&mut self) -> Result<()> {
loop {
match self.read_body_bytes().await {
Ok(Some(_)) => { /* continue to drain */ }
Ok(None) => return Ok(()), // done
Err(e) => return Err(e),
}
}
}
/// Drain the request body. `Ok(())` when there is no (more) body to read.
pub async fn drain_request_body(&mut self) -> Result<()> {
if self.is_body_done() {
return Ok(());
}
match self.total_drain_timeout {
Some(t) => match timeout(t, self.do_drain_request_body()).await {
Ok(res) => res,
Err(_) => Error::e_explain(ReadTimedout, format!("draining body, timeout: {t:?}")),
},
None => self.do_drain_request_body().await,
}
}
/// Whether there is no (more) body to be read.
pub fn is_body_done(&mut self) -> bool {
self.init_body_reader();
self.body_reader.body_done()
}
/// Whether the request has an empty body
/// Because HTTP 1.1 clients have to send either `Content-Length` or `Transfer-Encoding` in order
/// to signal the server that it will send the body, this function returns accurate results even
/// only when the request header is just read.
pub fn is_body_empty(&mut self) -> bool {
self.init_body_reader();
self.body_reader.body_empty()
}
/// Write the response header to the client.
/// This function can be called more than once to send 1xx informational headers excluding 101.
pub async fn write_response_header(&mut self, mut header: Box<ResponseHeader>) -> Result<()> {
if header.status.is_informational() && self.ignore_info_resp(header.status.into()) {
debug!("ignoring informational headers");
return Ok(());
}
if let Some(resp) = self.response_written.as_ref() {
if !resp.status.is_informational() || self.upgraded {
warn!("Respond header is already sent, cannot send again");
return Ok(());
}
}
if self.close_on_response_before_downstream_finish && !self.is_body_done() {
debug!("set connection close before downstream finish");
self.set_keepalive(None);
}
// no need to add these headers to 1xx responses
if !header.status.is_informational() && self.update_resp_headers {
/* update headers */
header.insert_header(header::DATE, date::get_cached_date())?;
// TODO: make these lazy static
let connection_value = if self.will_keepalive() {
"keep-alive"
} else {
"close"
};
header.insert_header(header::CONNECTION, connection_value)?;
}
if header.status == 101 {
// make sure the connection is closed at the end when 101/upgrade is used
self.set_keepalive(None);
}
// Allow informational header (excluding 101) to pass through without affecting the state
// of the request
if header.status == 101 || !header.status.is_informational() {
// reset request body to done for incomplete upgrade handshakes
if let Some(upgrade_ok) = self.is_upgrade(&header) {
if upgrade_ok {
debug!("ok upgrade handshake");
// For ws we use HTTP1_0 do_read_body_until_closed
//
// On ws close the initiator sends a close frame and
// then waits for a response from the peer, once it receives
// a response it closes the conn. After receiving a
// control frame indicating the connection should be closed,
// a peer discards any further data received.
// https://www.rfc-editor.org/rfc/rfc6455#section-1.4
self.upgraded = true;
} else {
debug!("bad upgrade handshake!");
// reset request body buf and mark as done
// safe to reset an upgrade because it doesn't have body
self.body_reader.init_content_length(0, b"");
}
}
self.init_body_writer(&header);
}
// Don't have to flush response with content length because it is less
// likely to be real time communication. So do flush when
// 1.1xx response: client needs to see it before the rest of response
// 2.No content length: the response could be generated in real time
let flush = header.status.is_informational()
|| header.headers.get(header::CONTENT_LENGTH).is_none();
let mut write_buf = BytesMut::with_capacity(INIT_HEADER_BUF_SIZE);
http_resp_header_to_buf(&header, &mut write_buf).unwrap();
match self.underlying_stream.write_all(&write_buf).await {
Ok(()) => {
// flush the stream if 1xx header or there is no response body
if flush || self.body_writer.finished() {
self.underlying_stream
.flush()
.await
.or_err(WriteError, "flushing response header")?;
}
self.response_written = Some(header);
self.body_bytes_sent += write_buf.len();
Ok(())
}
Err(e) => Error::e_because(WriteError, "writing response header", e),
}
}
/// Return the response header if it is already sent.
pub fn response_written(&self) -> Option<&ResponseHeader> {
self.response_written.as_deref()
}
/// `Some(true)` if the this is a successful upgrade
/// `Some(false)` if the request is an upgrade but the response refuses it
/// `None` if the request is not an upgrade.
pub fn is_upgrade(&self, header: &ResponseHeader) -> Option<bool> {
if self.is_upgrade_req() {
Some(is_upgrade_resp(header))
} else {
None
}
}
fn set_keepalive(&mut self, seconds: Option<u64>) {
match seconds {
Some(sec) => {
if sec > 0 {
self.keepalive_timeout = KeepaliveStatus::Timeout(Duration::from_secs(sec));
} else {
self.keepalive_timeout = KeepaliveStatus::Infinite;
}
}
None => {
self.keepalive_timeout = KeepaliveStatus::Off;
}
}
}
pub fn get_keepalive_timeout(&self) -> Option<u64> {
match self.keepalive_timeout {
KeepaliveStatus::Timeout(d) => Some(d.as_secs()),
KeepaliveStatus::Infinite => Some(0),
KeepaliveStatus::Off => None,
}
}
/// Return whether the session will be keepalived for connection reuse.
pub fn will_keepalive(&self) -> bool {
// TODO: check self.body_writer. If it is http1.0 type then keepalive
// cannot be used because the connection close is the signal of end body
!matches!(self.keepalive_timeout, KeepaliveStatus::Off)
}
// `Keep-Alive: timeout=5, max=1000` => 5, 1000
fn get_keepalive_values(&self) -> (Option<u64>, Option<usize>) {
// TODO: implement this parsing
(None, None)
}
fn ignore_info_resp(&self, status: u16) -> bool {
// ignore informational response if ignore flag is set and it's not an Upgrade and Expect: 100-continue isn't set
self.ignore_info_resp && status != 101 && !(status == 100 && self.is_expect_continue_req())
}
fn is_expect_continue_req(&self) -> bool {
match self.request_header.as_deref() {
Some(req) => is_expect_continue_req(req),
None => false,
}
}
fn is_connection_keepalive(&self) -> Option<bool> {
is_buf_keepalive(self.get_header(header::CONNECTION))
}
// calculate write timeout from min_send_rate if set, otherwise return write_timeout
fn write_timeout(&self, buf_len: usize) -> Option<Duration> {
let Some(min_send_rate) = self.min_send_rate.filter(|r| *r > 0) else {
return self.write_timeout;
};
// min timeout is 1s
let ms = (buf_len.max(min_send_rate) as f64 / min_send_rate as f64) * 1000.0;
// truncates unrealistically large values (we'll be out of memory before this happens)
Some(Duration::from_millis(ms as u64))
}
/// Apply keepalive settings according to the client
/// For HTTP 1.1, assume keepalive as long as there is no `Connection: Close` request header.
/// For HTTP 1.0, only keepalive if there is an explicit header `Connection: keep-alive`.
pub fn respect_keepalive(&mut self) {
if let Some(keepalive) = self.is_connection_keepalive() {
if keepalive {
let (timeout, _max_use) = self.get_keepalive_values();
// TODO: respect max_use
match timeout {
Some(d) => self.set_keepalive(Some(d)),
None => self.set_keepalive(Some(0)), // infinite
}
} else {
self.set_keepalive(None);
}
} else if self.req_header().version == Version::HTTP_11 {
self.set_keepalive(Some(0)); // on by default for http 1.1
} else {
self.set_keepalive(None); // off by default for http 1.0
}
}
fn init_body_writer(&mut self, header: &ResponseHeader) {
use http::StatusCode;
/* the following responses don't have body 204, 304, and HEAD */
if matches!(
header.status,
StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED
) || self.get_method() == Some(&Method::HEAD)
{
self.body_writer.init_content_length(0);
return;
}
if header.status.is_informational() && header.status != StatusCode::SWITCHING_PROTOCOLS {
// 1xx response, not enough to init body
return;
}
if self.is_upgrade(header) == Some(true) {
self.body_writer.init_http10();
} else {
init_body_writer_comm(&mut self.body_writer, &header.headers);
}
}
/// Same as [`Self::write_response_header()`] but takes a reference.
pub async fn write_response_header_ref(&mut self, resp: &ResponseHeader) -> Result<()> {
self.write_response_header(Box::new(resp.clone())).await
}
async fn do_write_body(&mut self, buf: &[u8]) -> Result<Option<usize>> {
let written = self
.body_writer
.write_body(&mut self.underlying_stream, buf)
.await;
if let Ok(Some(num_bytes)) = written {
self.body_bytes_sent += num_bytes;
}
written
}
/// Write response body to the client. Return `Ok(None)` when there shouldn't be more body
/// to be written, e.g., writing more bytes than what the `Content-Length` header suggests
pub async fn write_body(&mut self, buf: &[u8]) -> Result<Option<usize>> {
// TODO: check if the response header is written
match self.write_timeout(buf.len()) {
Some(t) => match timeout(t, self.do_write_body(buf)).await {
Ok(res) => res,
Err(_) => Error::e_explain(WriteTimedout, format!("writing body, timeout: {t:?}")),
},
None => self.do_write_body(buf).await,
}
}
async fn do_write_body_buf(&mut self) -> Result<Option<usize>> {
// Don't flush empty chunks, they are considered end of body for chunks
if self.body_write_buf.is_empty() {
return Ok(None);
}
let written = self
.body_writer
.write_body(&mut self.underlying_stream, &self.body_write_buf)
.await;
if let Ok(Some(num_bytes)) = written {
self.body_bytes_sent += num_bytes;
}
// make sure this buf is safe to reuse
self.body_write_buf.clear();
written
}
async fn write_body_buf(&mut self) -> Result<Option<usize>> {
match self.write_timeout(self.body_write_buf.len()) {
Some(t) => match timeout(t, self.do_write_body_buf()).await {
Ok(res) => res,
Err(_) => Error::e_explain(WriteTimedout, format!("writing body, timeout: {t:?}")),
},
None => self.do_write_body_buf().await,
}
}
fn maybe_force_close_body_reader(&mut self) {
if self.upgraded && !self.body_reader.body_done() {
// response is done, reset the request body to close
self.body_reader.init_content_length(0, b"");
}
}
/// Signal that there is no more body to write.
/// This call will try to flush the buffer if there is any un-flushed data.
/// For chunked encoding response, this call will also send the last chunk.
/// For upgraded sessions, this call will also close the reading of the client body.
pub async fn finish_body(&mut self) -> Result<Option<usize>> {
let res = self.body_writer.finish(&mut self.underlying_stream).await?;
self.underlying_stream
.flush()
.await
.or_err(WriteError, "flushing body")?;
self.maybe_force_close_body_reader();
Ok(res)
}
/// Return how many response body bytes (application, not wire) already sent downstream
pub fn body_bytes_sent(&self) -> usize {
self.body_bytes_sent
}
/// Return how many request body bytes (application, not wire) already read from downstream
pub fn body_bytes_read(&self) -> usize {
self.body_bytes_read
}
fn is_chunked_encoding(&self) -> bool {
is_header_value_chunked_encoding(self.get_header(header::TRANSFER_ENCODING))
}
fn get_content_length(&self) -> Option<usize> {
buf_to_content_length(
self.get_header(header::CONTENT_LENGTH)
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | true |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/v1/common.rs | pingora-core/src/protocols/http/v1/common.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Common functions and constants
use http::{header, HeaderValue};
use log::warn;
use pingora_error::Result;
use pingora_http::{HMap, RequestHeader, ResponseHeader};
use std::str;
use std::time::Duration;
use super::body::BodyWriter;
use crate::utils::KVRef;
pub(super) const MAX_HEADERS: usize = 256;
pub(super) const INIT_HEADER_BUF_SIZE: usize = 4096;
pub(super) const MAX_HEADER_SIZE: usize = 1048575;
pub(crate) const BODY_BUF_LIMIT: usize = 1024 * 64;
pub const CRLF: &[u8; 2] = b"\r\n";
pub const HEADER_KV_DELIMITER: &[u8; 2] = b": ";
pub(super) enum HeaderParseState {
Complete(usize),
Partial,
Invalid(httparse::Error),
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub(super) enum KeepaliveStatus {
Timeout(Duration),
Infinite,
Off,
}
/// Which of the recognized `Connection` header tokens were present.
struct ConnectionValue {
    keep_alive: bool,
    upgrade: bool,
    close: bool,
}

impl ConnectionValue {
    /// No recognized token seen yet.
    fn new() -> Self {
        Self {
            keep_alive: false,
            upgrade: false,
            close: false,
        }
    }

    /// Record that the `close` token was seen.
    fn close(self) -> Self {
        Self {
            close: true,
            ..self
        }
    }

    /// Record that the `upgrade` token was seen.
    fn upgrade(self) -> Self {
        Self {
            upgrade: true,
            ..self
        }
    }

    /// Record that the `keep-alive` token was seen.
    fn keep_alive(self) -> Self {
        Self {
            keep_alive: true,
            ..self
        }
    }
}
/// Parse a `Connection` header value into the tokens we act on.
///
/// Only `keep-alive`, `close`, and `upgrade` are recognized (case-insensitively);
/// everything else is ignored.
fn parse_connection_header(value: &[u8]) -> ConnectionValue {
    // only parse keep-alive, close, and upgrade tokens
    // https://www.rfc-editor.org/rfc/rfc9110.html#section-7.6.1
    const KEEP_ALIVE: &str = "keep-alive";
    const CLOSE: &str = "close";
    const UPGRADE: &str = "upgrade";

    // fast path: the entire value is exactly one known token
    if value.eq_ignore_ascii_case(CLOSE.as_bytes()) {
        ConnectionValue::new().close()
    } else if value.eq_ignore_ascii_case(KEEP_ALIVE.as_bytes()) {
        ConnectionValue::new().keep_alive()
    } else if value.eq_ignore_ascii_case(UPGRADE.as_bytes()) {
        ConnectionValue::new().upgrade()
    } else {
        // slow path: parse the comma-separated token list
        let mut close = false;
        let mut upgrade = false;
        let mut keep_alive = false;
        // a non-UTF-8 value is treated as the empty list (no recognized tokens)
        let value = str::from_utf8(value).unwrap_or("");
        for token in value
            .split(',')
            .map(|s| s.trim())
            .filter(|&x| !x.is_empty())
        {
            if token.eq_ignore_ascii_case(CLOSE) {
                close = true;
            } else if token.eq_ignore_ascii_case(UPGRADE) {
                upgrade = true;
            } else if token.eq_ignore_ascii_case(KEEP_ALIVE) {
                // Fix: the list form previously dropped the keep-alive token even
                // though the single-token fast path (and the comment above) honor
                // it; recognize it so "keep-alive, upgrade" behaves consistently.
                keep_alive = true;
            }
            if upgrade && close {
                return ConnectionValue::new().upgrade().close();
            }
        }
        // precedence: close > upgrade > keep-alive
        if close {
            ConnectionValue::new().close()
        } else if upgrade {
            ConnectionValue::new().upgrade()
        } else if keep_alive {
            ConnectionValue::new().keep_alive()
        } else {
            ConnectionValue::new()
        }
    }
}
/// Initialize the [`BodyWriter`] framing mode from the message headers.
///
/// `Transfer-Encoding: chunked` takes priority over `Content-Length`; with
/// neither present, fall back to HTTP/1.0-style framing (body ends when the
/// connection closes).
pub(crate) fn init_body_writer_comm(body_writer: &mut BodyWriter, headers: &HMap) {
    let te_value = headers.get(http::header::TRANSFER_ENCODING);
    if is_header_value_chunked_encoding(te_value) {
        // transfer-encoding takes priority over content-length
        body_writer.init_chunked();
    } else {
        let content_length = header_value_content_length(headers.get(http::header::CONTENT_LENGTH));
        match content_length {
            Some(length) => {
                body_writer.init_content_length(length);
            }
            None => {
                /* TODO: 1. connection: keepalive cannot be used,
                2. mark connection must be closed */
                body_writer.init_http10();
            }
        }
    }
}
/// Whether the given `Transfer-Encoding` header value is exactly `chunked`
/// (case-insensitive). A missing header is never chunked.
#[inline]
pub fn is_header_value_chunked_encoding(header_value: Option<&http::header::HeaderValue>) -> bool {
    header_value.is_some_and(|value| value.as_bytes().eq_ignore_ascii_case(b"chunked"))
}
/// Whether the request asks for a protocol upgrade:
/// HTTP/1.1 with an `Upgrade` header present.
pub fn is_upgrade_req(req: &RequestHeader) -> bool {
    req.version == http::Version::HTTP_11 && req.headers.get(header::UPGRADE).is_some()
}
/// Whether the request expects a `100 Continue` interim response:
/// HTTP/1.1 with `Expect: 100-continue` (case-insensitive).
pub fn is_expect_continue_req(req: &RequestHeader) -> bool {
    req.version == http::Version::HTTP_11
    // https://www.rfc-editor.org/rfc/rfc9110#section-10.1.1
    && req.headers.get(header::EXPECT).is_some_and(|v| {
        v.as_bytes().eq_ignore_ascii_case(b"100-continue")
    })
}
// Unlike the upgrade check on request, this function doesn't check the Upgrade or Connection header
// because when seeing 101, we assume the server accepts to switch protocol.
// In reality it is not common that some servers don't send all the required headers to establish
// websocket connections.
/// Whether the response accepts a protocol upgrade:
/// `101 Switching Protocols` over HTTP/1.1.
pub fn is_upgrade_resp(header: &ResponseHeader) -> bool {
    header.status == 101 && header.version == http::Version::HTTP_11
}
/// Parse an optional `Content-Length` header value.
///
/// Returns `None` when the header is absent or its value is not a valid
/// non-negative integer (see [`buf_to_content_length`]).
#[inline]
pub fn header_value_content_length(
    header_value: Option<&http::header::HeaderValue>,
) -> Option<usize> {
    header_value.and_then(|value| buf_to_content_length(Some(value.as_bytes())))
}
/// Parse a raw `Content-Length` byte value into a length.
///
/// Returns `None` (with a warning logged) when the bytes are not UTF-8, not a
/// number, or negative.
#[inline]
pub(super) fn buf_to_content_length(header_value: Option<&[u8]>) -> Option<usize> {
    let buf = header_value?;
    // the value must be valid UTF-8 before it can be parsed as a number
    let Ok(str_cl_value) = str::from_utf8(buf) else {
        warn!("invalid content-length header encoding");
        return None;
    };
    // parse as i64 first so negative values can be reported distinctly
    let Ok(cl_length) = str_cl_value.parse::<i64>() else {
        warn!("invalid content-length header value {str_cl_value}");
        return None;
    };
    if cl_length < 0 {
        warn!("negative content-length header value {cl_length}");
        return None;
    }
    Some(cl_length as usize)
}
/// Interpret a `Connection` header value as a keepalive decision.
///
/// Returns `Some(true)` when the keep-alive token is present, `Some(false)`
/// when the close token is present, and `None` when the header is absent or
/// carries neither token (caller falls back to the protocol default).
#[inline]
pub(super) fn is_buf_keepalive(header_value: Option<&HeaderValue>) -> Option<bool> {
    header_value.and_then(|value| {
        let value = parse_connection_header(value.as_bytes());
        if value.keep_alive {
            Some(true)
        } else if value.close {
            Some(false)
        } else {
            None
        }
    })
}
/// Convert `httparse` header references into [`KVRef`] offsets relative to
/// `base` (the address of the start of the buffer the headers point into —
/// the pointer arithmetic below assumes all name/value slices live in that
/// buffer).
///
/// Headers with empty names are skipped; returns the number of headers kept.
#[inline]
pub(super) fn populate_headers(
    base: usize,
    header_ref: &mut Vec<KVRef>,
    headers: &[httparse::Header],
) -> usize {
    let mut used_header_index = 0;
    for header in headers.iter() {
        if !header.name.is_empty() {
            // store offsets rather than pointers so they remain valid however
            // the owning buffer is later moved or shared
            header_ref.push(KVRef::new(
                header.name.as_ptr() as usize - base,
                header.name.len(),
                header.value.as_ptr() as usize - base,
                header.value.len(),
            ));
            used_header_index += 1;
        }
    }
    used_header_index
}
// RFC 7230:
// If a message is received without Transfer-Encoding and with
// either multiple Content-Length header fields having differing
// field-values or a single Content-Length header field having an
// invalid value, then the message framing is invalid and the
// recipient MUST treat it as an unrecoverable error.
pub(super) fn check_dup_content_length(headers: &HMap) -> Result<()> {
if headers.get(header::TRANSFER_ENCODING).is_some() {
// If TE header, ignore CL
return Ok(());
}
let mut cls = headers.get_all(header::CONTENT_LENGTH).into_iter();
if cls.next().is_none() {
// no CL header is fine.
return Ok(());
}
if cls.next().is_some() {
// duplicated CL is bad
return crate::Error::e_explain(
crate::ErrorType::InvalidHTTPHeader,
"duplicated Content-Length header",
);
}
Ok(())
}
#[cfg(test)]
mod test {
    use super::*;
    use http::{
        header::{CONTENT_LENGTH, TRANSFER_ENCODING},
        StatusCode, Version,
    };
    // Content-Length duplication rules: none/one OK, duplicates rejected,
    // everything ignored once Transfer-Encoding is present.
    #[test]
    fn test_check_dup_content_length() {
        let mut headers = HMap::new();
        assert!(check_dup_content_length(&headers).is_ok());
        headers.append(CONTENT_LENGTH, "1".try_into().unwrap());
        assert!(check_dup_content_length(&headers).is_ok());
        headers.append(CONTENT_LENGTH, "2".try_into().unwrap());
        assert!(check_dup_content_length(&headers).is_err());
        // any Transfer-Encoding value (even a bogus one) disables the CL check
        headers.append(TRANSFER_ENCODING, "chunkeds".try_into().unwrap());
        assert!(check_dup_content_length(&headers).is_ok());
    }
    // 101 + HTTP/1.1 is an upgrade; any other version or status is not.
    #[test]
    fn test_is_upgrade_resp() {
        let mut response = ResponseHeader::build(StatusCode::SWITCHING_PROTOCOLS, None).unwrap();
        response.set_version(Version::HTTP_11);
        response.insert_header("Upgrade", "websocket").unwrap();
        response.insert_header("Connection", "upgrade").unwrap();
        assert!(is_upgrade_resp(&response));
        // wrong http version
        response.set_version(Version::HTTP_10);
        response.insert_header("Upgrade", "websocket").unwrap();
        response.insert_header("Connection", "upgrade").unwrap();
        assert!(!is_upgrade_resp(&response));
        // not 101
        response.set_status(StatusCode::OK).unwrap();
        response.set_version(Version::HTTP_11);
        assert!(!is_upgrade_resp(&response));
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/compression/brotli.rs | pingora-core/src/protocols/http/compression/brotli.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::Encode;
use super::COMPRESSION_ERROR;
use brotli::{CompressorWriter, DecompressorWriter};
use bytes::Bytes;
use pingora_error::{OrErr, Result};
use std::io::Write;
use std::time::{Duration, Instant};
/// Streaming brotli decompressor that accumulates decoded bytes in a `Vec<u8>`.
pub struct Decompressor {
    // inner writer; decoded output collects in the wrapped Vec
    decompress: DecompressorWriter<Vec<u8>>,
    // total compressed bytes fed in
    total_in: usize,
    // total decompressed bytes produced
    total_out: usize,
    // cumulative wall time spent decompressing
    duration: Duration,
}
impl Decompressor {
    /// Create a decompressor with the library default internal buffer size.
    pub fn new() -> Self {
        Decompressor {
            // default buf is 4096 if 0 is used, TODO: figure out the significance of this value
            decompress: DecompressorWriter::new(vec![], 0),
            total_in: 0,
            total_out: 0,
            duration: Duration::new(0, 0),
        }
    }
}
impl Encode for Decompressor {
    // "encode" here means decompress: feed brotli bytes, take out plain bytes
    fn encode(&mut self, input: &[u8], end: bool) -> Result<Bytes> {
        const MAX_INIT_COMPRESSED_SIZE_CAP: usize = 4 * 1024;
        // Brotli compress ratio can be 3.5 to 4.5
        const ESTIMATED_COMPRESSION_RATIO: usize = 4;
        let start = Instant::now();
        self.total_in += input.len();
        // cap the buf size amplification, there is a DoS risk of always allocate
        // 4x the memory of the input buffer
        let reserve_size = if input.len() < MAX_INIT_COMPRESSED_SIZE_CAP {
            input.len() * ESTIMATED_COMPRESSION_RATIO
        } else {
            input.len()
        };
        self.decompress.get_mut().reserve(reserve_size);
        self.decompress
            .write_all(input)
            .or_err(COMPRESSION_ERROR, "while decompress Brotli")?;
        // write to vec will never fail. The only possible error is that the input data
        // is invalid (not brotli compressed)
        if end {
            // end of stream: flush whatever the decoder still buffers internally
            self.decompress
                .flush()
                .or_err(COMPRESSION_ERROR, "while decompress Brotli")?;
        }
        self.total_out += self.decompress.get_ref().len();
        self.duration += start.elapsed();
        Ok(std::mem::take(self.decompress.get_mut()).into()) // into() Bytes will drop excess capacity
    }
    // (name, bytes in, bytes out, time spent)
    fn stat(&self) -> (&'static str, usize, usize, Duration) {
        ("de-brotli", self.total_in, self.total_out, self.duration)
    }
}
/// Streaming brotli compressor that accumulates encoded bytes in a `Vec<u8>`.
pub struct Compressor {
    // inner writer; compressed output collects in the wrapped Vec
    compress: CompressorWriter<Vec<u8>>,
    // total uncompressed bytes fed in
    total_in: usize,
    // total compressed bytes produced
    total_out: usize,
    // cumulative wall time spent compressing
    duration: Duration,
}
impl Compressor {
    /// Create a compressor at the given brotli quality `level`.
    pub fn new(level: u32) -> Self {
        Compressor {
            // buf_size:4096 , lgwin:19 TODO: fine tune these
            compress: CompressorWriter::new(vec![], 4096, level, 19),
            total_in: 0,
            total_out: 0,
            duration: Duration::new(0, 0),
        }
    }
}
impl Encode for Compressor {
    fn encode(&mut self, input: &[u8], end: bool) -> Result<Bytes> {
        // reserve at most 16k
        const MAX_INIT_COMPRESSED_BUF_SIZE: usize = 16 * 1024;
        let start = Instant::now();
        self.total_in += input.len();
        // reserve at most input size, cap at 16k, compressed output should be smaller
        self.compress
            .get_mut()
            .reserve(std::cmp::min(MAX_INIT_COMPRESSED_BUF_SIZE, input.len()));
        self.compress
            .write_all(input)
            .or_err(COMPRESSION_ERROR, "while compress Brotli")?;
        // write to vec will never fail.
        if end {
            // end of stream: flush the encoder's internal buffer
            self.compress
                .flush()
                .or_err(COMPRESSION_ERROR, "while compress Brotli")?;
        }
        self.total_out += self.compress.get_ref().len();
        self.duration += start.elapsed();
        Ok(std::mem::take(self.compress.get_mut()).into()) // into() Bytes will drop excess capacity
    }
    // (name, bytes in, bytes out, time spent)
    fn stat(&self) -> (&'static str, usize, usize, Duration) {
        ("brotli", self.total_in, self.total_out, self.duration)
    }
}
#[cfg(test)]
mod tests_stream {
    use super::*;
    // Decode a known brotli payload back to the original text.
    #[test]
    fn decompress_brotli_data() {
        // renamed from the misleading `compressor`: this local is a Decompressor
        let mut decompressor = Decompressor::new();
        let decompressed = decompressor
            .encode(
                &[
                    0x1f, 0x0f, 0x00, 0xf8, 0x45, 0x07, 0x87, 0x3e, 0x10, 0xfb, 0x55, 0x92, 0xec,
                    0x12, 0x09, 0xcc, 0x38, 0xdd, 0x51, 0x1e,
                ],
                true,
            )
            .unwrap();
        assert_eq!(&decompressed[..], &b"adcdefgabcdefgh\n"[..]);
    }
    // Encode a known input and pin the exact compressed bytes at quality 11.
    #[test]
    fn compress_brotli_data() {
        let mut compressor = Compressor::new(11);
        let compressed = compressor.encode(&b"adcdefgabcdefgh\n"[..], true).unwrap();
        assert_eq!(
            &compressed[..],
            &[
                0x85, 0x07, 0x00, 0xf8, 0x45, 0x07, 0x87, 0x3e, 0x10, 0xfb, 0x55, 0x92, 0xec, 0x12,
                0x09, 0xcc, 0x38, 0xdd, 0x51, 0x1e,
            ],
        );
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/compression/gzip.rs | pingora-core/src/protocols/http/compression/gzip.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::{Encode, COMPRESSION_ERROR};
use bytes::Bytes;
use flate2::write::{GzDecoder, GzEncoder};
use pingora_error::{OrErr, Result};
use std::io::Write;
use std::time::{Duration, Instant};
/// Streaming gzip decompressor that accumulates decoded bytes in a `Vec<u8>`.
pub struct Decompressor {
    // inner writer; decoded output collects in the wrapped Vec
    decompress: GzDecoder<Vec<u8>>,
    // total compressed bytes fed in
    total_in: usize,
    // total decompressed bytes produced
    total_out: usize,
    // cumulative wall time spent decompressing
    duration: Duration,
}
impl Decompressor {
    /// Create a gzip decompressor with an empty output buffer.
    pub fn new() -> Self {
        Decompressor {
            decompress: GzDecoder::new(vec![]),
            total_in: 0,
            total_out: 0,
            duration: Duration::new(0, 0),
        }
    }
}
impl Encode for Decompressor {
    // "encode" here means decompress: feed gzip bytes, take out plain bytes
    fn encode(&mut self, input: &[u8], end: bool) -> Result<Bytes> {
        const MAX_INIT_COMPRESSED_SIZE_CAP: usize = 4 * 1024;
        const ESTIMATED_COMPRESSION_RATIO: usize = 3; // estimated 2.5-3x compression
        let start = Instant::now();
        self.total_in += input.len();
        // cap the buf size amplification, there is a DoS risk of always allocate
        // 3x the memory of the input buffer
        let reserve_size = if input.len() < MAX_INIT_COMPRESSED_SIZE_CAP {
            input.len() * ESTIMATED_COMPRESSION_RATIO
        } else {
            input.len()
        };
        self.decompress.get_mut().reserve(reserve_size);
        self.decompress
            .write_all(input)
            .or_err(COMPRESSION_ERROR, "while decompress Gzip")?;
        // write to vec will never fail, only possible error is that the input data
        // was not actually gzip compressed
        if end {
            // end of stream: finalize the decoder (validates the gzip trailer)
            self.decompress
                .try_finish()
                .or_err(COMPRESSION_ERROR, "while decompress Gzip")?;
        }
        self.total_out += self.decompress.get_ref().len();
        self.duration += start.elapsed();
        Ok(std::mem::take(self.decompress.get_mut()).into()) // into() Bytes will drop excess capacity
    }
    // (name, bytes in, bytes out, time spent)
    fn stat(&self) -> (&'static str, usize, usize, Duration) {
        ("de-gzip", self.total_in, self.total_out, self.duration)
    }
}
/// Streaming gzip compressor that accumulates encoded bytes in a `Vec<u8>`.
pub struct Compressor {
    // TODO: enum for other compression algorithms
    compress: GzEncoder<Vec<u8>>,
    // total uncompressed bytes fed in
    total_in: usize,
    // total compressed bytes produced
    total_out: usize,
    // cumulative wall time spent compressing
    duration: Duration,
}
impl Compressor {
    /// Create a gzip compressor at the given compression `level`.
    pub fn new(level: u32) -> Compressor {
        Compressor {
            compress: GzEncoder::new(vec![], flate2::Compression::new(level)),
            total_in: 0,
            total_out: 0,
            duration: Duration::new(0, 0),
        }
    }
}
impl Encode for Compressor {
    // infallible because compression can take any data
    fn encode(&mut self, input: &[u8], end: bool) -> Result<Bytes> {
        // reserve at most 16k
        const MAX_INIT_COMPRESSED_BUF_SIZE: usize = 16 * 1024;
        let start = Instant::now();
        self.total_in += input.len();
        self.compress
            .get_mut()
            .reserve(std::cmp::min(MAX_INIT_COMPRESSED_BUF_SIZE, input.len()));
        // `self.write_all`/`self.try_finish` reach the inner GzEncoder via DerefMut (below)
        self.write_all(input).unwrap(); // write to vec, should never fail
        if end {
            self.try_finish().unwrap(); // write to vec, should never fail
        }
        self.total_out += self.compress.get_ref().len();
        self.duration += start.elapsed();
        Ok(std::mem::take(self.compress.get_mut()).into()) // into() Bytes will drop excess capacity
    }
    // (name, bytes in, bytes out, time spent)
    fn stat(&self) -> (&'static str, usize, usize, Duration) {
        ("gzip", self.total_in, self.total_out, self.duration)
    }
}
use std::ops::{Deref, DerefMut};
// Expose the inner flate2 encoder/decoder APIs (e.g. `get_ref`, `try_finish`)
// directly on the wrappers via Deref/DerefMut.
impl Deref for Decompressor {
    type Target = GzDecoder<Vec<u8>>;
    fn deref(&self) -> &Self::Target {
        &self.decompress
    }
}
impl DerefMut for Decompressor {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.decompress
    }
}
impl Deref for Compressor {
    type Target = GzEncoder<Vec<u8>>;
    fn deref(&self) -> &Self::Target {
        &self.compress
    }
}
impl DerefMut for Compressor {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.compress
    }
}
#[cfg(test)]
mod tests_stream {
    use super::*;
    // Round-trip sanity on the compressor: magic bytes, crc footer, counters,
    // and that the internal buffer is drained after encode().
    #[test]
    fn gzip_data() {
        let mut compressor = Compressor::new(6);
        let compressed = compressor.encode(b"abcdefg", true).unwrap();
        // gzip magic headers
        assert_eq!(&compressed[..3], &[0x1f, 0x8b, 0x08]);
        // check the crc32 footer
        assert_eq!(
            &compressed[compressed.len() - 9..],
            &[0, 166, 106, 42, 49, 7, 0, 0, 0]
        );
        assert_eq!(compressor.total_in, 7);
        assert_eq!(compressor.total_out, compressed.len());
        assert!(compressor.get_ref().is_empty());
    }
    // Decode a known gzip payload and verify counters/buffer drain.
    #[test]
    fn gunzip_data() {
        let mut decompressor = Decompressor::new();
        let compressed_bytes = &[
            0x1f, 0x8b, 0x08, 0, 0, 0, 0, 0, 0, 255, 75, 76, 74, 78, 73, 77, 75, 7, 0, 166, 106,
            42, 49, 7, 0, 0, 0,
        ];
        let decompressed = decompressor.encode(compressed_bytes, true).unwrap();
        assert_eq!(&decompressed[..], b"abcdefg");
        assert_eq!(decompressor.total_in, compressed_bytes.len());
        assert_eq!(decompressor.total_out, decompressed.len());
        assert!(decompressor.get_ref().is_empty());
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/compression/zstd.rs | pingora-core/src/protocols/http/compression/zstd.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::{Encode, COMPRESSION_ERROR};
use bytes::Bytes;
use parking_lot::Mutex;
use pingora_error::{OrErr, Result};
use std::io::Write;
use std::time::{Duration, Instant};
use zstd::stream::write::Encoder;
/// Streaming zstd compressor that accumulates encoded bytes in a `Vec<u8>`.
pub struct Compressor {
    // Mutex-wrapped because the zstd Encoder is not Sync (see link in `new`)
    compress: Mutex<Encoder<'static, Vec<u8>>>,
    // total uncompressed bytes fed in
    total_in: usize,
    // total compressed bytes produced
    total_out: usize,
    // cumulative wall time spent compressing
    duration: Duration,
}
impl Compressor {
    /// Create a zstd compressor at the given compression `level`.
    pub fn new(level: u32) -> Self {
        Compressor {
            // Mutex because Encoder is not Sync
            // https://github.com/gyscos/zstd-rs/issues/186
            // NOTE(review): the unwrap panics if the encoder fails to initialize
            // (e.g. an out-of-range level) — confirm callers only pass valid levels
            compress: Mutex::new(Encoder::new(vec![], level as i32).unwrap()),
            total_in: 0,
            total_out: 0,
            duration: Duration::new(0, 0),
        }
    }
}
impl Encode for Compressor {
    fn encode(&mut self, input: &[u8], end: bool) -> Result<Bytes> {
        // reserve at most 16k
        const MAX_INIT_COMPRESSED_BUF_SIZE: usize = 16 * 1024;
        let start = Instant::now();
        self.total_in += input.len();
        // lock held for the whole call; &mut self means it is uncontended here
        let mut compress = self.compress.lock();
        // reserve at most input size, cap at 16k, compressed output should be smaller
        compress
            .get_mut()
            .reserve(std::cmp::min(MAX_INIT_COMPRESSED_BUF_SIZE, input.len()));
        compress
            .write_all(input)
            .or_err(COMPRESSION_ERROR, "while compress zstd")?;
        // write to vec will never fail.
        if end {
            // end of stream: finalize the zstd frame
            compress
                .do_finish()
                .or_err(COMPRESSION_ERROR, "while compress zstd")?;
        }
        self.total_out += compress.get_ref().len();
        self.duration += start.elapsed();
        Ok(std::mem::take(compress.get_mut()).into()) // into() Bytes will drop excess capacity
    }
    // (name, bytes in, bytes out, time spent)
    fn stat(&self) -> (&'static str, usize, usize, Duration) {
        ("zstd", self.total_in, self.total_out, self.duration)
    }
}
#[cfg(test)]
mod tests_stream {
    use super::*;
    // The encoder buffers until `end`; only the final call yields frame bytes.
    #[test]
    fn compress_zstd_data() {
        let mut compressor = Compressor::new(11);
        let input = b"adcdefgabcdefghadcdefgabcdefghadcdefgabcdefghadcdefgabcdefgh\n";
        let compressed = compressor.encode(&input[..], false).unwrap();
        // waiting for more data
        assert!(compressed.is_empty());
        let compressed = compressor.encode(&input[..], true).unwrap();
        // the zstd Magic_Number
        assert_eq!(&compressed[..4], &[0x28, 0xB5, 0x2F, 0xFD]);
        assert!(compressed.len() < input.len());
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/compression/mod.rs | pingora-core/src/protocols/http/compression/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! HTTP response (de)compression libraries
//!
//! Brotli and Gzip and partially supported.
use super::HttpTask;
use bytes::Bytes;
use log::{debug, warn};
use pingora_error::{ErrorType, Result};
use pingora_http::{RequestHeader, ResponseHeader};
use std::time::Duration;
use strum::EnumCount;
use strum_macros::EnumCount as EnumCountMacro;
mod brotli;
mod gzip;
mod zstd;
/// The type of error to return when (de)compression fails
pub const COMPRESSION_ERROR: ErrorType = ErrorType::new("CompressionError");
/// The trait for both compress and decompress because the interface and syntax are the same:
/// encode some bytes to other bytes
pub trait Encode {
    /// Encode the input bytes. The `end` flag signals the end of the entire input. The `end` flag
    /// helps the encoder to flush out the remaining buffered encoded data because certain compression
    /// algorithms prefer to collect large enough data to compress all together.
    ///
    /// Calling with an empty `input` and `end == true` is valid and is how the
    /// caller flushes a finished stream (see `response_body_filter`).
    fn encode(&mut self, input: &[u8], end: bool) -> Result<Bytes>;
    /// Return the Encoder's name, the total input bytes, the total output bytes and the total
    /// duration spent on encoding the data.
    fn stat(&self) -> (&'static str, usize, usize, Duration);
}
/// The response compression object. Currently support gzip compression and brotli decompression.
///
/// To use it, the caller should create a [`ResponseCompressionCtx`] per HTTP session.
/// The caller should call the corresponding filters for the request header, response header and
/// response body. If the algorithms are supported, the output response body will be encoded.
/// The response header will be adjusted accordingly as well. If the algorithm is not supported
/// or no encoding is needed, the response is untouched.
///
/// If configured and if the request's `accept-encoding` header contains the algorithm supported and the
/// incoming response doesn't have that encoding, the filter will compress the response.
/// If configured and supported, and if the incoming response's `content-encoding` isn't one of the
/// request's `accept-encoding` supported algorithm, the ctx will decompress the response.
///
/// # Currently supported algorithms and actions
/// - Brotli decompression: if the response is br compressed, this ctx can decompress it
/// - Gzip compression: if the response is uncompressed, this ctx can compress it with gzip
pub struct ResponseCompressionCtx(CtxInner);
/// Internal two-phase state machine: configuration is collected while headers
/// are being processed, then replaced by the chosen (optional) body encoder.
enum CtxInner {
    HeaderPhase {
        // Store the preferred list to compare with content-encoding
        accept_encoding: Vec<Algorithm>,
        // per-algorithm settings, all indexed by Algorithm::index()
        encoding_levels: [u32; Algorithm::COUNT],
        decompress_enable: [bool; Algorithm::COUNT],
        preserve_etag: [bool; Algorithm::COUNT],
    },
    // None means this response body passes through untransformed
    BodyPhase(Option<Box<dyn Encode + Send + Sync>>),
}
impl ResponseCompressionCtx {
    /// Create a new [`ResponseCompressionCtx`] with the expected compression level. `0` will disable
    /// the compression. The compression level is applied across all algorithms.
    /// The `decompress_enable` flag will tell the ctx to decompress if needed.
    /// The `preserve_etag` flag indicates whether the ctx should avoid modifying the etag,
    /// which will otherwise be weakened if the flag is false and (de)compression is applied.
    pub fn new(compression_level: u32, decompress_enable: bool, preserve_etag: bool) -> Self {
        Self(CtxInner::HeaderPhase {
            accept_encoding: Vec::new(),
            encoding_levels: [compression_level; Algorithm::COUNT],
            decompress_enable: [decompress_enable; Algorithm::COUNT],
            preserve_etag: [preserve_etag; Algorithm::COUNT],
        })
    }
    /// Whether the encoder is enabled.
    /// The enablement will change according to the request and response filter by this ctx.
    pub fn is_enabled(&self) -> bool {
        match &self.0 {
            // header phase: enabled if any algorithm may compress or decompress
            CtxInner::HeaderPhase {
                decompress_enable,
                encoding_levels: levels,
                ..
            } => levels.iter().any(|l| *l != 0) || decompress_enable.iter().any(|d| *d),
            // body phase: enabled iff an encoder was actually chosen
            CtxInner::BodyPhase(c) => c.is_some(),
        }
    }
    /// Return the stat of this ctx:
    /// algorithm name, in bytes, out bytes, time took for the compression
    pub fn get_info(&self) -> Option<(&'static str, usize, usize, Duration)> {
        match &self.0 {
            CtxInner::HeaderPhase { .. } => None,
            CtxInner::BodyPhase(c) => c.as_ref().map(|c| c.stat()),
        }
    }
    /// Adjust the compression level for all compression algorithms.
    /// # Panic
    /// This function will panic if it has already started encoding the response body.
    pub fn adjust_level(&mut self, new_level: u32) {
        match &mut self.0 {
            CtxInner::HeaderPhase {
                encoding_levels: levels,
                ..
            } => {
                *levels = [new_level; Algorithm::COUNT];
            }
            CtxInner::BodyPhase(_) => panic!("Wrong phase: BodyPhase"),
        }
    }
    /// Adjust the compression level for a specific algorithm.
    /// # Panic
    /// This function will panic if it has already started encoding the response body.
    pub fn adjust_algorithm_level(&mut self, algorithm: Algorithm, new_level: u32) {
        match &mut self.0 {
            CtxInner::HeaderPhase {
                encoding_levels: levels,
                ..
            } => {
                levels[algorithm.index()] = new_level;
            }
            CtxInner::BodyPhase(_) => panic!("Wrong phase: BodyPhase"),
        }
    }
    /// Adjust the decompression flag for all compression algorithms.
    /// # Panic
    /// This function will panic if it has already started encoding the response body.
    pub fn adjust_decompression(&mut self, enabled: bool) {
        match &mut self.0 {
            CtxInner::HeaderPhase {
                decompress_enable, ..
            } => {
                *decompress_enable = [enabled; Algorithm::COUNT];
            }
            CtxInner::BodyPhase(_) => panic!("Wrong phase: BodyPhase"),
        }
    }
    /// Adjust the decompression flag for a specific algorithm.
    /// # Panic
    /// This function will panic if it has already started encoding the response body.
    pub fn adjust_algorithm_decompression(&mut self, algorithm: Algorithm, enabled: bool) {
        match &mut self.0 {
            CtxInner::HeaderPhase {
                decompress_enable, ..
            } => {
                decompress_enable[algorithm.index()] = enabled;
            }
            CtxInner::BodyPhase(_) => panic!("Wrong phase: BodyPhase"),
        }
    }
    /// Adjust preserve etag setting.
    /// # Panic
    /// This function will panic if it has already started encoding the response body.
    pub fn adjust_preserve_etag(&mut self, enabled: bool) {
        match &mut self.0 {
            CtxInner::HeaderPhase { preserve_etag, .. } => {
                *preserve_etag = [enabled; Algorithm::COUNT];
            }
            CtxInner::BodyPhase(_) => panic!("Wrong phase: BodyPhase"),
        }
    }
    /// Adjust preserve etag setting for a specific algorithm.
    /// # Panic
    /// This function will panic if it has already started encoding the response body.
    pub fn adjust_algorithm_preserve_etag(&mut self, algorithm: Algorithm, enabled: bool) {
        match &mut self.0 {
            CtxInner::HeaderPhase { preserve_etag, .. } => {
                preserve_etag[algorithm.index()] = enabled;
            }
            CtxInner::BodyPhase(_) => panic!("Wrong phase: BodyPhase"),
        }
    }
    /// Feed the request header into this ctx.
    ///
    /// Only records the request's `Accept-Encoding` preferences; the actual
    /// compression decision is made later in [`Self::response_header_filter`].
    pub fn request_filter(&mut self, req: &RequestHeader) {
        if !self.is_enabled() {
            return;
        }
        match &mut self.0 {
            CtxInner::HeaderPhase {
                accept_encoding, ..
            } => parse_accept_encoding(
                req.headers.get(http::header::ACCEPT_ENCODING),
                accept_encoding,
            ),
            CtxInner::BodyPhase(_) => panic!("Wrong phase: BodyPhase"),
        }
    }
    /// Feed the response header into this ctx
    pub fn response_header_filter(&mut self, resp: &mut ResponseHeader, end: bool) {
        if !self.is_enabled() {
            return;
        }
        match &self.0 {
            CtxInner::HeaderPhase {
                decompress_enable,
                preserve_etag,
                accept_encoding,
                encoding_levels: levels,
            } => {
                if resp.status.is_informational() {
                    if resp.status == http::status::StatusCode::SWITCHING_PROTOCOLS {
                        // no transformation for websocket (TODO: cite RFC)
                        self.0 = CtxInner::BodyPhase(None);
                    }
                    // else, wait for the final response header for decision
                    return;
                }
                // do nothing if no body
                if end {
                    self.0 = CtxInner::BodyPhase(None);
                    return;
                }
                if depends_on_accept_encoding(
                    resp,
                    levels.iter().any(|level| *level != 0),
                    decompress_enable,
                ) {
                    // The response depends on the Accept-Encoding header, make sure to indicate it
                    // in the Vary response header.
                    // https://www.rfc-editor.org/rfc/rfc9110#name-vary
                    add_vary_header(resp, &http::header::ACCEPT_ENCODING);
                }
                let action = decide_action(resp, accept_encoding);
                debug!("compression action: {action:?}");
                // pick the encoder (if any) and the matching preserve-etag flag
                let (encoder, preserve_etag) = match action {
                    Action::Noop => (None, false),
                    Action::Compress(algorithm) => {
                        let idx = algorithm.index();
                        (algorithm.compressor(levels[idx]), preserve_etag[idx])
                    }
                    Action::Decompress(algorithm) => {
                        let idx = algorithm.index();
                        (
                            algorithm.decompressor(decompress_enable[idx]),
                            preserve_etag[idx],
                        )
                    }
                };
                if encoder.is_some() {
                    adjust_response_header(resp, &action, preserve_etag);
                }
                // transition to the body phase with the chosen encoder (or None)
                self.0 = CtxInner::BodyPhase(encoder);
            }
            CtxInner::BodyPhase(_) => panic!("Wrong phase: BodyPhase"),
        }
    }
    /// Stream the response body chunks into this ctx. The return value will be the compressed
    /// data.
    ///
    /// Return None if compression is not enabled.
    pub fn response_body_filter(&mut self, data: Option<&Bytes>, end: bool) -> Option<Bytes> {
        match &mut self.0 {
            CtxInner::HeaderPhase { .. } => panic!("Wrong phase: HeaderPhase"),
            CtxInner::BodyPhase(compressor) => {
                let result = compressor
                    .as_mut()
                    .map(|c| {
                        // Feed even empty slice to compressor because it might yield data
                        // when `end` is true
                        let data = if let Some(b) = data { b.as_ref() } else { &[] };
                        c.encode(data, end)
                    })
                    .transpose();
                result.unwrap_or_else(|e| {
                    warn!("Failed to compress, compression disabled, {}", e);
                    // no point to transcode further data because bad data is already seen
                    self.0 = CtxInner::BodyPhase(None);
                    None
                })
            }
        }
    }
    // TODO: retire this function, replace it with the two functions above
    /// Feed the response into this ctx.
    /// This filter will mutate the response accordingly if encoding is needed.
    pub fn response_filter(&mut self, t: &mut HttpTask) {
        if !self.is_enabled() {
            return;
        }
        match t {
            HttpTask::Header(resp, end) => self.response_header_filter(resp, *end),
            HttpTask::Body(data, end) => {
                let compressed = self.response_body_filter(data.as_ref(), *end);
                if compressed.is_some() {
                    *t = HttpTask::Body(compressed, *end);
                }
            }
            HttpTask::Done => {
                // try to finish/flush compression
                let compressed = self.response_body_filter(None, true);
                if compressed.is_some() {
                    // compressor has more data to flush
                    *t = HttpTask::Body(compressed, true);
                }
            }
            _ => { /* Trailer, Failed: do nothing? */ }
        }
    }
}
/// The supported content-coding algorithms, plus the `*` wildcard and a
/// catch-all for unrecognized codings. Variant order matters: `index()`
/// uses the discriminant to index the per-algorithm setting arrays.
#[derive(Debug, PartialEq, Eq, Clone, Copy, EnumCountMacro)]
pub enum Algorithm {
    Any, // the "*"
    Gzip,
    Brotli,
    Zstd,
    // TODO: Identity,
    // TODO: Deflate
    Other, // anything unknown
}
impl Algorithm {
    /// The canonical token for this algorithm as used in HTTP headers.
    pub fn as_str(&self) -> &'static str {
        match self {
            Algorithm::Gzip => "gzip",
            Algorithm::Brotli => "br",
            Algorithm::Zstd => "zstd",
            Algorithm::Any => "*",
            Algorithm::Other => "other",
        }
    }
    /// Build a compressor for this algorithm at the given `level`.
    /// Returns `None` when `level` is 0 (disabled) or the algorithm has no
    /// compressor implementation.
    pub fn compressor(&self, level: u32) -> Option<Box<dyn Encode + Send + Sync>> {
        if level == 0 {
            None
        } else {
            match self {
                Self::Gzip => Some(Box::new(gzip::Compressor::new(level))),
                Self::Brotli => Some(Box::new(brotli::Compressor::new(level))),
                Self::Zstd => Some(Box::new(zstd::Compressor::new(level))),
                _ => None, // not implemented
            }
        }
    }
    /// Build a decompressor for this algorithm.
    /// Returns `None` when disabled or not implemented (note: no zstd
    /// decompressor here, only gzip and brotli).
    pub fn decompressor(&self, enabled: bool) -> Option<Box<dyn Encode + Send + Sync>> {
        if !enabled {
            None
        } else {
            match self {
                Self::Gzip => Some(Box::new(gzip::Decompressor::new())),
                Self::Brotli => Some(Box::new(brotli::Decompressor::new())),
                _ => None, // not implemented
            }
        }
    }
    /// Index into the per-algorithm setting arrays (relies on variant order).
    pub fn index(&self) -> usize {
        *self as usize
    }
}
// Map a content-coding token (ASCII case-insensitive) to an Algorithm.
impl From<&str> for Algorithm {
    fn from(s: &str) -> Self {
        use unicase::UniCase;
        let coding = UniCase::new(s);
        if coding == UniCase::ascii("gzip") {
            Algorithm::Gzip
        } else if coding == UniCase::ascii("br") {
            Algorithm::Brotli
        } else if coding == UniCase::ascii("zstd") {
            Algorithm::Zstd
        } else if s.is_empty() {
            // NOTE(review): only the empty string maps to Any here; a literal
            // "*" token falls through to Other even though the Any variant is
            // documented as "the *" — confirm whether "*" should map to Any.
            Algorithm::Any
        } else {
            Algorithm::Other
        }
    }
}
/// The transformation decided for a response body.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum Action {
    Noop, // do nothing, e.g. when the input is already gzip
    Compress(Algorithm),
    Decompress(Algorithm),
}
// parse Accept-Encoding header and put it to the list
//
// `list` receives the recognized algorithms in header order; unknown codings
// are dropped. An absent header leaves the list empty (any coding acceptable).
fn parse_accept_encoding(accept_encoding: Option<&http::HeaderValue>, list: &mut Vec<Algorithm>) {
    // https://www.rfc-editor.org/rfc/rfc9110#name-accept-encoding
    if let Some(ac) = accept_encoding {
        // fast path
        if ac.as_bytes() == b"gzip" {
            list.push(Algorithm::Gzip);
            return;
        }
        // properly parse AC header
        match sfv::Parser::parse_list(ac.as_bytes()) {
            Ok(parsed) => {
                for item in parsed {
                    if let sfv::ListEntry::Item(i) = item {
                        if let Some(s) = i.bare_item.as_token() {
                            // TODO: support q value
                            let algorithm = Algorithm::from(s);
                            // ignore algorithms that we don't understand
                            if algorithm != Algorithm::Other {
                                // reuse the already-parsed value instead of
                                // converting the token a second time
                                list.push(algorithm);
                            }
                        }
                    }
                }
            }
            Err(e) => {
                warn!("Failed to parse accept-encoding {ac:?}, {e}")
            }
        }
    } else {
        // "If no Accept-Encoding header, any content coding is acceptable"
        // keep the list empty
    }
}
#[test]
fn test_accept_encoding_req_header() {
    let mut header = RequestHeader::build("GET", b"/", None).unwrap();

    // no Accept-Encoding header at all: the list stays empty
    let mut parsed = Vec::new();
    parse_accept_encoding(header.headers.get(http::header::ACCEPT_ENCODING), &mut parsed);
    assert!(parsed.is_empty());

    // a single known coding
    header.insert_header("accept-encoding", "gzip").unwrap();
    let mut parsed = Vec::new();
    parse_accept_encoding(header.headers.get(http::header::ACCEPT_ENCODING), &mut parsed);
    assert_eq!(parsed[0], Algorithm::Gzip);

    // unknown codings are dropped; order of the rest is preserved
    header
        .insert_header("accept-encoding", "what, br, gzip")
        .unwrap();
    let mut parsed = Vec::new();
    parse_accept_encoding(header.headers.get(http::header::ACCEPT_ENCODING), &mut parsed);
    assert_eq!(parsed[0], Algorithm::Brotli);
    assert_eq!(parsed[1], Algorithm::Gzip);
}
// Whether the response we would produce varies on the Accept-Encoding header
// (used to decide if a Vary header must be added).
fn depends_on_accept_encoding(
    resp: &ResponseHeader,
    compress_enabled: bool,
    decompress_enabled: &[bool],
) -> bool {
    use http::header::CONTENT_ENCODING;
    // an encoded response may be decompressed depending on what AE allows
    if decompress_enabled.contains(&true) && resp.headers.get(CONTENT_ENCODING).is_some() {
        return true;
    }
    // a compressible plain response may be compressed depending on AE
    compress_enabled && compressible(resp)
}
#[test]
fn test_decide_on_accept_encoding() {
    let mut resp = ResponseHeader::build(200, None).unwrap();
    resp.insert_header("content-length", "50").unwrap();
    resp.insert_header("content-type", "text/html").unwrap();
    resp.insert_header("content-encoding", "gzip").unwrap();

    // decompression enabled + encoded response: depends on AE
    assert!(depends_on_accept_encoding(&resp, false, &[true]));
    // decompression disabled: does not depend
    assert!(!depends_on_accept_encoding(&resp, false, &[false]));

    // without content-encoding the decompress path is irrelevant
    resp.remove_header("content-encoding");
    assert!(!depends_on_accept_encoding(&resp, false, &[true]));
    // compression enabled + compressible response: depends on AE
    assert!(depends_on_accept_encoding(&resp, true, &[false]));
    // compression disabled: does not depend
    assert!(!depends_on_accept_encoding(&resp, false, &[false]));

    // compression enabled but the type is not compressible: does not depend
    resp.insert_header("content-type", "text/html+zip").unwrap();
    assert!(!depends_on_accept_encoding(&resp, true, &[false]));
}
// Inspect the response header and the client's accepted codings to decide
// whether the body should be compressed, decompressed, or left alone.
fn decide_action(resp: &ResponseHeader, accept_encoding: &[Algorithm]) -> Action {
    use http::header::CONTENT_ENCODING;

    // current coding of the response body, if any
    // https://www.rfc-editor.org/rfc/rfc9110#name-content-encoding
    let content_encoding = resp
        .headers
        .get(CONTENT_ENCODING)
        .map(|ce| match std::str::from_utf8(ce.as_bytes()) {
            Ok(ce_str) => Algorithm::from(ce_str),
            // not utf-8: treat as an unknown encoding so the body is left untouched
            Err(_) => Algorithm::Other,
        });

    match content_encoding {
        // downstream can accept this encoding, nothing to do
        Some(ce) if accept_encoding.contains(&ce) => Action::Noop,
        // always decompress because uncompressed is always acceptable
        // https://www.rfc-editor.org/rfc/rfc9110#field.accept-encoding
        // "If the representation has no content coding, then it is acceptable by default
        // unless specifically excluded..." TODO: check the exclude case
        // TODO: we could also transcode it to a preferred encoding, e.g. br->gzip
        Some(ce) => Action::Decompress(ce),
        None => {
            // the empty check must come first so indexing [0] below is safe
            if accept_encoding.is_empty()
                || !compressible(resp)
                || accept_encoding[0] == Algorithm::Any
            {
                Action::Noop
            } else {
                // try to compress with the first AC
                // TODO: support to configure preferred encoding
                Action::Compress(accept_encoding[0])
            }
        }
    }
}
#[test]
fn test_decide_action() {
    use Action::*;
    use Algorithm::*;

    // build a 200 response carrying the given headers
    fn resp_with(headers: &[(&'static str, &'static str)]) -> ResponseHeader {
        let mut resp = ResponseHeader::build(200, None).unwrap();
        for (name, value) in headers {
            resp.insert_header(*name, *value).unwrap();
        }
        resp
    }

    // no compression asked, no compression needed
    assert_eq!(decide_action(&resp_with(&[]), &[]), Noop);
    // already gzip, no compression needed
    assert_eq!(
        decide_action(
            &resp_with(&[("content-type", "text/html"), ("content-encoding", "gzip")]),
            &[Gzip]
        ),
        Noop
    );
    // content-encoding matching is case-insensitive
    assert_eq!(
        decide_action(
            &resp_with(&[("content-encoding", "GzIp"), ("content-type", "text/html")]),
            &[Gzip]
        ),
        Noop
    );
    // accepted content-type and large enough: will compress
    assert_eq!(
        decide_action(
            &resp_with(&[("content-length", "20"), ("content-type", "text/html")]),
            &[Gzip]
        ),
        Compress(Gzip)
    );
    // body too small to be worth compressing
    assert_eq!(
        decide_action(
            &resp_with(&[("content-length", "19"), ("content-type", "text/html")]),
            &[Gzip]
        ),
        Noop
    );
    // MIME type indicates it is already compressed
    assert_eq!(
        decide_action(
            &resp_with(&[("content-length", "20"), ("content-type", "text/html+zip")]),
            &[Gzip]
        ),
        Noop
    );
    // unsupported MIME type
    assert_eq!(
        decide_action(
            &resp_with(&[("content-length", "20"), ("content-type", "image/jpg")]),
            &[Gzip]
        ),
        Noop
    );
    // compressed but client sent no AE: decompress
    assert_eq!(
        decide_action(&resp_with(&[("content-encoding", "gzip")]), &[]),
        Decompress(Gzip)
    );
    // compressed with a coding the client doesn't accept: decompress
    assert_eq!(
        decide_action(&resp_with(&[("content-encoding", "gzip")]), &[Brotli]),
        Decompress(Gzip)
    );
    // less preferred but acceptable: leave it alone
    assert_eq!(
        decide_action(&resp_with(&[("content-encoding", "gzip")]), &[Brotli, Gzip]),
        Noop
    );
}
use once_cell::sync::Lazy;
use regex::Regex;
// Allow text, application, font, a few image/ MIME types and binary/octet-stream
// TODO: fine tune this list
static MIME_CHECK: Lazy<Regex> = Lazy::new(|| {
    // NOTE: the IANA-registered ICO type is `image/vnd.microsoft.icon`;
    // the previous pattern `nd\.microsoft\.icon` could never match it
    // because the alternation starts right after `image/`.
    Regex::new(r"^(?:text/|application/|font/|image/(?:x-icon|svg\+xml|vnd\.microsoft\.icon)|binary/octet-stream)")
        .unwrap()
});
// Whether the response looks worth compressing, based on its size and MIME type.
fn compressible(resp: &ResponseHeader) -> bool {
    // arbitrary size limit, things to consider
    // 1. too short body may have little redundancy to compress
    // 2. gzip header and footer overhead
    // 3. latency is the same as long as data fits in a TCP congestion window regardless of size
    const MIN_COMPRESS_LEN: usize = 20;

    // reject responses that declare a body too small to compress
    if let Some(cl) = resp.headers.get(http::header::CONTENT_LENGTH) {
        let declared_len = std::str::from_utf8(cl.as_bytes())
            .ok()
            .and_then(|v| v.parse::<usize>().ok());
        if let Some(cl_num) = declared_len {
            if cl_num < MIN_COMPRESS_LEN {
                return false;
            }
        }
    }

    // no Content-Length or large enough; the content-type decides
    let Some(ct) = resp.headers.get(http::header::CONTENT_TYPE) else {
        return false; // don't compress empty content-type
    };
    let Ok(ct_str) = std::str::from_utf8(ct.as_bytes()) else {
        return false; // invalid CT header, don't compress
    };
    // heuristic: a mime type containing "zip" is assumed already compressed;
    // otherwise consult the allow list
    !ct_str.contains("zip") && MIME_CHECK.find(ct_str).is_some()
}
// Append `value` to the response's Vary header unless it is already covered
// (either listed explicitly, case-insensitively, or by a `*` wildcard).
fn add_vary_header(resp: &mut ResponseHeader, value: &http::header::HeaderName) {
    use http::header::{HeaderValue, VARY};

    // strip ASCII whitespace from both ends of a byte slice
    // (equivalent to the unstable slice::trim_ascii)
    fn trim_ascii_ws(v: &[u8]) -> &[u8] {
        let Some(start) = v.iter().position(|b| !b.is_ascii_whitespace()) else {
            return &[]; // all whitespace
        };
        // unwrap is safe: at least one non-whitespace byte exists
        let end = v.iter().rposition(|b| !b.is_ascii_whitespace()).unwrap();
        &v[start..=end]
    }

    let already_present = resp.headers.get_all(VARY).iter().any(|existing| {
        existing
            .as_bytes()
            .split(|b| *b == b',')
            .map(trim_ascii_ws)
            .any(|v| v == b"*" || v.eq_ignore_ascii_case(value.as_ref()))
    });

    if !already_present {
        resp.append_header(&VARY, HeaderValue::from_name(value.clone()))
            .unwrap();
    }
}
#[test]
fn test_add_vary_header() {
    // collect the current Vary header values as strings
    fn vary_values(header: &ResponseHeader) -> Vec<&str> {
        header
            .headers
            .get_all("Vary")
            .into_iter()
            .map(|v| v.to_str().unwrap())
            .collect()
    }

    // no existing Vary: the header is added
    let mut header = ResponseHeader::build(200, None).unwrap();
    add_vary_header(&mut header, &http::header::ACCEPT_ENCODING);
    assert_eq!(vary_values(&header), vec!["accept-encoding"]);

    // an unrelated Vary exists: ours is appended as a second header
    let mut header = ResponseHeader::build(200, None).unwrap();
    header.insert_header("Vary", "Accept-Language").unwrap();
    add_vary_header(&mut header, &http::header::ACCEPT_ENCODING);
    assert_eq!(
        vary_values(&header),
        vec!["Accept-Language", "accept-encoding"]
    );

    // already listed (case-insensitively): left untouched
    let mut header = ResponseHeader::build(200, None).unwrap();
    header
        .insert_header("Vary", "Accept-Language, Accept-Encoding")
        .unwrap();
    add_vary_header(&mut header, &http::header::ACCEPT_ENCODING);
    assert_eq!(
        vary_values(&header),
        vec!["Accept-Language, Accept-Encoding"]
    );

    // wildcard covers everything: left untouched
    let mut header = ResponseHeader::build(200, None).unwrap();
    header.insert_header("Vary", "*").unwrap();
    add_vary_header(&mut header, &http::header::ACCEPT_ENCODING);
    assert_eq!(vary_values(&header), vec!["*"]);
}
// Rewrite the response header to match the chosen (de)compression action:
// update Content-Encoding, switch to streamed delivery, and fix up the ETag.
fn adjust_response_header(resp: &mut ResponseHeader, action: &Action, preserve_etag: bool) {
    use http::header::{
        HeaderValue, ACCEPT_RANGES, CONTENT_ENCODING, CONTENT_LENGTH, ETAG, TRANSFER_ENCODING,
    };

    // switch the response to streamed (chunked) delivery
    fn to_chunked_encoding(resp: &mut ResponseHeader) {
        // transcoding is streamed, so the final length is not known ahead
        resp.remove_header(&CONTENT_LENGTH);
        // byte ranges no longer line up with the transcoded body
        resp.remove_header(&ACCEPT_RANGES);
        // we stream body now TODO: chunked is for h1 only
        resp.insert_header(&TRANSFER_ENCODING, HeaderValue::from_static("chunked"))
            .unwrap();
    }

    fn weaken_or_clear_etag(resp: &mut ResponseHeader) {
        // RFC9110: https://datatracker.ietf.org/doc/html/rfc9110#section-8.8.1
        // "a validator is weak if it is shared by two or more representations
        // of a given resource at the same time, unless those representations
        // have identical representation data"
        // Follow nginx gzip filter's example when changing content encoding:
        // - if the ETag is not a valid strong ETag, clear it (i.e. does not start with `"`)
        // - else, weaken it
        let Some(etag) = resp.headers.get(&ETAG) else {
            return;
        };
        let etag_bytes = etag.as_bytes();
        if etag_bytes.starts_with(b"W/") {
            // already weak, nothing to do
            return;
        }
        if etag_bytes.starts_with(b"\"") {
            // strong ETag: weaken since we are changing the byte representation
            let weakened_etag = HeaderValue::from_bytes(&[b"W/", etag_bytes].concat())
                .expect("valid header value prefixed with \"W/\" should remain valid");
            resp.insert_header(&ETAG, weakened_etag)
                .expect("can insert weakened etag when etag was already valid");
        } else {
            // not a valid strong ETag, just clear it
            // https://datatracker.ietf.org/doc/html/rfc9110#section-8.8.3
            // says the opaque-tag section needs to be a quoted string
            resp.remove_header(&ETAG);
        }
    }

    // first adjust Content-Encoding per the action, then apply the shared
    // streaming/ETag fix-ups (Noop leaves the header untouched)
    match action {
        Action::Noop => return,
        Action::Decompress(_) => {
            resp.remove_header(&CONTENT_ENCODING);
        }
        Action::Compress(a) => {
            resp.insert_header(&CONTENT_ENCODING, HeaderValue::from_static(a.as_str()))
                .unwrap();
        }
    }
    to_chunked_encoding(resp);
    if !preserve_etag {
        weaken_or_clear_etag(resp);
    }
}
#[test]
fn test_adjust_response_header() {
use Action::*;
use Algorithm::*;
// noop
let mut header = ResponseHeader::build(200, None).unwrap();
header.insert_header("content-length", "20").unwrap();
header.insert_header("content-encoding", "gzip").unwrap();
header.insert_header("accept-ranges", "bytes").unwrap();
header.insert_header("etag", "\"abc123\"").unwrap();
adjust_response_header(&mut header, &Noop, false);
assert_eq!(
header.headers.get("content-encoding").unwrap().as_bytes(),
b"gzip"
);
assert_eq!(
header.headers.get("content-length").unwrap().as_bytes(),
b"20"
);
assert_eq!(
header.headers.get("etag").unwrap().as_bytes(),
b"\"abc123\""
);
assert!(header.headers.get("transfer-encoding").is_none());
// decompress gzip
let mut header = ResponseHeader::build(200, None).unwrap();
header.insert_header("content-length", "20").unwrap();
header.insert_header("content-encoding", "gzip").unwrap();
header.insert_header("accept-ranges", "bytes").unwrap();
header.insert_header("etag", "\"abc123\"").unwrap();
adjust_response_header(&mut header, &Decompress(Gzip), false);
assert!(header.headers.get("content-encoding").is_none());
assert!(header.headers.get("content-length").is_none());
assert_eq!(
header.headers.get("transfer-encoding").unwrap().as_bytes(),
b"chunked"
);
assert!(header.headers.get("accept-ranges").is_none());
assert_eq!(
header.headers.get("etag").unwrap().as_bytes(),
b"W/\"abc123\""
);
// when preserve_etag on, strong etag is kept
header.insert_header("etag", "\"abc123\"").unwrap();
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | true |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/v2/client.rs | pingora-core/src/protocols/http/v2/client.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! HTTP/2 client session and connection
// TODO: this module needs a refactor
use bytes::Bytes;
use futures::FutureExt;
use h2::client::{self, ResponseFuture, SendRequest};
use h2::{Reason, RecvStream, SendStream};
use http::HeaderMap;
use log::{debug, error, warn};
use pingora_error::{Error, ErrorType, ErrorType::*, OrErr, Result, RetryType};
use pingora_http::{RequestHeader, ResponseHeader};
use pingora_timeout::timeout;
use std::io::ErrorKind;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::task::{ready, Context, Poll};
use std::time::Duration;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::sync::watch;
use crate::connectors::http::v2::ConnectionRef;
use crate::protocols::{Digest, SocketAddr, UniqueIDType};
/// Error type set on stream errors caused by a keepalive ping timeout.
pub const PING_TIMEDOUT: ErrorType = ErrorType::new("PingTimedout");
/// An HTTP/2 client stream (one request/response exchange) on a shared connection.
pub struct Http2Session {
    // handle to open the request on the underlying h2 connection
    send_req: SendRequest<Bytes>,
    // request body writer; populated once the request header is sent
    send_body: Option<SendStream<Bytes>>,
    // pending response; consumed when the response header is read
    resp_fut: Option<ResponseFuture>,
    // the request header after it was sent, kept for inspection
    req_sent: Option<Box<RequestHeader>>,
    // the response header once read
    response_header: Option<ResponseHeader>,
    // response body reader; populated together with the response header
    response_body_reader: Option<RecvStream>,
    /// The read timeout, which will be applied to both reading the header and the body.
    /// The timeout is reset on every read. This is not a timeout on the overall duration of the
    /// response.
    pub read_timeout: Option<Duration>,
    /// The write timeout which will be applied to writing request body.
    /// The timeout is reset on every write. This is not a timeout on the overall duration of the
    /// request.
    pub write_timeout: Option<Duration>,
    pub conn: ConnectionRef,
    // Indicate that whether a END_STREAM is already sent
    ended: bool,
    // Total DATA payload bytes received from upstream response
    body_recv: usize,
}
impl Drop for Http2Session {
    fn drop(&mut self) {
        // give the stream slot back to the connection's accounting
        self.conn.release_stream();
    }
}
impl Http2Session {
    /// Create a new [Http2Session] for one stream on the given connection.
    pub(crate) fn new(send_req: SendRequest<Bytes>, conn: ConnectionRef) -> Self {
        Http2Session {
            send_req,
            send_body: None,
            resp_fut: None,
            req_sent: None,
            response_header: None,
            response_body_reader: None,
            read_timeout: None,
            write_timeout: None,
            conn,
            ended: false,
            body_recv: 0,
        }
    }

    /// Make the request valid for h2: force the HTTP/2 version and, when the
    /// URI has no authority, populate `:authority` from the Host header.
    fn sanitize_request_header(req: &mut RequestHeader) -> Result<()> {
        req.set_version(http::Version::HTTP_2);
        if req.uri.authority().is_some() {
            return Ok(());
        }
        // use host header to populate :authority field
        let Some(authority) = req.headers.get(http::header::HOST).map(|v| v.as_bytes()) else {
            return Error::e_explain(InvalidHTTPHeader, "no authority header for h2");
        };
        let uri = http::uri::Builder::new()
            .scheme("https") // fixed for now
            .authority(authority)
            .path_and_query(req.uri.path_and_query().as_ref().unwrap().as_str())
            .build();
        match uri {
            Ok(uri) => {
                req.set_uri(uri);
                Ok(())
            }
            Err(_) => Error::e_explain(
                InvalidHTTPHeader,
                format!("invalid authority from host {authority:?}"),
            ),
        }
    }

    /// Write the request header to the server
    pub fn write_request_header(&mut self, mut req: Box<RequestHeader>, end: bool) -> Result<()> {
        if self.req_sent.is_some() {
            // cannot send again, TODO: warn
            return Ok(());
        }
        Self::sanitize_request_header(&mut req)?;
        let parts = req.as_owned_parts();
        let request = http::Request::from_parts(parts, ());
        // There is no write timeout for h2 because the actual write happens async from this fn
        let (resp_fut, send_body) = self
            .send_req
            .send_request(request, end)
            .or_err(H2Error, "while sending request")
            .map_err(|e| self.handle_err(e))?;
        self.req_sent = Some(req);
        self.send_body = Some(send_body);
        self.resp_fut = Some(resp_fut);
        // `end` means the request carried END_STREAM; no body will follow
        self.ended = self.ended || end;
        Ok(())
    }

    /// Write a request body chunk
    pub async fn write_request_body(&mut self, data: Bytes, end: bool) -> Result<()> {
        if self.ended {
            warn!("Try to write request body after end of stream, dropping the extra data");
            return Ok(());
        }
        let body_writer = self
            .send_body
            .as_mut()
            .expect("Try to write request body before sending request header");

        super::write_body(body_writer, data, end, self.write_timeout)
            .await
            .map_err(|e| self.handle_err(e))?;
        self.ended = self.ended || end;
        Ok(())
    }

    /// Signal that the request body has ended
    pub fn finish_request_body(&mut self) -> Result<()> {
        if self.ended {
            return Ok(());
        }

        let body_writer = self
            .send_body
            .as_mut()
            .expect("Try to finish request stream before sending request header");

        // Just send an empty data frame with end of stream set
        body_writer
            .send_data("".into(), true)
            .or_err(WriteError, "while writing empty h2 request body")
            .map_err(|e| self.handle_err(e))?;
        self.ended = true;
        Ok(())
    }

    /// Read the response header
    pub async fn read_response_header(&mut self) -> Result<()> {
        // TODO: how to read 1xx headers?
        // https://github.com/hyperium/h2/issues/167

        if self.response_header.is_some() {
            panic!("H2 response header is already read")
        }

        let Some(resp_fut) = self.resp_fut.take() else {
            panic!("Try to take response header, but it is already taken")
        };

        let res = match self.read_timeout {
            Some(t) => timeout(t, resp_fut)
                .await
                .map_err(|_| Error::explain(ReadTimedout, "while reading h2 response header"))
                .map_err(|e| self.handle_err(e))?,
            None => resp_fut.await,
        };
        let (resp, body_reader) = res.map_err(handle_read_header_error)?.into_parts();
        self.response_header = Some(resp.into());
        self.response_body_reader = Some(body_reader);

        Ok(())
    }

    #[doc(hidden)]
    pub fn poll_read_response_header(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), h2::Error>> {
        if self.response_header.is_some() {
            panic!("H2 response header is already read")
        }

        let Some(mut resp_fut) = self.resp_fut.take() else {
            panic!("Try to take response header, but it is already taken")
        };

        let res = match resp_fut.poll_unpin(cx) {
            Poll::Ready(Ok(res)) => res,
            Poll::Ready(Err(err)) => return Poll::Ready(Err(err)),
            Poll::Pending => {
                // put the future back so the next poll can resume it
                self.resp_fut = Some(resp_fut);
                return Poll::Pending;
            }
        };

        let (resp, body_reader) = res.into_parts();
        self.response_header = Some(resp.into());
        self.response_body_reader = Some(body_reader);
        Poll::Ready(Ok(()))
    }

    /// Read the response body
    ///
    /// `None` means, no more body to read
    pub async fn read_response_body(&mut self) -> Result<Option<Bytes>> {
        let Some(body_reader) = self.response_body_reader.as_mut() else {
            // req is not sent or response is already read
            // TODO: warn
            return Ok(None);
        };
        let fut = body_reader.data();
        let res = match self.read_timeout {
            Some(t) => timeout(t, fut)
                .await
                .map_err(|_| Error::explain(ReadTimedout, "while reading h2 response body"))?,
            None => fut.await,
        };
        let body = res
            .transpose()
            .or_err(ReadError, "while read h2 response body")
            .map_err(|mut e| {
                // cannot use handle_err() because of borrow checker
                if self.conn.ping_timedout() {
                    e.etype = PING_TIMEDOUT;
                }
                e
            })?;
        if let Some(data) = body.as_ref() {
            // return flow-control credit so the peer can keep sending
            body_reader
                .flow_control()
                .release_capacity(data.len())
                .or_err(ReadError, "while releasing h2 response body capacity")?;
            self.body_recv = self.body_recv.saturating_add(data.len());
        }
        Ok(body)
    }

    #[doc(hidden)]
    pub fn poll_read_response_body(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Bytes, h2::Error>>> {
        let Some(body_reader) = self.response_body_reader.as_mut() else {
            // req is not sent or response is already read
            // TODO: warn
            return Poll::Ready(None);
        };
        let data = match ready!(body_reader.poll_data(cx)).transpose() {
            Ok(data) => data,
            Err(err) => return Poll::Ready(Some(Err(err))),
        };
        if let Some(data) = data {
            // return flow-control credit so the peer can keep sending
            body_reader.flow_control().release_capacity(data.len())?;
            return Poll::Ready(Some(Ok(data)));
        }
        Poll::Ready(None)
    }

    /// Whether the response has ended
    pub fn response_finished(&self) -> bool {
        // if response_body_reader doesn't exist, the response is not even read yet
        self.response_body_reader
            .as_ref()
            .is_some_and(|reader| reader.is_end_stream())
    }

    /// Check whether stream finished with error.
    /// Like `response_finished`, but also attempts to poll the h2 stream for errors that may have
    /// caused the stream to terminate, and returns them as `H2Error`s.
    pub fn check_response_end_or_error(&mut self) -> Result<bool> {
        let Some(reader) = self.response_body_reader.as_mut() else {
            // response is not even read
            return Ok(false);
        };

        if !reader.is_end_stream() {
            return Ok(false);
        }

        // https://github.com/hyperium/h2/issues/806
        // The fundamental issue is that h2::RecvStream may return `is_end_stream` true
        // when the stream was naturally closed via END_STREAM /OR/ if there was an error
        // while reading data frames that forced the closure.
        // The h2 API as-is makes it difficult to determine which situation is occurring.
        //
        // `poll_data` should be returning None after `is_end_stream`, if the stream
        // is truly expecting no more data to be sent.
        // https://docs.rs/h2/latest/h2/struct.RecvStream.html#method.is_end_stream
        // So poll the data once to check this condition. If an error is returned, that indicates
        // that the stream closed due to an error e.g. h2 protocol error.
        //
        // tokio::task::unconstrained because now_or_never may yield None when the future is ready
        match tokio::task::unconstrained(reader.data()).now_or_never() {
            Some(None) => Ok(true),
            Some(Some(Ok(_))) => Error::e_explain(H2Error, "unexpected data after end stream"),
            Some(Some(Err(e))) => Error::e_because(H2Error, "while checking end stream", e),
            None => {
                // RecvStream data() should be ready to poll after the stream ends,
                // this indicates an unexpected change in the h2 crate
                panic!("data() not ready after end stream")
            }
        }
    }

    /// Read the optional trailer headers
    pub async fn read_trailers(&mut self) -> Result<Option<HeaderMap>> {
        let Some(reader) = self.response_body_reader.as_mut() else {
            // response is not even read
            // TODO: warn
            return Ok(None);
        };
        let fut = reader.trailers();

        let res = match self.read_timeout {
            Some(t) => timeout(t, fut)
                .await
                .map_err(|_| Error::explain(ReadTimedout, "while reading h2 trailer"))
                .map_err(|e| self.handle_err(e))?,
            None => fut.await,
        };
        match res {
            Ok(t) => Ok(t),
            Err(e) => {
                // GOAWAY with no error: this is graceful shutdown, continue as if no trailer
                // RESET_STREAM with no error: https://datatracker.ietf.org/doc/html/rfc9113#section-8.1:
                // this is to signal client to stop uploading request without breaking the response.
                // TODO: should actually stop uploading
                // TODO: should we try reading again?
                // TODO: handle this when reading headers and body as well
                // https://github.com/hyperium/h2/issues/741
                if (e.is_go_away() || e.is_reset())
                    && e.is_remote()
                    && e.reason() == Some(Reason::NO_ERROR)
                {
                    Ok(None)
                } else {
                    Err(e)
                }
            }
        }
        .or_err(ReadError, "while reading h2 trailers")
    }

    /// The request header if it is already sent
    pub fn request_header(&self) -> Option<&RequestHeader> {
        self.req_sent.as_deref()
    }

    /// The response header if it is already read
    pub fn response_header(&self) -> Option<&ResponseHeader> {
        self.response_header.as_ref()
    }

    /// Give up the http session abruptly.
    pub fn shutdown(&mut self) {
        // only reset if the exchange is still in flight
        if !self.ended || !self.response_finished() {
            if let Some(send_body) = self.send_body.as_mut() {
                send_body.send_reset(h2::Reason::INTERNAL_ERROR)
            }
        }
    }

    /// Drop everything in this h2 stream. Return the connection ref.
    /// After this function the underlying h2 connection should already notify the closure of this
    /// stream so that another stream can be created if needed.
    pub(crate) fn conn(&self) -> ConnectionRef {
        self.conn.clone()
    }

    /// Whether ping timeout occurred. After a ping timeout, the h2 connection will be terminated.
    /// Ongoing h2 streams will receive a stream/connection error. The streams should check this
    /// flag to tell whether the error is triggered by the timeout.
    pub(crate) fn ping_timedout(&self) -> bool {
        self.conn.ping_timedout()
    }

    /// Return the [Digest] of the connection
    ///
    /// For reused connection, the timing in the digest will reflect its initial handshakes
    /// The caller should check if the connection is reused to avoid misuse the timing field.
    pub fn digest(&self) -> Option<&Digest> {
        Some(self.conn.digest())
    }

    /// Return a mutable [Digest] reference for the connection
    ///
    /// Will return `None` if multiple H2 streams are open.
    pub fn digest_mut(&mut self) -> Option<&mut Digest> {
        self.conn.digest_mut()
    }

    /// Return the server (peer) address recorded in the connection digest.
    pub fn server_addr(&self) -> Option<&SocketAddr> {
        self.conn
            .digest()
            .socket_digest
            .as_ref()
            .map(|d| d.peer_addr())?
    }

    /// Return the client (local) address recorded in the connection digest.
    pub fn client_addr(&self) -> Option<&SocketAddr> {
        self.conn
            .digest()
            .socket_digest
            .as_ref()
            .map(|d| d.local_addr())?
    }

    /// the FD of the underlying connection
    pub fn fd(&self) -> UniqueIDType {
        self.conn.id()
    }

    /// Upstream response body bytes received (HTTP/2 DATA payload; excludes headers/framing).
    pub fn body_bytes_received(&self) -> usize {
        self.body_recv
    }

    /// take the body sender to another task to perform duplex read and write
    pub fn take_request_body_writer(&mut self) -> Option<SendStream<Bytes>> {
        self.send_body.take()
    }

    /// Annotate an error with session-level context: mark ping timeouts and
    /// flag graceful remote GOAWAYs as retryable when no response was read yet.
    fn handle_err(&self, mut e: Box<Error>) -> Box<Error> {
        if self.ping_timedout() {
            e.etype = PING_TIMEDOUT;
        }

        // is_go_away: retry via another connection, this connection is being teardown
        // should retry
        if self.response_header.is_none() {
            if let Some(err) = e.root_cause().downcast_ref::<h2::Error>() {
                if err.is_go_away()
                    && err.is_remote()
                    && (err.reason() == Some(h2::Reason::NO_ERROR))
                {
                    e.retry = true.into();
                }
            }
        }
        e
    }
}
/* helper functions */
/* Types of errors during h2 header read
1. peer requests to downgrade to h1, mostly IIS server for NTLM: we will downgrade and retry
2. peer sends invalid h2 frames, usually sending h1 only header: we will downgrade and retry
3. peer sends GO_AWAY(NO_ERROR) connection is being shut down: we will retry
4. peer IO error on reused conn, usually firewall kills old conn: we will retry
5. peer sends REFUSED_STREAM on RST_STREAM, this is safe to retry
6. All other errors will terminate the request
*/
fn handle_read_header_error(e: h2::Error) -> Box<Error> {
if e.is_remote() && (e.reason() == Some(h2::Reason::HTTP_1_1_REQUIRED)) {
let mut err = Error::because(H2Downgrade, "while reading h2 header", e);
err.retry = true.into();
err
} else if e.is_go_away() && e.is_library() && (e.reason() == Some(h2::Reason::PROTOCOL_ERROR)) {
// remote send invalid H2 responses
let mut err = Error::because(InvalidH2, "while reading h2 header", e);
err.retry = true.into();
err
} else if e.is_go_away() && e.is_remote() && (e.reason() == Some(h2::Reason::NO_ERROR)) {
// is_go_away: retry via another connection, this connection is being teardown
let mut err = Error::because(H2Error, "while reading h2 header", e);
err.retry = true.into();
err
} else if e.is_reset() && e.is_remote() && (e.reason() == Some(h2::Reason::REFUSED_STREAM)) {
// The REFUSED_STREAM error code can be included in a RST_STREAM frame to indicate
// that the stream is being closed prior to any processing having occurred.
// Any request that was sent on the reset stream can be safely retried.
// https://datatracker.ietf.org/doc/html/rfc9113#section-8.7
let mut err = Error::because(H2Error, "while reading h2 header", e);
err.retry = true.into();
err
} else if e.is_io() {
// is_io: typical if a previously reused connection silently drops it
// only retry if the connection is reused
// safety: e.get_io() will always succeed if e.is_io() is true
let io_err = e.get_io().expect("checked is io");
// for h2 hyperium raw_os_error() will be None unless this is a new connection
// where we handshake() and from_io() is called, check ErrorKind explicitly with true_io_error
let true_io_error = io_err.raw_os_error().is_some()
|| matches!(
io_err.kind(),
ErrorKind::ConnectionReset | ErrorKind::TimedOut | ErrorKind::BrokenPipe
);
let mut err = Error::because(ReadError, "while reading h2 header", e);
if true_io_error {
err.retry = RetryType::ReusedOnly;
} // else could be TLS error, which is unsafe to retry
err
} else {
Error::because(H2Error, "while reading h2 header", e)
}
}
use tokio::sync::oneshot;
/// Drive the h2 connection `c` to completion, optionally running keepalive pings.
///
/// When `ping_interval` is set, a background ping task is spawned; if a ping
/// times out or errors, `ping_timeout_occurred` is flagged and the connection
/// future is dropped, tearing down the connection. `closed` is notified when
/// this function returns, in every case.
pub async fn drive_connection<S>(
    mut c: client::Connection<S>,
    id: UniqueIDType,
    closed: watch::Sender<bool>,
    ping_interval: Option<Duration>,
    ping_timeout_occurred: Arc<AtomicBool>,
) where
    S: AsyncRead + AsyncWrite + Send + Unpin,
{
    let interval = ping_interval.unwrap_or(Duration::ZERO);
    if !interval.is_zero() {
        // for ping to inform this fn to drop the connection
        let (tx, rx) = oneshot::channel::<()>();
        // for this fn to inform ping to give up when it is already dropped
        let dropped = Arc::new(AtomicBool::new(false));
        let dropped2 = dropped.clone();

        if let Some(ping_pong) = c.ping_pong() {
            pingora_runtime::current_handle().spawn(async move {
                do_ping_pong(ping_pong, interval, tx, dropped2, id).await;
            });
        } else {
            warn!("Cannot get ping-pong handler from h2 connection");
        }

        // race the connection against the ping failure signal; whichever
        // completes first ends the connection
        tokio::select! {
            r = c => match r {
                Ok(_) => debug!("H2 connection finished fd: {id}"),
                Err(e) => debug!("H2 connection fd: {id} errored: {e:?}"),
            },
            r = rx => match r {
                Ok(_) => {
                    ping_timeout_occurred.store(true, Ordering::Relaxed);
                    warn!("H2 connection Ping timeout/Error fd: {id}, closing conn");
                },
                Err(e) => warn!("H2 connection Ping Rx error {e:?}"),
            },
        };
        // tell the ping task to stop; it may still be sleeping on its interval
        dropped.store(true, Ordering::Relaxed);
    } else {
        // no keepalive configured; just await the connection
        match c.await {
            Ok(_) => debug!("H2 connection finished fd: {id}"),
            Err(e) => debug!("H2 connection fd: {id} errored: {e:?}"),
        }
    }
    // notify watchers (e.g. the connection pool) that this connection is gone
    let _ = closed.send(true);
}
// how long to wait for a PING ack before declaring the connection dead
const PING_TIMEOUT: Duration = Duration::from_secs(5);
async fn do_ping_pong(
mut ping_pong: h2::PingPong,
interval: Duration,
tx: oneshot::Sender<()>,
dropped: Arc<AtomicBool>,
id: UniqueIDType,
) {
// delay before sending the first ping, no need to race with the first request
tokio::time::sleep(interval).await;
loop {
if dropped.load(Ordering::Relaxed) {
break;
}
let ping_fut = ping_pong.ping(h2::Ping::opaque());
debug!("H2 fd: {id} ping sent");
match tokio::time::timeout(PING_TIMEOUT, ping_fut).await {
Err(_) => {
error!("H2 fd: {id} ping timeout");
let _ = tx.send(());
break;
}
Ok(r) => match r {
Ok(_) => {
debug!("H2 fd: {} pong received", id);
tokio::time::sleep(interval).await;
}
Err(e) => {
if dropped.load(Ordering::Relaxed) {
// drive_connection() exits first, no need to error again
break;
}
error!("H2 fd: {id} ping error: {e}");
let _ = tx.send(());
break;
}
},
}
}
}
#[cfg(test)]
mod tests_h2 {
    use super::*;
    use bytes::Bytes;
    use http::{Response, StatusCode};
    use tokio::io::duplex;

    // Verify that response body byte accounting sums the payload across
    // multiple DATA frames ("a" + "bc" == 3 bytes), both from the reader's
    // perspective and via body_bytes_received().
    #[tokio::test]
    async fn h2_body_bytes_received_multi_frames() {
        let (client_io, server_io) = duplex(65536);
        // Server: respond with two DATA frames "a" and "bc"
        tokio::spawn(async move {
            let mut conn = h2::server::handshake(server_io).await.unwrap();
            if let Some(result) = conn.accept().await {
                let (req, mut send_resp) = result.unwrap();
                assert_eq!(req.method(), http::Method::GET);
                let resp = Response::builder().status(StatusCode::OK).body(()).unwrap();
                let mut send_stream = send_resp.send_response(resp, false).unwrap();
                send_stream.send_data(Bytes::from("a"), false).unwrap();
                // second frame carries end_stream
                send_stream.send_data(Bytes::from("bc"), true).unwrap();
                // Signal graceful shutdown so the accept loop can exit after the client finishes
                conn.graceful_shutdown();
            }
            // Drive the server connection until the client closes
            while let Some(_res) = conn.accept().await {}
        });
        // Client: build Http2Session and read response
        let (send_req, connection) = h2::client::handshake(client_io).await.unwrap();
        let (closed_tx, closed_rx) = tokio::sync::watch::channel(false);
        let ping_timeout = Arc::new(AtomicBool::new(false));
        // Drive the client connection in the background; signal `closed` when done.
        tokio::spawn(async move {
            let _ = connection.await;
            let _ = closed_tx.send(true);
        });
        let digest = Digest::default();
        let conn_ref = crate::connectors::http::v2::ConnectionRef::new(
            send_req.clone(),
            closed_rx,
            ping_timeout,
            0,
            1,
            digest,
        );
        let mut h2s = Http2Session::new(send_req, conn_ref);
        // minimal request
        let mut req = RequestHeader::build("GET", b"/", None).unwrap();
        req.insert_header(http::header::HOST, "example.com")
            .unwrap();
        h2s.write_request_header(Box::new(req), true).unwrap();
        h2s.read_response_header().await.unwrap();
        // Drain the body, counting application bytes as we go.
        let mut total = 0;
        while let Some(chunk) = h2s.read_response_body().await.unwrap() {
            total += chunk.len();
        }
        assert_eq!(total, 3);
        assert_eq!(h2s.body_bytes_received(), 3);
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/v2/mod.rs | pingora-core/src/protocols/http/v2/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! HTTP/2 implementation
use std::time::Duration;
use crate::{Error, ErrorType::*, OrErr, Result};
use pingora_timeout::timeout;
use bytes::Bytes;
use h2::SendStream;
pub mod client;
pub mod server;
async fn reserve_and_send(
writer: &mut SendStream<Bytes>,
remaining: &mut Bytes,
end: bool,
) -> Result<()> {
// reserve remaining bytes then wait
writer.reserve_capacity(remaining.len());
let res = std::future::poll_fn(|cx| writer.poll_capacity(cx)).await;
match res {
None => Error::e_explain(H2Error, "cannot reserve capacity"),
Some(ready) => {
let n = ready.or_err(H2Error, "while waiting for capacity")?;
let remaining_size = remaining.len();
let data_to_send = remaining.split_to(std::cmp::min(remaining_size, n));
writer
.send_data(data_to_send, remaining.is_empty() && end)
.or_err(WriteError, "while writing h2 request body")?;
Ok(())
}
}
}
/// A helper function to write the body of h2 streams.
///
/// Repeatedly reserves capacity and sends chunks until `data` is fully
/// written. When `write_timeout` is set, each capacity wait + send is bounded
/// by that timeout (the timer resets per chunk, not for the whole body).
pub async fn write_body(
    writer: &mut SendStream<Bytes>,
    data: Bytes,
    end: bool,
    write_timeout: Option<Duration>,
) -> Result<()> {
    let mut remaining = data;
    // Zero capacity cannot be polled for, so send an empty body directly.
    if remaining.is_empty() {
        writer
            .send_data(remaining, end)
            .or_err(WriteError, "while writing h2 request body")?;
        return Ok(());
    }
    while !remaining.is_empty() {
        if let Some(t) = write_timeout {
            match timeout(t, reserve_and_send(writer, &mut remaining, end)).await {
                Ok(res) => res?,
                Err(_) => Error::e_explain(
                    WriteTimedout,
                    format!("while writing h2 request body, timeout: {t:?}"),
                )?,
            }
        } else {
            reserve_and_send(writer, &mut remaining, end).await?;
        }
    }
    Ok(())
}
#[cfg(test)]
mod test {
    use std::{sync::Arc, time::Duration};

    use bytes::Bytes;
    use futures::SinkExt;
    use h2::frame::*;
    use http::{HeaderMap, Method, Uri};
    use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt, DuplexStream};
    use tokio_stream::StreamExt;

    use pingora_http::{RequestHeader, ResponseHeader};
    use pingora_timeout::sleep;

    use crate::protocols::{
        http::v2::server::{handshake, HttpSession},
        Digest,
    };

    // The peer advertises a 1-byte initial window and delays the WINDOW_UPDATE
    // frame past the configured write timeout, so the client blocks waiting
    // for capacity and must fail with WriteTimedout.
    #[tokio::test]
    async fn test_client_write_timeout() {
        let mut handles = vec![];
        let (client, mut server) = duplex(65536);
        // Client
        handles.push(tokio::spawn(async move {
            let conn = crate::connectors::http::v2::handshake(Box::new(client), 500, None)
                .await
                .unwrap();
            let mut h2_stream = conn.spawn_stream().await.unwrap().unwrap();
            h2_stream.write_timeout = Some(Duration::from_millis(100));
            let mut request = RequestHeader::build("GET", b"/", None).unwrap();
            request.insert_header("Host", "one.one.one.one").unwrap();
            h2_stream
                .write_request_header(Box::new(request), false)
                .unwrap();
            h2_stream.read_response_header().await.unwrap();
            assert_eq!(h2_stream.response_header().unwrap().status.as_u16(), 200);
            let err = h2_stream
                .write_request_body(Bytes::from_static(b"client body"), true)
                .await
                .err()
                .unwrap();
            assert_eq!(err.etype(), &pingora_error::ErrorType::WriteTimedout);
        }));
        // Server (scripted with raw frames via h2's internal Codec)
        handles.push(tokio::spawn(async move {
            // 0. Prepare outbound frames
            let mut outbound: Vec<h2::frame::Frame<Bytes>> = Vec::new();
            let mut settings = Settings::default();
            // 1-byte window: the client cannot fit its body and must wait
            settings.set_initial_window_size(Some(1));
            settings.set_max_concurrent_streams(Some(1));
            outbound.push(settings.into());
            outbound.push(Settings::ack().into());
            let headers = HeaderMap::new();
            outbound.push(
                Headers::new(1.into(), Pseudo::response(http::StatusCode::OK), headers).into(),
            );
            outbound.push(WindowUpdate::new(1.into(), 10000).into());
            // 1. Read preface from the client
            server.read_exact(&mut [0u8; 24]).await.unwrap();
            let mut server: h2::Codec<DuplexStream, Bytes> = h2::Codec::new(server);
            // 2. Drain client's frames
            for _ in 0..3 {
                _ = server.next().await.unwrap();
            }
            // 3. Send frames
            for (i, frame) in outbound.into_iter().enumerate() {
                if i == 3 {
                    // Delay WindowUpdate to trigger client side write timeout on capacity await
                    sleep(Duration::from_millis(200)).await;
                }
                _ = server.send(frame).await;
            }
        }));
        for handle in handles {
            // ensure no panics
            assert!(handle.await.is_ok());
        }
    }

    // Mirror of the test above with the roles flipped: the scripted client
    // withholds the WINDOW_UPDATE so the server's body write times out.
    #[tokio::test]
    async fn test_server_write_timeout() {
        let mut handles = vec![];
        let (mut client, server) = duplex(65536);
        // Client (scripted with raw frames via h2's internal Codec)
        handles.push(tokio::spawn(async move {
            // 0. Prepare outbound frames
            let mut outbound: Vec<h2::frame::Frame<Bytes>> = Vec::new();
            let mut settings = Settings::default();
            // 1-byte window: the server cannot fit its body and must wait
            settings.set_initial_window_size(Some(1));
            settings.set_max_concurrent_streams(Some(1));
            outbound.push(settings.into());
            outbound.push(Settings::ack().into());
            let mut headers = Headers::new(
                1.into(),
                Pseudo::request(
                    Method::GET,
                    Uri::from_static("https://one.one.one.one"),
                    None,
                ),
                HeaderMap::new(),
            );
            headers.set_end_headers();
            outbound.push(headers.into());
            outbound.push(WindowUpdate::new(1.into(), 10000).into());
            // 1. Write h2 preface
            client
                .write_all(b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n")
                .await
                .unwrap();
            // 2. Send frames
            let mut client: h2::Codec<DuplexStream, Bytes> = h2::Codec::new(client);
            for (i, frame) in outbound.into_iter().enumerate() {
                if i == 3 {
                    // Delay WindowUpdate to trigger server side write timeout on capacity await
                    sleep(Duration::from_millis(200)).await;
                }
                _ = client.send(frame).await;
            }
            // 3. Drain server's frames
            for _ in 0..3 {
                _ = client.next().await.unwrap();
            }
        }));
        // Server
        let mut connection = handshake(Box::new(server), None).await.unwrap();
        let digest = Arc::new(Digest::default());
        while let Some(mut h2_stream) = HttpSession::from_h2_conn(&mut connection, digest.clone())
            .await
            .unwrap()
        {
            handles.push(tokio::spawn(async move {
                h2_stream.set_write_timeout(Some(Duration::from_millis(100)));
                let req = h2_stream.req_header();
                assert_eq!(req.method, Method::GET);
                let response_header = Box::new(ResponseHeader::build(200, None).unwrap());
                assert!(h2_stream
                    .write_response_header(response_header.clone(), false)
                    .is_ok());
                let err = h2_stream
                    .write_body(Bytes::from_static(b"server body"), true)
                    .await
                    .err()
                    .unwrap();
                assert_eq!(err.etype(), &pingora_error::ErrorType::WriteTimedout);
            }));
        }
        for handle in handles {
            // ensure no panics
            assert!(handle.await.is_ok());
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/protocols/http/v2/server.rs | pingora-core/src/protocols/http/v2/server.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! HTTP/2 server session
use bytes::Bytes;
use futures::Future;
use h2::server;
use h2::server::SendResponse;
use h2::{RecvStream, SendStream};
use http::header::HeaderName;
use http::uri::PathAndQuery;
use http::{header, HeaderMap, Response};
use log::{debug, warn};
use pingora_http::{RequestHeader, ResponseHeader};
use pingora_timeout::timeout;
use std::sync::Arc;
use std::task::ready;
use std::time::Duration;
use crate::protocols::http::body_buffer::FixedBuffer;
use crate::protocols::http::date::get_cached_date;
use crate::protocols::http::v1::client::http_req_header_to_wire;
use crate::protocols::http::HttpTask;
use crate::protocols::{Digest, SocketAddr, Stream};
use crate::{Error, ErrorType, OrErr, Result};
const BODY_BUF_LIMIT: usize = 1024 * 64;
type H2Connection<S> = server::Connection<S, Bytes>;
pub use h2::server::Builder as H2Options;
/// Perform HTTP/2 connection handshake with an established (TLS) connection.
///
/// The optional `options` allow to adjust certain HTTP/2 parameters and settings.
/// See [`H2Options`] for more details.
pub async fn handshake(io: Stream, options: Option<H2Options>) -> Result<H2Connection<Stream>> {
    let options = options.unwrap_or_default();
    match options.handshake(io).await {
        Ok(connection) => {
            debug!("H2 handshake done.");
            Ok(connection)
        }
        Err(e) => Error::e_because(
            ErrorType::HandshakeError,
            "while h2 handshaking with client",
            e,
        ),
    }
}
use futures::task::Context;
use futures::task::Poll;
use std::pin::Pin;
/// The future to poll for an idle session.
///
/// Calling `.await` in this object will not return until the client decides to close this stream.
pub struct Idle<'a>(&'a mut HttpSession);
impl Future for Idle<'_> {
type Output = Result<h2::Reason>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if let Some(body_writer) = self.0.send_response_body.as_mut() {
body_writer.poll_reset(cx)
} else {
self.0.send_response.poll_reset(cx)
}
.map_err(|e| Error::because(ErrorType::H2Error, "downstream error while idling", e))
}
}
/// HTTP/2 server session
pub struct HttpSession {
    // The request header, fully received at stream creation time
    request_header: RequestHeader,
    // Receiving half of the h2 stream for the request body
    request_body_reader: RecvStream,
    // Handle to send the response header; also used for RST_STREAM
    send_response: SendResponse<Bytes>,
    // Sending half for the response body; None until the response header is sent
    send_response_body: Option<SendStream<Bytes>>,
    // Remember what has been written
    response_written: Option<Box<ResponseHeader>>,
    // Indicate that whether a END_STREAM is already sent
    // in order to tell whether needs to send one extra FRAME when this response finishes
    ended: bool,
    // How many (application, not wire) request body bytes have been read so far.
    body_read: usize,
    // How many (application, not wire) response body bytes have been sent so far.
    body_sent: usize,
    // buffered request body for retry logic
    retry_buffer: Option<FixedBuffer>,
    // digest to record underlying connection info
    digest: Arc<Digest>,
    /// The write timeout which will be applied to writing response body.
    /// The timeout is reset on every write. This is not a timeout on the overall duration of the
    /// response.
    pub write_timeout: Option<Duration>,
    // How long to wait when draining (discarding) request body
    total_drain_timeout: Option<Duration>,
}
impl HttpSession {
    /// Create a new [`HttpSession`] from the HTTP/2 connection.
    /// This function returns a new HTTP/2 session when the provided HTTP/2 connection, `conn`,
    /// establishes a new HTTP/2 stream to this server.
    ///
    /// A [`Digest`] from the IO stream is also stored in the resulting session, since the
    /// session doesn't have access to the underlying stream (and the stream itself isn't
    /// accessible from the `h2::server::Connection`).
    ///
    /// Note: in order to handle all **existing** and new HTTP/2 sessions, the server must call
    /// this function in a loop until the client decides to close the connection.
    ///
    /// `None` will be returned when the connection is closing so that the loop can exit.
    ///
    pub async fn from_h2_conn(
        conn: &mut H2Connection<Stream>,
        digest: Arc<Digest>,
    ) -> Result<Option<Self>> {
        // NOTE: conn.accept().await is what drives the entire connection.
        let res = conn.accept().await.transpose().or_err(
            ErrorType::H2Error,
            "while accepting new downstream requests",
        )?;
        Ok(res.map(|(req, send_response)| {
            let (request_header, request_body_reader) = req.into_parts();
            HttpSession {
                request_header: request_header.into(),
                request_body_reader,
                send_response,
                send_response_body: None,
                response_written: None,
                ended: false,
                body_read: 0,
                body_sent: 0,
                retry_buffer: None,
                digest,
                write_timeout: None,
                total_drain_timeout: None,
            }
        }))
    }

    /// The request sent from the client
    ///
    /// Different from its HTTP/1.X counterpart, this function never panics as the request is already
    /// read when established a new HTTP/2 stream.
    pub fn req_header(&self) -> &RequestHeader {
        &self.request_header
    }

    /// A mutable reference to request sent from the client
    ///
    /// Different from its HTTP/1.X counterpart, this function never panics as the request is already
    /// read when established a new HTTP/2 stream.
    pub fn req_header_mut(&mut self) -> &mut RequestHeader {
        &mut self.request_header
    }

    /// Read request body bytes. `None` when there is no more body to read.
    pub async fn read_body_bytes(&mut self) -> Result<Option<Bytes>> {
        // TODO: timeout
        let data = self.request_body_reader.data().await.transpose().or_err(
            ErrorType::ReadError,
            "while reading downstream request body",
        )?;
        if let Some(data) = data.as_ref() {
            self.body_read += data.len();
            if let Some(buffer) = self.retry_buffer.as_mut() {
                buffer.write_to_buffer(data);
            }
            // Return the consumed bytes to the h2 flow-control window so the
            // peer may send more. The error (if any) is intentionally ignored.
            let _ = self
                .request_body_reader
                .flow_control()
                .release_capacity(data.len());
        }
        Ok(data)
    }

    // Poll-based variant of read_body_bytes() for manual Future implementations.
    #[doc(hidden)]
    pub fn poll_read_body_bytes(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Bytes, h2::Error>>> {
        let data = match ready!(self.request_body_reader.poll_data(cx)).transpose() {
            Ok(data) => data,
            Err(err) => return Poll::Ready(Some(Err(err))),
        };
        if let Some(data) = data {
            self.body_read += data.len();
            // NOTE: unlike read_body_bytes(), this path does not feed the
            // retry buffer, and release_capacity errors are propagated.
            self.request_body_reader
                .flow_control()
                .release_capacity(data.len())?;
            return Poll::Ready(Some(Ok(data)));
        }
        Poll::Ready(None)
    }

    // Read and discard request body chunks until end of stream or error.
    async fn do_drain_request_body(&mut self) -> Result<()> {
        loop {
            match self.read_body_bytes().await {
                Ok(Some(_)) => { /* continue to drain */ }
                Ok(None) => return Ok(()), // done
                Err(e) => return Err(e),
            }
        }
    }

    /// Drain the request body. `Ok(())` when there is no (more) body to read.
    // NOTE for h2 it may be worth allowing cancellation of the stream via reset.
    pub async fn drain_request_body(&mut self) -> Result<()> {
        if self.is_body_done() {
            return Ok(());
        }
        match self.total_drain_timeout {
            Some(t) => match timeout(t, self.do_drain_request_body()).await {
                Ok(res) => res,
                Err(_) => Error::e_explain(
                    ErrorType::ReadTimedout,
                    format!("draining body, timeout: {t:?}"),
                ),
            },
            None => self.do_drain_request_body().await,
        }
    }

    /// Sets the downstream write timeout. This will trigger if we're unable
    /// to write to the stream after `timeout`.
    pub fn set_write_timeout(&mut self, timeout: Option<Duration>) {
        self.write_timeout = timeout;
    }

    /// Get the write timeout.
    pub fn get_write_timeout(&self) -> Option<Duration> {
        self.write_timeout
    }

    /// Sets the total drain timeout. This `timeout` will be used while draining
    /// the request body.
    pub fn set_total_drain_timeout(&mut self, timeout: Option<Duration>) {
        self.total_drain_timeout = timeout;
    }

    /// Get the total drain timeout.
    pub fn get_total_drain_timeout(&self) -> Option<Duration> {
        self.total_drain_timeout
    }

    // the write_* don't have timeouts because the actual writing happens on the connection
    // not here.

    /// Write the response header to the client.
    /// # the `end` flag
    /// `end` marks the end of this session.
    /// If the `end` flag is set, no more header or body can be sent to the client.
    pub fn write_response_header(
        &mut self,
        mut header: Box<ResponseHeader>,
        end: bool,
    ) -> Result<()> {
        if self.ended {
            // TODO: error or warn?
            return Ok(());
        }
        if header.status.is_informational() {
            // ignore informational response 1xx header because send_response() can only be called once
            // https://github.com/hyperium/h2/issues/167
            debug!("ignoring informational headers");
            return Ok(());
        }
        if self.response_written.as_ref().is_some() {
            warn!("Response header is already sent, cannot send again");
            return Ok(());
        }

        /* update headers */
        header.insert_header(header::DATE, get_cached_date())?;

        // remove other h1 hop headers that cannot be present in H2
        // https://httpwg.org/specs/rfc7540.html#n-connection-specific-header-fields
        header.remove_header(&header::TRANSFER_ENCODING);
        header.remove_header(&header::CONNECTION);
        header.remove_header(&header::UPGRADE);
        header.remove_header(&HeaderName::from_static("keep-alive"));
        header.remove_header(&HeaderName::from_static("proxy-connection"));

        let resp = Response::from_parts(header.as_owned_parts(), ());

        let body_writer = self.send_response.send_response(resp, end).or_err(
            ErrorType::WriteError,
            "while writing h2 response to downstream",
        )?;
        self.response_written = Some(header);
        self.send_response_body = Some(body_writer);
        self.ended = self.ended || end;
        Ok(())
    }

    /// Write response body to the client. See [Self::write_response_header] for how to use `end`.
    pub async fn write_body(&mut self, data: Bytes, end: bool) -> Result<()> {
        match self.write_timeout {
            Some(t) => match timeout(t, self.do_write_body(data, end)).await {
                Ok(res) => res,
                Err(_) => Error::e_explain(
                    ErrorType::WriteTimedout,
                    format!("writing body, timeout: {t:?}"),
                ),
            },
            None => self.do_write_body(data, end).await,
        }
    }

    // The actual body write; flow-control waits happen in super::write_body().
    async fn do_write_body(&mut self, data: Bytes, end: bool) -> Result<()> {
        if self.ended {
            // NOTE: in h1, we also track to see if content-length matches the data
            // We have not tracked that in h2
            warn!("Try to write body after end of stream, dropping the extra data");
            return Ok(());
        }
        let Some(writer) = self.send_response_body.as_mut() else {
            return Err(Error::explain(
                ErrorType::H2Error,
                "try to send body before header is sent",
            ));
        };
        let data_len = data.len();
        super::write_body(writer, data, end, self.write_timeout)
            .await
            .map_err(|e| e.into_down())?;
        self.body_sent += data_len;
        self.ended = self.ended || end;
        Ok(())
    }

    /// Write response trailers to the client, this also closes the stream.
    pub fn write_trailers(&mut self, trailers: HeaderMap) -> Result<()> {
        if self.ended {
            warn!("Tried to write trailers after end of stream, dropping them");
            return Ok(());
        }
        let Some(writer) = self.send_response_body.as_mut() else {
            return Err(Error::explain(
                ErrorType::H2Error,
                "try to send trailers before header is sent",
            ));
        };
        writer.send_trailers(trailers).or_err(
            ErrorType::WriteError,
            "while writing h2 response trailers to downstream",
        )?;
        // sending trailers closes the stream
        self.ended = true;
        Ok(())
    }

    /// Similar to [Self::write_response_header], this function takes a reference instead
    pub fn write_response_header_ref(&mut self, header: &ResponseHeader, end: bool) -> Result<()> {
        self.write_response_header(Box::new(header.clone()), end)
    }

    // TODO: trailer

    /// Mark the session end. If no `end` flag is already set before this call, this call will
    /// signal the client. Otherwise this call does nothing.
    ///
    /// Dropping this object without sending `end` will cause an error to the client, which will cause
    /// the client to treat this session as bad or incomplete.
    pub fn finish(&mut self) -> Result<()> {
        if self.ended {
            // already ended the stream
            return Ok(());
        }
        if let Some(writer) = self.send_response_body.as_mut() {
            // use an empty data frame to signal the end
            writer.send_data("".into(), true).or_err(
                ErrorType::WriteError,
                "while writing h2 response body to downstream",
            )?;
            self.ended = true;
        };
        // else: the response header is not sent, do nothing now.
        // When send_response_body is dropped, an RST_STREAM will be sent
        Ok(())
    }

    // Apply a batch of response tasks (header/body/trailers/done) in order.
    // Returns whether the end of the response was reached.
    pub async fn response_duplex_vec(&mut self, tasks: Vec<HttpTask>) -> Result<bool> {
        let mut end_stream = false;
        for task in tasks.into_iter() {
            end_stream = match task {
                HttpTask::Header(header, end) => {
                    self.write_response_header(header, end)
                        .map_err(|e| e.into_down())?;
                    end
                }
                HttpTask::Body(data, end) => match data {
                    Some(d) => {
                        if !d.is_empty() {
                            self.write_body(d, end).await.map_err(|e| e.into_down())?;
                        }
                        end
                    }
                    None => end,
                },
                HttpTask::Trailer(Some(trailers)) => {
                    self.write_trailers(*trailers)?;
                    true
                }
                HttpTask::Trailer(None) => true,
                HttpTask::Done => true,
                HttpTask::Failed(e) => {
                    return Err(e);
                }
            } || end_stream // safe guard in case `end` in tasks flips from true to false
        }
        if end_stream {
            // no-op if finished already
            self.finish().map_err(|e| e.into_down())?;
        }
        Ok(end_stream)
    }

    /// Return a string `$METHOD $PATH, Host: $HOST`. Mostly for logging and debug purpose
    pub fn request_summary(&self) -> String {
        format!(
            "{} {}, Host: {}:{}",
            self.request_header.method,
            self.request_header
                .uri
                .path_and_query()
                .map(PathAndQuery::as_str)
                .unwrap_or_default(),
            self.request_header.uri.host().unwrap_or_default(),
            self.req_header()
                .uri
                .port()
                .as_ref()
                .map(|port| port.as_str())
                .unwrap_or_default()
        )
    }

    /// Return the written response header. `None` if it is not written yet.
    pub fn response_written(&self) -> Option<&ResponseHeader> {
        self.response_written.as_deref()
    }

    /// Give up the stream abruptly.
    ///
    /// This will send a `INTERNAL_ERROR` stream error to the client
    pub fn shutdown(&mut self) {
        if !self.ended {
            self.send_response.send_reset(h2::Reason::INTERNAL_ERROR);
        }
    }

    // Hand the response body writer to the caller, e.g. for zero-copy bridging.
    #[doc(hidden)]
    pub fn take_response_body_writer(&mut self) -> Option<SendStream<Bytes>> {
        self.send_response_body.take()
    }

    // This is a hack for pingora-proxy to create subrequests from h2 server session
    // TODO: be able to convert from h2 to h1 subrequest
    pub fn pseudo_raw_h1_request_header(&self) -> Bytes {
        let buf = http_req_header_to_wire(&self.request_header).unwrap(); // safe, None only when version unknown
        buf.freeze()
    }

    /// Whether there is no more body to read
    pub fn is_body_done(&self) -> bool {
        // Check no body in request
        // Also check we hit end of stream
        self.is_body_empty() || self.request_body_reader.is_end_stream()
    }

    /// Whether there is any body to read. true means there no body in request.
    pub fn is_body_empty(&self) -> bool {
        // Only meaningful before any body bytes were read: either the stream
        // already ended, or the client declared content-length: 0.
        self.body_read == 0
            && (self.request_body_reader.is_end_stream()
                || self
                    .request_header
                    .headers
                    .get(header::CONTENT_LENGTH)
                    .is_some_and(|cl| cl.as_bytes() == b"0"))
    }

    // Whether the retry buffer overflowed and was discarded (body too large to replay).
    pub fn retry_buffer_truncated(&self) -> bool {
        self.retry_buffer
            .as_ref()
            .map_or_else(|| false, |r| r.is_truncated())
    }

    // Start buffering request body bytes (up to BODY_BUF_LIMIT) for retries.
    pub fn enable_retry_buffering(&mut self) {
        if self.retry_buffer.is_none() {
            self.retry_buffer = Some(FixedBuffer::new(BODY_BUF_LIMIT))
        }
    }

    // The buffered request body, or None if buffering is off or truncated.
    pub fn get_retry_buffer(&self) -> Option<Bytes> {
        self.retry_buffer.as_ref().and_then(|b| {
            if b.is_truncated() {
                None
            } else {
                b.get_buffer()
            }
        })
    }

    /// `async fn idle() -> Result<Reason, Error>;`
    /// This async fn will be pending forever until the client closes the stream/connection
    /// This function is used for watching client status so that the server is able to cancel
    /// its internal tasks as the client waiting for the tasks goes away
    pub fn idle(&mut self) -> Idle<'_> {
        Idle(self)
    }

    /// Similar to `read_body_bytes()` but will be pending after Ok(None) is returned,
    /// until the client closes the connection
    pub async fn read_body_or_idle(&mut self, no_body_expected: bool) -> Result<Option<Bytes>> {
        if no_body_expected || self.is_body_done() {
            let reason = self.idle().await?;
            Error::e_explain(
                ErrorType::H2Error,
                format!("Client closed H2, reason: {reason}"),
            )
        } else {
            self.read_body_bytes().await
        }
    }

    /// Return how many response body bytes (application, not wire) already sent downstream
    pub fn body_bytes_sent(&self) -> usize {
        self.body_sent
    }

    /// Return how many request body bytes (application, not wire) already read from downstream
    pub fn body_bytes_read(&self) -> usize {
        self.body_read
    }

    /// Return the [Digest] of the connection.
    pub fn digest(&self) -> Option<&Digest> {
        Some(&self.digest)
    }

    /// Return a mutable [Digest] reference for the connection.
    ///
    /// Returns `None` if multiple H2 streams are open (the `Arc` is shared).
    pub fn digest_mut(&mut self) -> Option<&mut Digest> {
        Arc::get_mut(&mut self.digest)
    }

    /// Return the server (local) address recorded in the connection digest.
    pub fn server_addr(&self) -> Option<&SocketAddr> {
        self.digest.socket_digest.as_ref().map(|d| d.local_addr())?
    }

    /// Return the client (peer) address recorded in the connection digest.
    pub fn client_addr(&self) -> Option<&SocketAddr> {
        self.digest.socket_digest.as_ref().map(|d| d.peer_addr())?
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use http::{HeaderValue, Method, Request};
    use tokio::io::duplex;

    // End-to-end happy path: request body read (with retry buffering),
    // idling behavior before/after the response header, trailers, and
    // finish() closing the stream cleanly.
    #[tokio::test]
    async fn test_server_handshake_accept_request() {
        let (client, server) = duplex(65536);
        let client_body = "test client body";
        let server_body = "test server body";
        let mut expected_trailers = HeaderMap::new();
        expected_trailers.insert("test", HeaderValue::from_static("trailers"));
        let trailers = expected_trailers.clone();
        let mut handles = vec![];
        handles.push(tokio::spawn(async move {
            let (h2, connection) = h2::client::handshake(client).await.unwrap();
            tokio::spawn(async move {
                connection.await.unwrap();
            });
            let mut h2 = h2.ready().await.unwrap();
            let request = Request::builder()
                .method(Method::GET)
                .uri("https://www.example.com/")
                .body(())
                .unwrap();
            let (response, mut req_body) = h2.send_request(request, false).unwrap();
            req_body.reserve_capacity(client_body.len());
            req_body.send_data(client_body.into(), true).unwrap();
            let (head, mut body) = response.await.unwrap().into_parts();
            assert_eq!(head.status, 200);
            let data = body.data().await.unwrap().unwrap();
            assert_eq!(data, server_body);
            let resp_trailers = body.trailers().await.unwrap().unwrap();
            assert_eq!(resp_trailers, expected_trailers);
        }));

        let mut connection = handshake(Box::new(server), None).await.unwrap();
        let digest = Arc::new(Digest::default());
        while let Some(mut http) = HttpSession::from_h2_conn(&mut connection, digest.clone())
            .await
            .unwrap()
        {
            let trailers = trailers.clone();
            handles.push(tokio::spawn(async move {
                let req = http.req_header();
                assert_eq!(req.method, Method::GET);
                assert_eq!(req.uri, "https://www.example.com/");

                http.enable_retry_buffering();
                assert!(!http.is_body_empty());
                assert!(!http.is_body_done());

                let body = http.read_body_or_idle(false).await.unwrap().unwrap();
                assert_eq!(body, client_body);
                assert!(http.is_body_done());
                assert_eq!(http.body_bytes_read(), 16);

                let retry_body = http.get_retry_buffer().unwrap();
                assert_eq!(retry_body, client_body);

                // test idling before response header is sent
                tokio::select! {
                    _ = http.idle() => {panic!("downstream should be idling")},
                    _= tokio::time::sleep(tokio::time::Duration::from_secs(1)) => {}
                }

                let response_header = Box::new(ResponseHeader::build(200, None).unwrap());
                assert!(http
                    .write_response_header(response_header.clone(), false)
                    .is_ok());
                // this write should be ignored otherwise we will error
                assert!(http.write_response_header(response_header, false).is_ok());

                // test idling after response header is sent
                tokio::select! {
                    _ = http.read_body_or_idle(false) => {panic!("downstream should be idling")},
                    _= tokio::time::sleep(tokio::time::Duration::from_secs(1)) => {}
                }

                // end: false here to verify finish() closes the stream nicely
                http.write_body(server_body.into(), false).await.unwrap();
                assert_eq!(http.body_bytes_sent(), 16);

                http.write_trailers(trailers).unwrap();
                http.finish().unwrap();
            }));
        }
        for handle in handles {
            // ensure no panics
            assert!(handle.await.is_ok());
        }
    }

    // A request with an explicit `content-length: 0` but no END_STREAM flag on
    // the header frame should still be treated as having an empty, finished body.
    #[tokio::test]
    async fn test_req_content_length_eq_0_and_no_header_eos() {
        let (client, server) = duplex(65536);
        let server_body = "test server body";
        let mut handles = vec![];
        handles.push(tokio::spawn(async move {
            let (h2, connection) = h2::client::handshake(client).await.unwrap();
            tokio::spawn(async move {
                connection.await.unwrap();
            });
            let mut h2 = h2.ready().await.unwrap();
            let request = Request::builder()
                .method(Method::POST)
                .uri("https://www.example.com/")
                .header("content-length", "0") // explicitly set
                .body(())
                .unwrap();
            let (response, mut req_body) = h2.send_request(request, false).unwrap(); // no EOS
            let (head, mut body) = response.await.unwrap().into_parts();
            assert_eq!(head.status, 200);
            let data = body.data().await.unwrap().unwrap();
            assert_eq!(data, server_body);
            req_body.send_data("".into(), true).unwrap(); // set EOS after read the resp body
        }));

        let mut connection = handshake(Box::new(server), None).await.unwrap();
        let digest = Arc::new(Digest::default());
        while let Some(mut http) = HttpSession::from_h2_conn(&mut connection, digest.clone())
            .await
            .unwrap()
        {
            handles.push(tokio::spawn(async move {
                let req = http.req_header();
                assert_eq!(req.method, Method::POST);
                assert_eq!(req.uri, "https://www.example.com/");

                // 1. Check body related methods
                http.enable_retry_buffering();
                assert!(http.is_body_empty());
                assert!(http.is_body_done());
                let retry_body = http.get_retry_buffer();
                assert!(retry_body.is_none());

                // 2. Send response
                let response_header = Box::new(ResponseHeader::build(200, None).unwrap());
                assert!(http
                    .write_response_header(response_header.clone(), false)
                    .is_ok());
                http.write_body(server_body.into(), false).await.unwrap();
                assert_eq!(http.body_bytes_sent(), 16);

                // 3. Waiting for the reset from the client
                assert!(http.read_body_or_idle(http.is_body_done()).await.is_err());
            }));
        }
        for handle in handles {
            // ensure no panics
            assert!(handle.await.is_ok());
        }
    }

    // Without content-length and without header EOS, the body must NOT be
    // considered empty/done until the client sends the empty EOS data frame.
    #[tokio::test]
    async fn test_req_header_no_eos_empty_data_with_eos() {
        let (client, server) = duplex(65536);
        let server_body = "test server body";
        let mut handles = vec![];
        handles.push(tokio::spawn(async move {
            let (h2, connection) = h2::client::handshake(client).await.unwrap();
            tokio::spawn(async move {
                connection.await.unwrap();
            });
            let mut h2 = h2.ready().await.unwrap();
            let request = Request::builder()
                .method(Method::POST)
                .uri("https://www.example.com/")
                .body(())
                .unwrap();
            let (response, mut req_body) = h2.send_request(request, false).unwrap(); // no EOS
            let (head, mut body) = response.await.unwrap().into_parts();
            assert_eq!(head.status, 200);
            let data = body.data().await.unwrap().unwrap();
            assert_eq!(data, server_body);
            req_body.send_data("".into(), true).unwrap(); // set EOS after read the resp body
        }));

        let mut connection = handshake(Box::new(server), None).await.unwrap();
        let digest = Arc::new(Digest::default());
        while let Some(mut http) = HttpSession::from_h2_conn(&mut connection, digest.clone())
            .await
            .unwrap()
        {
            handles.push(tokio::spawn(async move {
                let req = http.req_header();
                assert_eq!(req.method, Method::POST);
                assert_eq!(req.uri, "https://www.example.com/");

                // 1. Check body related methods
                http.enable_retry_buffering();
                assert!(!http.is_body_empty());
                assert!(!http.is_body_done());
                let retry_body = http.get_retry_buffer();
                assert!(retry_body.is_none());

                // 2. Send response
                let response_header = Box::new(ResponseHeader::build(200, None).unwrap());
                assert!(http
                    .write_response_header(response_header.clone(), false)
                    .is_ok());
                http.write_body(server_body.into(), false).await.unwrap();
                assert_eq!(http.body_bytes_sent(), 16);

                // 3. Waiting for the client to close stream.
                http.read_body_or_idle(http.is_body_done()).await.unwrap();
            }));
        }
        for handle in handles {
            // ensure no panics
            assert!(handle.await.is_ok());
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/modules/mod.rs | pingora-core/src/modules/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Modules to extend the functionalities of pingora services.
pub mod http;
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/modules/http/grpc_web.rs | pingora-core/src/modules/http/grpc_web.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::*;
use crate::protocols::http::bridge::grpc_web::GrpcWebCtx;
use std::ops::{Deref, DerefMut};
/// gRPC-web bridge module, this will convert
/// HTTP/1.1 gRPC-web requests to H2 gRPC requests
///
/// This is a newtype wrapper around [GrpcWebCtx]; the `Deref`/`DerefMut`
/// impls below expose the wrapped context's API directly on the module.
#[derive(Default)]
pub struct GrpcWebBridge(GrpcWebCtx);

// Allow a `&GrpcWebBridge` to be used wherever a `&GrpcWebCtx` is expected.
impl Deref for GrpcWebBridge {
    type Target = GrpcWebCtx;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

// Mutable counterpart of the `Deref` impl above.
impl DerefMut for GrpcWebBridge {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
#[async_trait]
impl HttpModule for GrpcWebBridge {
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }

    // Delegate header rewriting to the wrapped GrpcWebCtx.
    async fn request_header_filter(&mut self, req: &mut RequestHeader) -> Result<()> {
        let ctx = &mut self.0;
        ctx.request_header_filter(req);
        Ok(())
    }

    // Delegate response header rewriting to the wrapped GrpcWebCtx; the
    // end-of-stream flag is not needed by the bridge.
    async fn response_header_filter(
        &mut self,
        resp: &mut ResponseHeader,
        _end_of_stream: bool,
    ) -> Result<()> {
        let ctx = &mut self.0;
        ctx.response_header_filter(resp);
        Ok(())
    }

    // Encode trailers via the wrapped context when they are present;
    // otherwise there is nothing to emit.
    fn response_trailer_filter(
        &mut self,
        trailers: &mut Option<Box<HeaderMap>>,
    ) -> Result<Option<Bytes>> {
        match trailers {
            Some(t) => self.0.response_trailer_filter(t),
            None => Ok(None),
        }
    }
}
/// The builder for gRPC-web bridge module
pub struct GrpcWeb;
impl HttpModuleBuilder for GrpcWeb {
    // Each request gets a fresh, default bridge context.
    // `order()` is left at the trait default (0).
    fn init(&self) -> Module {
        Box::new(GrpcWebBridge::default())
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/modules/http/mod.rs | pingora-core/src/modules/http/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Modules for HTTP traffic.
//!
//! [HttpModule]s define request and response filters to use while running an
//! [HttpServer](crate::apps::http_app::HttpServer)
//! application.
//! See the [ResponseCompression](crate::modules::http::compression::ResponseCompression)
//! module for an example of how to implement a basic module.
pub mod compression;
pub mod grpc_web;
use async_trait::async_trait;
use bytes::Bytes;
use http::HeaderMap;
use once_cell::sync::OnceCell;
use pingora_error::Result;
use pingora_http::{RequestHeader, ResponseHeader};
use std::any::Any;
use std::any::TypeId;
use std::collections::HashMap;
use std::sync::Arc;
/// The trait an HTTP traffic module needs to implement
///
/// All filters have no-op defaults, so implementors only override the
/// phases they care about. Filters may rewrite the passed-in header/body
/// in place.
#[async_trait]
pub trait HttpModule {
    /// Called when the request header is received; may modify it in place.
    async fn request_header_filter(&mut self, _req: &mut RequestHeader) -> Result<()> {
        Ok(())
    }
    /// Called per request body chunk; `_end_of_stream` is true on the last call.
    async fn request_body_filter(
        &mut self,
        _body: &mut Option<Bytes>,
        _end_of_stream: bool,
    ) -> Result<()> {
        Ok(())
    }
    /// Called when the response header is about to be sent; may modify it in place.
    async fn response_header_filter(
        &mut self,
        _resp: &mut ResponseHeader,
        _end_of_stream: bool,
    ) -> Result<()> {
        Ok(())
    }
    /// Called per response body chunk; may replace the chunk in place.
    fn response_body_filter(
        &mut self,
        _body: &mut Option<Bytes>,
        _end_of_stream: bool,
    ) -> Result<()> {
        Ok(())
    }
    /// Called on response trailers; may return bytes to append to the body instead.
    fn response_trailer_filter(
        &mut self,
        _trailers: &mut Option<Box<HeaderMap>>,
    ) -> Result<Option<Bytes>> {
        Ok(None)
    }
    /// Called when the response completes; may return final body bytes (e.g. a flush).
    fn response_done_filter(&mut self) -> Result<Option<Bytes>> {
        Ok(None)
    }
    // The two accessors below enable downcasting a boxed module back to its
    // concrete type (see HttpModuleCtx::get / get_mut).
    fn as_any(&self) -> &dyn Any;
    fn as_any_mut(&mut self) -> &mut dyn Any;
}

/// A boxed, thread-safe per-request module state.
pub type Module = Box<dyn HttpModule + 'static + Send + Sync>;

/// Trait to init the http module ctx for each request
pub trait HttpModuleBuilder {
    /// The order the module will run
    ///
    /// The lower the value, the later it runs relative to other filters.
    /// If the order of the filter is not important, leave it to the default 0.
    fn order(&self) -> i16 {
        0
    }
    /// Initialize and return the per request module context
    fn init(&self) -> Module;
}

/// A boxed, thread-safe module builder.
pub type ModuleBuilder = Box<dyn HttpModuleBuilder + 'static + Send + Sync>;

/// The object to hold multiple http modules
pub struct HttpModules {
    // Builders sorted by order (largest first); see add_module().
    modules: Vec<ModuleBuilder>,
    // Lazily built map from a module's concrete TypeId to its position in
    // `modules`; shared (Arc) by every HttpModuleCtx built from this object.
    module_index: OnceCell<Arc<HashMap<TypeId, usize>>>,
}
impl HttpModules {
    /// Create a new [HttpModules]
    pub fn new() -> Self {
        HttpModules {
            modules: vec![],
            module_index: OnceCell::new(),
        }
    }
    /// Add a new [ModuleBuilder] to [HttpModules]
    ///
    /// Each type of [HttpModule] can be only added once.
    /// # Panic
    /// Panic if any [HttpModule] is added more than once.
    pub fn add_module(&mut self, builder: ModuleBuilder) {
        if self.module_index.get().is_some() {
            // We use a shared module_index the index would be out of sync if we
            // add more modules.
            panic!("cannot add module after ctx is already built")
        }
        self.modules.push(builder);
        // not the most efficient way but should be fine
        // largest order first
        //
        // Sort with `Reverse` instead of negating the key: `-m.order()`
        // overflows for `i16::MIN` (panics in debug builds, wraps in release),
        // which would mis-sort a module using the lowest possible order.
        self.modules.sort_by_key(|m| std::cmp::Reverse(m.order()));
    }
    /// Build the contexts of all the modules added to this [HttpModules]
    pub fn build_ctx(&self) -> HttpModuleCtx {
        // Fresh per-request module states, in execution order.
        let module_ctx: Vec<_> = self.modules.iter().map(|b| b.init()).collect();
        // The TypeId -> index map is computed once on the first request and
        // then shared (cheap Arc clone) by all later contexts.
        let module_index = self
            .module_index
            .get_or_init(|| {
                let mut module_index = HashMap::with_capacity(self.modules.len());
                for (i, c) in module_ctx.iter().enumerate() {
                    let exist = module_index.insert(c.as_any().type_id(), i);
                    if exist.is_some() {
                        panic!("duplicated filters found")
                    }
                }
                Arc::new(module_index)
            })
            .clone();
        HttpModuleCtx {
            module_ctx,
            module_index,
        }
    }
}
/// The Contexts of multiple modules
///
/// This is the object that will apply all the included modules to a certain HTTP request.
/// The modules are ordered according to their `order()`.
pub struct HttpModuleCtx {
    // the modules in the order of execution
    module_ctx: Vec<Module>,
    // find the module in the vec with its type ID
    // (shared with the HttpModules that built this ctx)
    module_index: Arc<HashMap<TypeId, usize>>,
}
impl HttpModuleCtx {
    /// Create a placeholder empty [HttpModuleCtx].
    ///
    /// [HttpModules] should be used to create nonempty [HttpModuleCtx].
    pub fn empty() -> Self {
        HttpModuleCtx {
            module_ctx: Vec::new(),
            module_index: Arc::new(HashMap::new()),
        }
    }

    /// Get a ref to [HttpModule] if any.
    pub fn get<T: 'static>(&self) -> Option<&T> {
        let idx = *self.module_index.get(&TypeId::of::<T>())?;
        let module = self.module_ctx[idx]
            .as_any()
            .downcast_ref::<T>()
            .expect("type should always match");
        Some(module)
    }

    /// Get a mut ref to [HttpModule] if any.
    pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> {
        let idx = *self.module_index.get(&TypeId::of::<T>())?;
        let module = self.module_ctx[idx]
            .as_any_mut()
            .downcast_mut::<T>()
            .expect("type should always match");
        Some(module)
    }

    /// Run the `request_header_filter` for all the modules according to their orders.
    pub async fn request_header_filter(&mut self, req: &mut RequestHeader) -> Result<()> {
        for module in self.module_ctx.iter_mut() {
            module.request_header_filter(req).await?;
        }
        Ok(())
    }

    /// Run the `request_body_filter` for all the modules according to their orders.
    pub async fn request_body_filter(
        &mut self,
        body: &mut Option<Bytes>,
        end_of_stream: bool,
    ) -> Result<()> {
        for module in self.module_ctx.iter_mut() {
            module.request_body_filter(body, end_of_stream).await?;
        }
        Ok(())
    }

    /// Run the `response_header_filter` for all the modules according to their orders.
    pub async fn response_header_filter(
        &mut self,
        req: &mut ResponseHeader,
        end_of_stream: bool,
    ) -> Result<()> {
        for module in self.module_ctx.iter_mut() {
            module.response_header_filter(req, end_of_stream).await?;
        }
        Ok(())
    }

    /// Run the `response_body_filter` for all the modules according to their orders.
    pub fn response_body_filter(
        &mut self,
        body: &mut Option<Bytes>,
        end_of_stream: bool,
    ) -> Result<()> {
        for module in self.module_ctx.iter_mut() {
            module.response_body_filter(body, end_of_stream)?;
        }
        Ok(())
    }

    /// Run the `response_trailer_filter` for all the modules according to their orders.
    ///
    /// Returns an `Option<Bytes>` which can be used to write response trailers into
    /// the response body. Note, if multiple modules attempt to write trailers into
    /// the body the last one will be used.
    ///
    /// Implementors that intend to write trailers into the body need to ensure their filter
    /// is using an encoding that supports this.
    pub fn response_trailer_filter(
        &mut self,
        trailers: &mut Option<Box<HeaderMap>>,
    ) -> Result<Option<Bytes>> {
        let mut last_encoded = None;
        for module in self.module_ctx.iter_mut() {
            let encoded = module.response_trailer_filter(trailers)?;
            if encoded.is_some() {
                // keep only the most recent module's output
                last_encoded = encoded;
            }
        }
        Ok(last_encoded)
    }

    /// Run the `response_done_filter` for all the modules according to their orders.
    ///
    /// This filter may be invoked in certain response paths to signal end of response
    /// if not already done so via trailers or body (with end flag set).
    ///
    /// Returns an `Option<Bytes>` which can be used to write additional response body
    /// bytes. Note, if multiple modules attempt to write body bytes, only the last one
    /// will be used.
    pub fn response_done_filter(&mut self) -> Result<Option<Bytes>> {
        let mut last_encoded = None;
        for module in self.module_ctx.iter_mut() {
            let encoded = module.response_done_filter()?;
            if encoded.is_some() {
                // keep only the most recent module's output
                last_encoded = encoded;
            }
        }
        Ok(last_encoded)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Minimal module that tags requests with `my-filter: 1`.
    struct MyModule;
    #[async_trait]
    impl HttpModule for MyModule {
        fn as_any(&self) -> &dyn Any {
            self
        }
        fn as_any_mut(&mut self) -> &mut dyn Any {
            self
        }
        async fn request_header_filter(&mut self, req: &mut RequestHeader) -> Result<()> {
            req.insert_header("my-filter", "1")
        }
    }
    struct MyModuleBuilder;
    impl HttpModuleBuilder for MyModuleBuilder {
        // order 1 > -1, so MyModule runs before MyOtherModule
        fn order(&self) -> i16 {
            1
        }
        fn init(&self) -> Module {
            Box::new(MyModule)
        }
    }

    // Module whose behavior depends on whether MyModule already ran,
    // which lets the test observe execution order.
    struct MyOtherModule;
    #[async_trait]
    impl HttpModule for MyOtherModule {
        fn as_any(&self) -> &dyn Any {
            self
        }
        fn as_any_mut(&mut self) -> &mut dyn Any {
            self
        }
        async fn request_header_filter(&mut self, req: &mut RequestHeader) -> Result<()> {
            if req.headers.get("my-filter").is_some() {
                // if this MyOtherModule runs after MyModule
                req.insert_header("my-filter", "2")
            } else {
                // if this MyOtherModule runs before MyModule
                req.insert_header("my-other-filter", "1")
            }
        }
    }
    struct MyOtherModuleBuilder;
    impl HttpModuleBuilder for MyOtherModuleBuilder {
        fn order(&self) -> i16 {
            -1
        }
        fn init(&self) -> Module {
            Box::new(MyOtherModule)
        }
    }

    // Type-based lookup should find exactly the registered module types.
    #[test]
    fn test_module_get() {
        let mut http_module = HttpModules::new();
        http_module.add_module(Box::new(MyModuleBuilder));
        http_module.add_module(Box::new(MyOtherModuleBuilder));
        let mut ctx = http_module.build_ctx();
        assert!(ctx.get::<MyModule>().is_some());
        assert!(ctx.get::<MyOtherModule>().is_some());
        assert!(ctx.get::<usize>().is_none());
        assert!(ctx.get_mut::<MyModule>().is_some());
        assert!(ctx.get_mut::<MyOtherModule>().is_some());
        assert!(ctx.get_mut::<usize>().is_none());
    }

    // Registration order must not matter: execution follows order(), with
    // the larger value running first.
    #[tokio::test]
    async fn test_module_filter() {
        let mut http_module = HttpModules::new();
        http_module.add_module(Box::new(MyOtherModuleBuilder));
        http_module.add_module(Box::new(MyModuleBuilder));
        let mut ctx = http_module.build_ctx();
        let mut req = RequestHeader::build("Get", b"/", None).unwrap();
        ctx.request_header_filter(&mut req).await.unwrap();
        // MyModule runs before MyOtherModule
        assert_eq!(req.headers.get("my-filter").unwrap(), "2");
        assert!(req.headers.get("my-other-filter").is_none());
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/modules/http/compression.rs | pingora-core/src/modules/http/compression.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! HTTP compression filter
use super::*;
use crate::protocols::http::compression::ResponseCompressionCtx;
use std::ops::{Deref, DerefMut};
/// HTTP response compression module
///
/// Newtype wrapper over [ResponseCompressionCtx]; the `Deref`/`DerefMut`
/// impls expose the context's API (e.g. enabling/disabling compression)
/// directly on the module.
pub struct ResponseCompression(ResponseCompressionCtx);

// Allow a `&ResponseCompression` to act as a `&ResponseCompressionCtx`.
impl Deref for ResponseCompression {
    type Target = ResponseCompressionCtx;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

// Mutable counterpart of the `Deref` impl above.
impl DerefMut for ResponseCompression {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
#[async_trait]
impl HttpModule for ResponseCompression {
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }

    // Let the compression context inspect the request (e.g. accept-encoding).
    async fn request_header_filter(&mut self, req: &mut RequestHeader) -> Result<()> {
        self.0.request_filter(req);
        Ok(())
    }

    // Let the compression context adjust the response headers.
    async fn response_header_filter(
        &mut self,
        resp: &mut ResponseHeader,
        end_of_stream: bool,
    ) -> Result<()> {
        self.0.response_header_filter(resp, end_of_stream);
        Ok(())
    }

    fn response_body_filter(
        &mut self,
        body: &mut Option<Bytes>,
        end_of_stream: bool,
    ) -> Result<()> {
        if self.0.is_enabled() {
            // Only swap the body out when the compressor actually produced output.
            if let Some(compressed) = self.0.response_body_filter(body.as_ref(), end_of_stream) {
                *body = Some(compressed);
            }
        }
        Ok(())
    }

    fn response_done_filter(&mut self) -> Result<Option<Bytes>> {
        if !self.0.is_enabled() {
            return Ok(None);
        }
        // Flush or finish any remaining encoded bytes upon HTTP response completion
        // (if it was not already ended in the body filter).
        Ok(self.0.response_body_filter(None, true))
    }
}
/// The builder for HTTP response compression module
pub struct ResponseCompressionBuilder {
    // compression level passed through to ResponseCompressionCtx::new
    level: u32,
}
impl ResponseCompressionBuilder {
    /// Return a [ModuleBuilder] for [ResponseCompression] with the given compression level
    pub fn enable(level: u32) -> ModuleBuilder {
        Box::new(ResponseCompressionBuilder { level })
    }
}
impl HttpModuleBuilder for ResponseCompressionBuilder {
    fn init(&self) -> Module {
        // The two `false` flags are passed straight to ResponseCompressionCtx::new;
        // see that constructor for their meaning.
        Box::new(ResponseCompression(ResponseCompressionCtx::new(
            self.level, false, false,
        )))
    }
    fn order(&self) -> i16 {
        // run the response filter later than most others filters
        // (lower order == later; i16::MIN / 2 avoids the extreme value)
        i16::MIN / 2
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/server/mod.rs | pingora-core/src/server/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Server process and configuration management
pub mod configuration;
#[cfg(unix)]
mod daemon;
#[cfg(unix)]
pub(crate) mod transfer_fd;
use async_trait::async_trait;
#[cfg(unix)]
use daemon::daemonize;
use log::{debug, error, info, warn};
use pingora_runtime::Runtime;
use pingora_timeout::fast_timeout;
#[cfg(feature = "sentry")]
use sentry::ClientOptions;
use std::sync::Arc;
use std::thread;
#[cfg(unix)]
use tokio::signal::unix;
use tokio::sync::{broadcast, watch, Mutex};
use tokio::time::{sleep, Duration};
use crate::services::Service;
use configuration::{Opt, ServerConf};
#[cfg(unix)]
pub use transfer_fd::Fds;
use pingora_error::{Error, ErrorType, Result};
/* Time to wait before exiting the program.
This is the graceful period for all existing sessions to finish */
const EXIT_TIMEOUT: u64 = 60 * 5;
/* Time to wait before shutting down listening sockets.
This is the graceful period for the new service to get ready */
const CLOSE_TIMEOUT: u64 = 5;

// How the server winds down: honor the grace period, or exit immediately.
enum ShutdownType {
    Graceful,
    Quick,
}

/// The execution phase the server is currently in.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub enum ExecutionPhase {
    /// The server was created, but has not started yet.
    Setup,
    /// Services are being prepared.
    ///
    /// During graceful upgrades this phase acquires the listening FDs from the old process.
    Bootstrap,
    /// Bootstrap has finished, listening FDs have been transferred.
    BootstrapComplete,
    /// The server is running and is listening for shutdown signals.
    Running,
    /// A QUIT signal was received, indicating that a new process wants to take over.
    ///
    /// The server is trying to send the fds to the new process over a Unix socket.
    GracefulUpgradeTransferringFds,
    /// FDs have been sent to the new process.
    /// Waiting a fixed amount of time to allow the new process to take the sockets.
    GracefulUpgradeCloseTimeout,
    /// A TERM signal was received, indicating that the server should shut down gracefully.
    GracefulTerminate,
    /// The server is shutting down.
    ShutdownStarted,
    /// Waiting for the configured grace period to end before shutting down.
    ShutdownGracePeriod,
    /// Wait for runtimes to finish.
    ShutdownRuntimes,
    /// The server has stopped.
    Terminated,
}

/// The receiver for server's shutdown event. The value will turn to true once the server starts
/// to shutdown
pub type ShutdownWatch = watch::Receiver<bool>;

// Listening sockets shared with services; transferable to a new process
// during a zero-downtime upgrade.
#[cfg(unix)]
pub type ListenFds = Arc<Mutex<Fds>>;

/// The type of shutdown process that has been requested.
#[derive(Debug)]
pub enum ShutdownSignal {
    /// Send file descriptors to the new process before starting runtime shutdown with
    /// [ServerConf::graceful_shutdown_timeout_seconds] timeout.
    GracefulUpgrade,
    /// Wait for [ServerConf::grace_period_seconds] before starting runtime shutdown with
    /// [ServerConf::graceful_shutdown_timeout_seconds] timeout.
    GracefulTerminate,
    /// Shutdown with no timeout for runtime shutdown.
    FastShutdown,
}

/// Watcher of a shutdown signal, e.g., [UnixShutdownSignalWatch] for Unix-like
/// platforms.
#[async_trait]
pub trait ShutdownSignalWatch {
    /// Returns the desired shutdown type once one has been requested.
    async fn recv(&self) -> ShutdownSignal;
}
/// A Unix shutdown watcher that awaits for Unix signals.
///
/// - `SIGQUIT`: graceful upgrade
/// - `SIGTERM`: graceful terminate
/// - `SIGINT`: fast shutdown
#[cfg(unix)]
pub struct UnixShutdownSignalWatch;

#[cfg(unix)]
#[async_trait]
impl ShutdownSignalWatch for UnixShutdownSignalWatch {
    async fn recv(&self) -> ShutdownSignal {
        // NOTE(review): these `unwrap`s panic if signal handler registration
        // fails — presumably acceptable at server startup; confirm.
        let mut graceful_upgrade_signal = unix::signal(unix::SignalKind::quit()).unwrap();
        let mut graceful_terminate_signal = unix::signal(unix::SignalKind::terminate()).unwrap();
        let mut fast_shutdown_signal = unix::signal(unix::SignalKind::interrupt()).unwrap();
        // Whichever signal arrives first decides the shutdown type.
        tokio::select! {
            _ = graceful_upgrade_signal.recv() => {
                ShutdownSignal::GracefulUpgrade
            },
            _ = graceful_terminate_signal.recv() => {
                ShutdownSignal::GracefulTerminate
            },
            _ = fast_shutdown_signal.recv() => {
                ShutdownSignal::FastShutdown
            },
        }
    }
}
/// Arguments to configure running of the pingora server.
pub struct RunArgs {
    /// Signal for initiating shutdown
    #[cfg(unix)]
    pub shutdown_signal: Box<dyn ShutdownSignalWatch>,
}

impl Default for RunArgs {
    // On Unix the default watches SIGQUIT/SIGTERM/SIGINT.
    #[cfg(unix)]
    fn default() -> Self {
        Self {
            shutdown_signal: Box::new(UnixShutdownSignalWatch),
        }
    }
    // No signal watcher on Windows.
    #[cfg(windows)]
    fn default() -> Self {
        Self {}
    }
}
/// The server object
///
/// This object represents an entire pingora server process which may have multiple independent
/// services (see [crate::services]). The server object handles signals, reading configuration,
/// zero downtime upgrade and error reporting.
pub struct Server {
    // Services are drained (popped) into their own runtimes when run() starts.
    services: Vec<Box<dyn Service>>,
    // Listening sockets loaded in bootstrap(), possibly taken over from an
    // older process during a graceful upgrade.
    #[cfg(unix)]
    listen_fds: Option<ListenFds>,
    shutdown_watch: watch::Sender<bool>,
    // TODO: we many want to drop this copy to let sender call closed()
    shutdown_recv: ShutdownWatch,
    /// Tracks the execution phase of the server during upgrades and graceful shutdowns.
    ///
    /// Users can subscribe to the phase with [`Self::watch_execution_phase()`].
    execution_phase_watch: broadcast::Sender<ExecutionPhase>,
    /// The parsed server configuration
    pub configuration: Arc<ServerConf>,
    /// The parsed command line options
    pub options: Option<Opt>,
    #[cfg(feature = "sentry")]
    #[cfg_attr(docsrs, doc(cfg(feature = "sentry")))]
    /// The Sentry ClientOptions.
    ///
    /// Panics and other events sentry captures will be sent to this DSN **only in release mode**
    pub sentry: Option<ClientOptions>,
}
// TODO: delete the pid when exit
impl Server {
    /// Acquire a receiver for the server's execution phase.
    ///
    /// The receiver will produce values for each transition.
    // Backed by a broadcast channel of capacity 100; a receiver that falls
    // far behind may observe a lag error rather than every phase.
    pub fn watch_execution_phase(&self) -> broadcast::Receiver<ExecutionPhase> {
        self.execution_phase_watch.subscribe()
    }
    // Block until a shutdown signal arrives, broadcast the shutdown to all
    // services, and report which kind of shutdown the caller should perform.
    #[cfg(unix)]
    async fn main_loop(&self, run_args: RunArgs) -> ShutdownType {
        // waiting for exit signal
        self.execution_phase_watch
            .send(ExecutionPhase::Running)
            .ok();
        match run_args.shutdown_signal.recv().await {
            ShutdownSignal::FastShutdown => {
                info!("SIGINT received, exiting");
                ShutdownType::Quick
            }
            ShutdownSignal::GracefulTerminate => {
                // we receive a graceful terminate, all instances are instructed to stop
                info!("SIGTERM received, gracefully exiting");
                // graceful shutdown if there are listening sockets
                info!("Broadcasting graceful shutdown");
                match self.shutdown_watch.send(true) {
                    Ok(_) => {
                        info!("Graceful shutdown started!");
                    }
                    Err(e) => {
                        // send() only fails when all receivers are gone;
                        // nothing left to shut down gracefully.
                        error!("Graceful shutdown broadcast failed: {e}");
                    }
                }
                info!("Broadcast graceful shutdown complete");
                self.execution_phase_watch
                    .send(ExecutionPhase::GracefulTerminate)
                    .ok();
                ShutdownType::Graceful
            }
            ShutdownSignal::GracefulUpgrade => {
                // TODO: still need to select! on signals in case a fast shutdown is needed
                // aka: move below to another task and only kick it off here
                info!("SIGQUIT received, sending socks and gracefully exiting");
                self.execution_phase_watch
                    .send(ExecutionPhase::GracefulUpgradeTransferringFds)
                    .ok();
                if let Some(fds) = &self.listen_fds {
                    let fds = fds.lock().await;
                    info!("Trying to send socks");
                    // XXX: this is blocking IO
                    match fds.send_to_sock(self.configuration.as_ref().upgrade_sock.as_str()) {
                        Ok(_) => {
                            info!("listener sockets sent");
                        }
                        Err(e) => {
                            // Continue shutting down even if the handoff failed;
                            // the new process simply won't inherit the sockets.
                            error!("Unable to send listener sockets to new process: {e}");
                            // sentry log error on fd send failure
                            #[cfg(all(not(debug_assertions), feature = "sentry"))]
                            sentry::capture_error(&e);
                        }
                    }
                    self.execution_phase_watch
                        .send(ExecutionPhase::GracefulUpgradeCloseTimeout)
                        .ok();
                    // Give the new process CLOSE_TIMEOUT seconds to start
                    // accepting on the transferred sockets.
                    sleep(Duration::from_secs(CLOSE_TIMEOUT)).await;
                    info!("Broadcasting graceful shutdown");
                    // gracefully exiting
                    match self.shutdown_watch.send(true) {
                        Ok(_) => {
                            info!("Graceful shutdown started!");
                        }
                        Err(e) => {
                            error!("Graceful shutdown broadcast failed: {e}");
                            // NOTE(review): the original comment here said
                            // "switch to fast shutdown" but ShutdownType::Graceful
                            // is returned — confirm whether Quick was intended.
                            return ShutdownType::Graceful;
                        }
                    }
                    info!("Broadcast graceful shutdown complete");
                    ShutdownType::Graceful
                } else {
                    info!("No socks to send, shutting down.");
                    ShutdownType::Graceful
                }
            }
        }
    }
    // Spawn `service` onto its own dedicated runtime and return that runtime
    // so the caller keeps it alive for the lifetime of the service.
    fn run_service(
        mut service: Box<dyn Service>,
        #[cfg(unix)] fds: Option<ListenFds>,
        shutdown: ShutdownWatch,
        threads: usize,
        work_stealing: bool,
        listeners_per_fd: usize,
    ) -> Runtime
    // NOTE: we need to keep the runtime outside async since
    // otherwise the runtime will be dropped.
    {
        let service_runtime = Server::create_runtime(service.name(), threads, work_stealing);
        service_runtime.get_handle().spawn(async move {
            service
                .start_service(
                    #[cfg(unix)]
                    fds,
                    shutdown,
                    listeners_per_fd,
                )
                .await;
            info!("service exited.")
        });
        service_runtime
    }
    // Populate `self.listen_fds`. When `upgrade` is true the FDs are received
    // from the old process over the configured upgrade Unix socket; otherwise
    // an empty FD set is installed.
    #[cfg(unix)]
    fn load_fds(&mut self, upgrade: bool) -> Result<(), nix::Error> {
        let mut fds = Fds::new();
        if upgrade {
            debug!("Trying to receive socks");
            fds.get_from_sock(self.configuration.as_ref().upgrade_sock.as_str())?
        }
        self.listen_fds = Some(Arc::new(Mutex::new(fds)));
        Ok(())
    }
/// Create a new [`Server`], using the [`Opt`] and [`ServerConf`] values provided
///
/// This method is intended for pingora frontends that are NOT using the built-in
/// command line and configuration file parsing, and are instead using their own.
///
/// If a configuration file path is provided as part of `opt`, it will be ignored
/// and a warning will be logged.
pub fn new_with_opt_and_conf(raw_opt: impl Into<Option<Opt>>, mut conf: ServerConf) -> Server {
let opt = raw_opt.into();
if let Some(opts) = &opt {
if let Some(c) = opts.conf.as_ref() {
warn!("Ignoring command line argument using '{c}' as configuration, and using provided configuration instead.");
}
conf.merge_with_opt(opts);
}
let (tx, rx) = watch::channel(false);
Server {
services: vec![],
#[cfg(unix)]
listen_fds: None,
shutdown_watch: tx,
shutdown_recv: rx,
execution_phase_watch: broadcast::channel(100).0,
configuration: Arc::new(conf),
options: opt,
#[cfg(feature = "sentry")]
sentry: None,
}
}
/// Create a new [`Server`].
///
/// Only one [`Server`] needs to be created for a process. A [`Server`] can hold multiple
/// independent services.
///
/// Command line options can either be passed by parsing the command line arguments via
/// `Opt::parse_args()`, or be generated by other means.
pub fn new(opt: impl Into<Option<Opt>>) -> Result<Server> {
let opt = opt.into();
let (tx, rx) = watch::channel(false);
let conf = if let Some(opt) = opt.as_ref() {
opt.conf.as_ref().map_or_else(
|| {
// options, no conf, generated
ServerConf::new_with_opt_override(opt).ok_or_else(|| {
Error::explain(ErrorType::ReadError, "Conf generation failed")
})
},
|_| {
// options and conf loaded
ServerConf::load_yaml_with_opt_override(opt)
},
)
} else {
ServerConf::new()
.ok_or_else(|| Error::explain(ErrorType::ReadError, "Conf generation failed"))
}?;
Ok(Server {
services: vec![],
#[cfg(unix)]
listen_fds: None,
shutdown_watch: tx,
shutdown_recv: rx,
execution_phase_watch: broadcast::channel(100).0,
configuration: Arc::new(conf),
options: opt,
#[cfg(feature = "sentry")]
sentry: None,
})
}
    /// Add a service to this server.
    ///
    /// A service is anything that implements [`Service`].
    pub fn add_service(&mut self, service: impl Service + 'static) {
        self.services.push(Box::new(service));
    }

    /// Similar to [`Self::add_service()`], but take a list of services
    pub fn add_services(&mut self, services: Vec<Box<dyn Service>>) {
        self.services.extend(services);
    }
    /// Prepare the server to start
    ///
    /// When trying to zero downtime upgrade from an older version of the server which is already
    /// running, this function will try to get all its listening sockets in order to take them over.
    ///
    /// Note: exits the process on `--test` (code 0) or on FD load failure (code 1).
    pub fn bootstrap(&mut self) {
        info!("Bootstrap starting");
        debug!("{:#?}", self.options);
        self.execution_phase_watch
            .send(ExecutionPhase::Bootstrap)
            .ok();
        /* only init sentry in release builds */
        #[cfg(all(not(debug_assertions), feature = "sentry"))]
        let _guard = self.sentry.as_ref().map(|opts| sentry::init(opts.clone()));
        // `--test` mode: configuration parsed successfully, nothing else to do.
        if self.options.as_ref().is_some_and(|o| o.test) {
            info!("Server Test passed, exiting");
            std::process::exit(0);
        }
        // load fds
        // (receive them from the old process when `--upgrade` is set)
        #[cfg(unix)]
        match self.load_fds(self.options.as_ref().is_some_and(|o| o.upgrade)) {
            Ok(_) => {
                info!("Bootstrap done");
            }
            Err(e) => {
                // sentry log error on fd load failure
                #[cfg(all(not(debug_assertions), feature = "sentry"))]
                sentry::capture_error(&e);
                error!("Bootstrap failed on error: {:?}, exiting.", e);
                std::process::exit(1);
            }
        }
        self.execution_phase_watch
            .send(ExecutionPhase::BootstrapComplete)
            .ok();
    }
    /// Start the server using [Self::run] and default [RunArgs].
    ///
    /// This function will block forever until the server needs to quit. So this would be the last
    /// function to call for this object.
    ///
    /// Note: this function may fork the process for daemonization, so any additional threads created
    /// before this function will be lost to any service logic once this function is called.
    pub fn run_forever(self) -> ! {
        self.run(RunArgs::default());
        // run() only returns after shutdown completes; exit cleanly.
        std::process::exit(0)
    }
/// Run the server until execution finished.
///
/// This function will run until the server has been instructed to shut down
/// through a signal, and will then wait for all services to finish and
/// runtimes to exit.
///
/// Note: if daemonization is enabled in the config, this function will
/// never return.
/// Instead it will either start the daemon process and exit, or panic
/// if daemonization fails.
pub fn run(mut self, run_args: RunArgs) {
    info!("Server starting");
    let conf = self.configuration.as_ref();
    #[cfg(unix)]
    if conf.daemon {
        info!("Daemonizing the server");
        // Pause the fast timer across fork() so it restarts cleanly in the child.
        fast_timeout::pause_for_fork();
        daemonize(&self.configuration);
        fast_timeout::unpause();
    }
    #[cfg(windows)]
    if conf.daemon {
        panic!("Daemonizing under windows is not supported");
    }
    /* only init sentry in release builds */
    #[cfg(all(not(debug_assertions), feature = "sentry"))]
    let _guard = self.sentry.as_ref().map(|opts| sentry::init(opts.clone()));
    // Holds tuples of runtimes and their service name.
    let mut runtimes: Vec<(Runtime, String)> = Vec::new();
    // Spin up one dedicated runtime per registered service; services are drained here.
    while let Some(service) = self.services.pop() {
        let threads = service.threads().unwrap_or(conf.threads);
        let name = service.name().to_string();
        let runtime = Server::run_service(
            service,
            #[cfg(unix)]
            self.listen_fds.clone(),
            self.shutdown_recv.clone(),
            threads,
            conf.work_stealing,
            self.configuration.listener_tasks_per_fd,
        );
        runtimes.push((runtime, name));
    }
    // blocked on main loop so that it runs forever
    // Only work steal runtime can use block_on()
    let server_runtime = Server::create_runtime("Server", 1, true);
    #[cfg(unix)]
    let shutdown_type = server_runtime
        .get_handle()
        .block_on(self.main_loop(run_args));
    // Windows has no signal-driven main loop here; any exit is treated as graceful.
    #[cfg(windows)]
    let shutdown_type = ShutdownType::Graceful;
    self.execution_phase_watch
        .send(ExecutionPhase::ShutdownStarted)
        .ok();
    if matches!(shutdown_type, ShutdownType::Graceful) {
        self.execution_phase_watch
            .send(ExecutionPhase::ShutdownGracePeriod)
            .ok();
        // Let in-flight work drain before tearing the runtimes down.
        let exit_timeout = self
            .configuration
            .as_ref()
            .grace_period_seconds
            .unwrap_or(EXIT_TIMEOUT);
        info!("Graceful shutdown: grace period {}s starts", exit_timeout);
        thread::sleep(Duration::from_secs(exit_timeout));
        info!("Graceful shutdown: grace period ends");
    }
    // Give tokio runtimes time to exit
    let shutdown_timeout = match shutdown_type {
        ShutdownType::Quick => Duration::from_secs(0),
        ShutdownType::Graceful => Duration::from_secs(
            self.configuration
                .as_ref()
                .graceful_shutdown_timeout_seconds
                .unwrap_or(5),
        ),
    };
    self.execution_phase_watch
        .send(ExecutionPhase::ShutdownRuntimes)
        .ok();
    // Shut each runtime down on its own thread so the timeouts elapse in parallel.
    let shutdowns: Vec<_> = runtimes
        .into_iter()
        .map(|(rt, name)| {
            info!("Waiting for runtimes to exit!");
            let join = thread::spawn(move || {
                rt.shutdown_timeout(shutdown_timeout);
                thread::sleep(shutdown_timeout)
            });
            (join, name)
        })
        .collect();
    for (shutdown, name) in shutdowns {
        info!("Waiting for service runtime {} to exit", name);
        if let Err(e) = shutdown.join() {
            error!("Failed to shutdown service runtime {}: {:?}", name, e);
        }
        debug!("Service runtime {} has exited", name);
    }
    info!("All runtimes exited, exiting now");
    self.execution_phase_watch
        .send(ExecutionPhase::Terminated)
        .ok();
}
// Construct a runtime with the requested scheduling strategy for `threads` workers.
fn create_runtime(name: &str, threads: usize, work_steal: bool) -> Runtime {
    match work_steal {
        true => Runtime::new_steal(threads, name),
        false => Runtime::new_no_steal(threads, name),
    }
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/server/daemon.rs | pingora-core/src/server/daemon.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use daemonize::{Daemonize, Stdio};
use log::{debug, error};
use std::ffi::CString;
use std::fs::{self, OpenOptions};
use std::os::unix::prelude::OpenOptionsExt;
use std::path::Path;
use crate::server::configuration::ServerConf;
// Utilities to daemonize a pingora server, i.e. run the process in the background, possibly
// under a different running user and/or group.
// XXX: this operation should have been done when the old service is exiting.
// Now the new pid file just kick the old one out of the way
fn move_old_pid(path: &str) {
    if !Path::new(path).exists() {
        debug!("Old pid file does not exist");
        return;
    }
    // Park the previous pid file at "<path>.old" so the new one can be created.
    let new_path = format!("{path}.old");
    if let Err(e) = fs::rename(path, &new_path) {
        error!(
            "failed to rename pid file from {} to {}: {}",
            path, new_path, e
        );
    } else {
        debug!("Old pid file renamed");
    }
}
// Look up the primary group id for the given user name via getpwnam(3);
// returns None when the user is unknown.
// NOTE(review): getpwnam(3) typically returns a pointer into static storage and
// is not thread-safe — confirm call sites do not race; the gid is copied out
// before returning.
unsafe fn gid_for_username(name: &CString) -> Option<libc::gid_t> {
    let passwd = libc::getpwnam(name.as_ptr() as *const libc::c_char);
    if !passwd.is_null() {
        return Some((*passwd).pw_gid);
    }
    None
}
/// Start a server instance as a daemon.
///
/// Forks into the background, writes the pid file, optionally redirects stderr
/// to the configured error log, and drops privileges to the configured
/// user/group. Panics if daemonization fails.
#[cfg(unix)]
pub fn daemonize(conf: &ServerConf) {
    // TODO: customize working dir
    let daemonize = Daemonize::new()
        .umask(0o007) // allow same group to access files but not everyone else
        .pid_file(&conf.pid_file);
    // Redirect stderr to the configured error log; otherwise keep the parent's
    // stdout/stderr so logging stays visible.
    let daemonize = if let Some(error_log) = conf.error_log.as_ref() {
        let err = OpenOptions::new()
            .append(true)
            .create(true)
            // open read() in case there are no readers
            // available otherwise we will panic with
            // an ENXIO since O_NONBLOCK is set
            .read(true)
            .custom_flags(libc::O_NONBLOCK)
            .open(error_log)
            .unwrap();
        daemonize.stderr(err)
    } else {
        daemonize.stdout(Stdio::keep()).stderr(Stdio::keep())
    };
    // If a user is configured, drop privileges to it and apply that user's
    // supplemental groups inside the child before switching uid.
    let daemonize = match conf.user.as_ref() {
        Some(user) => {
            let user_cstr = CString::new(user.as_str()).unwrap();
            // libc::gid_t differs across platforms, hence the per-OS casts below.
            #[cfg(target_os = "macos")]
            let group_id = unsafe { gid_for_username(&user_cstr).map(|gid| gid as i32) };
            #[cfg(target_os = "freebsd")]
            let group_id = unsafe { gid_for_username(&user_cstr).map(|gid| gid as u32) };
            #[cfg(target_os = "linux")]
            let group_id = unsafe { gid_for_username(&user_cstr) };
            daemonize
                .privileged_action(move || {
                    if let Some(gid) = group_id {
                        // Set the supplemental group privileges for the child process.
                        unsafe {
                            libc::initgroups(user_cstr.as_ptr() as *const libc::c_char, gid);
                        }
                    }
                })
                .user(user.as_str())
                .chown_pid_file(true)
        }
        None => daemonize,
    };
    let daemonize = match conf.group.as_ref() {
        Some(group) => daemonize.group(group.as_str()),
        None => daemonize,
    };
    // Shove any stale pid file out of the way before writing the new one.
    move_old_pid(&conf.pid_file);
    daemonize.start().unwrap(); // hard crash when fail
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/server/configuration/mod.rs | pingora-core/src/server/configuration/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Server configurations
//!
//! Server configurations define startup settings such as:
//! * User and group to run as after daemonization
//! * Number of threads per service
//! * Error log file path
use clap::Parser;
use log::{debug, trace};
use pingora_error::{Error, ErrorType::*, OrErr, Result};
use serde::{Deserialize, Serialize};
use std::ffi::OsString;
use std::fs;
// default maximum upstream retries for retry-able proxy errors
const DEFAULT_MAX_RETRIES: usize = 16;
/// The configuration file
///
/// Pingora configuration files are by default YAML files, but any key value format can potentially
/// be used.
///
/// # Extension
/// New keys can be added to the configuration files which this configuration object will ignore.
/// Then, users can parse these key-values to pass to their code to use.
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(default)]
pub struct ServerConf {
    /// Version
    ///
    /// Note: not currently checked by [`ServerConf::validate`] (validation is a TODO).
    pub version: usize,
    /// Whether to run this process in the background.
    pub daemon: bool,
    /// When configured and `daemon` setting is `true`, error log will be written to the given
    /// file. Otherwise StdErr will be used.
    pub error_log: Option<String>,
    /// The pid (process ID) file of this server to be created when running in background
    pub pid_file: String,
    /// the path to the upgrade socket
    ///
    /// In order to perform zero downtime restart, both the new and old process need to agree on the
    /// path to this sock in order to coordinate the upgrade.
    pub upgrade_sock: String,
    /// If configured, after daemonization, this process will switch to the given user before
    /// starting to serve traffic.
    pub user: Option<String>,
    /// Similar to `user`, the group this process should switch to.
    pub group: Option<String>,
    /// How many threads **each** service should get. The threads are not shared across services.
    pub threads: usize,
    /// Number of listener tasks to use per fd. This allows for parallel accepts.
    pub listener_tasks_per_fd: usize,
    /// Allow work stealing between threads of the same service. Default `true`.
    pub work_stealing: bool,
    /// The path to CA file the SSL library should use. If empty, the default trust store location
    /// defined by the SSL library will be used.
    pub ca_file: Option<String>,
    /// The maximum number of unique s2n configs to cache. Creating a new s2n config is an
    /// expensive operation, so we cache and re-use config objects with identical configurations.
    /// A value of 0 disables the cache.
    ///
    /// WARNING: Disabling the s2n config cache can result in poor performance
    #[cfg(feature = "s2n")]
    pub s2n_config_cache_size: Option<usize>,
    /// Grace period in seconds before starting the final step of the graceful shutdown after signaling shutdown.
    pub grace_period_seconds: Option<u64>,
    /// Timeout in seconds of the final step for the graceful shutdown.
    pub graceful_shutdown_timeout_seconds: Option<u64>,
    // These options don't belong here as they are specific to certain services
    /// IPv4 addresses for a client connector to bind to. See
    /// [`ConnectorOptions`](crate::connectors::ConnectorOptions).
    /// Note: this is an _unstable_ field that may be renamed or removed in the future.
    pub client_bind_to_ipv4: Vec<String>,
    /// IPv6 addresses for a client connector to bind to. See
    /// [`ConnectorOptions`](crate::connectors::ConnectorOptions).
    /// Note: this is an _unstable_ field that may be renamed or removed in the future.
    pub client_bind_to_ipv6: Vec<String>,
    /// Keepalive pool size for client connections to upstream. See
    /// [`ConnectorOptions`](crate::connectors::ConnectorOptions).
    /// Note: this is an _unstable_ field that may be renamed or removed in the future.
    pub upstream_keepalive_pool_size: usize,
    /// Number of dedicated thread pools to use for upstream connection establishment.
    /// See [`ConnectorOptions`](crate::connectors::ConnectorOptions).
    /// Note: this is an _unstable_ field that may be renamed or removed in the future.
    pub upstream_connect_offload_threadpools: Option<usize>,
    /// Number of threads per dedicated upstream connection establishment pool.
    /// See [`ConnectorOptions`](crate::connectors::ConnectorOptions).
    /// Note: this is an _unstable_ field that may be renamed or removed in the future.
    pub upstream_connect_offload_thread_per_pool: Option<usize>,
    /// When enabled allows TLS keys to be written to a file specified by the SSLKEYLOG
    /// env variable. This can be used by tools like Wireshark to decrypt upstream traffic
    /// for debugging purposes.
    /// Note: this is an _unstable_ field that may be renamed or removed in the future.
    pub upstream_debug_ssl_keylog: bool,
    /// The maximum number of retries that will be attempted when an error is
    /// retry-able (`e.retry() == true`) when proxying to upstream.
    ///
    /// This setting is a fail-safe and defaults to 16.
    pub max_retries: usize,
}
impl Default for ServerConf {
    /// Baseline configuration used when a field is absent from the YAML input
    /// (this struct is deserialized with `#[serde(default)]`).
    fn default() -> Self {
        // Initializers listed in the struct's declaration order.
        ServerConf {
            version: 0,
            daemon: false,
            error_log: None,
            pid_file: "/tmp/pingora.pid".to_string(),
            upgrade_sock: "/tmp/pingora_upgrade.sock".to_string(),
            user: None,
            group: None,
            threads: 1,
            listener_tasks_per_fd: 1,
            work_stealing: true,
            ca_file: None,
            #[cfg(feature = "s2n")]
            s2n_config_cache_size: None,
            grace_period_seconds: None,
            graceful_shutdown_timeout_seconds: None,
            client_bind_to_ipv4: vec![],
            client_bind_to_ipv6: vec![],
            upstream_keepalive_pool_size: 128,
            upstream_connect_offload_threadpools: None,
            upstream_connect_offload_thread_per_pool: None,
            upstream_debug_ssl_keylog: false,
            max_retries: DEFAULT_MAX_RETRIES,
        }
    }
}
/// Command-line options
///
/// Call `Opt::parse_args()` to build this object from the process's command line arguments.
#[derive(Parser, Debug, Default)]
#[clap(
    name = "basic",
    about = "This is the base set of command line arguments for a pingora-based service",
    long_about = None
)]
pub struct Opt {
    /// Whether this server should try to upgrade from a running old server
    // BUGFIX: this flag's `help` previously showed the command-level description
    // ("This is the base set of command line arguments for a pingora-based
    // service") instead of describing `--upgrade`; that text now lives in the
    // struct-level `about` above and the flag gets its real description.
    #[clap(
        short,
        long,
        help = "Whether this server should try to upgrade from a running old server",
        long_help = None
    )]
    pub upgrade: bool,
    /// Whether this server should run in the background
    #[clap(short, long)]
    pub daemon: bool,
    /// Not actually used. This flag is there so that the server is not upset seeing this flag
    /// passed from `cargo test` sometimes
    #[clap(long, hide = true)]
    pub nocapture: bool,
    /// Test the configuration and exit
    ///
    /// When this flag is set, calling `server.bootstrap()` will exit the process without errors
    ///
    /// This flag is useful for upgrading service where the user wants to make sure the new
    /// service can start before shutting down the old server process.
    #[clap(
        short,
        long,
        help = "This flag is useful for upgrading service where the user wants \
                to make sure the new service can start before shutting down \
                the old server process.",
        long_help = None
    )]
    pub test: bool,
    /// The path to the configuration file.
    ///
    /// See [`ServerConf`] for more details of the configuration file.
    #[clap(short, long, help = "The path to the configuration file.", long_help = None)]
    pub conf: Option<String>,
}
impl ServerConf {
// Does not has to be async until we want runtime reload
pub fn load_from_yaml<P>(path: P) -> Result<Self>
where
P: AsRef<std::path::Path> + std::fmt::Display,
{
let conf_str = fs::read_to_string(&path).or_err_with(ReadError, || {
format!("Unable to read conf file from {path}")
})?;
debug!("Conf file read from {path}");
Self::from_yaml(&conf_str)
}
pub fn load_yaml_with_opt_override(opt: &Opt) -> Result<Self> {
if let Some(path) = &opt.conf {
let mut conf = Self::load_from_yaml(path)?;
conf.merge_with_opt(opt);
Ok(conf)
} else {
Error::e_explain(ReadError, "No path specified")
}
}
pub fn new() -> Option<Self> {
Self::from_yaml("---\nversion: 1").ok()
}
pub fn new_with_opt_override(opt: &Opt) -> Option<Self> {
let conf = Self::new();
match conf {
Some(mut c) => {
c.merge_with_opt(opt);
Some(c)
}
None => None,
}
}
pub fn from_yaml(conf_str: &str) -> Result<Self> {
trace!("Read conf file: {conf_str}");
let conf: ServerConf = serde_yaml::from_str(conf_str).or_err_with(ReadError, || {
format!("Unable to parse yaml conf {conf_str}")
})?;
trace!("Loaded conf: {conf:?}");
conf.validate()
}
pub fn to_yaml(&self) -> String {
serde_yaml::to_string(self).unwrap()
}
pub fn validate(self) -> Result<Self> {
// TODO: do the validation
Ok(self)
}
pub fn merge_with_opt(&mut self, opt: &Opt) {
if opt.daemon {
self.daemon = true;
}
}
}
/// Create an instance of Opt by parsing the current command-line args.
/// This is equivalent to running `Opt::parse` but does not require the
/// caller to have included the `clap::Parser`
impl Opt {
    /// Build an [`Opt`] from `std::env::args`. On invalid flags this follows
    /// clap's default behavior (prints an error and exits the process).
    pub fn parse_args() -> Self {
        Opt::parse()
    }
    /// Build an [`Opt`] from an explicit argument iterator; the first item is
    /// treated as the program name, per clap convention.
    pub fn parse_from_args<I, T>(args: I) -> Self
    where
        I: IntoIterator<Item = T>,
        T: Into<OsString> + Clone,
    {
        Opt::parse_from(args)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Best-effort logger init; `try_init` tolerates repeated calls across tests.
    fn init_log() {
        let _ = env_logger::builder().is_test(true).try_init();
    }
    // Not an assertion test: prints a fully-populated config as YAML so the
    // output can be copy-pasted when hand-writing config files.
    #[test]
    fn not_a_test_i_cannot_write_yaml_by_hand() {
        init_log();
        let conf = ServerConf {
            version: 1,
            client_bind_to_ipv4: vec!["1.2.3.4".to_string(), "5.6.7.8".to_string()],
            client_bind_to_ipv6: vec![],
            ca_file: None,
            #[cfg(feature = "s2n")]
            s2n_config_cache_size: None,
            daemon: false,
            error_log: None,
            upstream_debug_ssl_keylog: false,
            pid_file: "".to_string(),
            upgrade_sock: "".to_string(),
            user: None,
            group: None,
            threads: 1,
            listener_tasks_per_fd: 1,
            work_stealing: true,
            upstream_keepalive_pool_size: 4,
            upstream_connect_offload_threadpools: None,
            upstream_connect_offload_thread_per_pool: None,
            grace_period_seconds: None,
            graceful_shutdown_timeout_seconds: None,
            max_retries: 1,
        };
        // cargo test -- --nocapture not_a_test_i_cannot_write_yaml_by_hand
        println!("{}", conf.to_yaml());
    }
    // Explicitly-set fields survive a YAML round trip.
    #[test]
    fn test_load_file() {
        init_log();
        let conf_str = r#"
---
version: 1
client_bind_to_ipv4:
    - 1.2.3.4
    - 5.6.7.8
client_bind_to_ipv6: []
    "#
        .to_string();
        let conf = ServerConf::from_yaml(&conf_str).unwrap();
        assert_eq!(2, conf.client_bind_to_ipv4.len());
        assert_eq!(0, conf.client_bind_to_ipv6.len());
        assert_eq!(1, conf.version);
    }
    // Omitted fields fall back to `ServerConf::default()` values.
    #[test]
    fn test_default() {
        init_log();
        let conf_str = r#"
---
version: 1
    "#
        .to_string();
        let conf = ServerConf::from_yaml(&conf_str).unwrap();
        assert_eq!(0, conf.client_bind_to_ipv4.len());
        assert_eq!(0, conf.client_bind_to_ipv6.len());
        assert_eq!(1, conf.version);
        assert_eq!(DEFAULT_MAX_RETRIES, conf.max_retries);
        assert_eq!("/tmp/pingora.pid", conf.pid_file);
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/server/transfer_fd/mod.rs | pingora-core/src/server/transfer_fd/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(target_os = "linux")]
use log::{debug, error, warn};
use nix::errno::Errno;
#[cfg(target_os = "linux")]
use nix::sys::socket::{self, AddressFamily, RecvMsg, SockFlag, SockType, UnixAddr};
#[cfg(target_os = "linux")]
use nix::sys::stat;
use nix::{Error, NixPath};
use std::collections::HashMap;
use std::io::Write;
#[cfg(target_os = "linux")]
use std::io::{IoSlice, IoSliceMut};
use std::os::unix::io::RawFd;
#[cfg(target_os = "linux")]
use std::{thread, time};
// Utilities to transfer file descriptors between sockets, e.g. during graceful upgrades.
/// Container for open file descriptors and their associated bind addresses.
pub struct Fds {
    map: HashMap<String, RawFd>,
}

impl Fds {
    /// Create an empty container.
    pub fn new() -> Self {
        Fds {
            map: HashMap::new(),
        }
    }

    /// Register `fd` under its bind-address key, replacing any previous entry.
    pub fn add(&mut self, bind: String, fd: RawFd) {
        self.map.insert(bind, fd);
    }

    /// Look up the fd stored for the given bind address.
    pub fn get(&self, bind: &str) -> Option<&RawFd> {
        self.map.get(bind)
    }

    /// Split the table into parallel vectors of bind keys and fds;
    /// entries at the same index belong together.
    pub fn serialize(&self) -> (Vec<String>, Vec<RawFd>) {
        let mut binds = Vec::with_capacity(self.map.len());
        let mut fds = Vec::with_capacity(self.map.len());
        for (bind, fd) in &self.map {
            binds.push(bind.clone());
            fds.push(*fd);
        }
        (binds, fds)
    }

    /// Rebuild the table from parallel vectors produced by [`Fds::serialize`].
    pub fn deserialize(&mut self, binds: Vec<String>, fds: Vec<RawFd>) {
        assert_eq!(binds.len(), fds.len());
        self.map.extend(binds.into_iter().zip(fds));
    }

    /// Serialize the table and send it — fds included — over the unix socket at `path`.
    pub fn send_to_sock<P>(&self, path: &P) -> Result<usize, Error>
    where
        P: ?Sized + NixPath + std::fmt::Display,
    {
        let (binds, fds) = self.serialize();
        let mut payload: [u8; 2048] = [0; 2048];
        let len = serialize_vec_string(&binds, &mut payload);
        send_fds_to(fds, &payload[..len], path)
    }

    /// Receive a table — fds included — from the unix socket at `path` and merge it in.
    pub fn get_from_sock<P>(&mut self, path: &P) -> Result<(), Error>
    where
        P: ?Sized + NixPath + std::fmt::Display,
    {
        let mut payload: [u8; 2048] = [0; 2048];
        let (fds, len) = get_fds_from(path, &mut payload)?;
        let binds = deserialize_vec_string(&payload[..len])?;
        self.deserialize(binds, fds);
        Ok(())
    }
}
// Serialize the list of bind keys into `buf` as one space-separated string and
// return the number of bytes written.
// There are many ways to do this. Serde is probably the way to go
// But let's start with something simple: space separated strings
fn serialize_vec_string(vec_string: &[String], mut buf: &mut [u8]) -> usize {
    let joined = vec_string.join(" ");
    // BUGFIX (was a TODO): `Write::write` on a byte slice silently truncates
    // when the payload exceeds the buffer, which would corrupt the bind-key
    // list on the receiving side. Fail fast instead.
    assert!(
        joined.len() <= buf.len(),
        "fd transfer payload ({} bytes) exceeds buffer ({} bytes)",
        joined.len(),
        buf.len()
    );
    // After the length check above, this is guaranteed to write everything.
    buf.write(joined.as_bytes()).unwrap()
}
fn deserialize_vec_string(buf: &[u8]) -> Result<Vec<String>, Error> {
let joined = std::str::from_utf8(buf).map_err(|_| Error::EINVAL)?;
Ok(joined.split_ascii_whitespace().map(String::from).collect())
}
/// Receive fds and their serialized bind-key payload over the unix socket at
/// `path`. Creates and listens on the socket itself, waits for the sending
/// (old) process to connect, then reads one message whose ancillary
/// SCM_RIGHTS data carries the fds. Returns the fds and the payload length.
#[cfg(target_os = "linux")]
pub fn get_fds_from<P>(path: &P, payload: &mut [u8]) -> Result<(Vec<RawFd>, usize), Error>
where
    P: ?Sized + NixPath + std::fmt::Display,
{
    // Upper bound on fds accepted in a single control message here.
    const MAX_FDS: usize = 32;
    let listen_fd = socket::socket(
        AddressFamily::Unix,
        SockType::Stream,
        SockFlag::SOCK_NONBLOCK,
        None,
    )
    .unwrap();
    let unix_addr = UnixAddr::new(path).unwrap();
    // clean up old sock
    match nix::unistd::unlink(path) {
        Ok(()) => {
            debug!("unlink {} done", path);
        }
        Err(e) => {
            // Normal if file does not exist
            debug!("unlink {} failed: {}", path, e);
            // TODO: warn if exist but not able to unlink
        }
    };
    socket::bind(listen_fd, &unix_addr).unwrap();
    /* sock is created before we change user, need to give permission to all */
    stat::fchmodat(
        None,
        path,
        stat::Mode::all(),
        stat::FchmodatFlags::FollowSymlink,
    )
    .unwrap();
    socket::listen(listen_fd, 8).unwrap();
    // The listener is nonblocking, so accept retries until the sender connects.
    let fd = match accept_with_retry(listen_fd) {
        Ok(fd) => fd,
        Err(e) => {
            error!("Giving up reading socket from: {path}, error: {e:?}");
            //cleanup
            if nix::unistd::close(listen_fd).is_ok() {
                nix::unistd::unlink(path).unwrap();
            }
            return Err(e);
        }
    };
    // Read the bind-key payload into `payload` and the fds from the
    // SCM_RIGHTS control messages.
    let mut io_vec = [IoSliceMut::new(payload); 1];
    let mut cmsg_buf = nix::cmsg_space!([RawFd; MAX_FDS]);
    let msg: RecvMsg<UnixAddr> = socket::recvmsg(
        fd,
        &mut io_vec,
        Some(&mut cmsg_buf),
        socket::MsgFlags::empty(),
    )
    .unwrap();
    let mut fds: Vec<RawFd> = Vec::new();
    for cmsg in msg.cmsgs() {
        if let socket::ControlMessageOwned::ScmRights(mut vec_fds) = cmsg {
            fds.append(&mut vec_fds)
        } else {
            warn!("Unexpected control messages: {cmsg:?}")
        }
    }
    //cleanup
    if nix::unistd::close(listen_fd).is_ok() {
        nix::unistd::unlink(path).unwrap();
    }
    Ok((fds, msg.bytes))
}
// Fallback for non-Linux platforms: zero-downtime upgrade (socket takeover)
// is not supported, so this always logs and fails with ECONNREFUSED.
#[cfg(not(target_os = "linux"))]
pub fn get_fds_from<P>(_path: &P, _payload: &mut [u8]) -> Result<(Vec<RawFd>, usize), Error>
where
    P: ?Sized + NixPath + std::fmt::Display,
{
    log::error!("Upgrade is not currently supported outside of Linux platforms");
    Err(Errno::ECONNREFUSED)
}
#[cfg(target_os = "linux")]
const MAX_RETRY: usize = 5;
#[cfg(target_os = "linux")]
const RETRY_INTERVAL: time::Duration = time::Duration::from_secs(1);
// Accept a connection on the nonblocking `listen_fd`, retrying on EAGAIN (no
// peer yet) up to MAX_RETRY times with RETRY_INTERVAL sleeps in between.
// Any other accept error — or exhausting the retries — aborts with that error.
#[cfg(target_os = "linux")]
fn accept_with_retry(listen_fd: i32) -> Result<i32, Error> {
    let mut retried = 0;
    loop {
        match socket::accept(listen_fd) {
            Ok(fd) => return Ok(fd),
            Err(e) => {
                // Retry budget exhausted: give up regardless of the error kind.
                if retried > MAX_RETRY {
                    return Err(e);
                }
                match e {
                    Errno::EAGAIN => {
                        error!(
                            "No incoming socket transfer, sleep {RETRY_INTERVAL:?} and try again"
                        );
                        retried += 1;
                        thread::sleep(RETRY_INTERVAL);
                    }
                    _ => {
                        error!("Error accepting socket transfer: {e}");
                        return Err(e);
                    }
                }
            }
        }
    }
}
/// Send the given fds (as SCM_RIGHTS ancillary data) together with `payload`
/// to the unix socket at `path`, retrying while the receiving (new) process
/// is not ready yet. Returns the number of payload bytes sent.
#[cfg(target_os = "linux")]
pub fn send_fds_to<P>(fds: Vec<RawFd>, payload: &[u8], path: &P) -> Result<usize, Error>
where
    P: ?Sized + NixPath + std::fmt::Display,
{
    const MAX_NONBLOCKING_POLLS: usize = 20;
    const NONBLOCKING_POLL_INTERVAL: time::Duration = time::Duration::from_millis(500);
    let send_fd = socket::socket(
        AddressFamily::Unix,
        SockType::Stream,
        SockFlag::SOCK_NONBLOCK,
        None,
    )?;
    let unix_addr = UnixAddr::new(path)?;
    let mut retried = 0;
    let mut nonblocking_polls = 0;
    // Phase 1: connect, retrying while the new process hasn't created /
    // started listening on / opened permissions for the upgrade socket yet.
    let conn_result: Result<usize, Error> = loop {
        match socket::connect(send_fd, &unix_addr) {
            Ok(_) => break Ok(0),
            Err(e) => match e {
                /* If the new process hasn't created the upgrade sock we'll get an ENOENT.
                ECONNREFUSED may happen if the sock wasn't cleaned up
                and the old process tries sending before the new one is listening.
                EACCES may happen if connect() happen before the correct permission is set */
                Errno::ENOENT | Errno::ECONNREFUSED | Errno::EACCES => {
                    /*the server is not ready yet*/
                    retried += 1;
                    if retried > MAX_RETRY {
                        error!(
                            "Max retry: {} reached. Giving up sending socket to: {}, error: {:?}",
                            MAX_RETRY, path, e
                        );
                        break Err(e);
                    }
                    warn!("server not ready, will try again in {RETRY_INTERVAL:?}");
                    thread::sleep(RETRY_INTERVAL);
                }
                /* handle nonblocking IO */
                Errno::EINPROGRESS => {
                    nonblocking_polls += 1;
                    if nonblocking_polls >= MAX_NONBLOCKING_POLLS {
                        error!("Connect() not ready after retries when sending socket to: {path}",);
                        break Err(e);
                    }
                    warn!("Connect() not ready, will try again in {NONBLOCKING_POLL_INTERVAL:?}",);
                    thread::sleep(NONBLOCKING_POLL_INTERVAL);
                }
                _ => {
                    error!("Error sending socket to: {path}, error: {e:?}");
                    break Err(e);
                }
            },
        }
    };
    // Phase 2: once connected, send payload + fds in one sendmsg(), retrying
    // on EAGAIN since the socket is nonblocking.
    let result = match conn_result {
        Ok(_) => {
            let io_vec = [IoSlice::new(payload); 1];
            let scm = socket::ControlMessage::ScmRights(fds.as_slice());
            let cmsg = [scm; 1];
            loop {
                match socket::sendmsg(
                    send_fd,
                    &io_vec,
                    &cmsg,
                    socket::MsgFlags::empty(),
                    None::<&UnixAddr>,
                ) {
                    Ok(result) => break Ok(result),
                    Err(e) => match e {
                        /* handle nonblocking IO */
                        Errno::EAGAIN => {
                            nonblocking_polls += 1;
                            if nonblocking_polls >= MAX_NONBLOCKING_POLLS {
                                error!(
                                    "Sendmsg() not ready after retries when sending socket to: {}",
                                    path
                                );
                                break Err(e);
                            }
                            warn!(
                                "Sendmsg() not ready, will try again in {:?}",
                                NONBLOCKING_POLL_INTERVAL
                            );
                            thread::sleep(NONBLOCKING_POLL_INTERVAL);
                        }
                        _ => break Err(e),
                    },
                }
            }
        }
        Err(_) => conn_result,
    };
    // Always close our end, whether sending succeeded or not.
    nix::unistd::close(send_fd).unwrap();
    result
}
// Fallback for non-Linux platforms: fd transfer is not supported.
// NOTE(review): this reports success (Ok(0)) without sending anything, while
// the receiving side errors with ECONNREFUSED — confirm the asymmetry is
// intended (sender exits quietly, receiver surfaces the failure).
#[cfg(not(target_os = "linux"))]
pub fn send_fds_to<P>(_fds: Vec<RawFd>, _payload: &[u8], _path: &P) -> Result<usize, Error>
where
    P: ?Sized + NixPath + std::fmt::Display,
{
    Ok(0)
}
#[cfg(test)]
#[cfg(target_os = "linux")]
mod tests {
    use super::*;
    use log::{debug, error};
    // Best-effort logger init; `try_init` tolerates repeated calls across tests.
    fn init_log() {
        let _ = env_logger::builder().is_test(true).try_init();
    }
    #[test]
    fn test_add_get() {
        init_log();
        let mut fds = Fds::new();
        let key = "1.1.1.1:80".to_string();
        fds.add(key.clone(), 128);
        assert_eq!(128, *fds.get(&key).unwrap());
    }
    // serialize() then deserialize() must round-trip the table.
    #[test]
    fn test_table_serde() {
        init_log();
        let mut fds = Fds::new();
        let key1 = "1.1.1.1:80".to_string();
        fds.add(key1.clone(), 128);
        let key2 = "1.1.1.1:443".to_string();
        fds.add(key2.clone(), 129);
        let (k, v) = fds.serialize();
        let mut fds2 = Fds::new();
        fds2.deserialize(k, v);
        assert_eq!(128, *fds2.get(&key1).unwrap());
        assert_eq!(129, *fds2.get(&key2).unwrap());
    }
    // The space-separated string encoding must round-trip the key list.
    #[test]
    fn test_vec_string_serde() {
        init_log();
        let vec_str: Vec<String> = vec!["aaaa".to_string(), "bbb".to_string()];
        let mut ser_buf: [u8; 1024] = [0; 1024];
        let size = serialize_vec_string(&vec_str, &mut ser_buf);
        let de_vec_string = deserialize_vec_string(&ser_buf[..size]).unwrap();
        assert_eq!(de_vec_string.len(), 2);
        assert_eq!(de_vec_string[0], "aaaa");
        assert_eq!(de_vec_string[1], "bbb");
    }
    // End-to-end: a real fd plus payload travels over a unix socket.
    #[test]
    fn test_send_receive_fds() {
        init_log();
        let dumb_fd = socket::socket(
            AddressFamily::Unix,
            SockType::Stream,
            SockFlag::empty(),
            None,
        )
        .unwrap();
        // receiver need to start in another thread since it is blocking
        let child = thread::spawn(move || {
            let mut buf: [u8; 32] = [0; 32];
            let (fds, bytes) = get_fds_from("/tmp/pingora_fds_receive.sock", &mut buf).unwrap();
            debug!("{:?}", fds);
            assert_eq!(1, fds.len());
            // Only 32 of the 128 sent payload bytes fit the receive buffer.
            assert_eq!(32, bytes);
            assert_eq!(1, buf[0]);
            assert_eq!(1, buf[31]);
        });
        let fds = vec![dumb_fd];
        let buf: [u8; 128] = [1; 128];
        match send_fds_to(fds, &buf, "/tmp/pingora_fds_receive.sock") {
            Ok(sent) => {
                assert!(sent > 0);
            }
            Err(e) => {
                error!("{:?}", e);
                panic!()
            }
        }
        child.join().unwrap();
    }
    // End-to-end: a whole Fds table (keys + fds) travels over a unix socket.
    #[test]
    fn test_serde_via_socket() {
        init_log();
        let mut fds = Fds::new();
        let key1 = "1.1.1.1:80".to_string();
        let dumb_fd1 = socket::socket(
            AddressFamily::Unix,
            SockType::Stream,
            SockFlag::empty(),
            None,
        )
        .unwrap();
        fds.add(key1.clone(), dumb_fd1);
        let key2 = "1.1.1.1:443".to_string();
        let dumb_fd2 = socket::socket(
            AddressFamily::Unix,
            SockType::Stream,
            SockFlag::empty(),
            None,
        )
        .unwrap();
        fds.add(key2.clone(), dumb_fd2);
        let child = thread::spawn(move || {
            let mut fds2 = Fds::new();
            fds2.get_from_sock("/tmp/pingora_fds_receive2.sock")
                .unwrap();
            // The received fd numbers differ from the sent ones; just check validity.
            assert!(*fds2.get(&key1).unwrap() > 0);
            assert!(*fds2.get(&key2).unwrap() > 0);
        });
        fds.send_to_sock("/tmp/pingora_fds_receive2.sock").unwrap();
        child.join().unwrap();
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/listeners/connection_filter.rs | pingora-core/src/listeners/connection_filter.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Connection filtering trait for early connection filtering
//!
//! This module provides the [`ConnectionFilter`] trait which allows filtering
//! incoming connections at the TCP level, before the TLS handshake occurs.
//!
//! # Feature Flag
//!
//! This functionality requires the `connection_filter` feature to be enabled:
//! ```toml
//! [dependencies]
//! pingora-core = { version = "0.5", features = ["connection_filter"] }
//! ```
//!
//! When the feature is disabled, a no-op implementation is provided for API compatibility.
use async_trait::async_trait;
use std::fmt::Debug;
use std::net::SocketAddr;
/// A trait for filtering incoming connections at the TCP level.
///
/// Implementations of this trait can inspect the peer address of incoming
/// connections and decide whether to accept or reject them before any
/// further processing (including TLS handshake) occurs.
///
/// # Example
///
/// ```rust,no_run
/// use async_trait::async_trait;
/// use pingora_core::listeners::ConnectionFilter;
/// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
///
/// #[derive(Debug)]
/// struct BlocklistFilter {
///     blocked_ips: Vec<IpAddr>,
/// }
///
/// #[async_trait]
/// impl ConnectionFilter for BlocklistFilter {
///     async fn should_accept(&self, addr: Option<&SocketAddr>) -> bool {
///         addr.map_or(true, |a| !self.blocked_ips.contains(&a.ip()))
///     }
/// }
/// ```
///
/// # Performance Considerations
///
/// This filter is called for every incoming connection, so implementations
/// should be efficient. Consider caching or pre-computing data structures
/// for IP filtering rather than doing expensive operations per connection.
#[async_trait]
pub trait ConnectionFilter: Debug + Send + Sync {
    /// Determines whether an incoming connection should be accepted.
    ///
    /// This method is called after a TCP connection is accepted but before
    /// any further processing (including TLS handshake).
    ///
    /// # Arguments
    ///
    /// * `_addr` - The socket address of the incoming connection, if known
    ///
    /// # Returns
    ///
    /// * `true` - Accept the connection and continue processing
    /// * `false` - Drop the connection immediately
    ///
    /// # Example
    ///
    /// ```ignore
    /// async fn should_accept(&self, addr: Option<&SocketAddr>) -> bool {
    ///     // Accept only connections from private IPv4 ranges
    ///     match addr.map(|a| a.ip()) {
    ///         Some(IpAddr::V4(ip)) => ip.is_private(),
    ///         _ => true,
    ///     }
    /// }
    /// ```
    ///
    /// The default implementation accepts every connection.
    async fn should_accept(&self, _addr: Option<&SocketAddr>) -> bool {
        true
    }
}
/// Default implementation that accepts all connections.
///
/// This filter accepts all incoming connections without any filtering.
/// It's used as the default when no custom filter is specified.
#[derive(Debug, Clone)]
pub struct AcceptAllFilter;
#[async_trait]
impl ConnectionFilter for AcceptAllFilter {
    // Relies on the trait's default `should_accept`, which returns `true`.
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::net::{IpAddr, Ipv4Addr};

    /// Test filter that rejects any peer whose IP is on its deny list.
    #[derive(Debug, Clone)]
    struct BlockListFilter {
        blocked_ips: Vec<IpAddr>,
    }

    #[async_trait]
    impl ConnectionFilter for BlockListFilter {
        async fn should_accept(&self, addr_opt: Option<&SocketAddr>) -> bool {
            match addr_opt {
                // no peer address known: accept by default
                None => true,
                Some(addr) => !self.blocked_ips.contains(&addr.ip()),
            }
        }
    }

    #[tokio::test]
    async fn test_accept_all_filter() {
        let filter = AcceptAllFilter;
        let localhost = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
        assert!(filter.should_accept(Some(&localhost)).await);
    }

    #[tokio::test]
    async fn test_blocklist_filter() {
        let denied_ip = IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1));
        let filter = BlockListFilter {
            blocked_ips: vec![denied_ip],
        };
        let on_list = SocketAddr::new(denied_ip, 8080);
        let off_list = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 2)), 8080);
        assert!(!filter.should_accept(Some(&on_list)).await);
        assert!(filter.should_accept(Some(&off_list)).await);
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/listeners/l4.rs | pingora-core/src/listeners/l4.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(feature = "connection_filter")]
use log::debug;
use log::warn;
use pingora_error::{
ErrorType::{AcceptError, BindError},
OrErr, Result,
};
use std::io::ErrorKind;
use std::net::{SocketAddr, ToSocketAddrs};
#[cfg(unix)]
use std::os::unix::io::{AsRawFd, FromRawFd};
#[cfg(unix)]
use std::os::unix::net::UnixListener as StdUnixListener;
#[cfg(windows)]
use std::os::windows::io::{AsRawSocket, FromRawSocket};
use std::time::Duration;
use std::{fs::Permissions, sync::Arc};
use tokio::net::TcpSocket;
#[cfg(feature = "connection_filter")]
use super::connection_filter::ConnectionFilter;
#[cfg(feature = "connection_filter")]
use crate::listeners::AcceptAllFilter;
use crate::protocols::l4::ext::{set_dscp, set_tcp_fastopen_backlog};
use crate::protocols::l4::listener::Listener;
pub use crate::protocols::l4::stream::Stream;
#[cfg(feature = "connection_filter")]
use crate::protocols::GetSocketDigest;
use crate::protocols::TcpKeepalive;
#[cfg(unix)]
use crate::server::ListenFds;
// Retry bind() for up to ~30s (30 tries, 1s apart) when the address is still in use.
const TCP_LISTENER_MAX_TRY: usize = 30;
const TCP_LISTENER_TRY_STEP: Duration = Duration::from_secs(1);
// TODO: configurable backlog
const LISTENER_BACKLOG: u32 = 65535;
/// Address for listening server, either TCP/UDS socket.
#[derive(Clone, Debug)]
pub enum ServerAddress {
    /// TCP address string (e.g. `"0.0.0.0:80"`) plus optional socket options.
    Tcp(String, Option<TcpSocketOptions>),
    /// Unix domain socket path plus optional file permissions for the socket file.
    #[cfg(unix)]
    Uds(String, Option<Permissions>),
}
impl AsRef<str> for ServerAddress {
fn as_ref(&self) -> &str {
match &self {
Self::Tcp(l, _) => l,
#[cfg(unix)]
Self::Uds(l, _) => l,
}
}
}
impl ServerAddress {
    /// Return the TCP socket options, if this is a TCP address that carries any.
    fn tcp_sock_opts(&self) -> Option<&TcpSocketOptions> {
        if let Self::Tcp(_, opt) = self {
            opt.as_ref()
        } else {
            None
        }
    }
}
/// TCP socket configuration options, this is used for setting options on
/// listening sockets and accepted connections.
#[non_exhaustive]
#[derive(Clone, Debug, Default)]
pub struct TcpSocketOptions {
    /// IPV6_V6ONLY flag (if true, limit socket to IPv6 communication only).
    /// This is mostly useful when binding to `[::]`, which on most Unix distributions
    /// will bind to both IPv4 and IPv6 addresses by default.
    pub ipv6_only: Option<bool>,
    /// Enable TCP fast open and set the backlog size of it.
    /// See the [man page](https://man7.org/linux/man-pages/man7/tcp.7.html) for more information.
    pub tcp_fastopen: Option<usize>,
    /// Enable TCP keepalive on accepted connections.
    /// See the [man page](https://man7.org/linux/man-pages/man7/tcp.7.html) for more information.
    pub tcp_keepalive: Option<TcpKeepalive>,
    /// Specifies the server should set the following DSCP value on outgoing connections.
    /// See the [RFC](https://datatracker.ietf.org/doc/html/rfc2474) for more details.
    pub dscp: Option<u8>,
    /// Enable SO_REUSEPORT to allow multiple sockets to bind to the same address and port.
    /// This is useful for load balancing across multiple worker processes.
    /// See the [man page](https://man7.org/linux/man-pages/man7/socket.7.html) for more information.
    pub so_reuseport: Option<bool>,
    // TODO: allow configuring reuseaddr, backlog, etc. from here?
}
// Unix-domain-socket helpers: bind, permission setup, and backlog adjustment.
#[cfg(unix)]
mod uds {
    use super::{OrErr, Result};
    use crate::protocols::l4::listener::Listener;
    use log::{debug, error};
    use pingora_error::ErrorType::BindError;
    use std::fs::{self, Permissions};
    use std::io::ErrorKind;
    use std::os::unix::fs::PermissionsExt;
    use std::os::unix::net::UnixListener as StdUnixListener;
    use tokio::net::UnixListener;

    use super::LISTENER_BACKLOG;

    /// Set filesystem permissions on the socket file at `path`.
    pub(super) fn set_perms(path: &str, perms: Option<Permissions>) -> Result<()> {
        // set read/write permissions for all users on the socket by default
        let perms = perms.unwrap_or(Permissions::from_mode(0o666));
        fs::set_permissions(path, perms).or_err_with(BindError, || {
            format!("Fail to bind to {path}, could not set permissions")
        })
    }

    /// Adjust the backlog of an already-bound std listener and convert it to tokio.
    pub(super) fn set_backlog(l: StdUnixListener, backlog: u32) -> Result<UnixListener> {
        let socket: socket2::Socket = l.into();

        // Note that we call listen on an already listening socket
        // POSIX undefined but on Linux it will update the backlog size
        socket
            .listen(backlog as i32)
            .or_err_with(BindError, || format!("listen() failed on {socket:?}"))?;
        UnixListener::from_std(socket.into()).or_err(BindError, "Failed to convert to tokio socket")
    }

    /// Bind a Unix domain socket listener at `addr`, unlinking any stale socket
    /// file first and applying `perms` (or the 0o666 default) afterwards.
    pub(super) fn bind(addr: &str, perms: Option<Permissions>) -> Result<Listener> {
        /*
          We remove the filename/address in case there is a dangling reference.
          "Binding to a socket with a filename creates a socket in the
          filesystem that must be deleted by the caller when it is no
          longer needed (using unlink(2))"
        */
        match std::fs::remove_file(addr) {
            Ok(()) => {
                debug!("unlink {addr} done");
            }
            Err(e) => match e.kind() {
                // missing file is the common case on a fresh start; not an error
                ErrorKind::NotFound => debug!("unlink {addr} not found: {e}"),
                _ => error!("unlink {addr} failed: {e}"),
            },
        }

        let listener_socket = UnixListener::bind(addr)
            .or_err_with(BindError, || format!("Bind() failed on {addr}"))?;
        set_perms(addr, perms)?;
        let std_listener = listener_socket.into_std().unwrap();
        Ok(set_backlog(std_listener, LISTENER_BACKLOG)?.into())
    }
}
// currently, these options can only apply on sockets prior to calling bind()
/// Apply the pre-bind subset of [`TcpSocketOptions`] (IPV6_V6ONLY, SO_REUSEPORT,
/// TCP fast open backlog, DSCP) to a not-yet-bound socket. No-op when `opt` is `None`.
fn apply_tcp_socket_options(sock: &TcpSocket, opt: Option<&TcpSocketOptions>) -> Result<()> {
    let Some(opt) = opt else {
        return Ok(());
    };
    let socket_ref = socket2::SockRef::from(sock);
    if let Some(ipv6_only) = opt.ipv6_only {
        socket_ref
            .set_only_v6(ipv6_only)
            .or_err(BindError, "failed to set IPV6_V6ONLY")?;
    }

    // SO_REUSEPORT is only available on unix
    #[cfg(unix)]
    if let Some(reuseport) = opt.so_reuseport {
        socket_ref
            .set_reuse_port(reuseport)
            .or_err(BindError, "failed to set SO_REUSEPORT")?;
    }

    #[cfg(unix)]
    let raw = sock.as_raw_fd();
    #[cfg(windows)]
    let raw = sock.as_raw_socket();

    if let Some(backlog) = opt.tcp_fastopen {
        set_tcp_fastopen_backlog(raw, backlog)?;
    }

    if let Some(dscp) = opt.dscp {
        set_dscp(raw, dscp)?;
    }
    Ok(())
}
/// Rebuild a [`Listener`] from an inherited raw file descriptor (used when
/// sockets are handed down from an old process during upgrade).
///
/// NOTE(review): assumes `fd` is a valid, already-listening socket matching
/// `address` — inherited fds are trusted here; confirm at the call site.
fn from_raw_fd(address: &ServerAddress, fd: i32) -> Result<Listener> {
    match address {
        #[cfg(unix)]
        ServerAddress::Uds(addr, perm) => {
            let std_listener = unsafe { StdUnixListener::from_raw_fd(fd) };
            // set permissions just in case
            uds::set_perms(addr, perm.clone())?;
            Ok(uds::set_backlog(std_listener, LISTENER_BACKLOG)?.into())
        }
        ServerAddress::Tcp(_, _) => {
            #[cfg(unix)]
            let std_listener_socket = unsafe { std::net::TcpStream::from_raw_fd(fd) };
            #[cfg(windows)]
            let std_listener_socket = unsafe { std::net::TcpStream::from_raw_socket(fd as u64) };
            let listener_socket = TcpSocket::from_std_stream(std_listener_socket);
            // Note that we call listen on an already listening socket
            // POSIX undefined but on Linux it will update the backlog size
            Ok(listener_socket
                .listen(LISTENER_BACKLOG)
                .or_err_with(BindError, || format!("Listen() failed on {address:?}"))?
                .into())
        }
    }
}
/// Bind a TCP listener on `addr`, retrying for a while if the address is
/// still in use (e.g. across restarts).
///
/// # Errors
///
/// Returns a `BindError` when the address cannot be resolved, resolves to no
/// usable address, socket option setup fails, or the address is still in use
/// after `TCP_LISTENER_MAX_TRY` attempts.
async fn bind_tcp(addr: &str, opt: Option<TcpSocketOptions>) -> Result<Listener> {
    let mut try_count = 0;
    loop {
        let sock_addr = addr
            .to_socket_addrs() // NOTE: this could invoke a blocking network lookup
            .or_err_with(BindError, || format!("Invalid listen address {addr}"))?
            .next() // take the first one for now
            // resolution can legitimately yield zero addresses; surface an
            // error instead of panicking on unwrap()
            .ok_or(std::io::Error::from(ErrorKind::AddrNotAvailable))
            .or_err_with(BindError, || {
                format!("{addr} resolved to no usable address")
            })?;
        let listener_socket = match sock_addr {
            SocketAddr::V4(_) => TcpSocket::new_v4(),
            SocketAddr::V6(_) => TcpSocket::new_v6(),
        }
        .or_err_with(BindError, || format!("fail to create address {sock_addr}"))?;

        // NOTE: this is to preserve the current TcpListener::bind() behavior.
        // We have a few tests relying on this behavior to allow multiple identical
        // test servers to coexist.
        listener_socket
            .set_reuseaddr(true)
            .or_err(BindError, "fail to set_reuseaddr(true)")?;
        apply_tcp_socket_options(&listener_socket, opt.as_ref())?;

        match listener_socket.bind(sock_addr) {
            Ok(()) => {
                break Ok(listener_socket
                    .listen(LISTENER_BACKLOG)
                    // this call is listen(), not bind(); report it accurately
                    .or_err(BindError, "listen() failed")?
                    .into())
            }
            Err(e) => {
                if e.kind() != ErrorKind::AddrInUse {
                    break Err(e).or_err_with(BindError, || format!("bind() failed on {addr}"));
                }
                try_count += 1;
                if try_count >= TCP_LISTENER_MAX_TRY {
                    break Err(e).or_err_with(BindError, || {
                        format!("bind() failed, after retries, {addr} still in use")
                    });
                }
                warn!("{addr} is in use, will try again");
                tokio::time::sleep(TCP_LISTENER_TRY_STEP).await;
            }
        }
    }
}
/// Bind the given [`ServerAddress`], dispatching to the TCP or UDS path.
async fn bind(addr: &ServerAddress) -> Result<Listener> {
    match addr {
        #[cfg(unix)]
        ServerAddress::Uds(l, perm) => uds::bind(l, perm.clone()),
        ServerAddress::Tcp(l, opt) => bind_tcp(l, opt.clone()).await,
    }
}
/// A bound listening endpoint together with its (optional) connection filter.
#[derive(Clone, Debug)]
pub struct ListenerEndpoint {
    listen_addr: ServerAddress,
    // Arc so that cloned endpoints share the same underlying OS listener
    listener: Arc<Listener>,
    #[cfg(feature = "connection_filter")]
    connection_filter: Arc<dyn ConnectionFilter>,
}

/// Builder for [`ListenerEndpoint`].
#[derive(Default)]
pub struct ListenerEndpointBuilder {
    listen_addr: Option<ServerAddress>,
    #[cfg(feature = "connection_filter")]
    connection_filter: Option<Arc<dyn ConnectionFilter>>,
}
impl ListenerEndpointBuilder {
    /// Create an empty builder; equivalent to `Self::default()`.
    pub fn new() -> ListenerEndpointBuilder {
        // The struct derives `Default`, so don't duplicate the (cfg-gated)
        // field list here.
        Self::default()
    }

    /// Set the address to listen on (required before calling `listen`).
    pub fn listen_addr(&mut self, addr: ServerAddress) -> &mut Self {
        self.listen_addr = Some(addr);
        self
    }

    /// Set the connection filter applied to every accepted connection.
    #[cfg(feature = "connection_filter")]
    pub fn connection_filter(&mut self, filter: Arc<dyn ConnectionFilter>) -> &mut Self {
        self.connection_filter = Some(filter);
        self
    }

    /// Bind the endpoint, or inherit an fd from `fds` during zero-downtime upgrade.
    ///
    /// # Panics
    ///
    /// Panics if `listen_addr` was never set.
    #[cfg(unix)]
    pub async fn listen(self, fds: Option<ListenFds>) -> Result<ListenerEndpoint> {
        let listen_addr = self
            .listen_addr
            .expect("Tried to listen with no addr specified");

        let listener = if let Some(fds_table) = fds {
            let addr_str = listen_addr.as_ref();
            // consider make this mutex std::sync::Mutex or OnceCell
            let mut table = fds_table.lock().await;
            if let Some(fd) = table.get(addr_str) {
                // reuse the fd handed down from the old process
                from_raw_fd(&listen_addr, *fd)?
            } else {
                // not found
                let listener = bind(&listen_addr).await?;
                table.add(addr_str.to_string(), listener.as_raw_fd());
                listener
            }
        } else {
            // not found, no fd table
            bind(&listen_addr).await?
        };

        #[cfg(feature = "connection_filter")]
        let connection_filter = self
            .connection_filter
            .unwrap_or_else(|| Arc::new(AcceptAllFilter));

        Ok(ListenerEndpoint {
            listen_addr,
            listener: Arc::new(listener),
            #[cfg(feature = "connection_filter")]
            connection_filter,
        })
    }

    /// Bind the endpoint.
    ///
    /// # Panics
    ///
    /// Panics if `listen_addr` was never set.
    #[cfg(windows)]
    pub async fn listen(self) -> Result<ListenerEndpoint> {
        let listen_addr = self
            .listen_addr
            .expect("Tried to listen with no addr specified");

        let listener = bind(&listen_addr).await?;

        #[cfg(feature = "connection_filter")]
        let connection_filter = self
            .connection_filter
            .unwrap_or_else(|| Arc::new(AcceptAllFilter));

        Ok(ListenerEndpoint {
            listen_addr,
            listener: Arc::new(listener),
            #[cfg(feature = "connection_filter")]
            connection_filter,
        })
    }
}
impl ListenerEndpoint {
    /// Create a builder for a [`ListenerEndpoint`].
    pub fn builder() -> ListenerEndpointBuilder {
        ListenerEndpointBuilder::new()
    }

    /// The listen address as the string it was configured with.
    pub fn as_str(&self) -> &str {
        self.listen_addr.as_ref()
    }

    /// Apply per-connection settings (nodelay, keepalive, DSCP) to an accepted stream.
    fn apply_stream_settings(&self, stream: &mut Stream) -> Result<()> {
        // settings are applied based on whether the underlying stream supports it
        stream.set_nodelay()?;
        let Some(op) = self.listen_addr.tcp_sock_opts() else {
            return Ok(());
        };
        if let Some(ka) = op.tcp_keepalive.as_ref() {
            stream.set_keepalive(ka)?;
        }
        if let Some(dscp) = op.dscp {
            #[cfg(unix)]
            set_dscp(stream.as_raw_fd(), dscp)?;
            #[cfg(windows)]
            set_dscp(stream.as_raw_socket(), dscp)?;
        }
        Ok(())
    }

    /// Accept the next connection, applying the connection filter (when the
    /// `connection_filter` feature is enabled) and per-stream socket settings.
    pub async fn accept(&self) -> Result<Stream> {
        #[cfg(feature = "connection_filter")]
        {
            // Loop until a connection passes the filter; rejected connections
            // are dropped immediately, before any TLS handshake happens.
            loop {
                let mut stream = self
                    .listener
                    .accept()
                    .await
                    .or_err(AcceptError, "Fail to accept()")?;

                // Performance: nested if-let avoids cloning/allocations on each connection accept
                let should_accept = if let Some(digest) = stream.get_socket_digest() {
                    if let Some(peer_addr) = digest.peer_addr() {
                        self.connection_filter
                            .should_accept(peer_addr.as_inet())
                            .await
                    } else {
                        // No peer address available - accept by default
                        true
                    }
                } else {
                    // No socket digest available - accept by default
                    true
                };

                if !should_accept {
                    debug!("Connection rejected by filter");
                    drop(stream);
                    continue;
                }

                self.apply_stream_settings(&mut stream)?;
                return Ok(stream);
            }
        }
        #[cfg(not(feature = "connection_filter"))]
        {
            let mut stream = self
                .listener
                .accept()
                .await
                .or_err(AcceptError, "Fail to accept()")?;
            self.apply_stream_settings(&mut stream)?;
            Ok(stream)
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;

    #[tokio::test]
    async fn test_listen_tcp() {
        let addr = "127.0.0.1:7100";
        let mut builder = ListenerEndpoint::builder();
        builder.listen_addr(ServerAddress::Tcp(addr.into(), None));
        #[cfg(unix)]
        let listener = builder.listen(None).await.unwrap();
        #[cfg(windows)]
        let listener = builder.listen().await.unwrap();
        tokio::spawn(async move {
            // just try to accept once
            listener.accept().await.unwrap();
        });
        tokio::net::TcpStream::connect(addr)
            .await
            .expect("can connect to TCP listener");
    }

    #[tokio::test]
    async fn test_listen_tcp_ipv6_only() {
        let sock_opt = Some(TcpSocketOptions {
            ipv6_only: Some(true),
            ..Default::default()
        });
        let mut builder = ListenerEndpoint::builder();
        builder.listen_addr(ServerAddress::Tcp("[::]:7101".into(), sock_opt));
        #[cfg(unix)]
        let listener = builder.listen(None).await.unwrap();
        #[cfg(windows)]
        let listener = builder.listen().await.unwrap();
        tokio::spawn(async move {
            // just try to accept twice
            listener.accept().await.unwrap();
            listener.accept().await.unwrap();
        });
        // IPV6_V6ONLY means the v4 mapped address must be refused
        tokio::net::TcpStream::connect("127.0.0.1:7101")
            .await
            .expect_err("cannot connect to v4 addr");
        tokio::net::TcpStream::connect("[::1]:7101")
            .await
            .expect("can connect to v6 addr");
    }

    #[cfg(unix)]
    #[tokio::test]
    async fn test_listen_uds() {
        let addr = "/tmp/test_listen_uds";
        let mut builder = ListenerEndpoint::builder();
        builder.listen_addr(ServerAddress::Uds(addr.into(), None));
        let listener = builder.listen(None).await.unwrap();
        tokio::spawn(async move {
            // just try to accept once
            listener.accept().await.unwrap();
        });
        tokio::net::UnixStream::connect(addr)
            .await
            .expect("can connect to UDS listener");
    }

    #[cfg(unix)]
    #[tokio::test]
    async fn test_tcp_so_reuseport() {
        let addr = "127.0.0.1:7201";
        let sock_opt = TcpSocketOptions {
            so_reuseport: Some(true),
            ..Default::default()
        };
        // Create first listener with SO_REUSEPORT
        let mut builder1 = ListenerEndpoint::builder();
        builder1.listen_addr(ServerAddress::Tcp(addr.into(), Some(sock_opt.clone())));
        let listener1 = builder1.listen(None).await.unwrap();
        // Create second listener with the same address and SO_REUSEPORT
        // This should succeed because SO_REUSEPORT is enabled
        let mut builder2 = ListenerEndpoint::builder();
        builder2.listen_addr(ServerAddress::Tcp(addr.into(), Some(sock_opt)));
        let listener2 = builder2.listen(None).await.unwrap();
        // Both listeners should be able to bind to the same address
        assert_eq!(listener1.as_str(), addr);
        assert_eq!(listener2.as_str(), addr);
    }

    #[tokio::test]
    async fn test_tcp_so_reuseport_false() {
        let addr = "127.0.0.1:7202";
        let sock_opt_no_reuseport = TcpSocketOptions {
            so_reuseport: Some(false), // Explicitly disable SO_REUSEPORT
            ..Default::default()
        };
        // Create first listener without SO_REUSEPORT
        let mut builder1 = ListenerEndpoint::builder();
        builder1.listen_addr(ServerAddress::Tcp(
            addr.into(),
            Some(sock_opt_no_reuseport.clone()),
        ));
        let listener1 = builder1.listen(None).await.unwrap();
        // Try to create second listener with the same address and no SO_REUSEPORT
        // This should fail with "address already in use"
        let mut builder2 = ListenerEndpoint::builder();
        builder2.listen_addr(ServerAddress::Tcp(addr.into(), Some(sock_opt_no_reuseport)));
        let result = builder2.listen(None).await;
        // The second bind should fail
        assert!(result.is_err());
        let error_msg = format!("{:?}", result.unwrap_err());
        assert!(
            error_msg.contains("address")
                || error_msg.contains("in use")
                || error_msg.contains("bind")
        );
        // Verify the first listener still works
        assert_eq!(listener1.as_str(), addr);
    }

    #[cfg(feature = "connection_filter")]
    #[tokio::test]
    async fn test_connection_filter_accept() {
        use crate::listeners::ConnectionFilter;
        use async_trait::async_trait;
        use std::sync::atomic::{AtomicUsize, Ordering};

        // Filter that accepts even-numbered calls and rejects odd-numbered ones.
        #[derive(Debug)]
        struct CountingFilter {
            accept_count: Arc<AtomicUsize>,
            reject_count: Arc<AtomicUsize>,
        }

        #[async_trait]
        impl ConnectionFilter for CountingFilter {
            async fn should_accept(&self, _addr: Option<&SocketAddr>) -> bool {
                let count = self.accept_count.fetch_add(1, Ordering::SeqCst);
                if count % 2 == 0 {
                    true
                } else {
                    self.reject_count.fetch_add(1, Ordering::SeqCst);
                    false
                }
            }
        }

        let addr = "127.0.0.1:7300";
        let accept_count = Arc::new(AtomicUsize::new(0));
        let reject_count = Arc::new(AtomicUsize::new(0));
        let filter = Arc::new(CountingFilter {
            accept_count: accept_count.clone(),
            reject_count: reject_count.clone(),
        });
        let mut builder = ListenerEndpoint::builder();
        builder
            .listen_addr(ServerAddress::Tcp(addr.into(), None))
            .connection_filter(filter);
        #[cfg(unix)]
        let listener = builder.listen(None).await.unwrap();
        #[cfg(windows)]
        let listener = builder.listen().await.unwrap();
        let listener_clone = listener.clone();
        tokio::spawn(async move {
            let _stream1 = listener_clone.accept().await.unwrap();
            let _stream2 = listener_clone.accept().await.unwrap();
        });
        tokio::time::sleep(Duration::from_millis(10)).await;
        let _conn1 = tokio::net::TcpStream::connect(addr).await.unwrap();
        let _conn2 = tokio::net::TcpStream::connect(addr).await.unwrap();
        let _conn3 = tokio::net::TcpStream::connect(addr).await.unwrap();
        tokio::time::sleep(Duration::from_millis(50)).await;
        // 3 connections seen by the filter, 1 (the odd-numbered one) rejected
        assert_eq!(accept_count.load(Ordering::SeqCst), 3);
        assert_eq!(reject_count.load(Ordering::SeqCst), 1);
    }

    #[cfg(feature = "connection_filter")]
    #[tokio::test]
    async fn test_connection_filter_blocks_all() {
        use crate::listeners::ConnectionFilter;
        use async_trait::async_trait;
        use std::sync::atomic::{AtomicUsize, Ordering};

        #[derive(Debug)]
        struct RejectAllFilter {
            reject_count: Arc<AtomicUsize>,
        }

        #[async_trait]
        impl ConnectionFilter for RejectAllFilter {
            async fn should_accept(&self, _addr: Option<&SocketAddr>) -> bool {
                self.reject_count.fetch_add(1, Ordering::SeqCst);
                false
            }
        }

        let addr = "127.0.0.1:7301";
        let reject_count = Arc::new(AtomicUsize::new(0));
        let mut builder = ListenerEndpoint::builder();
        builder
            .listen_addr(ServerAddress::Tcp(addr.into(), None))
            .connection_filter(Arc::new(RejectAllFilter {
                reject_count: reject_count.clone(),
            }));
        #[cfg(unix)]
        let listener = builder.listen(None).await.unwrap();
        #[cfg(windows)]
        let listener = builder.listen().await.unwrap();
        let listener_clone = listener.clone();
        let _accept_handle = tokio::spawn(async move {
            // This will never return since all connections are rejected
            let _ = listener_clone.accept().await;
        });
        tokio::time::sleep(Duration::from_millis(50)).await;
        let mut handles = vec![];
        for _ in 0..3 {
            let handle = tokio::spawn(async move {
                if let Ok(stream) = tokio::net::TcpStream::connect(addr).await {
                    drop(stream);
                }
            });
            handles.push(handle);
        }
        for handle in handles {
            let _ = handle.await;
        }
        // Wait for rejections to be counted with timeout
        let start = tokio::time::Instant::now();
        let timeout = Duration::from_secs(2);
        loop {
            let rejected = reject_count.load(Ordering::SeqCst);
            if rejected >= 3 {
                assert_eq!(rejected, 3, "Should reject exactly 3 connections");
                break;
            }
            if start.elapsed() > timeout {
                panic!(
                    "Timeout waiting for rejections, got {} expected 3",
                    rejected
                );
            }
            tokio::time::sleep(Duration::from_millis(10)).await;
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/listeners/mod.rs | pingora-core/src/listeners/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The listening endpoints (TCP and TLS) and their configurations.
//!
//! This module provides the infrastructure for setting up network listeners
//! that accept incoming connections. It supports TCP, Unix domain sockets,
//! and TLS endpoints.
//!
//! # Connection Filtering
//!
//! With the `connection_filter` feature enabled, this module also provides
//! early connection filtering capabilities through the [`ConnectionFilter`] trait.
//! This allows dropping unwanted connections at the TCP level before any
//! expensive operations like TLS handshakes.
//!
//! ## Example with Connection Filtering
//!
//! ```ignore
//! # #[cfg(feature = "connection_filter")]
//! # {
//! use pingora_core::listeners::{Listeners, ConnectionFilter};
//! use std::sync::Arc;
//!
//! // Create a custom filter
//! let filter = Arc::new(MyCustomFilter::new());
//!
//! // Apply to listeners
//! let mut listeners = Listeners::new();
//! listeners.set_connection_filter(filter);
//! listeners.add_tcp("0.0.0.0:8080");
//! # }
//! ```
mod l4;
#[cfg(feature = "connection_filter")]
pub mod connection_filter;
#[cfg(feature = "connection_filter")]
pub use connection_filter::{AcceptAllFilter, ConnectionFilter};
/// No-op filter used when the `connection_filter` feature is disabled.
#[cfg(not(feature = "connection_filter"))]
#[derive(Debug, Clone)]
pub struct AcceptAllFilter;

/// Synchronous fallback of the connection filter trait for builds without the
/// `connection_filter` feature. The default implementation accepts everything.
#[cfg(not(feature = "connection_filter"))]
pub trait ConnectionFilter: std::fmt::Debug + Send + Sync {
    /// Return `true` to accept the connection; defaults to accepting all.
    fn should_accept(&self, _addr: &std::net::SocketAddr) -> bool {
        true
    }
}

#[cfg(not(feature = "connection_filter"))]
impl ConnectionFilter for AcceptAllFilter {
    // The trait's default `should_accept` (accept everything) is used as-is;
    // re-implementing it verbatim here was redundant.
}
#[cfg(feature = "any_tls")]
pub mod tls;
#[cfg(not(feature = "any_tls"))]
pub use crate::tls::listeners as tls;
use crate::protocols::{l4::socket::SocketAddr, tls::TlsRef, Stream};
#[cfg(unix)]
use crate::server::ListenFds;
use async_trait::async_trait;
use pingora_error::Result;
use std::{any::Any, fs::Permissions, sync::Arc};
use l4::{ListenerEndpoint, Stream as L4Stream};
use tls::{Acceptor, TlsSettings};
pub use crate::protocols::tls::ALPN;
use crate::protocols::GetSocketDigest;
pub use l4::{ServerAddress, TcpSocketOptions};
/// The APIs to customize things like certificate during TLS server side handshake
#[async_trait]
pub trait TlsAccept {
    // TODO: return error?
    /// This function is called in the middle of a TLS handshake. Structs who
    /// implement this function should provide tls certificate and key to the
    /// [TlsRef] via `ssl_use_certificate` and `ssl_use_private_key`.
    ///
    /// Note: this is only supported for openssl and boringssl.
    async fn certificate_callback(&self, _ssl: &mut TlsRef) {
        // does nothing by default
    }

    /// This function is called after the TLS handshake is complete.
    ///
    /// Any value returned from this function (other than `None`) will be stored in the
    /// `extension` field of `SslDigest`. This allows you to attach custom application-specific
    /// data to the TLS connection, which will be accessible from the HTTP layer via the
    /// `SslDigest` attached to the session digest.
    async fn handshake_complete_callback(
        &self,
        _ssl: &TlsRef,
    ) -> Option<Arc<dyn Any + Send + Sync>> {
        None
    }
}

/// Boxed, dynamically dispatched [`TlsAccept`] callbacks.
pub type TlsAcceptCallbacks = Box<dyn TlsAccept + Send + Sync>;
// Configuration for one endpoint before it is bound into a TransportStack.
struct TransportStackBuilder {
    l4: ServerAddress,
    tls: Option<TlsSettings>,
    #[cfg(feature = "connection_filter")]
    connection_filter: Option<Arc<dyn ConnectionFilter>>,
}
impl TransportStackBuilder {
    /// Bind (or inherit, on unix upgrade) the L4 listener and build the stack.
    /// Consumes `self.tls` via `take()`, so this is effectively one-shot.
    pub async fn build(
        &mut self,
        #[cfg(unix)] upgrade_listeners: Option<ListenFds>,
    ) -> Result<TransportStack> {
        let mut builder = ListenerEndpoint::builder();

        builder.listen_addr(self.l4.clone());

        #[cfg(feature = "connection_filter")]
        if let Some(filter) = &self.connection_filter {
            builder.connection_filter(filter.clone());
        }

        #[cfg(unix)]
        let l4 = builder.listen(upgrade_listeners).await?;
        #[cfg(windows)]
        let l4 = builder.listen().await?;

        Ok(TransportStack {
            l4,
            tls: self.tls.take().map(|tls| Arc::new(tls.build())),
        })
    }
}
// A bound L4 listener plus an optional shared TLS acceptor.
#[derive(Clone)]
pub(crate) struct TransportStack {
    l4: ListenerEndpoint,
    // `None` for plaintext endpoints
    tls: Option<Arc<Acceptor>>,
}

impl TransportStack {
    /// The configured listen address string.
    pub fn as_str(&self) -> &str {
        self.l4.as_str()
    }

    /// Accept one L4 connection; the TLS handshake (if any) is deferred to
    /// `UninitializedStream::handshake`.
    pub async fn accept(&self) -> Result<UninitializedStream> {
        let stream = self.l4.accept().await?;
        Ok(UninitializedStream {
            l4: stream,
            tls: self.tls.clone(),
        })
    }

    pub fn cleanup(&mut self) {
        // placeholder
    }
}
// An accepted L4 connection whose (optional) TLS handshake has not run yet.
pub(crate) struct UninitializedStream {
    l4: L4Stream,
    tls: Option<Arc<Acceptor>>,
}

impl UninitializedStream {
    /// Finish initializing the stream: enable buffering and, if this endpoint
    /// is TLS, perform the server-side handshake.
    pub async fn handshake(mut self) -> Result<Stream> {
        self.l4.set_buffer();
        if let Some(tls) = self.tls {
            let tls_stream = tls.tls_handshake(self.l4).await?;
            Ok(Box::new(tls_stream))
        } else {
            Ok(Box::new(self.l4))
        }
    }

    /// Get the peer address of the connection if available
    pub fn peer_addr(&self) -> Option<SocketAddr> {
        self.l4
            .get_socket_digest()
            .and_then(|d| d.peer_addr().cloned())
    }
}
/// The struct to hold one or more listening endpoints
pub struct Listeners {
    stacks: Vec<TransportStackBuilder>,
    // collection-wide filter; propagated to existing and future stacks
    #[cfg(feature = "connection_filter")]
    connection_filter: Option<Arc<dyn ConnectionFilter>>,
}
impl Listeners {
    /// Create a new [`Listeners`] with no listening endpoints.
    pub fn new() -> Self {
        Listeners {
            stacks: vec![],
            #[cfg(feature = "connection_filter")]
            connection_filter: None,
        }
    }

    /// Create a new [`Listeners`] with a TCP server endpoint from the given string.
    pub fn tcp(addr: &str) -> Self {
        let mut listeners = Self::new();
        listeners.add_tcp(addr);
        listeners
    }

    /// Create a new [`Listeners`] with a Unix domain socket endpoint from the given string.
    #[cfg(unix)]
    pub fn uds(addr: &str, perm: Option<Permissions>) -> Self {
        let mut listeners = Self::new();
        listeners.add_uds(addr, perm);
        listeners
    }

    /// Create a new [`Listeners`] with a TLS (TCP) endpoint with the given address string,
    /// and path to the certificate/private key pairs.
    /// This endpoint will adopt the [Mozilla Intermediate](https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29)
    /// server side TLS settings.
    pub fn tls(addr: &str, cert_path: &str, key_path: &str) -> Result<Self> {
        let mut listeners = Self::new();
        listeners.add_tls(addr, cert_path, key_path)?;
        Ok(listeners)
    }

    /// Add a TCP endpoint to `self`.
    pub fn add_tcp(&mut self, addr: &str) {
        self.add_address(ServerAddress::Tcp(addr.into(), None));
    }

    /// Add a TCP endpoint to `self`, with the given [`TcpSocketOptions`].
    pub fn add_tcp_with_settings(&mut self, addr: &str, sock_opt: TcpSocketOptions) {
        self.add_address(ServerAddress::Tcp(addr.into(), Some(sock_opt)));
    }

    /// Add a Unix domain socket endpoint to `self`.
    #[cfg(unix)]
    pub fn add_uds(&mut self, addr: &str, perm: Option<Permissions>) {
        self.add_address(ServerAddress::Uds(addr.into(), perm));
    }

    /// Add a TLS endpoint to `self` with the [Mozilla Intermediate](https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29)
    /// server side TLS settings.
    pub fn add_tls(&mut self, addr: &str, cert_path: &str, key_path: &str) -> Result<()> {
        self.add_tls_with_settings(addr, None, TlsSettings::intermediate(cert_path, key_path)?);
        Ok(())
    }

    /// Add a TLS endpoint to `self` with the given socket and server side TLS settings.
    /// See [`TlsSettings`] and [`TcpSocketOptions`] for more details.
    pub fn add_tls_with_settings(
        &mut self,
        addr: &str,
        sock_opt: Option<TcpSocketOptions>,
        settings: TlsSettings,
    ) {
        self.add_endpoint(ServerAddress::Tcp(addr.into(), sock_opt), Some(settings));
    }

    /// Add the given [`ServerAddress`] to `self`.
    pub fn add_address(&mut self, addr: ServerAddress) {
        self.add_endpoint(addr, None);
    }

    /// Set a connection filter for all endpoints in this listener collection
    #[cfg(feature = "connection_filter")]
    pub fn set_connection_filter(&mut self, filter: Arc<dyn ConnectionFilter>) {
        log::debug!("Setting connection filter on Listeners");
        // Store the filter for future endpoints
        self.connection_filter = Some(filter.clone());
        // Apply to existing stacks
        for stack in &mut self.stacks {
            stack.connection_filter = Some(filter.clone());
        }
    }

    /// Add the given [`ServerAddress`] to `self` with the given [`TlsSettings`] if provided
    pub fn add_endpoint(&mut self, l4: ServerAddress, tls: Option<TlsSettings>) {
        self.stacks.push(TransportStackBuilder {
            l4,
            tls,
            // new endpoints inherit the collection-wide filter, if any
            #[cfg(feature = "connection_filter")]
            connection_filter: self.connection_filter.clone(),
        })
    }

    /// Bind every configured endpoint, optionally inheriting listen fds
    /// from the old process during a zero-downtime upgrade (unix only).
    pub(crate) async fn build(
        &mut self,
        #[cfg(unix)] upgrade_listeners: Option<ListenFds>,
    ) -> Result<Vec<TransportStack>> {
        let mut stacks = Vec::with_capacity(self.stacks.len());
        for b in self.stacks.iter_mut() {
            let new_stack = b
                .build(
                    #[cfg(unix)]
                    upgrade_listeners.clone(),
                )
                .await?;
            stacks.push(new_stack);
        }
        Ok(stacks)
    }

    pub(crate) fn cleanup(&self) {
        // placeholder
    }
}
#[cfg(test)]
mod test {
    use super::*;
    #[cfg(feature = "connection_filter")]
    use std::sync::atomic::{AtomicUsize, Ordering};
    #[cfg(feature = "any_tls")]
    use tokio::io::AsyncWriteExt;
    use tokio::net::TcpStream;
    use tokio::time::{sleep, Duration};

    #[tokio::test]
    async fn test_listen_tcp() {
        let addr1 = "127.0.0.1:7101";
        let addr2 = "127.0.0.1:7102";
        let mut listeners = Listeners::tcp(addr1);
        listeners.add_tcp(addr2);
        let listeners = listeners
            .build(
                #[cfg(unix)]
                None,
            )
            .await
            .unwrap();
        assert_eq!(listeners.len(), 2);
        for listener in listeners {
            tokio::spawn(async move {
                // just try to accept once
                let stream = listener.accept().await.unwrap();
                stream.handshake().await.unwrap();
            });
        }
        // make sure the above starts before the lines below
        sleep(Duration::from_millis(10)).await;
        TcpStream::connect(addr1).await.unwrap();
        TcpStream::connect(addr2).await.unwrap();
    }

    #[tokio::test]
    #[cfg(feature = "any_tls")]
    async fn test_listen_tls() {
        use tokio::io::AsyncReadExt;

        let addr = "127.0.0.1:7103";
        let cert_path = format!("{}/tests/keys/server.crt", env!("CARGO_MANIFEST_DIR"));
        let key_path = format!("{}/tests/keys/key.pem", env!("CARGO_MANIFEST_DIR"));
        let mut listeners = Listeners::tls(addr, &cert_path, &key_path).unwrap();
        let listener = listeners
            .build(
                #[cfg(unix)]
                None,
            )
            .await
            .unwrap()
            .pop()
            .unwrap();
        tokio::spawn(async move {
            // just try to accept once; serve a minimal canned HTTP response
            let stream = listener.accept().await.unwrap();
            let mut stream = stream.handshake().await.unwrap();
            let mut buf = [0; 1024];
            let _ = stream.read(&mut buf).await.unwrap();
            stream
                .write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 1\r\n\r\na")
                .await
                .unwrap();
        });
        // make sure the above starts before the lines below
        sleep(Duration::from_millis(10)).await;

        let client = reqwest::Client::builder()
            .danger_accept_invalid_certs(true)
            .build()
            .unwrap();

        let res = client.get(format!("https://{addr}")).send().await.unwrap();
        assert_eq!(res.status(), reqwest::StatusCode::OK);
    }

    #[cfg(feature = "connection_filter")]
    #[test]
    fn test_connection_filter_inheritance() {
        // Filter that counts how many times it is consulted.
        #[derive(Debug, Clone)]
        struct TestFilter {
            counter: Arc<AtomicUsize>,
        }

        #[async_trait]
        impl ConnectionFilter for TestFilter {
            async fn should_accept(&self, _addr: Option<&std::net::SocketAddr>) -> bool {
                self.counter.fetch_add(1, Ordering::SeqCst);
                true
            }
        }

        let mut listeners = Listeners::new();

        // Add an endpoint before setting filter
        listeners.add_tcp("127.0.0.1:7104");

        // Set the connection filter
        let filter = Arc::new(TestFilter {
            counter: Arc::new(AtomicUsize::new(0)),
        });
        listeners.set_connection_filter(filter.clone());

        // Add endpoints after setting filter
        listeners.add_tcp("127.0.0.1:7105");

        #[cfg(feature = "any_tls")]
        {
            // Only test TLS if the feature is enabled
            if let Ok(tls_settings) = TlsSettings::intermediate(
                &format!("{}/tests/keys/server.crt", env!("CARGO_MANIFEST_DIR")),
                &format!("{}/tests/keys/key.pem", env!("CARGO_MANIFEST_DIR")),
            ) {
                listeners.add_tls_with_settings("127.0.0.1:7106", None, tls_settings);
            }
        }

        // Verify all stacks have the filter (only when feature is enabled)
        for stack in &listeners.stacks {
            assert!(
                stack.connection_filter.is_some(),
                "All stacks should have the connection filter set"
            );
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/listeners/tls/mod.rs | pingora-core/src/listeners/tls/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(feature = "openssl_derived")]
mod boringssl_openssl;
#[cfg(feature = "openssl_derived")]
pub use boringssl_openssl::*;
#[cfg(feature = "rustls")]
mod rustls;
#[cfg(feature = "rustls")]
pub use rustls::*;
#[cfg(feature = "s2n")]
mod s2n;
#[cfg(feature = "s2n")]
pub use s2n::*;
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/listeners/tls/boringssl_openssl/mod.rs | pingora-core/src/listeners/tls/boringssl_openssl/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use log::debug;
use pingora_error::{ErrorType, OrErr, Result};
use std::ops::{Deref, DerefMut};
use crate::listeners::tls::boringssl_openssl::alpn::valid_alpn;
pub use crate::protocols::tls::ALPN;
use crate::protocols::IO;
use crate::tls::ssl::AlpnError;
use crate::tls::ssl::{SslAcceptor, SslAcceptorBuilder, SslFiletype, SslMethod};
use crate::{
listeners::TlsAcceptCallbacks,
protocols::tls::{
server::{handshake, handshake_with_callback},
SslStream,
},
};
pub const TLS_CONF_ERR: ErrorType = ErrorType::Custom("TLSConfigError");
/// Server-side TLS acceptor plus optional per-handshake callbacks
/// (used, e.g., to supply a certificate during the handshake).
pub(crate) struct Acceptor {
    // Fully-configured OpenSSL/BoringSSL acceptor that drives handshakes.
    ssl_acceptor: SslAcceptor,
    // When set, the handshake goes through `handshake_with_callback`.
    callbacks: Option<TlsAcceptCallbacks>,
}
/// The TLS settings of a listening endpoint
pub struct TlsSettings {
    // Acceptor builder; exposed via Deref/DerefMut so users can tweak any
    // OpenSSL option before the endpoint is built.
    accept_builder: SslAcceptorBuilder,
    // Optional handshake callbacks (e.g. dynamic certificate selection).
    callbacks: Option<TlsAcceptCallbacks>,
}
impl From<SslAcceptorBuilder> for TlsSettings {
fn from(settings: SslAcceptorBuilder) -> Self {
TlsSettings {
accept_builder: settings,
callbacks: None,
}
}
}
// Read-only access to the underlying builder via deref coercion.
impl Deref for TlsSettings {
    type Target = SslAcceptorBuilder;
    fn deref(&self) -> &Self::Target {
        &self.accept_builder
    }
}
// Mutable access to the underlying builder, so callers can adjust any
// OpenSSL setting directly on `TlsSettings`.
impl DerefMut for TlsSettings {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.accept_builder
    }
}
impl TlsSettings {
    /// Create a new [`TlsSettings`] with the [Mozilla Intermediate](https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29)
    /// server side TLS settings. Users can adjust the TLS settings after this object is created.
    /// Return error if the provided certificate and private key are invalid or not found.
    pub fn intermediate(cert_path: &str, key_path: &str) -> Result<Self> {
        let mut accept_builder = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls()).or_err(
            TLS_CONF_ERR,
            "fail to create mozilla_intermediate_v5 Acceptor",
        )?;
        accept_builder
            .set_private_key_file(key_path, SslFiletype::PEM)
            .or_err_with(TLS_CONF_ERR, || format!("fail to read key file {key_path}"))?;
        accept_builder
            .set_certificate_chain_file(cert_path)
            .or_err_with(TLS_CONF_ERR, || {
                format!("fail to read cert file {cert_path}")
            })?;
        Ok(TlsSettings {
            accept_builder,
            callbacks: None,
        })
    }
    /// Create a new [`TlsSettings`] similar to [TlsSettings::intermediate()]. A struct that implements [TlsAcceptCallbacks]
    /// is needed to provide the certificate during the TLS handshake.
    pub fn with_callbacks(callbacks: TlsAcceptCallbacks) -> Result<Self> {
        // No static cert/key is loaded here: the callbacks provide them per
        // handshake instead.
        let accept_builder = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls()).or_err(
            TLS_CONF_ERR,
            "fail to create mozilla_intermediate_v5 Acceptor",
        )?;
        Ok(TlsSettings {
            accept_builder,
            callbacks: Some(callbacks),
        })
    }
    /// Enable HTTP/2 support for this endpoint, which is default off.
    /// This effectively sets the ALPN to prefer HTTP/2 with HTTP/1.1 allowed
    pub fn enable_h2(&mut self) {
        self.set_alpn(ALPN::H2H1);
    }
    /// Set the ALPN preference of this endpoint. See [`ALPN`] for more details
    pub fn set_alpn(&mut self, alpn: ALPN) {
        match alpn {
            // Built-in preferences delegate to the helpers in the `alpn` module.
            ALPN::H2H1 => self
                .accept_builder
                .set_alpn_select_callback(alpn::prefer_h2),
            ALPN::H1 => self.accept_builder.set_alpn_select_callback(alpn::h1_only),
            ALPN::H2 => self.accept_builder.set_alpn_select_callback(alpn::h2_only),
            // Custom wire preference: match the client's offered list against
            // the user-supplied protocol, NOACK on no match.
            ALPN::Custom(custom) => {
                self.accept_builder
                    .set_alpn_select_callback(move |_, alpn_in| {
                        if !valid_alpn(alpn_in) {
                            return Err(AlpnError::NOACK);
                        }
                        match alpn::select_protocol(alpn_in, custom.protocol()) {
                            Some(p) => Ok(p),
                            None => Err(AlpnError::NOACK),
                        }
                    });
            }
        }
    }
    /// Finalize the settings into an [`Acceptor`] ready to perform handshakes.
    pub(crate) fn build(self) -> Acceptor {
        Acceptor {
            ssl_acceptor: self.accept_builder.build(),
            callbacks: self.callbacks,
        }
    }
}
impl Acceptor {
    /// Perform the server-side TLS handshake on `stream`.
    ///
    /// When handshake callbacks are configured they drive the handshake
    /// (e.g. to supply a certificate on the fly); otherwise the plain
    /// acceptor-based handshake is used.
    pub async fn tls_handshake<S: IO>(&self, stream: S) -> Result<SslStream<S>> {
        debug!("new ssl session");
        // TODO: be able to offload this handshake in a thread pool
        match self.callbacks.as_ref() {
            Some(cb) => handshake_with_callback(&self.ssl_acceptor, stream, cb).await,
            None => handshake(&self.ssl_acceptor, stream).await,
        }
    }
}
mod alpn {
use super::*;
use crate::tls::ssl::{select_next_proto, AlpnError, SslRef};
/// Sanity-check a client-offered ALPN list; an empty offer is never valid.
pub(super) fn valid_alpn(alpn_in: &[u8]) -> bool {
    // TODO: can add more thorough validation here.
    !alpn_in.is_empty()
}
/// Finds the first protocol in the client-offered ALPN list that matches the given protocol.
///
/// This is a helper for ALPN negotiation. It iterates over the client's protocol list
/// (wire format per RFC 7301: each entry is a 1-byte length prefix followed by
/// that many bytes) and returns the first protocol that matches `proto`.
/// The returned reference always points into `client_protocols`, so lifetimes are correct.
///
/// `client_protocols` comes straight off the wire and must not be trusted:
/// malformed input (a zero-length entry, or a length prefix that runs past the
/// end of the buffer) terminates the scan and yields `None`.
pub(super) fn select_protocol<'a>(
    client_protocols: &'a [u8],
    proto: &[u8],
) -> Option<&'a [u8]> {
    let mut bytes = client_protocols;
    while !bytes.is_empty() {
        let len = bytes[0] as usize;
        bytes = &bytes[1..];
        // Reject malformed entries. Zero-length protocol names are invalid
        // per RFC 7301, and an oversized length prefix would previously
        // panic on the slice below when fed attacker-controlled data.
        if len == 0 || len > bytes.len() {
            return None;
        }
        if len == proto.len() && &bytes[..len] == proto {
            return Some(&bytes[..len]);
        }
        bytes = &bytes[len..];
    }
    None
}
// A standard implementation provided by the SSL lib is used below
/// ALPN callback preferring h2 over http/1.1.
pub fn prefer_h2<'a>(_ssl: &mut SslRef, alpn_in: &'a [u8]) -> Result<&'a [u8], AlpnError> {
    if !valid_alpn(alpn_in) {
        return Err(AlpnError::NOACK);
    }
    // unknown ALPN, just ignore it. Most clients will fallback to h1
    select_next_proto(ALPN::H2H1.to_wire_preference(), alpn_in).ok_or(AlpnError::NOACK)
}
/// ALPN callback accepting only http/1.1.
pub fn h1_only<'a>(_ssl: &mut SslRef, alpn_in: &'a [u8]) -> Result<&'a [u8], AlpnError> {
    if !valid_alpn(alpn_in) {
        return Err(AlpnError::NOACK);
    }
    // unknown ALPN, just ignore it. Most clients will fallback to h1
    select_next_proto(ALPN::H1.to_wire_preference(), alpn_in).ok_or(AlpnError::NOACK)
}
/// ALPN callback requiring h2; failure to agree is fatal (no fallback).
pub fn h2_only<'a>(_ssl: &mut SslRef, alpn_in: &'a [u8]) -> Result<&'a [u8], AlpnError> {
    if !valid_alpn(alpn_in) {
        return Err(AlpnError::ALERT_FATAL);
    }
    // cannot agree
    select_next_proto(ALPN::H2.to_wire_preference(), alpn_in).ok_or(AlpnError::ALERT_FATAL)
}
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/listeners/tls/rustls/mod.rs | pingora-core/src/listeners/tls/rustls/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use crate::listeners::TlsAcceptCallbacks;
use crate::protocols::tls::{server::handshake, server::handshake_with_callback, TlsStream};
use log::debug;
use pingora_error::ErrorType::InternalError;
use pingora_error::{Error, OrErr, Result};
use pingora_rustls::load_certs_and_key_files;
use pingora_rustls::ServerConfig;
use pingora_rustls::{version, TlsAcceptor as RusTlsAcceptor};
use crate::protocols::{ALPN, IO};
/// The TLS settings of a listening endpoint
pub struct TlsSettings {
    // ALPN protocols in wire format, most-preferred first; `None` leaves the
    // rustls default in place.
    alpn_protocols: Option<Vec<Vec<u8>>>,
    // Path to the PEM certificate (chain) file; loaded lazily in `build()`.
    cert_path: String,
    // Path to the PEM private key file; loaded lazily in `build()`.
    key_path: String,
}
// Wraps the rustls acceptor. `callbacks` is always `None` today since
// certificate callbacks are unsupported under "rustls" (see
// `TlsSettings::with_callbacks`).
pub struct Acceptor {
    pub acceptor: RusTlsAcceptor,
    callbacks: Option<TlsAcceptCallbacks>,
}
impl TlsSettings {
    /// Create a Rustls acceptor based on the current setting for certificates,
    /// keys, and protocols.
    ///
    /// _NOTE_ This function will panic if there is an error in loading
    /// certificate files or constructing the builder
    ///
    /// Todo: Return a result instead of panicking XD
    pub fn build(self) -> Acceptor {
        let Ok(Some((certs, key))) = load_certs_and_key_files(&self.cert_path, &self.key_path)
        else {
            panic!(
                "Failed to load provided certificates \"{}\" or key \"{}\".",
                self.cert_path, self.key_path
            )
        };
        // TODO - Add support for client auth & custom CA support
        let mut config =
            ServerConfig::builder_with_protocol_versions(&[&version::TLS12, &version::TLS13])
                .with_no_client_auth()
                .with_single_cert(certs, key)
                .explain_err(InternalError, |e| {
                    format!("Failed to create server listener config: {e}")
                })
                .unwrap();
        if let Some(alpn_protocols) = self.alpn_protocols {
            config.alpn_protocols = alpn_protocols;
        }
        Acceptor {
            acceptor: RusTlsAcceptor::from(Arc::new(config)),
            callbacks: None,
        }
    }
    /// Enable HTTP/2 support for this endpoint, which is default off.
    /// This effectively sets the ALPN to prefer HTTP/2 with HTTP/1.1 allowed
    pub fn enable_h2(&mut self) {
        self.set_alpn(ALPN::H2H1);
    }
    /// Set the ALPN preference (wire format) offered during the handshake.
    pub fn set_alpn(&mut self, alpn: ALPN) {
        self.alpn_protocols = Some(alpn.to_wire_protocols());
    }
    /// Create settings that load `cert_path`/`key_path` lazily at `build()`.
    // NOTE(review): the redundant `where Self: Sized` bound was removed; it
    // is trivially satisfied on an inherent method and had no effect.
    pub fn intermediate(cert_path: &str, key_path: &str) -> Result<Self> {
        Ok(TlsSettings {
            alpn_protocols: None,
            cert_path: cert_path.to_string(),
            key_path: key_path.to_string(),
        })
    }
    /// Always fails: certificate callbacks are not supported with rustls.
    pub fn with_callbacks() -> Result<Self> {
        // TODO: verify if/how callback in handshake can be done using Rustls
        Error::e_explain(
            InternalError,
            "Certificate callbacks are not supported with feature \"rustls\".",
        )
    }
}
impl Acceptor {
    /// Perform the server-side TLS handshake on `stream`, routing through
    /// the callback variant when callbacks are configured.
    pub async fn tls_handshake<S: IO>(&self, stream: S) -> Result<TlsStream<S>> {
        debug!("new tls session");
        // TODO: be able to offload this handshake in a thread pool
        match self.callbacks.as_ref() {
            Some(cb) => handshake_with_callback(self, stream, cb).await,
            None => handshake(self, stream).await,
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/src/listeners/tls/s2n/mod.rs | pingora-core/src/listeners/tls/s2n/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use log::debug;
use pingora_error::Result;
use pingora_s2n::{
load_certs_and_key_files, ClientAuthType, Config, IgnoreVerifyHostnameCallback, S2NPolicy,
TlsAcceptor, DEFAULT_TLS13,
};
use crate::protocols::tls::server::handshake;
use crate::protocols::tls::{CaType, PskConfig, PskType, S2NConnectionBuilder, TlsStream};
use crate::protocols::{ALPN, IO};
/// The TLS settings of a listening endpoint
pub struct TlsSettings {
    // Certificate chain file path (PEM); loaded during `build()`.
    cert_path: Option<String>,
    // Private key file path (PEM); loaded during `build()`.
    key_path: Option<String>,
    // Trusted CA used for client certificate verification (mTLS).
    ca: Option<CaType>,
    // ALPN preference; `None` means no ALPN is offered.
    alpn: Option<ALPN>,
    // Pre-shared keys for TLS-PSK handshakes (RFC 4279).
    psk_config: Option<Arc<PskType>>,
    // s2n security policy; `build()` falls back to DEFAULT_TLS13.
    security_policy: Option<S2NPolicy>,
    // Whether a client certificate is required (mTLS).
    client_auth_required: bool,
    // Whether to also verify the client's hostname when doing mTLS.
    verify_client_hostname: bool,
    // Max error-response blinding delay in seconds (side-channel mitigation).
    max_blinding_delay: Option<u32>,
}
// Thin wrapper over the s2n TlsAcceptor produced by `TlsSettings::build`.
pub struct Acceptor {
    pub acceptor: TlsAcceptor<S2NConnectionBuilder>,
}
impl TlsSettings {
    /// Consume the settings and construct the s2n [`Acceptor`].
    ///
    /// Panics if the certificate/key files cannot be loaded or if any s2n
    /// builder call fails — these are configuration-time errors.
    pub fn build(self) -> Acceptor {
        let mut builder = Config::builder();
        // Default security policy with TLS 1.3 support
        // https://aws.github.io/s2n-tls/usage-guide/ch06-security-policies.html
        let policy = self.security_policy.unwrap_or(DEFAULT_TLS13);
        if let Some(max_blinding_delay) = self.max_blinding_delay {
            builder.set_max_blinding_delay(max_blinding_delay).unwrap();
        }
        if self.client_auth_required {
            builder
                .set_client_auth_type(ClientAuthType::Required)
                .unwrap();
        }
        if let Some(alpn) = self.alpn {
            builder
                .set_application_protocol_preference(alpn.to_wire_protocols())
                .unwrap();
        }
        if let (Some(cert_path), Some(key_path)) = (self.cert_path, self.key_path) {
            let Ok((cert, key)) = load_certs_and_key_files(&cert_path, &key_path) else {
                panic!(
                    "Failed to load provided certificates \"{}\" or key \"{}\".",
                    cert_path, key_path
                )
            };
            builder.load_pem(&cert, &key).unwrap();
        }
        if let Some(ca) = self.ca {
            builder.trust_pem(&ca.raw_pem).expect("invalid ca pem");
        }
        if !self.verify_client_hostname {
            builder
                .set_verify_host_callback(IgnoreVerifyHostnameCallback::new())
                .unwrap();
        }
        let config = builder.build().unwrap();
        let connection_builder = S2NConnectionBuilder {
            config,
            // `self` is consumed here, so the Arc is moved instead of cloned.
            psk_config: self.psk_config,
            security_policy: Some(policy),
        };
        Acceptor {
            acceptor: TlsAcceptor::new(connection_builder),
        }
    }
    /// Enable HTTP/2 support for this endpoint, which is default off.
    /// This effectively sets the ALPN to prefer HTTP/2 with HTTP/1.1 allowed
    pub fn enable_h2(&mut self) {
        self.set_alpn(ALPN::H2H1);
    }
    fn set_alpn(&mut self, alpn: ALPN) {
        self.alpn = Some(alpn);
    }
    /// Configure CA to use for mTLS
    pub fn set_ca(&mut self, ca: CaType) {
        self.ca = Some(ca);
    }
    /// Configure pre-shared keys to use for TLS-PSK handshake
    /// https://datatracker.ietf.org/doc/html/rfc4279
    pub fn set_psk_config(&mut self, psk_config: PskConfig) {
        self.psk_config = Some(Arc::new(psk_config));
    }
    /// S2N-TLS security policy to use. If not set, the default policy
    /// "default_tls13" will be used.
    /// https://aws.github.io/s2n-tls/usage-guide/ch06-security-policies.html
    pub fn set_policy(&mut self, policy: S2NPolicy) {
        self.security_policy = Some(policy);
    }
    /// The certificate and private key to use for TLS connections
    pub fn set_cert(&mut self, cert_path: &str, key_path: &str) {
        self.cert_path = Some(cert_path.to_string());
        self.key_path = Some(key_path.to_string());
    }
    /// Require client certificate authentication (mTLS)
    pub fn set_client_auth_required(&mut self, required: bool) {
        self.client_auth_required = required;
    }
    /// If validating client certificate, also verify client hostname (mTLS)
    pub fn set_verify_client_hostname(&mut self, verify: bool) {
        self.verify_client_hostname = verify;
    }
    /// S2N-TLS will delay a response up to the max blinding delay (default 30)
    /// seconds whenever an error triggered by a peer occurs to mitigate against
    /// timing side channels.
    pub fn set_max_blinding_delay(&mut self, delay: u32) {
        self.max_blinding_delay = Some(delay);
    }
    /// Settings with the given certificate/key and all other options at their
    /// defaults. Delegates to `new` + `set_cert` so the default field list
    /// lives in exactly one place.
    pub fn intermediate(cert_path: &str, key_path: &str) -> Result<Self> {
        let mut settings = Self::new();
        settings.set_cert(cert_path, key_path);
        Ok(settings)
    }
    /// Empty settings: no certificate, default policy, no ALPN/PSK/mTLS.
    pub fn new() -> Self {
        TlsSettings {
            cert_path: None,
            key_path: None,
            ca: None,
            security_policy: None,
            alpn: None,
            psk_config: None,
            client_auth_required: false,
            verify_client_hostname: false,
            max_blinding_delay: None,
        }
    }
}
impl Acceptor {
    /// Perform the server-side TLS handshake on `stream`.
    pub async fn tls_handshake<S: IO>(&self, stream: S) -> Result<TlsStream<S>> {
        debug!("new tls session");
        handshake(self, stream).await
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/tests/test_basic.rs | pingora-core/tests/test_basic.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod utils;
#[cfg(all(unix, feature = "any_tls"))]
use hyperlocal::{UnixClientExt, Uri};
// The shared test server's plain-HTTP endpoint should answer 200 OK.
#[tokio::test]
async fn test_http() {
    utils::init();
    let status = reqwest::get("http://127.0.0.1:6145").await.unwrap().status();
    assert_eq!(status, reqwest::StatusCode::OK);
}
// The TLS endpoint should negotiate HTTP/2 by default and fall back to
// HTTP/1.1 when the client only speaks h1.
#[cfg(feature = "any_tls")]
#[tokio::test]
async fn test_https_http2() {
    utils::init();
    let h2_client = reqwest::Client::builder()
        .danger_accept_invalid_certs(true)
        .build()
        .unwrap();
    let resp = h2_client.get("https://127.0.0.1:6146").send().await.unwrap();
    assert_eq!(resp.status(), reqwest::StatusCode::OK);
    assert_eq!(resp.version(), reqwest::Version::HTTP_2);
    let h1_client = reqwest::Client::builder()
        .danger_accept_invalid_certs(true)
        .http1_only()
        .build()
        .unwrap();
    let resp = h1_client.get("https://127.0.0.1:6146").send().await.unwrap();
    assert_eq!(resp.status(), reqwest::StatusCode::OK);
    assert_eq!(resp.version(), reqwest::Version::HTTP_11);
}
// The echo server should also answer over its Unix domain socket.
#[cfg(unix)]
#[cfg(feature = "any_tls")]
#[tokio::test]
async fn test_uds() {
    utils::init();
    let uri: hyper::Uri = Uri::new("/tmp/echo.sock", "/").into();
    let res = hyper::Client::unix().get(uri).await.unwrap();
    assert_eq!(res.status(), reqwest::StatusCode::OK);
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/tests/server_phase_fastshutdown.rs | pingora-core/tests/server_phase_fastshutdown.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// NOTE: This test sends a shutdown signal to itself,
// so it needs to be in an isolated test to prevent concurrency.
use pingora_core::server::{ExecutionPhase, RunArgs, Server};
// Ensure that execution phases are reported correctly.
#[test]
fn test_server_execution_phase_monitor_fast_shutdown() {
    let mut server = Server::new(None).unwrap();
    // Subscribe before the server thread starts so no early phase is missed.
    let mut phase = server.watch_execution_phase();
    let join = std::thread::spawn(move || {
        server.bootstrap();
        server.run(RunArgs::default());
    });
    assert!(matches!(
        phase.blocking_recv().unwrap(),
        ExecutionPhase::Bootstrap
    ));
    assert!(matches!(
        phase.blocking_recv().unwrap(),
        ExecutionPhase::BootstrapComplete,
    ));
    assert!(matches!(
        phase.blocking_recv().unwrap(),
        ExecutionPhase::Running,
    ));
    // Need to wait for startup, otherwise the signal handler is not
    // installed yet.
    //
    // TODO: signal handlers are installed after Running phase
    // message is sent, sleep for now to avoid test flake
    std::thread::sleep(std::time::Duration::from_millis(500));
    // SIGINT drives the fast (non-graceful) shutdown path.
    unsafe {
        libc::raise(libc::SIGINT);
    }
    assert!(matches!(
        phase.blocking_recv().unwrap(),
        ExecutionPhase::ShutdownStarted,
    ));
    assert!(matches!(
        phase.blocking_recv().unwrap(),
        ExecutionPhase::ShutdownRuntimes,
    ));
    join.join().unwrap();
    assert!(matches!(
        phase.blocking_recv().unwrap(),
        ExecutionPhase::Terminated,
    ));
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/tests/server_phase_gracefulshutdown.rs | pingora-core/tests/server_phase_gracefulshutdown.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// NOTE: This test sends a shutdown signal to itself,
// so it needs to be in an isolated test to prevent concurrency.
use pingora_core::server::{configuration::ServerConf, ExecutionPhase, RunArgs, Server};
// Ensure that execution phases are reported correctly.
#[test]
fn test_server_execution_phase_monitor_graceful_shutdown() {
    let conf = ServerConf {
        // Use small timeouts to speed up the test.
        grace_period_seconds: Some(1),
        graceful_shutdown_timeout_seconds: Some(1),
        ..Default::default()
    };
    let mut server = Server::new_with_opt_and_conf(None, conf);
    // Subscribe before the server thread starts so no early phase is missed.
    let mut phase = server.watch_execution_phase();
    let join = std::thread::spawn(move || {
        server.bootstrap();
        server.run(RunArgs::default());
    });
    assert!(matches!(
        phase.blocking_recv().unwrap(),
        ExecutionPhase::Bootstrap
    ));
    assert!(matches!(
        phase.blocking_recv().unwrap(),
        ExecutionPhase::BootstrapComplete,
    ));
    assert!(matches!(
        phase.blocking_recv().unwrap(),
        ExecutionPhase::Running,
    ));
    // Need to wait for startup, otherwise the signal handler is not
    // installed yet.
    //
    // TODO: signal handlers are installed after Running phase
    // message is sent, sleep for now to avoid test flake
    std::thread::sleep(std::time::Duration::from_millis(500));
    // SIGTERM drives the graceful shutdown path.
    unsafe {
        libc::raise(libc::SIGTERM);
    }
    assert!(matches!(
        phase.blocking_recv().unwrap(),
        ExecutionPhase::GracefulTerminate,
    ));
    assert!(matches!(
        phase.blocking_recv().unwrap(),
        ExecutionPhase::ShutdownStarted,
    ));
    assert!(matches!(
        phase.blocking_recv().unwrap(),
        ExecutionPhase::ShutdownGracePeriod,
    ));
    // A leftover debugging `dbg!` around this recv was removed; it only
    // added noise to test output.
    assert!(matches!(
        phase.blocking_recv().unwrap(),
        ExecutionPhase::ShutdownRuntimes,
    ));
    join.join().unwrap();
    assert!(matches!(
        phase.blocking_recv().unwrap(),
        ExecutionPhase::Terminated,
    ));
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/tests/utils/mod.rs | pingora-core/tests/utils/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use once_cell::sync::Lazy;
use std::{thread, time};
use clap::Parser;
use pingora_core::listeners::Listeners;
use pingora_core::server::configuration::Opt;
use pingora_core::server::Server;
use pingora_core::services::listening::Service;
use async_trait::async_trait;
use bytes::Bytes;
use http::{Response, StatusCode};
use pingora_timeout::timeout;
use std::time::Duration;
use pingora_core::apps::http_app::ServeHttp;
use pingora_core::protocols::http::ServerSession;
// Trivial HTTP app that echoes the request body back (see the ServeHttp impl).
#[derive(Clone)]
pub struct EchoApp;
#[async_trait]
impl ServeHttp for EchoApp {
    /// Echo the request body back with 200 OK, or "no body!" when the
    /// request carried none. Panics if reading the body exceeds 2 seconds.
    async fn response(&self, http_stream: &mut ServerSession) -> Response<Vec<u8>> {
        // read timeout of 2s
        let read_timeout = 2000;
        let read_result = timeout(
            Duration::from_millis(read_timeout),
            http_stream.read_request_body(),
        )
        .await;
        let body = match read_result {
            Ok(res) => res.unwrap().unwrap_or_else(|| Bytes::from("no body!")),
            Err(_) => panic!("Timed out after {:?}ms", read_timeout),
        };
        Response::builder()
            .status(StatusCode::OK)
            .header(http::header::CONTENT_TYPE, "text/html")
            .header(http::header::CONTENT_LENGTH, body.len())
            .body(body.to_vec())
            .unwrap()
    }
}
/// Handle to the background test-server thread; kept alive for the whole
/// test run via the `TEST_SERVER` static.
pub struct MyServer {
    // Maybe useful in the future
    #[allow(dead_code)]
    pub handle: thread::JoinHandle<()>,
}
/// Build and run the echo server used by the integration tests: one plain
/// TCP listener, a Unix socket (on Unix), and an h2-enabled TLS listener.
/// Never returns (`run_forever`).
fn entry_point(opt: Option<Opt>) {
    env_logger::init();
    let cert_path = format!("{}/tests/keys/server.crt", env!("CARGO_MANIFEST_DIR"));
    let key_path = format!("{}/tests/keys/key.pem", env!("CARGO_MANIFEST_DIR"));
    let mut server = Server::new(opt).unwrap();
    server.bootstrap();
    let mut listeners = Listeners::tcp("0.0.0.0:6145");
    #[cfg(unix)]
    listeners.add_uds("/tmp/echo.sock", None);
    let mut tls_settings =
        pingora_core::listeners::tls::TlsSettings::intermediate(&cert_path, &key_path).unwrap();
    tls_settings.enable_h2();
    listeners.add_tls_with_settings("0.0.0.0:6146", None, tls_settings);
    let echo_service = Service::with_listeners("Echo Service HTTP".to_string(), listeners, EchoApp);
    server.add_service(echo_service);
    server.run_forever();
}
impl MyServer {
    /// Spawn the echo server on a background thread and wait briefly for it
    /// to come up before handing back the thread handle.
    pub fn start() -> Self {
        let args: Vec<String> = vec![
            "pingora".into(),
            "-c".into(),
            "tests/pingora_conf.yaml".into(),
        ];
        let handle = thread::spawn(|| entry_point(Some(Opt::parse_from(args))));
        // wait until the server is up
        thread::sleep(time::Duration::from_secs(2));
        MyServer { handle }
    }
}
// Global server instance shared by all tests; started on first access.
pub static TEST_SERVER: Lazy<MyServer> = Lazy::new(MyServer::start);
/// Ensure the shared test server is running (idempotent; call from each test).
pub fn init() {
    let _ = *TEST_SERVER;
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-core/examples/client_cert.rs | pingora-core/examples/client_cert.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![cfg_attr(not(feature = "openssl"), allow(unused))]
use std::any::Any;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use std::sync::Arc;
use async_trait::async_trait;
use clap::Parser;
use http::header::{CONTENT_LENGTH, CONTENT_TYPE};
use http::{Response, StatusCode};
use pingora_core::apps::http_app::ServeHttp;
use pingora_core::listeners::tls::TlsSettings;
use pingora_core::listeners::TlsAccept;
use pingora_core::protocols::http::ServerSession;
use pingora_core::protocols::tls::TlsRef;
use pingora_core::server::configuration::Opt;
use pingora_core::server::Server;
use pingora_core::services::listening::Service;
use pingora_core::Result;
#[cfg(feature = "openssl")]
use pingora_openssl::{
nid::Nid,
ssl::{NameType, SslFiletype, SslVerifyMode},
x509::{GeneralName, X509Name},
};
// Custom structure to hold TLS information, attached to the connection's
// SslDigest extension by `MyTlsCallbacks` and read back in `MyApp::response`.
struct MyTlsInfo {
    // SNI (Server Name Indication) from the TLS handshake
    sni: Option<String>,
    // SANs (Subject Alternative Names) from client certificate
    sans: Vec<String>,
    // Common Name (CN) from client certificate
    common_name: Option<String>,
}
// Stateless HTTP app that reports the client's TLS details back to it.
struct MyApp;
#[async_trait]
impl ServeHttp for MyApp {
    /// Report the TLS details captured at handshake time (SNI, client-cert
    /// SANs, and CN) back to the client as plain text.
    async fn response(&self, session: &mut ServerSession) -> http::Response<Vec<u8>> {
        static EMPTY_VEC: Vec<String> = vec![];
        // Extract TLS info from the session's digest extensions
        let info = session
            .digest()
            .and_then(|digest| digest.ssl_digest.as_ref())
            .and_then(|ssl_digest| ssl_digest.extension.get::<MyTlsInfo>());
        let sni = info.and_then(|i| i.sni.as_deref()).unwrap_or("<none>");
        let sans = info.map(|i| &i.sans).unwrap_or(&EMPTY_VEC);
        let common_name = info
            .and_then(|i| i.common_name.as_deref())
            .unwrap_or("<none>");
        // Create response message
        let body = format!(
            "Your SNI was: {sni}\nYour SANs were: {sans:?}\nClient Common Name (CN): {common_name}\n"
        )
        .into_bytes();
        Response::builder()
            .status(StatusCode::OK)
            .header(CONTENT_TYPE, "text/plain")
            .header(CONTENT_LENGTH, body.len())
            .body(body)
            .unwrap()
    }
}
// Handshake hook that captures SNI and client-cert details into MyTlsInfo.
struct MyTlsCallbacks;
#[async_trait]
impl TlsAccept for MyTlsCallbacks {
    /// Runs after the TLS handshake completes; extracts the SNI and the
    /// client certificate's SANs and CN, returning them as a `MyTlsInfo`
    /// extension to be attached to the connection's digest.
    #[cfg(feature = "openssl")]
    async fn handshake_complete_callback(
        &self,
        tls_ref: &TlsRef,
    ) -> Option<Arc<dyn Any + Send + Sync>> {
        // Here you can inspect the TLS connection and return an extension if needed.
        // Extract SNI (Server Name Indication)
        let sni = tls_ref
            .servername(NameType::HOST_NAME)
            .map(ToOwned::to_owned);
        // Extract SAN (Subject Alternative Names) from the client certificate
        let sans = tls_ref
            .peer_certificate()
            .and_then(|cert| cert.subject_alt_names())
            .map_or(vec![], |sans| {
                sans.into_iter()
                    .filter_map(|san| san_to_string(&san))
                    .collect::<Vec<_>>()
            });
        // Extract Common Name (CN) from the client certificate
        let common_name = tls_ref.peer_certificate().and_then(|cert| {
            let cn = cert.subject_name().entries_by_nid(Nid::COMMONNAME).next()?;
            Some(cn.data().as_utf8().ok()?.to_string())
        });
        let tls_info = MyTlsInfo {
            sni,
            sans,
            common_name,
        };
        Some(Arc::new(tls_info))
    }
}
/// Render a single SAN entry as a `String`, if it is of a supported kind.
///
/// Checks, in order: DNS name, URI, email, then IP address. Returns `None`
/// for unsupported SAN kinds (and for IP payloads of unexpected length).
#[cfg(feature = "openssl")]
fn san_to_string(san: &GeneralName) -> Option<String> {
    san.dnsname()
        .or_else(|| san.uri())
        .or_else(|| san.email())
        .map(ToOwned::to_owned)
        .or_else(|| {
            san.ipaddress()
                .and_then(|ip| bytes_to_ip_addr(ip).map(|addr| addr.to_string()))
        })
}
/// Parse a raw SAN IP payload into an `IpAddr`.
///
/// A 4-byte slice becomes an IPv4 address, a 16-byte slice an IPv6 address;
/// any other length yields `None`.
fn bytes_to_ip_addr(bytes: &[u8]) -> Option<IpAddr> {
    if let Ok(octets) = <[u8; 4]>::try_from(bytes) {
        return Some(IpAddr::from(octets));
    }
    if let Ok(octets) = <[u8; 16]>::try_from(bytes) {
        return Some(IpAddr::from(octets));
    }
    None
}
// This example demonstrates an HTTP server that requires client certificates.
// The server extracts the SNI (Server Name Indication) from the TLS handshake and
// SANs (Subject Alternative Names) from the client certificate, then returns them
// as part of the HTTP response.
//
// ## How to run
//
// cargo run -F openssl --example client_cert
//
// # In another terminal, run the following command to test the server:
// cd pingora-core
// curl -k -i \
// --cert examples/keys/clients/cert-1.pem --key examples/keys/clients/key-1.pem \
// --resolve myapp.example.com:6196:127.0.0.1 \
// https://myapp.example.com:6196/
// curl -k -i \
// --cert examples/keys/clients/cert-2.pem --key examples/keys/clients/key-2.pem \
// --resolve myapp.example.com:6196:127.0.0.1 \
// https://myapp.example.com:6196/
// curl -k -i \
// --cert examples/keys/clients/invalid-cert.pem --key examples/keys/clients/invalid-key.pem \
// --resolve myapp.example.com:6196:127.0.0.1 \
// https://myapp.example.com:6196/
// Entry point when built with the "openssl" feature: sets up an HTTPS
// listener that requires a client certificate and records TLS details via
// `MyTlsCallbacks` for `MyApp` to report back.
#[cfg(feature = "openssl")]
fn main() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();
    // read command line arguments
    let opt = Opt::parse();
    let mut my_server = Server::new(Some(opt))?;
    my_server.bootstrap();
    let mut my_app = Service::new("my app".to_owned(), MyApp);
    // Paths to server certificate, private key, and client CA certificate
    // (resolved relative to this crate's manifest directory)
    let manifest_dir = env!("CARGO_MANIFEST_DIR");
    let server_cert_path = format!("{manifest_dir}/examples/keys/server/cert.pem");
    let server_key_path = format!("{manifest_dir}/examples/keys/server/key.pem");
    let client_ca_path = format!("{manifest_dir}/examples/keys/client-ca/cert.pem");
    // Create TLS settings with callbacks
    let callbacks = Box::new(MyTlsCallbacks);
    let mut tls_settings = TlsSettings::with_callbacks(callbacks)?;
    // Set server certificate and private key
    tls_settings.set_certificate_chain_file(&server_cert_path)?;
    tls_settings.set_private_key_file(server_key_path, SslFiletype::PEM)?;
    // Require client certificate (handshake fails if the client sends none)
    tls_settings.set_verify(SslVerifyMode::PEER | SslVerifyMode::FAIL_IF_NO_PEER_CERT);
    // Set CA for client certificate verification
    tls_settings.set_ca_file(&client_ca_path)?;
    // Optionally, set the list of acceptable client CAs sent to the client
    tls_settings.set_client_ca_list(X509Name::load_client_ca_file(&client_ca_path)?);
    my_app.add_tls_with_settings("0.0.0.0:6196", None, tls_settings);
    my_server.add_service(my_app);
    // Runs the server event loop; this call is not expected to return.
    my_server.run_forever();
}
// Fallback entry point when the example is built without the "openssl"
// feature: print a hint and exit instead of failing to compile.
#[cfg(not(feature = "openssl"))]
fn main() {
    eprintln!("This example requires the 'openssl' feature to be enabled.");
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-s2n/src/lib.rs | pingora-s2n/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use pingora_error::{Error, ErrorType, Result};
use std::fs;
pub use s2n_tls::{
callbacks::VerifyHostNameCallback,
config::{Builder as ConfigBuilder, Config},
connection::{Builder as ConnectionBuilder, Connection},
enums::{ClientAuthType, Mode, PskHmac},
error::Error as S2NError,
psk::Psk,
security::{Policy as S2NPolicy, DEFAULT_TLS13},
};
pub use s2n_tls_tokio::{TlsAcceptor, TlsConnector, TlsStream};
/// Load a certificate PEM file and a private key PEM file from disk.
///
/// Returns the raw bytes as `(certificate, key)`; fails with an
/// `ErrorType::InvalidCert` error if either file cannot be read.
pub fn load_certs_and_key_files(cert_file: &str, key_file: &str) -> Result<(Vec<u8>, Vec<u8>)> {
    Ok((load_pem_file(cert_file)?, load_pem_file(key_file)?))
}
pub fn load_pem_file(file: &str) -> Result<Vec<u8>> {
if let Ok(bytes) = fs::read(file) {
Ok(bytes)
} else {
Error::e_explain(
ErrorType::InvalidCert,
"Certificate in pem file could not be read",
)
}
}
/// SHA-256 digest of the given raw certificate bytes.
pub fn hash_certificate(cert: &[u8]) -> Vec<u8> {
    ring::digest::digest(&ring::digest::SHA256, cert)
        .as_ref()
        .to_vec()
}
/// Verify host name callback that always returns a success,
/// effectively ignoring hostname validation
pub struct IgnoreVerifyHostnameCallback {}
impl IgnoreVerifyHostnameCallback {
    /// Build a new (stateless) instance of the callback.
    pub fn new() -> Self {
        Self {}
    }
}
// `Default` simply delegates to `new()`; the callback carries no state.
impl Default for IgnoreVerifyHostnameCallback {
    fn default() -> Self {
        Self::new()
    }
}
impl VerifyHostNameCallback for IgnoreVerifyHostnameCallback {
    // Accept any peer host name unconditionally. This disables hostname
    // verification entirely, so it should only be used when the peer's
    // identity is established by other means.
    fn verify_host_name(&self, _host_name: &str) -> bool {
        true
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/tinyufo/src/lib.rs | tinyufo/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! An in-memory cache implementation with TinyLFU as the admission policy and [S3-FIFO](https://s3fifo.com/) as the eviction policy.
//!
//! TinyUFO improves cache hit ratio noticeably compared to LRU.
//!
//! TinyUFO is lock-free. It is very fast in systems with a lot of concurrent reads and/or writes.
use ahash::RandomState;
use crossbeam_queue::SegQueue;
use std::marker::PhantomData;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::{
AtomicBool, AtomicU8,
Ordering::{Acquire, Relaxed, SeqCst},
};
mod buckets;
mod estimation;
use buckets::Buckets;
use estimation::TinyLfu;
use std::hash::Hash;
const SMALL: bool = false;
const MAIN: bool = true;
// Indicate which queue an item is located:
// `SMALL` (false) is the small queue items are admitted into,
// `MAIN` (true) means the item has been promoted to the main queue.
#[derive(Debug, Default)]
struct Location(AtomicBool);
impl Location {
    // New location pointing at the small queue.
    fn new_small() -> Self {
        Self(AtomicBool::new(SMALL))
    }
    // Raw queue flag: `SMALL` or `MAIN`.
    fn value(&self) -> bool {
        self.0.load(Relaxed)
    }
    // Whether the item currently sits in the main queue.
    fn is_main(&self) -> bool {
        self.value()
    }
    // Mark the item as promoted to the main queue.
    fn move_to_main(&self) {
        // use the named constant (was a bare `true`) for consistency with
        // `new_small()` above
        self.0.store(MAIN, Relaxed);
    }
}
// We have 8 bits to spare but we still cap at 3. This is to make sure that the main queue
// in the worst case can find something to evict quickly
const USES_CAP: u8 = 3;
// Per-item use counter, saturating at USES_CAP.
#[derive(Debug, Default)]
struct Uses(AtomicU8);
impl Uses {
    // Bump the use counter (saturating at `USES_CAP`); returns the counter
    // value observed by this thread after the operation.
    pub fn inc_uses(&self) -> u8 {
        match self
            .0
            .fetch_update(Acquire, Relaxed, |u| (u < USES_CAP).then_some(u + 1))
        {
            // we performed the increment
            Ok(prev) => prev + 1,
            // counter already at the cap, left untouched
            Err(capped) => capped,
        }
    }
    // decrease uses, return the previous value (0 stays at 0)
    pub fn decr_uses(&self) -> u8 {
        match self.0.fetch_update(Acquire, Relaxed, |u| u.checked_sub(1)) {
            Ok(prev) => prev,
            // already zero, nothing to decrement
            Err(zero) => zero,
        }
    }
    // Current use count.
    pub fn uses(&self) -> u8 {
        self.0.load(Relaxed)
    }
}
// Internal key type: the cache stores only 64-bit hashes of user keys.
type Key = u64;
// Weight of a cached item; bounded to 16 bits.
type Weight = u16;
/// The key-value pair returned from cache eviction
#[derive(Clone)]
pub struct KV<T> {
    /// NOTE: that we currently don't store the Actual key in the cache. This returned value
    /// is just the hash of it.
    pub key: Key,
    /// The evicted value.
    pub data: T,
    /// The weight the value was stored with.
    pub weight: Weight,
}
// the data and its metadata
pub struct Bucket<T> {
    // number of times the item was used (capped, see `Uses`)
    uses: Uses,
    // which queue (small or main) the item currently belongs to
    queue: Location,
    // the weight the item was admitted/updated with
    weight: Weight,
    // the cached value itself
    data: T,
}
// Fraction of the total weight budget dedicated to the small queue
// (see `small_weight_limit`).
const SMALL_QUEUE_PERCENTAGE: f32 = 0.1;
// The two FIFO queues of S3-FIFO plus the TinyLFU admission sketch.
struct FiFoQueues<T> {
    // max combined weight of the small and main queues
    total_weight_limit: usize,
    // the small queue and its current total weight
    small: SegQueue<Key>,
    small_weight: AtomicUsize,
    // the main queue and its current total weight
    main: SegQueue<Key>,
    main_weight: AtomicUsize,
    // this replaces the ghost queue of S3-FIFO with similar goal: track the evicted assets
    estimator: TinyLfu,
    _t: PhantomData<T>,
}
impl<T: Clone + Send + Sync + 'static> FiFoQueues<T> {
    // Admit (insert or update) `key` with `data` and `weight`, returning the
    // items evicted to make room.
    //
    // For a new key, when `ignore_lfu` is false and exactly one item would be
    // evicted, TinyLFU arbitrates: if the evicted item is more popular than
    // the incoming one, the old item is kept and the *incoming* KV is
    // returned in the evicted list instead.
    fn admit(
        &self,
        key: Key,
        data: T,
        weight: u16,
        ignore_lfu: bool,
        buckets: &Buckets<T>,
    ) -> Vec<KV<T>> {
        // Note that we only use TinyLFU during cache admission but not cache read.
        // So effectively we mostly sketch the popularity of less popular assets.
        // In this way the sketch is a bit more accurate on these assets.
        // Also we don't need another separated window cache to address the sparse burst issue as
        // this sketch doesn't favor very popular assets much.
        let new_freq = self.estimator.incr(key);
        assert!(weight > 0);
        let new_bucket = {
            let Some((uses, queue, weight)) = buckets.get_map(&key, |bucket| {
                // the item exists, in case weight changes
                let old_weight = bucket.weight;
                let uses = bucket.uses.inc_uses();
                // apply the weight delta to the right queue's weight counter
                fn update_atomic(weight: &AtomicUsize, old: u16, new: u16) {
                    if old == new {
                        return;
                    }
                    if old > new {
                        weight.fetch_sub((old - new) as usize, SeqCst);
                    } else {
                        weight.fetch_add((new - old) as usize, SeqCst);
                    }
                }
                let queue = bucket.queue.is_main();
                if queue == MAIN {
                    update_atomic(&self.main_weight, old_weight, weight);
                } else {
                    update_atomic(&self.small_weight, old_weight, weight);
                }
                (uses, queue, weight)
            }) else {
                // New key: first free up enough weight to fit it.
                let mut evicted = self.evict_to_limit(weight, buckets);
                // TODO: figure out the right way to compare frequencies of different weights across
                // many evicted assets. For now TinyLFU is only used when only evicting 1 item.
                let (key, data, weight) = if !ignore_lfu && evicted.len() == 1 {
                    // Apply the admission algorithm of TinyLFU: compare the incoming new item
                    // and the evicted one. The more popular one is admitted to cache
                    let evicted_first = &evicted[0];
                    let evicted_freq = self.estimator.get(evicted_first.key);
                    if evicted_freq > new_freq {
                        // put it back
                        let first = evicted.pop().expect("just check non-empty");
                        // return the put value
                        evicted.push(KV { key, data, weight });
                        (first.key, first.data, first.weight)
                    } else {
                        (key, data, weight)
                    }
                } else {
                    (key, data, weight)
                };
                let bucket = Bucket {
                    queue: Location::new_small(),
                    weight,
                    uses: Default::default(), // 0
                    data,
                };
                let old = buckets.insert(key, bucket);
                if old.is_none() {
                    // Always push key first before updating weight
                    // If doing the other order, another concurrent thread might not
                    // find things to evict
                    self.small.push(key);
                    self.small_weight.fetch_add(weight as usize, SeqCst);
                } // else: two threads are racing adding the item
                // TODO: compare old.weight and update accordingly
                return evicted;
            };
            // Existing key: build a replacement bucket that carries over the
            // observed uses and queue location.
            Bucket {
                queue: Location(queue.into()),
                weight,
                uses: Uses(uses.into()),
                data,
            }
        };
        // replace the existing one
        buckets.insert(key, new_bucket);
        // NOTE: there is a chance that the item itself is evicted if it happens to be the one selected
        // by the algorithm. We could avoid this by checking if the item is in the returned evicted items,
        // and then add it back. But to keep the code simple we just allow it to happen.
        self.evict_to_limit(0, buckets)
    }
    // the `extra_weight` is to essentially tell the cache to reserve that amount of weight for
    // admission. It is used when calling `evict_to_limit` before admitting the asset itself.
    fn evict_to_limit(&self, extra_weight: Weight, buckets: &Buckets<T>) -> Vec<KV<T>> {
        // only allocate the Vec when we already know something must be evicted
        let mut evicted = if self.total_weight_limit
            < self.small_weight.load(SeqCst) + self.main_weight.load(SeqCst) + extra_weight as usize
        {
            Vec::with_capacity(1)
        } else {
            vec![]
        };
        while self.total_weight_limit
            < self.small_weight.load(SeqCst) + self.main_weight.load(SeqCst) + extra_weight as usize
        {
            if let Some(evicted_item) = self.evict_one(buckets) {
                evicted.push(evicted_item);
            } else {
                break;
            }
        }
        evicted
    }
    // Evict a single item, preferring the small queue when it is at or over
    // its own weight budget.
    fn evict_one(&self, buckets: &Buckets<T>) -> Option<KV<T>> {
        let evict_small = self.small_weight_limit() <= self.small_weight.load(SeqCst);
        if evict_small {
            let evicted = self.evict_one_from_small(buckets);
            // evict_one_from_small could just promote everything to main without evicting any
            // so need to evict_one_from_main if nothing evicted
            if evicted.is_some() {
                return evicted;
            }
        }
        self.evict_one_from_main(buckets)
    }
    // Weight budget of the small queue: SMALL_QUEUE_PERCENTAGE of the total,
    // rounded down, plus one.
    fn small_weight_limit(&self) -> usize {
        (self.total_weight_limit as f32 * SMALL_QUEUE_PERCENTAGE).floor() as usize + 1
    }
    // Pop from the small queue until something is evicted; items with more
    // than one use are promoted to the main queue instead of being evicted.
    fn evict_one_from_small(&self, buckets: &Buckets<T>) -> Option<KV<T>> {
        loop {
            let Some(to_evict) = self.small.pop() else {
                // empty queue, this is caught between another pop() and fetch_sub()
                return None;
            };
            let v = buckets
                .get_map(&to_evict, |bucket| {
                    let weight = bucket.weight;
                    self.small_weight.fetch_sub(weight as usize, SeqCst);
                    if bucket.uses.uses() > 1 {
                        // move to main
                        bucket.queue.move_to_main();
                        self.main.push(to_evict);
                        self.main_weight.fetch_add(weight as usize, SeqCst);
                        // continue until find one to evict
                        None
                    } else {
                        let data = bucket.data.clone();
                        let weight = bucket.weight;
                        buckets.remove(&to_evict);
                        Some(KV {
                            key: to_evict,
                            data,
                            weight,
                        })
                    }
                })
                .flatten();
            if v.is_some() {
                // found the one to evict, break
                return v;
            }
        }
    }
    // Pop from the main queue until something is evicted; items whose use
    // count is still positive get one decrement and a second chance.
    fn evict_one_from_main(&self, buckets: &Buckets<T>) -> Option<KV<T>> {
        loop {
            let to_evict = self.main.pop()?;
            if let Some(v) = buckets
                .get_map(&to_evict, |bucket| {
                    if bucket.uses.decr_uses() > 0 {
                        // put it back
                        self.main.push(to_evict);
                        // continue the loop
                        None
                    } else {
                        // evict
                        let weight = bucket.weight;
                        self.main_weight.fetch_sub(weight as usize, SeqCst);
                        let data = bucket.data.clone();
                        buckets.remove(&to_evict);
                        Some(KV {
                            key: to_evict,
                            data,
                            weight,
                        })
                    }
                })
                .flatten()
            {
                // found the one to evict, break
                return Some(v);
            }
        }
    }
}
/// [TinyUfo] cache
pub struct TinyUfo<K, T> {
    // eviction/admission machinery: S3-FIFO queues plus the TinyLFU sketch
    queues: FiFoQueues<T>,
    // the actual key -> Bucket storage
    buckets: Buckets<T>,
    // hasher mapping user keys `K` into the internal `u64` key space
    random_status: RandomState,
    _k: PhantomData<K>,
}
impl<K: Hash, T: Clone + Send + Sync + 'static> TinyUfo<K, T> {
    /// Create a new TinyUfo cache with the given weight limit and the given
    /// size limit of the ghost queue.
    pub fn new(total_weight_limit: usize, estimated_size: usize) -> Self {
        let queues = FiFoQueues {
            small: SegQueue::new(),
            small_weight: 0.into(),
            main: SegQueue::new(),
            main_weight: 0.into(),
            total_weight_limit,
            estimator: TinyLfu::new(estimated_size),
            _t: PhantomData,
        };
        TinyUfo {
            queues,
            buckets: Buckets::new_fast(estimated_size),
            random_status: RandomState::new(),
            _k: PhantomData,
        }
    }
    /// Create a new TinyUfo cache but with more memory efficient data structures.
    /// The trade-off is that the get() is slower by a constant factor.
    /// The cache hit ratio could be higher as this type of TinyUFO allows to store
    /// more assets with the same memory.
    pub fn new_compact(total_weight_limit: usize, estimated_size: usize) -> Self {
        let queues = FiFoQueues {
            small: SegQueue::new(),
            small_weight: 0.into(),
            main: SegQueue::new(),
            main_weight: 0.into(),
            total_weight_limit,
            estimator: TinyLfu::new_compact(estimated_size),
            _t: PhantomData,
        };
        TinyUfo {
            queues,
            buckets: Buckets::new_compact(estimated_size, 32),
            random_status: RandomState::new(),
            _k: PhantomData,
        }
    }
    // TODO: with_capacity()
    /// Read the given key
    ///
    /// Return Some(T) if the key exists
    ///
    /// Reading also bumps the item's use counter, which protects it from
    /// eviction (see `Uses`).
    pub fn get(&self, key: &K) -> Option<T> {
        let key = self.random_status.hash_one(key);
        self.buckets.get_map(&key, |p| {
            p.uses.inc_uses();
            p.data.clone()
        })
    }
    /// Put the key value to the [TinyUfo]
    ///
    /// Return a list of [KV] of key and `T` that are evicted
    pub fn put(&self, key: K, data: T, weight: Weight) -> Vec<KV<T>> {
        let key = self.random_status.hash_one(&key);
        self.queues.admit(key, data, weight, false, &self.buckets)
    }
    /// Remove the given key from the cache if it exists
    ///
    /// Returns Some(T) if the key was found and removed, None otherwise
    pub fn remove(&self, key: &K) -> Option<T> {
        let key = self.random_status.hash_one(key);
        // Get data and update weights
        let result = self.buckets.get_map(&key, |bucket| {
            let data = bucket.data.clone();
            let weight = bucket.weight;
            // Update weight based on queue location
            if bucket.queue.is_main() {
                self.queues.main_weight.fetch_sub(weight as usize, SeqCst);
            } else {
                self.queues.small_weight.fetch_sub(weight as usize, SeqCst);
            }
            data
        });
        // If we found and processed the item, remove it from buckets.
        // The stale key left behind in the FIFO queues is skipped naturally
        // by the eviction loops (get_map on a removed key yields None).
        if result.is_some() {
            self.buckets.remove(&key);
        }
        result
    }
    /// Always put the key value to the [TinyUfo]
    ///
    /// Return a list of [KV] of key and `T` that are evicted
    ///
    /// Similar to [Self::put] but guarantee the insertion of the asset.
    /// In [Self::put], the TinyLFU check may reject putting the current asset if it is less
    /// popular than the one being evicted.
    ///
    /// In some real world use cases, a few reads to the same asset may be pending for the put action
    /// to be finished so that they can read the asset from cache. Neither of the above behaviors is
    /// ideal for this use case.
    ///
    /// Compared to [Self::put], the hit ratio when using this function is reduced by about 0.5pp or less
    /// under zipf workloads.
    pub fn force_put(&self, key: K, data: T, weight: Weight) -> Vec<KV<T>> {
        let key = self.random_status.hash_one(&key);
        self.queues.admit(key, data, weight, true, &self.buckets)
    }
    // Test helper: which queue (SMALL/MAIN) the key currently sits in, if any.
    #[cfg(test)]
    fn peek_queue(&self, key: K) -> Option<bool> {
        let key = self.random_status.hash_one(&key);
        self.buckets.get_queue(&key)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // The hasher (and, where used, the TinyLFU sketch) is seeded in each test
    // so that key placement, and therefore eviction order, is deterministic.
    #[test]
    fn test_uses() {
        let uses: Uses = Default::default();
        assert_eq!(uses.uses(), 0);
        uses.inc_uses();
        assert_eq!(uses.uses(), 1);
        // increments saturate at USES_CAP
        for _ in 0..USES_CAP {
            uses.inc_uses();
        }
        assert_eq!(uses.uses(), USES_CAP);
        // decrements saturate at 0
        for _ in 0..USES_CAP + 2 {
            uses.decr_uses();
        }
        assert_eq!(uses.uses(), 0);
    }
    #[test]
    fn test_evict_from_small() {
        let mut cache = TinyUfo::new(5, 5);
        cache.random_status = RandomState::with_seeds(2, 3, 4, 5);
        cache.queues.estimator = TinyLfu::new_seeded(5);
        cache.put(1, 1, 1);
        cache.put(2, 2, 2);
        cache.put(3, 3, 2);
        // cache full now
        assert_eq!(cache.peek_queue(1), Some(SMALL));
        assert_eq!(cache.peek_queue(2), Some(SMALL));
        assert_eq!(cache.peek_queue(3), Some(SMALL));
        let evicted = cache.put(4, 4, 3);
        assert_eq!(evicted.len(), 2);
        assert_eq!(evicted[0].data, 1);
        assert_eq!(evicted[1].data, 2);
        assert_eq!(cache.peek_queue(1), None);
        assert_eq!(cache.peek_queue(2), None);
        assert_eq!(cache.peek_queue(3), Some(SMALL));
    }
    #[test]
    fn test_evict_from_small_to_main() {
        let mut cache = TinyUfo::new(5, 5);
        cache.random_status = RandomState::with_seeds(2, 3, 4, 5);
        cache.queues.estimator = TinyLfu::new_seeded(5);
        cache.put(1, 1, 1);
        cache.put(2, 2, 2);
        cache.put(3, 3, 2);
        // cache full now
        cache.get(&1);
        cache.get(&1); // 1 will be moved to main during next eviction
        assert_eq!(cache.peek_queue(1), Some(SMALL));
        assert_eq!(cache.peek_queue(2), Some(SMALL));
        assert_eq!(cache.peek_queue(3), Some(SMALL));
        let evicted = cache.put(4, 4, 2);
        assert_eq!(evicted.len(), 1);
        assert_eq!(evicted[0].weight, 2);
        assert_eq!(cache.peek_queue(1), Some(MAIN));
        // either 2, 3, or 4 was evicted. Check evicted for which.
        let mut remaining = vec![2, 3, 4];
        remaining.remove(
            remaining
                .iter()
                .position(|x| *x == evicted[0].data)
                .unwrap(),
        );
        assert_eq!(cache.peek_queue(evicted[0].key), None);
        for k in remaining {
            assert_eq!(cache.peek_queue(k), Some(SMALL));
        }
    }
    #[test]
    fn test_evict_reentry() {
        let mut cache = TinyUfo::new(5, 5);
        cache.random_status = RandomState::with_seeds(2, 3, 4, 5);
        cache.queues.estimator = TinyLfu::new_seeded(5);
        cache.put(1, 1, 1);
        cache.put(2, 2, 2);
        cache.put(3, 3, 2);
        // cache full now
        assert_eq!(cache.peek_queue(1), Some(SMALL));
        assert_eq!(cache.peek_queue(2), Some(SMALL));
        assert_eq!(cache.peek_queue(3), Some(SMALL));
        let evicted = cache.put(4, 4, 1);
        assert_eq!(evicted.len(), 1);
        assert_eq!(evicted[0].data, 1);
        assert_eq!(cache.peek_queue(1), None);
        assert_eq!(cache.peek_queue(2), Some(SMALL));
        assert_eq!(cache.peek_queue(3), Some(SMALL));
        assert_eq!(cache.peek_queue(4), Some(SMALL));
        // a previously evicted key can be admitted again
        let evicted = cache.put(1, 1, 1);
        assert_eq!(evicted.len(), 1);
        assert_eq!(evicted[0].data, 2);
        assert_eq!(cache.peek_queue(1), Some(SMALL));
        assert_eq!(cache.peek_queue(2), None);
        assert_eq!(cache.peek_queue(3), Some(SMALL));
        assert_eq!(cache.peek_queue(4), Some(SMALL));
    }
    #[test]
    fn test_evict_entry_denied() {
        let mut cache = TinyUfo::new(5, 5);
        cache.random_status = RandomState::with_seeds(2, 3, 4, 5);
        cache.queues.estimator = TinyLfu::new_seeded(5);
        cache.put(1, 1, 1);
        cache.put(2, 2, 2);
        cache.put(3, 3, 2);
        // cache full now
        assert_eq!(cache.peek_queue(1), Some(SMALL));
        assert_eq!(cache.peek_queue(2), Some(SMALL));
        assert_eq!(cache.peek_queue(3), Some(SMALL));
        // trick: put a few times to bump their frequencies
        cache.put(1, 1, 1);
        cache.put(2, 2, 2);
        cache.put(3, 3, 2);
        // TinyLFU rejects the less popular newcomer and returns it as "evicted"
        let evicted = cache.put(4, 4, 1);
        assert_eq!(evicted.len(), 1);
        assert_eq!(evicted[0].data, 4); // 4 is returned
        assert_eq!(cache.peek_queue(1), Some(SMALL));
        assert_eq!(cache.peek_queue(2), Some(SMALL));
        assert_eq!(cache.peek_queue(3), Some(SMALL));
        assert_eq!(cache.peek_queue(4), None);
    }
    #[test]
    fn test_force_put() {
        let mut cache = TinyUfo::new(5, 5);
        cache.random_status = RandomState::with_seeds(2, 3, 4, 5);
        cache.queues.estimator = TinyLfu::new_seeded(5);
        cache.put(1, 1, 1);
        cache.put(2, 2, 2);
        cache.put(3, 3, 2);
        // cache full now
        assert_eq!(cache.peek_queue(1), Some(SMALL));
        assert_eq!(cache.peek_queue(2), Some(SMALL));
        assert_eq!(cache.peek_queue(3), Some(SMALL));
        // trick: put a few times to bump their frequencies
        cache.put(1, 1, 1);
        cache.put(2, 2, 2);
        cache.put(3, 3, 2);
        // force put will replace 1 with 4 even through 1 is more popular
        let evicted = cache.force_put(4, 4, 1);
        assert_eq!(evicted.len(), 1);
        assert_eq!(evicted[0].data, 1); // 1 is returned
        assert_eq!(cache.peek_queue(1), None);
        assert_eq!(cache.peek_queue(2), Some(SMALL));
        assert_eq!(cache.peek_queue(3), Some(SMALL));
        assert_eq!(cache.peek_queue(4), Some(SMALL));
    }
    #[test]
    fn test_evict_from_main() {
        let mut cache = TinyUfo::new(5, 5);
        cache.random_status = RandomState::with_seeds(2, 3, 4, 5);
        cache.queues.estimator = TinyLfu::new_seeded(5);
        cache.put(1, 1, 1);
        cache.put(2, 2, 2);
        cache.put(3, 3, 2);
        // cache full now
        // all 3 will qualify to main
        cache.get(&1);
        cache.get(&1);
        cache.get(&2);
        cache.get(&2);
        cache.get(&3);
        cache.get(&3);
        let evicted = cache.put(4, 4, 1);
        assert_eq!(evicted.len(), 1);
        assert_eq!(evicted[0].data, 1);
        // 1 kicked from main
        assert_eq!(cache.peek_queue(1), None);
        assert_eq!(cache.peek_queue(2), Some(MAIN));
        assert_eq!(cache.peek_queue(3), Some(MAIN));
        assert_eq!(cache.peek_queue(4), Some(SMALL));
        let evicted = cache.put(1, 1, 1);
        assert_eq!(evicted.len(), 1);
        assert_eq!(evicted[0].data, 4);
        assert_eq!(cache.peek_queue(1), Some(SMALL));
        assert_eq!(cache.peek_queue(2), Some(MAIN));
        assert_eq!(cache.peek_queue(3), Some(MAIN));
        assert_eq!(cache.peek_queue(4), None);
    }
    #[test]
    fn test_evict_from_small_compact() {
        let mut cache = TinyUfo::new(5, 5);
        cache.random_status = RandomState::with_seeds(2, 3, 4, 5);
        cache.queues.estimator = TinyLfu::new_compact_seeded(5);
        cache.put(1, 1, 1);
        cache.put(2, 2, 2);
        cache.put(3, 3, 2);
        // cache full now
        assert_eq!(cache.peek_queue(1), Some(SMALL));
        assert_eq!(cache.peek_queue(2), Some(SMALL));
        assert_eq!(cache.peek_queue(3), Some(SMALL));
        let evicted = cache.put(4, 4, 3);
        assert_eq!(evicted.len(), 2);
        assert_eq!(evicted[0].data, 1);
        assert_eq!(evicted[1].data, 2);
        assert_eq!(cache.peek_queue(1), None);
        assert_eq!(cache.peek_queue(2), None);
        assert_eq!(cache.peek_queue(3), Some(SMALL));
    }
    #[test]
    fn test_evict_from_small_to_main_compact() {
        let mut cache = TinyUfo::new(5, 5);
        cache.random_status = RandomState::with_seeds(2, 3, 4, 5);
        cache.queues.estimator = TinyLfu::new_compact_seeded(5);
        cache.put(1, 1, 1);
        cache.put(2, 2, 2);
        cache.put(3, 3, 2);
        // cache full now
        cache.get(&1);
        cache.get(&1); // 1 will be moved to main during next eviction
        assert_eq!(cache.peek_queue(1), Some(SMALL));
        assert_eq!(cache.peek_queue(2), Some(SMALL));
        assert_eq!(cache.peek_queue(3), Some(SMALL));
        let evicted = cache.put(4, 4, 2);
        assert_eq!(evicted.len(), 1);
        assert_eq!(evicted[0].weight, 2);
        assert_eq!(cache.peek_queue(1), Some(MAIN));
        // either 2, 3, or 4 was evicted. Check evicted for which.
        let mut remaining = vec![2, 3, 4];
        remaining.remove(
            remaining
                .iter()
                .position(|x| *x == evicted[0].data)
                .unwrap(),
        );
        assert_eq!(cache.peek_queue(evicted[0].key), None);
        for k in remaining {
            assert_eq!(cache.peek_queue(k), Some(SMALL));
        }
    }
    #[test]
    fn test_remove() {
        let mut cache = TinyUfo::new(5, 5);
        cache.random_status = RandomState::with_seeds(2, 3, 4, 5);
        cache.put(1, 1, 1);
        cache.put(2, 2, 2);
        cache.put(3, 3, 2);
        assert_eq!(cache.remove(&1), Some(1));
        assert_eq!(cache.remove(&3), Some(3));
        assert_eq!(cache.get(&1), None);
        assert_eq!(cache.get(&3), None);
        // Verify empty keys get evicted when cache fills up
        // Fill cache to trigger eviction
        cache.put(5, 5, 2);
        cache.put(6, 6, 2);
        cache.put(7, 7, 2);
        // The removed items (1, 3) should be naturally evicted now
        // and new items should be in cache
        assert_eq!(cache.get(&1), None);
        assert_eq!(cache.get(&3), None);
        assert!(cache.get(&5).is_some() || cache.get(&6).is_some() || cache.get(&7).is_some());
        // Test weights after eviction cycles
        let total_weight =
            cache.queues.small_weight.load(SeqCst) + cache.queues.main_weight.load(SeqCst);
        assert!(total_weight <= 5); // Should not exceed limit
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/tinyufo/src/estimation.rs | tinyufo/src/estimation.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use ahash::RandomState;
use std::hash::Hash;
use std::sync::atomic::{AtomicU8, AtomicUsize, Ordering};
// A count-min sketch: one (counter row, hasher) pair per hash function.
struct Estimator {
    estimator: Box<[(Box<[AtomicU8]>, RandomState)]>,
}
impl Estimator {
    // Compute (width, depth) of a CM sketch expected to hold `items` keys.
    fn optimal_paras(items: usize) -> (usize, usize) {
        use std::cmp::max;
        // derived from https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch
        // width = ceil(e / ε)
        // depth = ceil(ln(δ) / ln(1 / 2)) = ceil(log2(1 / δ))
        let error_range = 1.0 / (items as f64);
        let failure_probability = 1.0 / (items as f64);
        (
            // at least 16 slots per row
            max((std::f64::consts::E / error_range).ceil() as usize, 16),
            // at least 2 hash rows
            max((failure_probability.ln() / 0.5f64.ln()).ceil() as usize, 2),
        )
    }
    // Sketch sized for `items` keys.
    fn optimal(items: usize) -> Self {
        let (slots, hashes) = Self::optimal_paras(items);
        Self::new(hashes, slots, RandomState::new)
    }
    // Memory-saving variant: sized for items/100, trading accuracy for space.
    fn compact(items: usize) -> Self {
        let (slots, hashes) = Self::optimal_paras(items / 100);
        Self::new(hashes, slots, RandomState::new)
    }
    // Test-only: like `optimal` but with fixed hash seeds for determinism.
    #[cfg(test)]
    fn seeded(items: usize) -> Self {
        let (slots, hashes) = Self::optimal_paras(items);
        Self::new(hashes, slots, || RandomState::with_seeds(2, 3, 4, 5))
    }
    // Test-only: like `compact` but with fixed hash seeds for determinism.
    #[cfg(test)]
    fn seeded_compact(items: usize) -> Self {
        let (slots, hashes) = Self::optimal_paras(items / 100);
        Self::new(hashes, slots, || RandomState::with_seeds(2, 3, 4, 5))
    }
    /// Create a new `Estimator` with the given amount of hashes and columns (slots) using
    /// the given random source.
    pub fn new(hashes: usize, slots: usize, random: impl Fn() -> RandomState) -> Self {
        let mut estimator = Vec::with_capacity(hashes);
        for _ in 0..hashes {
            let mut slot = Vec::with_capacity(slots);
            for _ in 0..slots {
                slot.push(AtomicU8::new(0));
            }
            estimator.push((slot.into_boxed_slice(), random()));
        }
        Estimator {
            estimator: estimator.into_boxed_slice(),
        }
    }
    /// Increment the counter of `key` in every row (saturating at `u8::MAX`)
    /// and return the new estimate (the minimum across rows).
    pub fn incr<T: Hash>(&self, key: T) -> u8 {
        let mut min = u8::MAX;
        for (slot, hasher) in self.estimator.iter() {
            let hash = hasher.hash_one(&key) as usize;
            let counter = &slot[hash % slot.len()];
            let (_current, new) = incr_no_overflow(counter);
            min = std::cmp::min(min, new);
        }
        min
    }
    /// Get the estimated frequency of `key`.
    pub fn get<T: Hash>(&self, key: T) -> u8 {
        let mut min = u8::MAX;
        for (slot, hasher) in self.estimator.iter() {
            let hash = hasher.hash_one(&key) as usize;
            let counter = &slot[hash % slot.len()];
            let current = counter.load(Ordering::Relaxed);
            min = std::cmp::min(min, current);
        }
        min
    }
    /// right shift all values inside this `Estimator`.
    pub fn age(&self, shift: u8) {
        for (slot, _) in self.estimator.iter() {
            for counter in slot.iter() {
                // we don't CAS because the only update between the load and store
                // is fetch_add(1), which should be fine to miss/ignore
                let c = counter.load(Ordering::Relaxed);
                counter.store(c >> shift, Ordering::Relaxed);
            }
        }
    }
}
fn incr_no_overflow(var: &AtomicU8) -> (u8, u8) {
loop {
let current = var.load(Ordering::Relaxed);
if current == u8::MAX {
return (current, current);
}
let new = if current == u8::MAX - 1 {
u8::MAX
} else {
current + 1
};
if let Err(new) = var.compare_exchange(current, new, Ordering::Acquire, Ordering::Relaxed) {
// someone else beat us to it
if new == u8::MAX {
// already max
return (current, new);
} // else, try again
} else {
return (current, new);
}
}
}
// bare-minimum TinyLfu with CM-Sketch, no doorkeeper for now
pub(crate) struct TinyLfu {
    // the CM sketch holding the frequency estimates
    estimator: Estimator,
    // number of increments seen in the current aging window
    window_counter: AtomicUsize,
    // when `window_counter` reaches this, all counters are halved (aged)
    window_limit: usize,
}
impl TinyLfu {
    // Estimated frequency of `key` (minimum across sketch rows).
    pub fn get<T: Hash>(&self, key: T) -> u8 {
        self.estimator.get(key)
    }
    // Count one access of `key` and return its new estimated frequency.
    // Also advances the aging window; when the window fills up, all sketch
    // counters are halved so stale popularity decays over time.
    pub fn incr<T: Hash>(&self, key: T) -> u8 {
        let window_size = self.window_counter.fetch_add(1, Ordering::Relaxed);
        // When window_size concurrently increases, only one resets the window and age the estimator.
        // > self.window_limit * 2 is a safety net in case for whatever reason window_size grows
        // out of control
        if window_size == self.window_limit || window_size > self.window_limit * 2 {
            self.window_counter.store(0, Ordering::Relaxed);
            self.estimator.age(1); // right shift 1 bit
        }
        self.estimator.incr(key)
    }
    // because we use 8-bits counters, window size can be 256 * the cache size
    pub fn new(cache_size: usize) -> Self {
        Self {
            estimator: Estimator::optimal(cache_size),
            window_counter: Default::default(),
            // 8x: just a heuristic to balance the memory usage and accuracy
            window_limit: cache_size * 8,
        }
    }
    // Like `new` but with a smaller, less accurate sketch (see `Estimator::compact`).
    pub fn new_compact(cache_size: usize) -> Self {
        Self {
            estimator: Estimator::compact(cache_size),
            window_counter: Default::default(),
            // 8x: just a heuristic to balance the memory usage and accuracy
            window_limit: cache_size * 8,
        }
    }
    // Test-only constructor with deterministic hash seeds.
    #[cfg(test)]
    pub fn new_seeded(cache_size: usize) -> Self {
        Self {
            estimator: Estimator::seeded(cache_size),
            window_counter: Default::default(),
            // 8x: just a heuristic to balance the memory usage and accuracy
            window_limit: cache_size * 8,
        }
    }
    // Test-only compact constructor with deterministic hash seeds.
    #[cfg(test)]
    pub fn new_compact_seeded(cache_size: usize) -> Self {
        Self {
            estimator: Estimator::seeded_compact(cache_size),
            window_counter: Default::default(),
            // 8x: just a heuristic to balance the memory usage and accuracy
            window_limit: cache_size * 8,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // smoke check the CM-sketch sizing math on a standard input
    #[test]
    fn test_cmk_paras() {
        let (slots, hashes) = Estimator::optimal_paras(1_000_000);
        // just smoke check some standard input
        assert_eq!(slots, 2718282);
        assert_eq!(hashes, 20);
    }

    #[test]
    fn test_tiny_lfu() {
        // cache_size 1 => window_limit 8, so aging kicks in after 8 incr()s
        let tiny = TinyLfu::new(1);
        assert_eq!(tiny.get(1), 0);
        assert_eq!(tiny.incr(1), 1);
        assert_eq!(tiny.incr(1), 2);
        assert_eq!(tiny.get(1), 2);
        // Might have hash collisions for the others, need to
        // get() before can assert on the incr() value.
        let two = tiny.get(2);
        assert_eq!(tiny.incr(2), two + 1);
        assert_eq!(tiny.incr(2), two + 2);
        assert_eq!(tiny.get(2), two + 2);
        let three = tiny.get(3);
        assert_eq!(tiny.incr(3), three + 1);
        assert_eq!(tiny.incr(3), three + 2);
        assert_eq!(tiny.incr(3), three + 3);
        assert_eq!(tiny.incr(3), three + 4);
        // 8 incr(), now resets on next incr
        // can only assert they are greater than or equal
        // to the incr() we do per key.
        assert!(tiny.incr(3) >= 3); // had 4, reset to 2, added another.
        assert!(tiny.incr(1) >= 2); // had 2, reset to 1, added another.
        assert!(tiny.incr(2) >= 2); // had 2, reset to 1, added another.
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/tinyufo/src/buckets.rs | tinyufo/src/buckets.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Concurrent storage backend
use super::{Bucket, Key};
use ahash::RandomState;
use crossbeam_skiplist::{map::Entry, SkipMap};
use flurry::HashMap;
/// N-shard skip list. Memory efficient, constant time lookup on average, but a bit slower
/// than hash map
pub struct Compact<T>(Box<[SkipMap<Key, Bucket<T>>]>);

impl<T: Send + 'static> Compact<T> {
    /// Create a new [Compact] with `total_items / items_per_shard` shards
    /// (at least one).
    ///
    /// # Panics
    /// Panics if `items_per_shard` is 0.
    pub fn new(total_items: usize, items_per_shard: usize) -> Self {
        assert!(items_per_shard > 0);
        let shards = std::cmp::max(total_items / items_per_shard, 1);
        // preallocate: the shard count is known up front
        let mut shard_array = Vec::with_capacity(shards);
        for _ in 0..shards {
            shard_array.push(SkipMap::new());
        }
        Self(shard_array.into_boxed_slice())
    }

    /// Look up `key`, returning its live skip-list entry if present.
    pub fn get(&self, key: &Key) -> Option<Entry<'_, Key, Bucket<T>>> {
        let shard = *key as usize % self.0.len();
        self.0[shard].get(key)
    }

    /// Look up `key` and apply `f` to its entry if present.
    pub fn get_map<V, F: FnOnce(Entry<Key, Bucket<T>>) -> V>(&self, key: &Key, f: F) -> Option<V> {
        let v = self.get(key);
        v.map(f)
    }

    // Insert `value` under `key`; returns Some(()) iff an existing entry was replaced.
    fn insert(&self, key: Key, value: Bucket<T>) -> Option<()> {
        let shard = key as usize % self.0.len();
        // remove first so the return value reflects whether the key existed
        let removed = self.0[shard].remove(&key);
        self.0[shard].insert(key, value);
        removed.map(|_| ())
    }

    // Remove `key` from its shard, if present.
    fn remove(&self, key: &Key) {
        let shard = *key as usize % self.0.len();
        // was `(&self.0)[shard]` — the explicit re-borrow was redundant
        self.0[shard].remove(key);
    }
}
// Concurrent hash map, fast but use more memory
pub struct Fast<T>(HashMap<Key, Bucket<T>, RandomState>);

impl<T: Send + Sync> Fast<T> {
    /// Create a map pre-sized for `total_items` entries.
    pub fn new(total_items: usize) -> Self {
        Self(HashMap::with_capacity_and_hasher(
            total_items,
            RandomState::new(),
        ))
    }

    /// Look up `key` and apply `f` to its bucket if present.
    pub fn get_map<V, F: FnOnce(&Bucket<T>) -> V>(&self, key: &Key, f: F) -> Option<V> {
        // pin() produces a guard required for accessing the flurry map
        let pinned = self.0.pin();
        let v = pinned.get(key);
        v.map(f)
    }

    // Insert `value` under `key`; Some(()) iff an existing entry was replaced.
    fn insert(&self, key: Key, value: Bucket<T>) -> Option<()> {
        let pinned = self.0.pin();
        pinned.insert(key, value).map(|_| ())
    }

    // Remove `key`, if present.
    fn remove(&self, key: &Key) {
        let pinned = self.0.pin();
        pinned.remove(key);
    }
}
/// The concurrent storage backend: either the fast hash map or the compact
/// sharded skip list.
pub enum Buckets<T> {
    Fast(Box<Fast<T>>),
    Compact(Compact<T>),
}

impl<T: Send + Sync + 'static> Buckets<T> {
    /// Create a hash-map backed store sized for `items`.
    pub fn new_fast(items: usize) -> Self {
        Self::Fast(Box::new(Fast::new(items)))
    }

    /// Create a skip-list backed store with the given shard sizing.
    pub fn new_compact(items: usize, items_per_shard: usize) -> Self {
        Self::Compact(Compact::new(items, items_per_shard))
    }

    /// Insert `value` under `key`; Some(()) iff an existing entry was replaced.
    pub fn insert(&self, key: Key, value: Bucket<T>) -> Option<()> {
        match self {
            Self::Compact(c) => c.insert(key, value),
            Self::Fast(f) => f.insert(key, value),
        }
    }

    /// Remove `key`, if present.
    pub fn remove(&self, key: &Key) {
        match self {
            Self::Compact(c) => c.remove(key),
            Self::Fast(f) => f.remove(key),
        }
    }

    /// Look up `key` and apply `f` to the stored bucket if present.
    pub fn get_map<V, F: FnOnce(&Bucket<T>) -> V>(&self, key: &Key, f: F) -> Option<V> {
        match self {
            Self::Compact(c) => c.get_map(key, |v| f(v.value())),
            Self::Fast(c) => c.get_map(key, f),
        }
    }

    // Test helper: whether the bucket for `key` sits in the "main" queue.
    #[cfg(test)]
    pub fn get_queue(&self, key: &Key) -> Option<bool> {
        self.get_map(key, |v| v.queue.is_main())
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // insert/get/remove round trip against the hash-map variant
    #[test]
    fn test_fast() {
        let fast = Buckets::new_fast(10);
        assert!(fast.get_map(&1, |_| ()).is_none());
        let bucket = Bucket {
            queue: crate::Location::new_small(),
            weight: 1,
            uses: Default::default(),
            data: 1,
        };
        fast.insert(1, bucket);
        assert_eq!(fast.get_map(&1, |v| v.data), Some(1));
        fast.remove(&1);
        assert!(fast.get_map(&1, |_| ()).is_none());
    }

    // same round trip against the sharded skip-list variant
    #[test]
    fn test_compact() {
        let compact = Buckets::new_compact(10, 2);
        assert!(compact.get_map(&1, |_| ()).is_none());
        let bucket = Bucket {
            queue: crate::Location::new_small(),
            weight: 1,
            uses: Default::default(),
            data: 1,
        };
        compact.insert(1, bucket);
        assert_eq!(compact.get_map(&1, |v| v.data), Some(1));
        compact.remove(&1);
        assert!(compact.get_map(&1, |_| ()).is_none());
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/tinyufo/benches/bench_hit_ratio.rs | tinyufo/benches/bench_hit_ratio.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rand::prelude::*;
use std::num::NonZeroUsize;
const ITEMS: usize = 10_000;
const ITERATIONS: usize = 5_000_000;
/// Run one hit-ratio comparison: size four caches at `cache_size_percent` of
/// ITEMS, drive each with the same zipf(`zip_exp`) key stream (inserting on
/// miss), and print each cache's hit rate as one table row.
fn bench_one(zip_exp: f64, cache_size_percent: f32) {
    print!("{zip_exp:.2}, {cache_size_percent:4}\t\t\t");
    let cache_size = (cache_size_percent * ITEMS as f32).round() as usize;
    let mut lru = lru::LruCache::<u64, ()>::new(NonZeroUsize::new(cache_size).unwrap());
    let moka = moka::sync::Cache::new(cache_size as u64);
    let quick_cache = quick_cache::sync::Cache::new(cache_size);
    let tinyufo = tinyufo::TinyUfo::new(cache_size, cache_size);
    let mut rng = rand::rng();
    let zipf = rand_distr::Zipf::new(ITEMS as f64, zip_exp).unwrap();
    let mut lru_hit = 0;
    let mut moka_hit = 0;
    let mut quick_cache_hit = 0;
    let mut tinyufo_hit = 0;
    for _ in 0..ITERATIONS {
        // all caches see the exact same key sequence
        let key = zipf.sample(&mut rng) as u64;
        if lru.get(&key).is_some() {
            lru_hit += 1;
        } else {
            lru.push(key, ());
        }
        if moka.get(&key).is_some() {
            moka_hit += 1;
        } else {
            moka.insert(key, ());
        }
        if quick_cache.get(&key).is_some() {
            quick_cache_hit += 1;
        } else {
            quick_cache.insert(key, ());
        }
        if tinyufo.get(&key).is_some() {
            tinyufo_hit += 1;
        } else {
            tinyufo.put(key, (), 1);
        }
    }
    print!("{:.2}%\t\t", lru_hit as f32 / ITERATIONS as f32 * 100.0);
    print!("{:.2}%\t\t", moka_hit as f32 / ITERATIONS as f32 * 100.0);
    print!(
        "{:.2}%\t\t",
        quick_cache_hit as f32 / ITERATIONS as f32 * 100.0
    );
    println!("{:.2}%", tinyufo_hit as f32 / ITERATIONS as f32 * 100.0);
}
/*
cargo bench --bench bench_hit_ratio
zipf & cache size lru moka QuickC TinyUFO
0.90, 0.005 19.24% 33.43% 32.33% 33.35%
0.90, 0.01 26.23% 37.86% 38.80% 40.06%
0.90, 0.05 45.58% 55.13% 55.71% 57.80%
0.90, 0.1 55.72% 64.15% 64.01% 66.36%
0.90, 0.25 71.16% 77.12% 75.92% 78.53%
1.00, 0.005 31.08% 45.68% 44.07% 45.15%
1.00, 0.01 39.17% 50.80% 50.90% 52.30%
1.00, 0.05 58.71% 66.92% 67.09% 68.79%
1.00, 0.1 67.59% 74.28% 74.00% 75.92%
1.00, 0.25 79.94% 84.35% 83.45% 85.28%
1.05, 0.005 37.66% 51.78% 50.13% 51.12%
1.05, 0.01 46.07% 57.13% 57.07% 58.41%
1.05, 0.05 65.06% 72.37% 72.41% 73.93%
1.05, 0.1 73.13% 78.97% 78.60% 80.24%
1.05, 0.25 83.74% 87.41% 86.68% 88.14%
1.10, 0.005 44.49% 57.84% 56.16% 57.28%
1.10, 0.01 52.97% 63.19% 62.99% 64.24%
1.10, 0.05 70.95% 77.24% 77.26% 78.55%
1.10, 0.1 78.05% 82.86% 82.66% 84.01%
1.10, 0.25 87.12% 90.10% 89.51% 90.66%
1.50, 0.005 85.27% 89.92% 89.08% 89.69%
1.50, 0.01 89.86% 92.77% 92.44% 92.94%
1.50, 0.05 96.01% 97.08% 96.99% 97.23%
1.50, 0.1 97.51% 98.15% 98.08% 98.24%
1.50, 0.25 98.81% 99.09% 99.03% 99.09%
*/
/// Sweep zipf exponents x cache sizes and print one hit-ratio table row each.
fn main() {
    println!("zipf & cache size\t\tlru\t\tmoka\t\tQuickC\t\tTinyUFO",);
    for zipf_exp in [0.9, 1.0, 1.05, 1.1, 1.5] {
        for cache_capacity in [0.005, 0.01, 0.05, 0.1, 0.25] {
            bench_one(zipf_exp, cache_capacity);
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/tinyufo/benches/bench_memory.rs | tinyufo/benches/bench_memory.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;
use rand::prelude::*;
use std::num::NonZeroUsize;
const ITERATIONS: usize = 5_000_000;
/// Drive an LRU cache (capacity = `cache_size_percent` of `items`) with a
/// zipf(`zip_exp`) key stream, inserting on miss; used for heap profiling.
fn bench_lru(zip_exp: f64, items: usize, cache_size_percent: f32) {
    let cache_size = (cache_size_percent * items as f32).round() as usize;
    let mut lru = lru::LruCache::<u64, ()>::new(NonZeroUsize::new(cache_size).unwrap());
    let mut rng = rand::rng();
    let zipf = rand_distr::Zipf::new(items as f64, zip_exp).unwrap();
    for _ in 0..ITERATIONS {
        let key = zipf.sample(&mut rng) as u64;
        if lru.get(&key).is_none() {
            lru.push(key, ());
        }
    }
}
/// Same insert-on-miss zipf workload as [bench_lru], against a moka cache.
fn bench_moka(zip_exp: f64, items: usize, cache_size_percent: f32) {
    let cache_size = (cache_size_percent * items as f32).round() as usize;
    let moka = moka::sync::Cache::new(cache_size as u64);
    let mut rng = rand::rng();
    let zipf = rand_distr::Zipf::new(items as f64, zip_exp).unwrap();
    for _ in 0..ITERATIONS {
        let key = zipf.sample(&mut rng) as u64;
        if moka.get(&key).is_none() {
            moka.insert(key, ());
        }
    }
}
/// Same insert-on-miss zipf workload as [bench_lru], against quick_cache.
fn bench_quick_cache(zip_exp: f64, items: usize, cache_size_percent: f32) {
    let cache_size = (cache_size_percent * items as f32).round() as usize;
    let quick_cache = quick_cache::sync::Cache::new(cache_size);
    let mut rng = rand::rng();
    let zipf = rand_distr::Zipf::new(items as f64, zip_exp).unwrap();
    for _ in 0..ITERATIONS {
        let key = zipf.sample(&mut rng) as u64;
        if quick_cache.get(&key).is_none() {
            quick_cache.insert(key, ());
        }
    }
}
/// Same insert-on-miss zipf workload as [bench_lru], against TinyUfo
/// (standard, hash-map backed storage).
fn bench_tinyufo(zip_exp: f64, items: usize, cache_size_percent: f32) {
    let cache_size = (cache_size_percent * items as f32).round() as usize;
    // was `(cache_size as f32 * 1.0) as usize`: a no-op float round-trip that
    // would silently lose precision for very large sizes
    let tinyufo = tinyufo::TinyUfo::new(cache_size, cache_size);
    let mut rng = rand::rng();
    let zipf = rand_distr::Zipf::new(items as f64, zip_exp).unwrap();
    for _ in 0..ITERATIONS {
        let key = zipf.sample(&mut rng) as u64;
        if tinyufo.get(&key).is_none() {
            tinyufo.put(key, (), 1);
        }
    }
}
/// Same insert-on-miss zipf workload as [bench_lru], against the compact
/// (skip-list backed) TinyUfo variant.
fn bench_tinyufo_compact(zip_exp: f64, items: usize, cache_size_percent: f32) {
    let cache_size = (cache_size_percent * items as f32).round() as usize;
    // was `(cache_size as f32 * 1.0) as usize`: a no-op float round-trip that
    // would silently lose precision for very large sizes
    let tinyufo = tinyufo::TinyUfo::new_compact(cache_size, cache_size);
    let mut rng = rand::rng();
    let zipf = rand_distr::Zipf::new(items as f64, zip_exp).unwrap();
    for _ in 0..ITERATIONS {
        let key = zipf.sample(&mut rng) as u64;
        if tinyufo.get(&key).is_none() {
            tinyufo.put(key, (), 1);
        }
    }
}
/*
cargo bench --bench bench_memory
total items 1000, cache size 10%
lru
dhat: At t-gmax: 9,408 bytes in 106 blocks
moka
dhat: At t-gmax: 354,232 bytes in 1,581 blocks
QuickCache
dhat: At t-gmax: 11,840 bytes in 8 blocks
TinyUFO
dhat: At t-gmax: 37,337 bytes in 351 blocks
TinyUFO Compact
dhat: At t-gmax: 19,000 bytes in 60 blocks
total items 10000, cache size 10%
lru
dhat: At t-gmax: 128,512 bytes in 1,004 blocks
moka
dhat: At t-gmax: 535,320 bytes in 7,278 blocks
QuickCache
dhat: At t-gmax: 93,000 bytes in 66 blocks
TinyUFO
dhat: At t-gmax: 236,053 bytes in 2,182 blocks
TinyUFO Compact
dhat: At t-gmax: 86,352 bytes in 1,128 blocks
total items 100000, cache size 10%
lru
dhat: At t-gmax: 1,075,648 bytes in 10,004 blocks
moka
dhat: At t-gmax: 2,489,088 bytes in 62,374 blocks
QuickCache
dhat: At t-gmax: 863,752 bytes in 66 blocks
TinyUFO
dhat: At t-gmax: 2,290,635 bytes in 20,467 blocks
TinyUFO Compact
dhat: At t-gmax: 766,024 bytes in 10,421 blocks
*/
/// Report peak heap usage (via dhat) of each cache implementation under the
/// same zipf(1.05), 10%-cache-size, insert-on-miss workload.
fn main() {
    for items in [1000, 10_000, 100_000] {
        println!("\ntotal items {items}, cache size 10%");
        {
            // each bench gets its own profiler scope so dhat reports a
            // separate peak ("At t-gmax") per cache
            let _profiler = dhat::Profiler::new_heap();
            bench_lru(1.05, items, 0.1);
            println!("lru");
        }
        {
            let _profiler = dhat::Profiler::new_heap();
            bench_moka(1.05, items, 0.1);
            println!("\nmoka");
        }
        {
            let _profiler = dhat::Profiler::new_heap();
            bench_quick_cache(1.05, items, 0.1);
            println!("\nQuickCache");
        }
        {
            let _profiler = dhat::Profiler::new_heap();
            bench_tinyufo(1.05, items, 0.1);
            println!("\nTinyUFO");
        }
        {
            let _profiler = dhat::Profiler::new_heap();
            bench_tinyufo_compact(1.05, items, 0.1);
            println!("\nTinyUFO Compact");
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/tinyufo/benches/bench_perf.rs | tinyufo/benches/bench_perf.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rand::prelude::*;
use std::num::NonZeroUsize;
use std::sync::{Barrier, Mutex};
use std::thread;
use std::time::Instant;
const ITEMS: usize = 100;
const ITERATIONS: usize = 5_000_000;
const THREADS: usize = 8;
/*
cargo bench --bench bench_perf
Note: the performance numbers vary a lot across different platforms, CPUs and CPU architectures
Below is from Linux + Ryzen 5 7600 CPU
lru read total 150.423567ms, 30ns avg per operation, 33239472 ops per second
moka read total 462.133322ms, 92ns avg per operation, 10819389 ops per second
quick_cache read total 125.618216ms, 25ns avg per operation, 39803144 ops per second
tinyufo read total 199.007359ms, 39ns avg per operation, 25124698 ops per second
tinyufo compact read total 331.145859ms, 66ns avg per operation, 15099087 ops per second
lru read total 5.402631847s, 1.08µs avg per operation, 925474 ops per second
...
total 6960329 ops per second
moka read total 2.742258211s, 548ns avg per operation, 1823314 ops per second
...
total 14072430 ops per second
quick_cache read total 1.186566627s, 237ns avg per operation, 4213838 ops per second
...
total 33694776 ops per second
tinyufo read total 208.346855ms, 41ns avg per operation, 23998444 ops per second
...
total 148691408 ops per second
tinyufo compact read total 539.403037ms, 107ns avg per operation, 9269507 ops per second
...
total 74130632 ops per second
lru mixed read/write 5.500309876s, 1.1µs avg per operation, 909039 ops per second, 407431 misses
...
total 6846743 ops per second
moka mixed read/write 2.368500882s, 473ns avg per operation, 2111040 ops per second 279324 misses
...
total 16557962 ops per second
quick_cache mixed read/write 838.072588ms, 167ns avg per operation, 5966070 ops per second 315051 misses
...
total 47698472 ops per second
tinyufo mixed read/write 456.134531ms, 91ns avg per operation, 10961678 ops per second, 294977 misses
...
total 80865792 ops per second
tinyufo compact mixed read/write 638.770053ms, 127ns avg per operation, 7827543 ops per second, 294641 misses
...
total 62600844 ops per second
*/
fn main() {
println!("Note: these performance numbers vary a lot across different CPUs and OSes.");
// we don't bench eviction here so make the caches large enough to hold all
let lru = Mutex::new(lru::LruCache::<u64, ()>::unbounded());
let moka = moka::sync::Cache::new(ITEMS as u64 + 10);
let quick_cache = quick_cache::sync::Cache::new(ITEMS + 10);
let tinyufo = tinyufo::TinyUfo::new(ITEMS + 10, 10);
let tinyufo_compact = tinyufo::TinyUfo::new_compact(ITEMS + 10, 10);
// populate first, then we bench access/promotion
for i in 0..ITEMS {
lru.lock().unwrap().put(i as u64, ());
moka.insert(i as u64, ());
quick_cache.insert(i as u64, ());
tinyufo.put(i as u64, (), 1);
tinyufo_compact.put(i as u64, (), 1);
}
// single thread
let mut rng = rand::rng();
let zipf = rand_distr::Zipf::new(ITEMS as f64, 1.03).unwrap();
let before = Instant::now();
for _ in 0..ITERATIONS {
lru.lock().unwrap().get(&(zipf.sample(&mut rng) as u64));
}
let elapsed = before.elapsed();
println!(
"lru read total {elapsed:?}, {:?} avg per operation, {} ops per second",
elapsed / ITERATIONS as u32,
(ITERATIONS as f32 / elapsed.as_secs_f32()) as u32
);
let before = Instant::now();
for _ in 0..ITERATIONS {
moka.get(&(zipf.sample(&mut rng) as u64));
}
let elapsed = before.elapsed();
println!(
"moka read total {elapsed:?}, {:?} avg per operation, {} ops per second",
elapsed / ITERATIONS as u32,
(ITERATIONS as f32 / elapsed.as_secs_f32()) as u32
);
let before = Instant::now();
for _ in 0..ITERATIONS {
quick_cache.get(&(zipf.sample(&mut rng) as u64));
}
let elapsed = before.elapsed();
println!(
"quick_cache read total {elapsed:?}, {:?} avg per operation, {} ops per second",
elapsed / ITERATIONS as u32,
(ITERATIONS as f32 / elapsed.as_secs_f32()) as u32
);
let before = Instant::now();
for _ in 0..ITERATIONS {
tinyufo.get(&(zipf.sample(&mut rng) as u64));
}
let elapsed = before.elapsed();
println!(
"tinyufo read total {elapsed:?}, {:?} avg per operation, {} ops per second",
elapsed / ITERATIONS as u32,
(ITERATIONS as f32 / elapsed.as_secs_f32()) as u32
);
let before = Instant::now();
for _ in 0..ITERATIONS {
tinyufo_compact.get(&(zipf.sample(&mut rng) as u64));
}
let elapsed = before.elapsed();
println!(
"tinyufo compact read total {elapsed:?}, {:?} avg per operation, {} ops per second",
elapsed / ITERATIONS as u32,
(ITERATIONS as f32 / elapsed.as_secs_f32()) as u32
);
// concurrent
let wg = Barrier::new(THREADS);
let before = Instant::now();
thread::scope(|s| {
for _ in 0..THREADS {
s.spawn(|| {
let mut rng = rand::rng();
let zipf = rand_distr::Zipf::new(ITEMS as f64, 1.03).unwrap();
wg.wait();
let before = Instant::now();
for _ in 0..ITERATIONS {
lru.lock().unwrap().get(&(zipf.sample(&mut rng) as u64));
}
let elapsed = before.elapsed();
println!(
"lru read total {elapsed:?}, {:?} avg per operation, {} ops per second",
elapsed / ITERATIONS as u32,
(ITERATIONS as f32 / elapsed.as_secs_f32()) as u32
);
});
}
});
let elapsed = before.elapsed();
println!(
"total {} ops per second",
(ITERATIONS as f32 * THREADS as f32 / elapsed.as_secs_f32()) as u32
);
let wg = Barrier::new(THREADS);
let before = Instant::now();
thread::scope(|s| {
for _ in 0..THREADS {
s.spawn(|| {
let mut rng = rand::rng();
let zipf = rand_distr::Zipf::new(ITEMS as f64, 1.03).unwrap();
wg.wait();
let before = Instant::now();
for _ in 0..ITERATIONS {
moka.get(&(zipf.sample(&mut rng) as u64));
}
let elapsed = before.elapsed();
println!(
"moka read total {elapsed:?}, {:?} avg per operation, {} ops per second",
elapsed / ITERATIONS as u32,
(ITERATIONS as f32 / elapsed.as_secs_f32()) as u32
);
});
}
});
let elapsed = before.elapsed();
println!(
"total {} ops per second",
(ITERATIONS as f32 * THREADS as f32 / elapsed.as_secs_f32()) as u32
);
let wg = Barrier::new(THREADS);
let before = Instant::now();
thread::scope(|s| {
for _ in 0..THREADS {
s.spawn(|| {
let mut rng = rand::rng();
let zipf = rand_distr::Zipf::new(ITEMS as f64, 1.03).unwrap();
wg.wait();
let before = Instant::now();
for _ in 0..ITERATIONS {
quick_cache.get(&(zipf.sample(&mut rng) as u64));
}
let elapsed = before.elapsed();
println!(
"quick_cache read total {elapsed:?}, {:?} avg per operation, {} ops per second",
elapsed / ITERATIONS as u32,
(ITERATIONS as f32 / elapsed.as_secs_f32()) as u32
);
});
}
});
let elapsed = before.elapsed();
println!(
"total {} ops per second",
(ITERATIONS as f32 * THREADS as f32 / elapsed.as_secs_f32()) as u32
);
let wg = Barrier::new(THREADS);
let before = Instant::now();
thread::scope(|s| {
for _ in 0..THREADS {
s.spawn(|| {
let mut rng = rand::rng();
let zipf = rand_distr::Zipf::new(ITEMS as f64, 1.03).unwrap();
wg.wait();
let before = Instant::now();
for _ in 0..ITERATIONS {
tinyufo.get(&(zipf.sample(&mut rng) as u64));
}
let elapsed = before.elapsed();
println!(
"tinyufo read total {elapsed:?}, {:?} avg per operation, {} ops per second",
elapsed / ITERATIONS as u32,
(ITERATIONS as f32 / elapsed.as_secs_f32()) as u32
);
});
}
});
let elapsed = before.elapsed();
println!(
"total {} ops per second",
(ITERATIONS as f32 * THREADS as f32 / elapsed.as_secs_f32()) as u32
);
let wg = Barrier::new(THREADS);
let before = Instant::now();
thread::scope(|s| {
for _ in 0..THREADS {
s.spawn(|| {
let mut rng = rand::rng();
let zipf = rand_distr::Zipf::new(ITEMS as f64, 1.03).unwrap();
wg.wait();
let before = Instant::now();
for _ in 0..ITERATIONS {
tinyufo_compact.get(&(zipf.sample(&mut rng) as u64));
}
let elapsed = before.elapsed();
println!(
"tinyufo compact read total {elapsed:?}, {:?} avg per operation, {} ops per second",
elapsed / ITERATIONS as u32,
(ITERATIONS as f32 / elapsed.as_secs_f32()) as u32
);
});
}
});
let elapsed = before.elapsed();
println!(
"total {} ops per second",
(ITERATIONS as f32 * THREADS as f32 / elapsed.as_secs_f32()) as u32
);
///// bench mixed read and write /////
const CACHE_SIZE: usize = 1000;
let items: usize = 10000;
const ZIPF_EXP: f64 = 1.3;
let lru = Mutex::new(lru::LruCache::<u64, ()>::new(
NonZeroUsize::new(CACHE_SIZE).unwrap(),
));
let wg = Barrier::new(THREADS);
let before = Instant::now();
thread::scope(|s| {
for _ in 0..THREADS {
s.spawn(|| {
let mut miss_count = 0;
let mut rng = rand::rng();
let zipf = rand_distr::Zipf::new(items as f64, ZIPF_EXP).unwrap();
wg.wait();
let before = Instant::now();
for _ in 0..ITERATIONS {
let key = zipf.sample(&mut rng) as u64;
let mut lru = lru.lock().unwrap();
if lru.get(&key).is_none() {
lru.put(key, ());
miss_count += 1;
}
}
let elapsed = before.elapsed();
println!(
"lru mixed read/write {elapsed:?}, {:?} avg per operation, {} ops per second, {miss_count} misses",
elapsed / ITERATIONS as u32,
(ITERATIONS as f32 / elapsed.as_secs_f32()) as u32
);
});
}
});
let elapsed = before.elapsed();
println!(
"total {} ops per second",
(ITERATIONS as f32 * THREADS as f32 / elapsed.as_secs_f32()) as u32
);
let moka = moka::sync::Cache::new(CACHE_SIZE as u64);
let wg = Barrier::new(THREADS);
let before = Instant::now();
thread::scope(|s| {
for _ in 0..THREADS {
s.spawn(|| {
let mut miss_count = 0;
let mut rng = rand::rng();
let zipf = rand_distr::Zipf::new(items as f64, ZIPF_EXP).unwrap();
wg.wait();
let before = Instant::now();
for _ in 0..ITERATIONS {
let key = zipf.sample(&mut rng) as u64;
if moka.get(&key).is_none() {
moka.insert(key, ());
miss_count += 1;
}
}
let elapsed = before.elapsed();
println!(
"moka mixed read/write {elapsed:?}, {:?} avg per operation, {} ops per second {miss_count} misses",
elapsed / ITERATIONS as u32,
(ITERATIONS as f32 / elapsed.as_secs_f32()) as u32
);
});
}
});
let elapsed = before.elapsed();
println!(
"total {} ops per second",
(ITERATIONS as f32 * THREADS as f32 / elapsed.as_secs_f32()) as u32
);
let quick_cache = quick_cache::sync::Cache::new(CACHE_SIZE);
let wg = Barrier::new(THREADS);
let before = Instant::now();
thread::scope(|s| {
for _ in 0..THREADS {
s.spawn(|| {
let mut miss_count = 0;
let mut rng = rand::rng();
let zipf = rand_distr::Zipf::new(items as f64, ZIPF_EXP).unwrap();
wg.wait();
let before = Instant::now();
for _ in 0..ITERATIONS {
let key = zipf.sample(&mut rng) as u64;
if quick_cache.get(&key).is_none() {
quick_cache.insert(key, ());
miss_count += 1;
}
}
let elapsed = before.elapsed();
println!(
"quick_cache mixed read/write {elapsed:?}, {:?} avg per operation, {} ops per second {miss_count} misses",
elapsed / ITERATIONS as u32,
(ITERATIONS as f32 / elapsed.as_secs_f32()) as u32
);
});
}
});
let elapsed = before.elapsed();
println!(
"total {} ops per second",
(ITERATIONS as f32 * THREADS as f32 / elapsed.as_secs_f32()) as u32
);
let tinyufo = tinyufo::TinyUfo::new(CACHE_SIZE, CACHE_SIZE);
let wg = Barrier::new(THREADS);
let before = Instant::now();
thread::scope(|s| {
for _ in 0..THREADS {
s.spawn(|| {
let mut miss_count = 0;
let mut rng = rand::rng();
let zipf = rand_distr::Zipf::new(items as f64, ZIPF_EXP).unwrap();
wg.wait();
let before = Instant::now();
for _ in 0..ITERATIONS {
let key = zipf.sample(&mut rng) as u64;
if tinyufo.get(&key).is_none() {
tinyufo.put(key, (), 1);
miss_count +=1;
}
}
let elapsed = before.elapsed();
println!(
"tinyufo mixed read/write {elapsed:?}, {:?} avg per operation, {} ops per second, {miss_count} misses",
elapsed / ITERATIONS as u32,
(ITERATIONS as f32 / elapsed.as_secs_f32()) as u32,
);
});
}
});
let elapsed = before.elapsed();
println!(
"total {} ops per second",
(ITERATIONS as f32 * THREADS as f32 / elapsed.as_secs_f32()) as u32
);
let tinyufo_compact = tinyufo::TinyUfo::new(CACHE_SIZE, CACHE_SIZE);
let wg = Barrier::new(THREADS);
let before = Instant::now();
thread::scope(|s| {
for _ in 0..THREADS {
s.spawn(|| {
let mut miss_count = 0;
let mut rng = rand::rng();
let zipf = rand_distr::Zipf::new(items as f64, ZIPF_EXP).unwrap();
wg.wait();
let before = Instant::now();
for _ in 0..ITERATIONS {
let key = zipf.sample(&mut rng) as u64;
if tinyufo_compact.get(&key).is_none() {
tinyufo_compact.put(key, (), 1);
miss_count +=1;
}
}
let elapsed = before.elapsed();
println!(
"tinyufo compact mixed read/write {elapsed:?}, {:?} avg per operation, {} ops per second, {miss_count} misses",
elapsed / ITERATIONS as u32,
(ITERATIONS as f32 / elapsed.as_secs_f32()) as u32,
);
});
}
});
let elapsed = before.elapsed();
println!(
"total {} ops per second",
(ITERATIONS as f32 * THREADS as f32 / elapsed.as_secs_f32()) as u32
);
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-load-balancing/src/discovery.rs | pingora-load-balancing/src/discovery.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Service discovery interface and implementations
use arc_swap::ArcSwap;
use async_trait::async_trait;
use http::Extensions;
use pingora_core::protocols::l4::socket::SocketAddr;
use pingora_error::Result;
use std::io::Result as IoResult;
use std::net::ToSocketAddrs;
use std::{
collections::{BTreeSet, HashMap},
sync::Arc,
};
use crate::Backend;
/// [ServiceDiscovery] is the interface to discover [Backend]s.
#[async_trait]
pub trait ServiceDiscovery {
    /// Return the discovered collection of backends.
    /// And *optionally* whether these backends are enabled to serve or not in a `HashMap`. Any backend
    /// that is not explicitly in the set is considered enabled.
    // NOTE(review): the u64 key looks like a hash identifying a Backend —
    // confirm against the health-check/update callers before relying on it.
    async fn discover(&self) -> Result<(BTreeSet<Backend>, HashMap<u64, bool>)>;
}
// TODO: add DNS base discovery
/// A static collection of [Backend]s for service discovery.
#[derive(Default)]
pub struct Static {
    // the current backend set; swapped atomically as a whole on every mutation
    backends: ArcSwap<BTreeSet<Backend>>,
}

impl Static {
    /// Create a new boxed [Static] service discovery with the given backends.
    pub fn new(backends: BTreeSet<Backend>) -> Box<Self> {
        Box::new(Static {
            backends: ArcSwap::new(Arc::new(backends)),
        })
    }

    /// Create a new boxed [Static] from a given iterator of items that implements [ToSocketAddrs].
    ///
    /// Every resolved address becomes a weight-1 [Backend] with empty
    /// extensions; resolution failure of any item aborts with the I/O error.
    pub fn try_from_iter<A, T: IntoIterator<Item = A>>(iter: T) -> IoResult<Box<Self>>
    where
        A: ToSocketAddrs,
    {
        let mut upstreams = BTreeSet::new();
        for addrs in iter.into_iter() {
            let addrs = addrs.to_socket_addrs()?.map(|addr| Backend {
                addr: SocketAddr::Inet(addr),
                weight: 1,
                ext: Extensions::new(),
            });
            upstreams.extend(addrs);
        }
        Ok(Self::new(upstreams))
    }

    /// return the collection of backends
    pub fn get(&self) -> BTreeSet<Backend> {
        // clones the whole set out of the current ArcSwap snapshot
        BTreeSet::clone(&self.backends.load())
    }

    // Concurrent set/add/remove might race with each other
    // TODO: use a queue to avoid racing
    // TODO: take an impl iter
    // Replace the entire backend set.
    #[allow(dead_code)]
    pub(crate) fn set(&self, backends: BTreeSet<Backend>) {
        self.backends.store(backends.into())
    }

    // Add one backend via copy-modify-swap (see race note above).
    #[allow(dead_code)]
    pub(crate) fn add(&self, backend: Backend) {
        let mut new = self.get();
        new.insert(backend);
        self.set(new)
    }

    // Remove one backend via copy-modify-swap (see race note above).
    #[allow(dead_code)]
    pub(crate) fn remove(&self, backend: &Backend) {
        let mut new = self.get();
        new.remove(backend);
        self.set(new)
    }
}
#[async_trait]
impl ServiceDiscovery for Static {
    async fn discover(&self) -> Result<(BTreeSet<Backend>, HashMap<u64, bool>)> {
        // no readiness: an empty map means every backend is considered enabled
        let health = HashMap::new();
        Ok((self.get(), health))
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-load-balancing/src/lib.rs | pingora-load-balancing/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # Pingora Load Balancing utilities
//! This crate provides common service discovery, health check and load balancing
//! algorithms for proxies to use.
// https://github.com/mcarton/rust-derivative/issues/112
// False positive for macro generated code
#![allow(clippy::non_canonical_partial_ord_impl)]
use arc_swap::ArcSwap;
use derivative::Derivative;
use futures::FutureExt;
pub use http::Extensions;
use pingora_core::protocols::l4::socket::SocketAddr;
use pingora_error::{ErrorType, OrErr, Result};
use std::collections::hash_map::DefaultHasher;
use std::collections::{BTreeSet, HashMap};
use std::hash::{Hash, Hasher};
use std::io::Result as IoResult;
use std::net::ToSocketAddrs;
use std::sync::Arc;
use std::time::Duration;
mod background;
pub mod discovery;
pub mod health_check;
pub mod selection;
use discovery::ServiceDiscovery;
use health_check::Health;
use selection::UniqueIterator;
use selection::{BackendIter, BackendSelection};
pub mod prelude {
    //! Commonly used types re-exported for convenient glob import.
    pub use crate::health_check::TcpHealthCheck;
    pub use crate::selection::RoundRobin;
    pub use crate::LoadBalancer;
}
/// [Backend] represents a server to proxy or connect to.
#[derive(Derivative)]
#[derivative(Clone, Hash, PartialEq, PartialOrd, Eq, Ord, Debug)]
pub struct Backend {
    /// The address to the backend server.
    pub addr: SocketAddr,
    /// The relative weight of the server. Load balancing algorithms will
    /// proportionally distribute traffic according to this value.
    pub weight: usize,
    /// The extension field to put arbitrary data to annotate the Backend.
    /// The data added here is opaque to this crate hence the data is ignored by
    /// functionalities of this crate. For example, two backends with the same
    /// [SocketAddr] and the same weight but different `ext` data are considered
    /// identical.
    /// See [Extensions] for how to add and read the data.
    // `ext` is excluded from all comparisons/hashing below so that identity is
    // determined solely by `addr` and `weight`.
    #[derivative(PartialEq = "ignore")]
    #[derivative(PartialOrd = "ignore")]
    #[derivative(Hash = "ignore")]
    #[derivative(Ord = "ignore")]
    pub ext: Extensions,
}
impl Backend {
    /// Create a new [Backend] with `weight` 1. The function will try to parse
    /// `addr` into a [std::net::SocketAddr].
    pub fn new(addr: &str) -> Result<Self> {
        Self::new_with_weight(addr, 1)
    }
    /// Creates a new [Backend] with the specified `weight`. The function will try to parse
    /// `addr` into a [std::net::SocketAddr].
    pub fn new_with_weight(addr: &str, weight: usize) -> Result<Self> {
        // TODO: UDS
        let parsed = addr
            .parse()
            .or_err(ErrorType::InternalError, "invalid socket addr")?;
        Ok(Backend {
            addr: SocketAddr::Inet(parsed),
            weight,
            ext: Extensions::new(),
        })
    }
    /// A stable 64-bit key derived from `addr` and `weight` (`ext` is excluded
    /// from hashing), used to index the health tables.
    pub(crate) fn hash_key(&self) -> u64 {
        let mut state = DefaultHasher::new();
        self.hash(&mut state);
        state.finish()
    }
}
// Allow a `Backend` to be used anywhere a `&SocketAddr` is expected.
impl std::ops::Deref for Backend {
    type Target = SocketAddr;
    fn deref(&self) -> &Self::Target {
        &self.addr
    }
}
// Mutable counterpart of the `Deref` impl above: expose the address for in-place edits.
impl std::ops::DerefMut for Backend {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.addr
    }
}
// Delegate address resolution to the underlying `addr`; yields exactly one address.
impl std::net::ToSocketAddrs for Backend {
    type Iter = std::iter::Once<std::net::SocketAddr>;
    fn to_socket_addrs(&self) -> std::io::Result<Self::Iter> {
        self.addr.to_socket_addrs()
    }
}
/// [Backends] is a collection of [Backend]s.
///
/// It includes a service discovery method (static or dynamic) to discover all
/// the available backends as well as an optional health check method to probe the liveness
/// of each backend.
pub struct Backends {
    // How backends are discovered (e.g. a static list or a dynamic source).
    discovery: Box<dyn ServiceDiscovery + Send + Sync + 'static>,
    // Optional probe used to assess the liveness of each backend.
    health_check: Option<Arc<dyn health_check::HealthCheck + Send + Sync + 'static>>,
    // The current set of backends; swapped atomically on update.
    backends: ArcSwap<BTreeSet<Backend>>,
    // Per-backend health state, keyed by `Backend::hash_key()`.
    health: ArcSwap<HashMap<u64, Health>>,
}
impl Backends {
    /// Create a new [Backends] with the given [ServiceDiscovery] implementation.
    ///
    /// The health check method is by default empty.
    pub fn new(discovery: Box<dyn ServiceDiscovery + Send + Sync + 'static>) -> Self {
        Self {
            discovery,
            health_check: None,
            backends: Default::default(),
            health: Default::default(),
        }
    }
    /// Set the health check method. See [health_check] for the methods provided.
    pub fn set_health_check(
        &mut self,
        hc: Box<dyn health_check::HealthCheck + Send + Sync + 'static>,
    ) {
        self.health_check = Some(hc.into())
    }
    /// Update the stored backends when the new set differs from the current one.
    ///
    /// The callback will be invoked only when the new set of backends is different
    /// from the current one so that the caller can update the selector accordingly.
    /// `enablement` overrides the enabled flag for backends keyed by hash_key.
    fn do_update<F>(
        &self,
        new_backends: BTreeSet<Backend>,
        enablement: HashMap<u64, bool>,
        callback: F,
    ) where
        F: Fn(Arc<BTreeSet<Backend>>),
    {
        if (**self.backends.load()) != new_backends {
            let old_health = self.health.load();
            let mut health = HashMap::with_capacity(new_backends.len());
            for backend in new_backends.iter() {
                let hash_key = backend.hash_key();
                // use the default health if the backend is new; carry over the
                // existing state otherwise so health is not reset by discovery
                let backend_health = old_health.get(&hash_key).cloned().unwrap_or_default();
                // override enablement
                if let Some(backend_enabled) = enablement.get(&hash_key) {
                    backend_health.enable(*backend_enabled);
                }
                health.insert(hash_key, backend_health);
            }
            // TODO: put this all under 1 ArcSwap so the update is atomic
            // It's important the `callback()` executes first since computing selector backends might
            // be expensive. For example, if a caller checks `backends` to see if any are available
            // they may encounter false positives if the selector isn't ready yet.
            let new_backends = Arc::new(new_backends);
            callback(new_backends.clone());
            self.backends.store(new_backends);
            self.health.store(Arc::new(health));
        } else {
            // no backend change, just check enablement
            for (hash_key, backend_enabled) in enablement.iter() {
                // override enablement if set
                // this get should always be Some(_) because we already populate `health` for all known backends
                if let Some(backend_health) = self.health.load().get(hash_key) {
                    backend_health.enable(*backend_enabled);
                }
            }
        }
    }
    /// Whether a certain [Backend] is ready to serve traffic.
    ///
    /// This function returns true when the backend is both healthy and enabled.
    /// This function returns true when the health check is unset but the backend is enabled.
    /// When the health check is set, this function will return false for the `backend` it
    /// doesn't know.
    pub fn ready(&self, backend: &Backend) -> bool {
        self.health
            .load()
            .get(&backend.hash_key())
            // Racing: return `None` when this function is called between the
            // backend store and the health store
            .map_or(self.health_check.is_none(), |h| h.ready())
    }
    /// Manually set if a [Backend] is ready to serve traffic.
    ///
    /// This method does not override the health of the backend. It is meant to be used
    /// to stop a backend from accepting traffic when it is still healthy.
    ///
    /// This method is noop when the given backend doesn't exist in the service discovery.
    pub fn set_enable(&self, backend: &Backend, enabled: bool) {
        // this should always be Some(_) because health is always populated during update
        if let Some(h) = self.health.load().get(&backend.hash_key()) {
            h.enable(enabled)
        };
    }
    /// Return the collection of the backends.
    pub fn get_backend(&self) -> Arc<BTreeSet<Backend>> {
        self.backends.load_full()
    }
    /// Call the service discovery method to update the collection of backends.
    ///
    /// The callback will be invoked when the new set of backend is different
    /// from the current one so that the caller can update the selector accordingly.
    pub async fn update<F>(&self, callback: F) -> Result<()>
    where
        F: Fn(Arc<BTreeSet<Backend>>),
    {
        let (new_backends, enablement) = self.discovery.discover().await?;
        self.do_update(new_backends, enablement, callback);
        Ok(())
    }
    /// Run health check on all backends if it is set.
    ///
    /// When `parallel: true`, all backends are checked in parallel instead of sequentially
    pub async fn run_health_check(&self, parallel: bool) {
        use crate::health_check::HealthCheck;
        use log::{info, warn};
        use pingora_runtime::current_handle;
        // Probe one backend; if its health state flipped as a result, notify the
        // check implementation and log the transition.
        async fn check_and_report(
            backend: &Backend,
            check: &Arc<dyn HealthCheck + Send + Sync>,
            health_table: &HashMap<u64, Health>,
        ) {
            let errored = check.check(backend).await.err();
            if let Some(h) = health_table.get(&backend.hash_key()) {
                let flipped =
                    h.observe_health(errored.is_none(), check.health_threshold(errored.is_none()));
                if flipped {
                    check.health_status_change(backend, errored.is_none()).await;
                    let summary = check.backend_summary(backend);
                    if let Some(e) = errored {
                        warn!("{summary} becomes unhealthy, {e}");
                    } else {
                        info!("{summary} becomes healthy");
                    }
                }
            }
        }
        let Some(health_check) = self.health_check.as_ref() else {
            return;
        };
        let backends = self.backends.load();
        if parallel {
            let health_table = self.health.load_full();
            let runtime = current_handle();
            // Spawn one task per backend so slow probes don't delay the others.
            let jobs = backends.iter().map(|backend| {
                let backend = backend.clone();
                let check = health_check.clone();
                let ht = health_table.clone();
                runtime.spawn(async move {
                    check_and_report(&backend, &check, &ht).await;
                })
            });
            futures::future::join_all(jobs).await;
        } else {
            for backend in backends.iter() {
                check_and_report(backend, health_check, &self.health.load()).await;
            }
        }
    }
}
/// A [LoadBalancer] instance contains the service discovery, health check and backend selection
/// all together.
///
/// In order to run service discovery and health check at the designated frequencies, the [LoadBalancer]
/// needs to be run as a [pingora_core::services::background::BackgroundService].
pub struct LoadBalancer<S> {
    // The backends plus their health state, fed by service discovery.
    backends: Backends,
    // The selection algorithm state; rebuilt whenever the backends change.
    selector: ArcSwap<S>,
    /// How frequent the health check logic (if set) should run.
    ///
    /// If `None`, the health check logic will only run once at the beginning.
    pub health_check_frequency: Option<Duration>,
    /// How frequent the service discovery should run.
    ///
    /// If `None`, the service discovery will only run once at the beginning.
    pub update_frequency: Option<Duration>,
    /// Whether to run health check to all backends in parallel. Default is false.
    pub parallel_health_check: bool,
}
impl<S> LoadBalancer<S>
where
    S: BackendSelection + 'static,
    S::Iter: BackendIter,
{
    /// Build a [LoadBalancer] with static backends created from the iter.
    ///
    /// Note: [ToSocketAddrs] will invoke blocking network IO for DNS lookup if
    /// the input cannot be directly parsed as [SocketAddr].
    pub fn try_from_iter<A, T: IntoIterator<Item = A>>(iter: T) -> IoResult<Self>
    where
        A: ToSocketAddrs,
    {
        let discovery = discovery::Static::try_from_iter(iter)?;
        let backends = Backends::new(discovery);
        let lb = Self::from_backends(backends);
        // Static discovery completes synchronously, so the update future is
        // expected to resolve immediately without being polled by a runtime.
        lb.update()
            .now_or_never()
            .expect("static should not block")
            .expect("static should not error");
        Ok(lb)
    }
    /// Build a [LoadBalancer] with the given [Backends] and the config.
    pub fn from_backends_with_config(backends: Backends, config: &S::Config) -> Self {
        let selector = ArcSwap::new(Arc::new(S::build_with_config(
            &backends.get_backend(),
            config,
        )));
        LoadBalancer {
            backends,
            selector,
            health_check_frequency: None,
            update_frequency: None,
            parallel_health_check: false,
        }
    }
    /// Build a [LoadBalancer] with the given [Backends].
    pub fn from_backends(backends: Backends) -> Self {
        let selector = ArcSwap::new(Arc::new(S::build(&backends.get_backend())));
        LoadBalancer {
            backends,
            selector,
            health_check_frequency: None,
            update_frequency: None,
            parallel_health_check: false,
        }
    }
    /// Run the service discovery and update the selection algorithm.
    ///
    /// This function will be called every `update_frequency` if this [LoadBalancer] instance
    /// is running as a background service.
    pub async fn update(&self) -> Result<()> {
        self.backends
            .update(|backends| self.selector.store(Arc::new(S::build(&backends))))
            .await
    }
    /// Return the first healthy [Backend] according to the selection algorithm and the
    /// health check results.
    ///
    /// The `key` is used for hash based selection and is ignored if the selection is random or
    /// round robin.
    ///
    /// The `max_iterations` is there to bound the search time for the next Backend. In certain
    /// algorithms like Ketama hashing, the search for the next backend is linear and could take
    /// a lot of steps.
    // TODO: consider removing `max_iterations` as users have no idea how to set it.
    pub fn select(&self, key: &[u8], max_iterations: usize) -> Option<Backend> {
        self.select_with(key, max_iterations, |_, health| health)
    }
    /// Similar to [Self::select], return the first healthy [Backend] according to the selection algorithm
    /// and the user defined `accept` function.
    ///
    /// The `accept` function takes two inputs, the backend being selected and the internal health of that
    /// backend. The function can do things like ignoring the internal health checks or skipping this backend
    /// because it failed before. The `accept` function is called multiple times iterating over backends
    /// until it returns `true`.
    pub fn select_with<F>(&self, key: &[u8], max_iterations: usize, accept: F) -> Option<Backend>
    where
        F: Fn(&Backend, bool) -> bool,
    {
        let selection = self.selector.load();
        // UniqueIterator skips duplicates and stops after `max_iterations`.
        let mut iter = UniqueIterator::new(selection.iter(key), max_iterations);
        while let Some(b) = iter.get_next() {
            if accept(&b, self.backends.ready(&b)) {
                return Some(b);
            }
        }
        None
    }
    /// Set the health check method. See [health_check].
    pub fn set_health_check(
        &mut self,
        hc: Box<dyn health_check::HealthCheck + Send + Sync + 'static>,
    ) {
        self.backends.set_health_check(hc);
    }
    /// Access the [Backends] of this [LoadBalancer]
    pub fn backends(&self) -> &Backends {
        &self.backends
    }
}
#[cfg(test)]
mod test {
    // Unit tests covering static discovery, health checks, readiness
    // overrides, ext data, and cross-thread consistency of updates.
    use std::sync::atomic::{AtomicBool, Ordering::Relaxed};
    use super::*;
    use async_trait::async_trait;
    #[tokio::test]
    async fn test_static_backends() {
        let backends: LoadBalancer<selection::RoundRobin> =
            LoadBalancer::try_from_iter(["1.1.1.1:80", "1.0.0.1:80"]).unwrap();
        let backend1 = Backend::new("1.1.1.1:80").unwrap();
        let backend2 = Backend::new("1.0.0.1:80").unwrap();
        let backend = backends.backends().get_backend();
        assert!(backend.contains(&backend1));
        assert!(backend.contains(&backend2));
    }
    #[tokio::test]
    async fn test_backends() {
        let discovery = discovery::Static::default();
        let good1 = Backend::new("1.1.1.1:80").unwrap();
        discovery.add(good1.clone());
        let good2 = Backend::new("1.0.0.1:80").unwrap();
        discovery.add(good2.clone());
        let bad = Backend::new("127.0.0.1:79").unwrap();
        discovery.add(bad.clone());
        let mut backends = Backends::new(Box::new(discovery));
        let check = health_check::TcpHealthCheck::new();
        backends.set_health_check(check);
        // true: new backend discovered
        let updated = AtomicBool::new(false);
        backends
            .update(|_| updated.store(true, Relaxed))
            .await
            .unwrap();
        assert!(updated.load(Relaxed));
        // false: no new backend discovered
        let updated = AtomicBool::new(false);
        backends
            .update(|_| updated.store(true, Relaxed))
            .await
            .unwrap();
        assert!(!updated.load(Relaxed));
        backends.run_health_check(false).await;
        let backend = backends.get_backend();
        assert!(backend.contains(&good1));
        assert!(backend.contains(&good2));
        assert!(backend.contains(&bad));
        assert!(backends.ready(&good1));
        assert!(backends.ready(&good2));
        assert!(!backends.ready(&bad));
    }
    #[tokio::test]
    async fn test_backends_with_ext() {
        let discovery = discovery::Static::default();
        let mut b1 = Backend::new("1.1.1.1:80").unwrap();
        b1.ext.insert(true);
        let mut b2 = Backend::new("1.0.0.1:80").unwrap();
        b2.ext.insert(1u8);
        discovery.add(b1.clone());
        discovery.add(b2.clone());
        let backends = Backends::new(Box::new(discovery));
        // fill in the backends
        backends.update(|_| {}).await.unwrap();
        let backend = backends.get_backend();
        assert!(backend.contains(&b1));
        assert!(backend.contains(&b2));
        // BTreeSet ordering puts 1.0.0.1 (b2) first and 1.1.1.1 (b1) last;
        // ext data must survive the round trip through discovery.
        let b2 = backend.first().unwrap();
        assert_eq!(b2.ext.get::<u8>(), Some(&1));
        let b1 = backend.last().unwrap();
        assert_eq!(b1.ext.get::<bool>(), Some(&true));
    }
    #[tokio::test]
    async fn test_discovery_readiness() {
        use discovery::Static;
        // Wraps Static discovery and force-disables one backend via readiness.
        struct TestDiscovery(Static);
        #[async_trait]
        impl ServiceDiscovery for TestDiscovery {
            async fn discover(&self) -> Result<(BTreeSet<Backend>, HashMap<u64, bool>)> {
                let bad = Backend::new("127.0.0.1:79").unwrap();
                let (backends, mut readiness) = self.0.discover().await?;
                readiness.insert(bad.hash_key(), false);
                Ok((backends, readiness))
            }
        }
        let discovery = Static::default();
        let good1 = Backend::new("1.1.1.1:80").unwrap();
        discovery.add(good1.clone());
        let good2 = Backend::new("1.0.0.1:80").unwrap();
        discovery.add(good2.clone());
        let bad = Backend::new("127.0.0.1:79").unwrap();
        discovery.add(bad.clone());
        let discovery = TestDiscovery(discovery);
        let backends = Backends::new(Box::new(discovery));
        // true: new backend discovered
        let updated = AtomicBool::new(false);
        backends
            .update(|_| updated.store(true, Relaxed))
            .await
            .unwrap();
        assert!(updated.load(Relaxed));
        let backend = backends.get_backend();
        assert!(backend.contains(&good1));
        assert!(backend.contains(&good2));
        assert!(backend.contains(&bad));
        assert!(backends.ready(&good1));
        assert!(backends.ready(&good2));
        assert!(!backends.ready(&bad));
    }
    #[tokio::test]
    async fn test_parallel_health_check() {
        let discovery = discovery::Static::default();
        let good1 = Backend::new("1.1.1.1:80").unwrap();
        discovery.add(good1.clone());
        let good2 = Backend::new("1.0.0.1:80").unwrap();
        discovery.add(good2.clone());
        let bad = Backend::new("127.0.0.1:79").unwrap();
        discovery.add(bad.clone());
        let mut backends = Backends::new(Box::new(discovery));
        let check = health_check::TcpHealthCheck::new();
        backends.set_health_check(check);
        // true: new backend discovered
        let updated = AtomicBool::new(false);
        backends
            .update(|_| updated.store(true, Relaxed))
            .await
            .unwrap();
        assert!(updated.load(Relaxed));
        backends.run_health_check(true).await;
        assert!(backends.ready(&good1));
        assert!(backends.ready(&good2));
        assert!(!backends.ready(&bad));
    }
    mod thread_safety {
        use super::*;
        struct MockDiscovery {
            expected: usize,
        }
        #[async_trait]
        impl ServiceDiscovery for MockDiscovery {
            async fn discover(&self) -> Result<(BTreeSet<Backend>, HashMap<u64, bool>)> {
                let mut d = BTreeSet::new();
                let mut m = HashMap::with_capacity(self.expected);
                for i in 0..self.expected {
                    let b = Backend::new(&format!("1.1.1.1:{i}")).unwrap();
                    m.insert(i as u64, true);
                    d.insert(b);
                }
                Ok((d, m))
            }
        }
        #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
        async fn test_consistency() {
            let expected = 3000;
            let discovery = MockDiscovery { expected };
            let lb = Arc::new(LoadBalancer::<selection::Consistent>::from_backends(
                Backends::new(Box::new(discovery)),
            ));
            let lb2 = lb.clone();
            tokio::spawn(async move {
                assert!(lb2.update().await.is_ok());
            });
            // Spin until the concurrently-running update publishes the backends;
            // once visible, the selector must already be usable (callback-first order).
            let mut backend_count = 0;
            while backend_count == 0 {
                let backends = lb.backends();
                backend_count = backends.backends.load_full().len();
            }
            assert_eq!(backend_count, expected);
            assert!(lb.select_with(b"test", 1, |_, _| true).is_some());
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-load-balancing/src/health_check.rs | pingora-load-balancing/src/health_check.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Health Check interface and methods.
use crate::Backend;
use arc_swap::ArcSwap;
use async_trait::async_trait;
use pingora_core::connectors::http::custom;
use pingora_core::connectors::{http::Connector as HttpConnector, TransportConnector};
use pingora_core::custom_session;
use pingora_core::protocols::http::custom::client::Session;
use pingora_core::upstreams::peer::{BasicPeer, HttpPeer, Peer};
use pingora_error::{Error, ErrorType::CustomCode, Result};
use pingora_http::{RequestHeader, ResponseHeader};
use std::sync::Arc;
use std::time::Duration;
/// [HealthObserve] is an interface for observing health changes of backends;
/// this is what's used for our health observation callback.
#[async_trait]
pub trait HealthObserve {
    /// Observes the health of a [Backend]; can be used for monitoring purposes.
    /// Invoked whenever the `healthy` state of `target` flips.
    async fn observe(&self, target: &Backend, healthy: bool);
}
/// Provided to a [HealthCheck] to observe changes to [Backend] health.
pub type HealthObserveCallback = Box<dyn HealthObserve + Send + Sync>;
/// Provided to a [HealthCheck] to fetch a [Backend] summary for detailed logging.
pub type BackendSummary = Box<dyn Fn(&Backend) -> String + Send + Sync>;
/// [HealthCheck] is the interface to implement health check for backends
#[async_trait]
pub trait HealthCheck {
    /// Check the given backend.
    ///
    /// `Ok(())` if the check passes, otherwise the check fails.
    async fn check(&self, target: &Backend) -> Result<()>;
    /// Called when the health changes for a [Backend].
    async fn health_status_change(&self, _target: &Backend, _healthy: bool) {}
    /// Called when a detailed [Backend] summary is needed.
    fn backend_summary(&self, target: &Backend) -> String {
        format!("{target:?}")
    }
    /// This function defines how many *consecutive* checks should flip the health of a backend.
    ///
    /// For example: with `success`: `true`, this function should return the
    /// number of consecutive successful checks needed to flip from unhealthy to healthy.
    fn health_threshold(&self, success: bool) -> usize;
}
/// TCP health check
///
/// This health check checks if a TCP (or TLS) connection can be established to a given backend.
pub struct TcpHealthCheck {
    /// Number of successful checks to flip from unhealthy to healthy.
    pub consecutive_success: usize,
    /// Number of failed checks to flip from healthy to unhealthy.
    pub consecutive_failure: usize,
    /// How to connect to the backend.
    ///
    /// This field defines settings like the connect timeout and src IP to bind.
    /// The SocketAddr of `peer_template` is just a placeholder which will be replaced by the
    /// actual address of the backend when the health check runs.
    ///
    /// By default, this check will try to establish a TCP connection. When the `sni` field is
    /// set, it will also try to establish a TLS connection on top of the TCP connection.
    pub peer_template: BasicPeer,
    // The transport connector used to open the probe connection.
    connector: TransportConnector,
    /// A callback that is invoked when the `healthy` status changes for a [Backend].
    pub health_changed_callback: Option<HealthObserveCallback>,
}
impl Default for TcpHealthCheck {
    fn default() -> Self {
        // "0.0.0.0:1" is a placeholder; the real backend address is substituted
        // into the template each time a check runs.
        let mut template = BasicPeer::new("0.0.0.0:1");
        template.options.connection_timeout = Some(Duration::from_secs(1));
        Self {
            peer_template: template,
            consecutive_success: 1,
            consecutive_failure: 1,
            connector: TransportConnector::new(None),
            health_changed_callback: None,
        }
    }
}
impl TcpHealthCheck {
    /// Create a new [TcpHealthCheck] with the following default values
    /// * connect timeout: 1 second
    /// * consecutive_success: 1
    /// * consecutive_failure: 1
    pub fn new() -> Box<Self> {
        Box::new(Self::default())
    }
    /// Create a new [TcpHealthCheck] that tries to establish a TLS connection.
    ///
    /// The default values are the same as [Self::new()].
    pub fn new_tls(sni: &str) -> Box<Self> {
        let mut check = Self::default();
        // A non-empty SNI makes the probe attempt a TLS handshake as well.
        check.peer_template.sni = sni.into();
        Box::new(check)
    }
    /// Replace the internal tcp connector with the given [TransportConnector]
    pub fn set_connector(&mut self, connector: TransportConnector) {
        self.connector = connector;
    }
}
#[async_trait]
impl HealthCheck for TcpHealthCheck {
    fn health_threshold(&self, success: bool) -> usize {
        match success {
            true => self.consecutive_success,
            false => self.consecutive_failure,
        }
    }
    async fn check(&self, target: &Backend) -> Result<()> {
        // Substitute the placeholder address in the template with the backend's.
        let mut peer = self.peer_template.clone();
        peer._address = target.addr.clone();
        // Passing is simply being able to establish the connection; the
        // stream is dropped (closed) immediately.
        let _stream = self.connector.get_stream(&peer).await?;
        Ok(())
    }
    async fn health_status_change(&self, target: &Backend, healthy: bool) {
        let Some(callback) = self.health_changed_callback.as_ref() else {
            return;
        };
        callback.observe(target, healthy).await;
    }
}
// Validates a health check response header; `Ok(())` means the check passed.
type Validator = Box<dyn Fn(&ResponseHeader) -> Result<()> + Send + Sync>;
/// HTTP health check
///
/// This health check checks if it can receive the expected HTTP(s) response from the given backend.
pub struct HttpHealthCheck<C = ()>
where
    C: custom::Connector,
{
    /// Number of successful checks to flip from unhealthy to healthy.
    pub consecutive_success: usize,
    /// Number of failed checks to flip from healthy to unhealthy.
    pub consecutive_failure: usize,
    /// How to connect to the backend.
    ///
    /// This field defines settings like the connect timeout and src IP to bind.
    /// The SocketAddr of `peer_template` is just a placeholder which will be replaced by the
    /// actual address of the backend when the health check runs.
    ///
    /// Set the `scheme` field to use HTTPs.
    pub peer_template: HttpPeer,
    /// Whether the underlying TCP/TLS connection can be reused across checks.
    ///
    /// * `false` will make sure that every health check goes through TCP (and TLS) handshakes.
    ///   Established connections sometimes hide the issue of firewalls and L4 LB.
    /// * `true` will try to reuse connections across checks, this is the more efficient and fast way
    ///   to perform health checks.
    pub reuse_connection: bool,
    /// The request header to send to the backend
    pub req: RequestHeader,
    // The HTTP connector used to open (and optionally pool) probe sessions.
    connector: HttpConnector<C>,
    /// Optional field to define how to validate the response from the server.
    ///
    /// If not set, any response with a `200 OK` is considered a successful check.
    pub validator: Option<Validator>,
    /// Sometimes the health check endpoint lives on a different port than the actual backend.
    /// Setting this option allows the health check to perform on the given port of the backend IP.
    pub port_override: Option<u16>,
    /// A callback that is invoked when the `healthy` status changes for a [Backend].
    pub health_changed_callback: Option<HealthObserveCallback>,
    /// An optional callback for backend summary reporting.
    pub backend_summary_callback: Option<BackendSummary>,
}
impl HttpHealthCheck<()> {
    /// Create a new [HttpHealthCheck] with the following default settings
    /// * connect timeout: 1 second
    /// * read timeout: 1 second
    /// * req: a GET to the `/` of the given host name
    /// * consecutive_success: 1
    /// * consecutive_failure: 1
    /// * reuse_connection: false
    /// * validator: `None`, any 200 response is considered successful
    pub fn new(host: &str, tls: bool) -> Self {
        // Delegate to the generic constructor with the default HTTP connector so
        // the default settings are defined in exactly one place (they were
        // previously duplicated verbatim between `new` and `new_custom`).
        Self::new_custom(host, tls, HttpConnector::new(None))
    }
}
impl<C> HttpHealthCheck<C>
where
    C: custom::Connector,
{
    /// Create a new [HttpHealthCheck] with the following default settings
    /// * connect timeout: 1 second
    /// * read timeout: 1 second
    /// * req: a GET to the `/` of the given host name
    /// * consecutive_success: 1
    /// * consecutive_failure: 1
    /// * reuse_connection: false
    /// * validator: `None`, any 200 response is considered successful
    pub fn new_custom(host: &str, tls: bool, custom: HttpConnector<C>) -> Self {
        let mut req = RequestHeader::build("GET", b"/", None).unwrap();
        req.append_header("Host", host).unwrap();
        // Only set the SNI when probing over TLS.
        let sni = if tls { host.into() } else { String::new() };
        // "0.0.0.0:1" is a placeholder replaced by the backend address per check.
        let mut peer_template = HttpPeer::new("0.0.0.0:1", tls, sni);
        peer_template.options.connection_timeout = Some(Duration::from_secs(1));
        peer_template.options.read_timeout = Some(Duration::from_secs(1));
        HttpHealthCheck {
            consecutive_success: 1,
            consecutive_failure: 1,
            peer_template,
            connector: custom,
            reuse_connection: false,
            req,
            validator: None,
            port_override: None,
            health_changed_callback: None,
            backend_summary_callback: None,
        }
    }
    /// Replace the internal http connector with the given [HttpConnector]
    pub fn set_connector(&mut self, connector: HttpConnector<C>) {
        self.connector = connector;
    }
    /// Set the callback used by [HealthCheck::backend_summary] to render a
    /// custom, human-readable summary of a [Backend] for health change logs.
    pub fn set_backend_summary<F>(&mut self, callback: F)
    where
        F: Fn(&Backend) -> String + Send + Sync + 'static,
    {
        self.backend_summary_callback = Some(Box::new(callback));
    }
}
#[async_trait]
impl<C> HealthCheck for HttpHealthCheck<C>
where
    C: custom::Connector,
{
    fn health_threshold(&self, success: bool) -> usize {
        if success {
            self.consecutive_success
        } else {
            self.consecutive_failure
        }
    }
    async fn check(&self, target: &Backend) -> Result<()> {
        // Point the template peer at the actual backend, optionally
        // overriding the port for out-of-band health endpoints.
        let mut peer = self.peer_template.clone();
        peer._address = target.addr.clone();
        if let Some(port) = self.port_override {
            peer._address.set_port(port);
        }
        let session = self.connector.get_http_session(&peer).await?;
        let mut session = session.0;
        let req = Box::new(self.req.clone());
        session.write_request_header(req).await?;
        session.finish_request_body().await?;
        custom_session!(session.finish_custom().await?);
        if let Some(read_timeout) = peer.options.read_timeout {
            session.set_read_timeout(Some(read_timeout));
        }
        session.read_response_header().await?;
        let resp = session.response_header().expect("just read");
        // User-supplied validator takes precedence; otherwise any 200 passes.
        if let Some(validator) = self.validator.as_ref() {
            validator(resp)?;
        } else if resp.status != 200 {
            return Error::e_explain(
                CustomCode("non 200 code", resp.status.as_u16()),
                "during http healthcheck",
            );
        };
        while session.read_response_body().await?.is_some() {
            // drain the body if any
        }
        // TODO(slava): do it concurrently with body drain?
        custom_session!(session.drain_custom_messages().await?);
        // Return the connection to the pool only when reuse is allowed;
        // otherwise dropping the session forces a fresh handshake next check.
        if self.reuse_connection {
            let idle_timeout = peer.idle_timeout();
            self.connector
                .release_http_session(session, &peer, idle_timeout)
                .await;
        }
        Ok(())
    }
    async fn health_status_change(&self, target: &Backend, healthy: bool) {
        if let Some(callback) = &self.health_changed_callback {
            callback.observe(target, healthy).await;
        }
    }
    fn backend_summary(&self, target: &Backend) -> String {
        if let Some(callback) = &self.backend_summary_callback {
            callback(target)
        } else {
            format!("{target:?}")
        }
    }
}
#[derive(Clone)]
struct HealthInner {
    /// Whether the endpoint is healthy to serve traffic
    healthy: bool,
    /// Whether the endpoint is allowed to serve traffic independent of its health
    enabled: bool,
    /// The counter for stateful transition between healthy and unhealthy.
    /// When `healthy` is true, this counts the number of consecutive health check failures
    /// so that the caller can flip the healthy state when a certain threshold is met, and vice versa.
    consecutive_counter: usize,
}
/// Health of backends that can be updated atomically
// Wrapped in an ArcSwap so readers take lock-free snapshots while writers swap in new state.
pub(crate) struct Health(ArcSwap<HealthInner>);
impl Default for Health {
fn default() -> Self {
Health(ArcSwap::new(Arc::new(HealthInner {
healthy: true, // TODO: allow to start with unhealthy
enabled: true,
consecutive_counter: 0,
})))
}
}
impl Clone for Health {
fn clone(&self) -> Self {
let inner = self.0.load_full();
Health(ArcSwap::new(inner))
}
}
impl Health {
    /// Whether the backend is both healthy and enabled, i.e. ready for traffic.
    pub fn ready(&self) -> bool {
        let h = self.0.load();
        h.healthy && h.enabled
    }
    /// Set the `enabled` flag independent of the observed health.
    // NOTE(review): load-then-store is not atomic; a concurrent writer could be
    // overwritten here — appears accepted given the single health-check driver.
    pub fn enable(&self, enabled: bool) {
        let h = self.0.load();
        if h.enabled != enabled {
            // clone the inner
            let mut new_health = (**h).clone();
            new_health.enabled = enabled;
            self.0.store(Arc::new(new_health));
        };
    }
    /// Record one health observation; return true when the health is flipped.
    ///
    /// `flip_threshold` is the number of *consecutive* opposite observations
    /// required before `healthy` actually changes state.
    pub fn observe_health(&self, health: bool, flip_threshold: usize) -> bool {
        let h = self.0.load();
        let mut flipped = false;
        if h.healthy != health {
            // opposite health observed, ready to increase the counter
            // clone the inner
            let mut new_health = (**h).clone();
            new_health.consecutive_counter += 1;
            if new_health.consecutive_counter >= flip_threshold {
                new_health.healthy = health;
                new_health.consecutive_counter = 0;
                flipped = true;
            }
            self.0.store(Arc::new(new_health));
        } else if h.consecutive_counter > 0 {
            // observing the same health as the current state.
            // reset the counter, if it is non-zero, because it is no longer consecutive
            let mut new_health = (**h).clone();
            new_health.consecutive_counter = 0;
            self.0.store(Arc::new(new_health));
        }
        flipped
    }
}
#[cfg(test)]
mod test {
    use std::{
        collections::{BTreeSet, HashMap},
        sync::atomic::{AtomicU16, Ordering},
    };
    use super::*;
    use crate::{discovery, Backends, SocketAddr};
    use async_trait::async_trait;
    use http::Extensions;

    // NOTE(review): these tests contact live endpoints (1.1.1.1 /
    // one.one.one.one) and therefore require outbound network access.

    #[tokio::test]
    async fn test_tcp_check() {
        let tcp_check = TcpHealthCheck::default();
        // port 80 on 1.1.1.1 accepts TCP connections
        let backend = Backend {
            addr: SocketAddr::Inet("1.1.1.1:80".parse().unwrap()),
            weight: 1,
            ext: Extensions::new(),
        };
        assert!(tcp_check.check(&backend).await.is_ok());
        // port 79 is expected to be closed, so the check must fail
        let backend = Backend {
            addr: SocketAddr::Inet("1.1.1.1:79".parse().unwrap()),
            weight: 1,
            ext: Extensions::new(),
        };
        assert!(tcp_check.check(&backend).await.is_err());
    }

    #[cfg(feature = "any_tls")]
    #[tokio::test]
    async fn test_tls_check() {
        let tls_check = TcpHealthCheck::new_tls("one.one.one.one");
        let backend = Backend {
            addr: SocketAddr::Inet("1.1.1.1:443".parse().unwrap()),
            weight: 1,
            ext: Extensions::new(),
        };
        assert!(tls_check.check(&backend).await.is_ok());
    }

    #[cfg(feature = "any_tls")]
    #[tokio::test]
    async fn test_https_check() {
        let https_check = HttpHealthCheck::new("one.one.one.one", true);
        let backend = Backend {
            addr: SocketAddr::Inet("1.1.1.1:443".parse().unwrap()),
            weight: 1,
            ext: Extensions::new(),
        };
        assert!(https_check.check(&backend).await.is_ok());
    }

    #[tokio::test]
    async fn test_http_custom_check() {
        let mut http_check = HttpHealthCheck::new("one.one.one.one", false);
        // custom validator: only a 301 response counts as healthy
        http_check.validator = Some(Box::new(|resp: &ResponseHeader| {
            if resp.status == 301 {
                Ok(())
            } else {
                Error::e_explain(
                    CustomCode("non 301 code", resp.status.as_u16()),
                    "during http healthcheck",
                )
            }
        }));
        let backend = Backend {
            addr: SocketAddr::Inet("1.1.1.1:80".parse().unwrap()),
            weight: 1,
            ext: Extensions::new(),
        };
        // `unwrap()` already asserts success; the previous duplicate
        // `assert!(check().is_ok())` ran the same network check a second time.
        http_check.check(&backend).await.unwrap();
    }

    #[tokio::test]
    async fn test_health_observe() {
        // Counts how many times the health-changed callback reports unhealthy.
        struct Observe {
            unhealthy_count: Arc<AtomicU16>,
        }
        #[async_trait]
        impl HealthObserve for Observe {
            async fn observe(&self, _target: &Backend, healthy: bool) {
                if !healthy {
                    self.unhealthy_count.fetch_add(1, Ordering::Relaxed);
                }
            }
        }
        // port 79 is closed, so the health check will fail and flip the state
        let good_backend = Backend::new("127.0.0.1:79").unwrap();
        let new_good_backends = || -> (BTreeSet<Backend>, HashMap<u64, bool>) {
            let mut healthy = HashMap::new();
            healthy.insert(good_backend.hash_key(), true);
            let mut backends = BTreeSet::new();
            backends.extend(vec![good_backend.clone()]);
            (backends, healthy)
        };
        // tcp health check
        {
            let unhealthy_count = Arc::new(AtomicU16::new(0));
            let ob = Observe {
                unhealthy_count: unhealthy_count.clone(),
            };
            let bob = Box::new(ob);
            let tcp_check = TcpHealthCheck {
                health_changed_callback: Some(bob),
                ..Default::default()
            };
            let discovery = discovery::Static::default();
            let mut backends = Backends::new(Box::new(discovery));
            backends.set_health_check(Box::new(tcp_check));
            let result = new_good_backends();
            backends.do_update(result.0, result.1, |_backend: Arc<BTreeSet<Backend>>| {});
            // the backend is ready
            assert!(backends.ready(&good_backend));
            // run health check; the callback must fire exactly once
            backends.run_health_check(false).await;
            assert!(1 == unhealthy_count.load(Ordering::Relaxed));
            // backend is unhealthy
            assert!(!backends.ready(&good_backend));
        }
        // http health check
        {
            let unhealthy_count = Arc::new(AtomicU16::new(0));
            let ob = Observe {
                unhealthy_count: unhealthy_count.clone(),
            };
            let bob = Box::new(ob);
            let mut https_check = HttpHealthCheck::new("one.one.one.one", true);
            https_check.health_changed_callback = Some(bob);
            let discovery = discovery::Static::default();
            let mut backends = Backends::new(Box::new(discovery));
            backends.set_health_check(Box::new(https_check));
            let result = new_good_backends();
            backends.do_update(result.0, result.1, |_backend: Arc<BTreeSet<Backend>>| {});
            // the backend is ready
            assert!(backends.ready(&good_backend));
            // run health check; the callback must fire exactly once
            backends.run_health_check(false).await;
            assert!(1 == unhealthy_count.load(Ordering::Relaxed));
            assert!(!backends.ready(&good_backend));
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-load-balancing/src/background.rs | pingora-load-balancing/src/background.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Implement [BackgroundService] for [LoadBalancer]
use std::time::{Duration, Instant};
use super::{BackendIter, BackendSelection, LoadBalancer};
use async_trait::async_trait;
use pingora_core::services::background::BackgroundService;
#[async_trait]
impl<S: Send + Sync + BackendSelection + 'static> BackgroundService for LoadBalancer<S>
where
    S::Iter: BackendIter,
{
    /// Periodically refresh the backend list and run health checks until
    /// shutdown is signaled. Both tasks fire once immediately on startup.
    async fn start(&self, shutdown: pingora_core::server::ShutdownWatch) {
        // Effectively "never" for a task without a configured frequency
        // (136 years).
        const NEVER: Duration = Duration::from_secs(u32::MAX as u64);

        let mut now = Instant::now();
        // schedule both tasks to fire immediately
        let mut next_update = now;
        let mut next_health_check = now;

        loop {
            if *shutdown.borrow() {
                return;
            }

            if now >= next_update {
                // TODO: log err
                let _ = self.update().await;
                next_update = now + self.update_frequency.unwrap_or(NEVER);
            }

            if now >= next_health_check {
                self.backends
                    .run_health_check(self.parallel_health_check)
                    .await;
                next_health_check = now + self.health_check_frequency.unwrap_or(NEVER);
            }

            // Neither task repeats: nothing left to do after the initial run.
            if self.update_frequency.is_none() && self.health_check_frequency.is_none() {
                return;
            }

            // Sleep until whichever task is due first.
            let wake_at = next_update.min(next_health_check);
            tokio::time::sleep_until(wake_at.into()).await;
            now = Instant::now();
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-load-balancing/src/selection/consistent.rs | pingora-load-balancing/src/selection/consistent.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Consistent Hashing
use super::*;
use pingora_core::protocols::l4::socket::SocketAddr;
use pingora_ketama::{Bucket, Continuum, Version};
use std::collections::HashMap;
/// Weighted Ketama consistent hashing
pub struct KetamaHashing {
    /// The ketama ring of hashed points derived from the backends.
    ring: Continuum,
    // TODO: update Ketama to just store this
    /// Maps a ring address back to the full [Backend] it belongs to.
    backends: HashMap<SocketAddr, Backend>,
}
/// Configuration for [KetamaHashing].
#[derive(Clone, Debug, Copy, Default)]
pub struct KetamaConfig {
    /// When set, selects the ketama `Version::V2` point layout — but only if
    /// this crate is built with the `v2` feature; otherwise it is ignored and
    /// `Version::V1` is used.
    pub point_multiple: Option<u32>,
}
impl BackendSelection for KetamaHashing {
    type Iter = OwnedNodeIterator;
    type Config = KetamaConfig;
    /// Build the ketama ring from the given backends.
    ///
    /// `config.point_multiple` only takes effect when the `v2` feature is
    /// enabled; see the cfg-gated version selection below.
    fn build_with_config(backends: &BTreeSet<Backend>, config: &Self::Config) -> Self {
        let KetamaConfig { point_multiple } = *config;
        let buckets: Vec<_> = backends
            .iter()
            .filter_map(|b| {
                // FIXME: ketama only supports Inet addr, UDS addrs are ignored here
                if let SocketAddr::Inet(addr) = b.addr {
                    Some(Bucket::new(addr, b.weight as u32))
                } else {
                    None
                }
            })
            .collect();
        // Side table so the iterator can map ring addresses back to the
        // full Backend values.
        let new_backends = backends
            .iter()
            .map(|b| (b.addr.clone(), b.clone()))
            .collect();
        // `match ()` with cfg'd arms picks the ring version at compile time;
        // `point_multiple` is unused (hence #[allow]) when `v2` is off.
        #[allow(unused)]
        let version = if let Some(point_multiple) = point_multiple {
            match () {
                #[cfg(feature = "v2")]
                () => Version::V2 { point_multiple },
                #[cfg(not(feature = "v2"))]
                () => Version::V1,
            }
        } else {
            Version::V1
        };
        KetamaHashing {
            ring: Continuum::new_with_version(&buckets, version),
            backends: new_backends,
        }
    }
    fn build(backends: &BTreeSet<Backend>) -> Self {
        Self::build_with_config(backends, &KetamaConfig::default())
    }
    fn iter(self: &Arc<Self>, key: &[u8]) -> Self::Iter {
        OwnedNodeIterator {
            idx: self.ring.node_idx(key),
            ring: self.clone(),
        }
    }
}
/// Iterator over a Continuum
pub struct OwnedNodeIterator {
    /// Current position on the ring; handed to `get_addr` as `&mut` so the
    /// ring can advance it between calls.
    idx: usize,
    /// Shared ring and backend lookup table this iterator walks.
    ring: Arc<KetamaHashing>,
}
impl BackendIter for OwnedNodeIterator {
    fn next(&mut self) -> Option<&Backend> {
        // Take the next address from the ring, then map it back to the
        // owning Backend via the side table.
        let addr = self.ring.ring.get_addr(&mut self.idx)?;
        self.ring.backends.get(&SocketAddr::Inet(*addr))
    }
}
#[cfg(test)]
mod test {
    use super::*;

    /// Assert that the first backend picked for `key` on `ring` is `want`.
    fn first_pick_is(ring: &Arc<KetamaHashing>, key: &[u8], want: &Backend) {
        let mut iter = ring.iter(key);
        assert_eq!(iter.next(), Some(want));
    }

    #[test]
    fn test_ketama() {
        let b1 = Backend::new("1.1.1.1:80").unwrap();
        let b2 = Backend::new("1.0.0.1:80").unwrap();
        let b3 = Backend::new("1.0.0.255:80").unwrap();
        let backends = BTreeSet::from_iter([b1.clone(), b2.clone(), b3.clone()]);
        let hash = Arc::new(KetamaHashing::build(&backends));

        // regression pins: these mappings are fixed by the ketama hash
        first_pick_is(&hash, b"test0", &b2);
        first_pick_is(&hash, b"test1", &b1);
        first_pick_is(&hash, b"test2", &b1);
        first_pick_is(&hash, b"test3", &b1);
        first_pick_is(&hash, b"test4", &b1);
        first_pick_is(&hash, b"test5", &b3);
        first_pick_is(&hash, b"test6", &b1);
        first_pick_is(&hash, b"test7", &b3);
        first_pick_is(&hash, b"test8", &b1);
        first_pick_is(&hash, b"test9", &b2);

        // remove b3: only keys previously mapped to b3 should move
        let backends = BTreeSet::from_iter([b1.clone(), b2.clone()]);
        let hash = Arc::new(KetamaHashing::build(&backends));

        first_pick_is(&hash, b"test0", &b2);
        first_pick_is(&hash, b"test1", &b1);
        first_pick_is(&hash, b"test2", &b1);
        first_pick_is(&hash, b"test3", &b1);
        first_pick_is(&hash, b"test4", &b1);
        first_pick_is(&hash, b"test5", &b2); // changed
        first_pick_is(&hash, b"test6", &b1);
        first_pick_is(&hash, b"test7", &b1); // changed
        first_pick_is(&hash, b"test8", &b1);
        first_pick_is(&hash, b"test9", &b2);
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-load-balancing/src/selection/weighted.rs | pingora-load-balancing/src/selection/weighted.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Weighted Selection
use super::{Backend, BackendIter, BackendSelection, SelectionAlgorithm};
use fnv::FnvHasher;
use std::collections::BTreeSet;
use std::sync::Arc;
/// Weighted selection with a given selection algorithm
///
/// The default algorithm is [FnvHasher]. See [super::algorithms] for more choices.
pub struct Weighted<H = FnvHasher> {
    /// The unique backends, in the sorted order of the input BTreeSet.
    backends: Box<[Backend]>,
    // each item is an index to the `backends`, use u16 to save memory, support up to 2^16 backends
    // (each backend appears `weight` times, so a uniform pick is weight-proportional)
    weighted: Box<[u16]>,
    /// Maps a selection key to an index seed; see [SelectionAlgorithm].
    algorithm: H,
}
impl<H: SelectionAlgorithm> BackendSelection for Weighted<H> {
    type Iter = WeightedIterator<H>;
    type Config = ();

    /// Build the weighted lookup table from the given backends.
    ///
    /// Each backend occupies `weight` consecutive slots in `weighted`, so a
    /// uniform index over `weighted` selects backends proportionally to
    /// their weights.
    ///
    /// # Panics
    /// Panics when more than 2^16 backends are provided, since `weighted`
    /// stores `u16` indices.
    fn build(backends: &BTreeSet<Backend>) -> Self {
        assert!(
            backends.len() <= u16::MAX as usize,
            "support up to 2^16 backends"
        );
        let backends = Vec::from_iter(backends.iter().cloned()).into_boxed_slice();
        // Reserve one slot per unit of weight up front; reserving only
        // `backends.len()` (as before) forces reallocations whenever any
        // backend has weight > 1.
        let total_weight: usize = backends.iter().map(|b| b.weight).sum();
        let mut weighted = Vec::with_capacity(total_weight);
        for (index, b) in backends.iter().enumerate() {
            for _ in 0..b.weight {
                weighted.push(index as u16);
            }
        }
        Weighted {
            backends,
            weighted: weighted.into_boxed_slice(),
            algorithm: H::new(),
        }
    }

    fn iter(self: &Arc<Self>, key: &[u8]) -> Self::Iter {
        WeightedIterator::new(key, self.clone())
    }
}
/// An iterator over the backends of a [Weighted] selection.
///
/// See [super::BackendSelection] for more information.
pub struct WeightedIterator<H> {
    // the unbounded index seed
    index: u64,
    /// The shared selection state this iterator walks.
    backend: Arc<Weighted<H>>,
    /// True until the first (weight-proportional) pick has been made.
    first: bool,
}
impl<H: SelectionAlgorithm> WeightedIterator<H> {
    /// Constructs a new [WeightedIterator].
    fn new(input: &[u8], backend: Arc<Weighted<H>>) -> Self {
        // Seed the index from the key so the first pick is key-affine.
        let index = backend.algorithm.next(input);
        Self {
            index,
            backend,
            first: true,
        }
    }
}
impl<H: SelectionAlgorithm> BackendIter for WeightedIterator<H> {
    fn next(&mut self) -> Option<&Backend> {
        if self.backend.backends.is_empty() {
            // nothing to select from
            return None;
        }
        let slot = if self.first {
            // First pick: honor the weights by indexing the expanded
            // weighted table with the key-derived seed.
            self.first = false;
            let weighted = &self.backend.weighted;
            weighted[self.index as usize % weighted.len()] as usize
        } else {
            // Fallback picks: derive the next index deterministically from
            // the previous one and select uniformly over the unique backends.
            self.index = self.backend.algorithm.next(&self.index.to_le_bytes());
            self.index as usize % self.backend.backends.len()
        };
        Some(&self.backend.backends[slot])
    }
}
#[cfg(test)]
mod test {
    use super::super::algorithms::*;
    use super::*;
    use std::collections::HashMap;
    // The exact backends asserted below are regression pins: they depend on
    // the concrete hash values produced by each algorithm.
    #[test]
    fn test_fnv() {
        let b1 = Backend::new("1.1.1.1:80").unwrap();
        let mut b2 = Backend::new("1.0.0.1:80").unwrap();
        b2.weight = 10; // 10x than the rest
        let b3 = Backend::new("1.0.0.255:80").unwrap();
        let backends = BTreeSet::from_iter([b1.clone(), b2.clone(), b3.clone()]);
        let hash: Arc<Weighted> = Arc::new(Weighted::build(&backends));
        // same hash iter over
        let mut iter = hash.iter(b"test");
        // first, should be weighted
        assert_eq!(iter.next(), Some(&b2));
        // fallbacks, should be uniform, not weighted
        assert_eq!(iter.next(), Some(&b2));
        assert_eq!(iter.next(), Some(&b2));
        assert_eq!(iter.next(), Some(&b1));
        assert_eq!(iter.next(), Some(&b3));
        assert_eq!(iter.next(), Some(&b2));
        assert_eq!(iter.next(), Some(&b2));
        assert_eq!(iter.next(), Some(&b1));
        assert_eq!(iter.next(), Some(&b2));
        assert_eq!(iter.next(), Some(&b3));
        assert_eq!(iter.next(), Some(&b1));
        // different hashes, the first selection should be weighted
        let mut iter = hash.iter(b"test1");
        assert_eq!(iter.next(), Some(&b2));
        let mut iter = hash.iter(b"test2");
        assert_eq!(iter.next(), Some(&b2));
        let mut iter = hash.iter(b"test3");
        assert_eq!(iter.next(), Some(&b3));
        let mut iter = hash.iter(b"test4");
        assert_eq!(iter.next(), Some(&b1));
        let mut iter = hash.iter(b"test5");
        assert_eq!(iter.next(), Some(&b2));
        let mut iter = hash.iter(b"test6");
        assert_eq!(iter.next(), Some(&b2));
        let mut iter = hash.iter(b"test7");
        assert_eq!(iter.next(), Some(&b2));
    }
    #[test]
    fn test_round_robin() {
        let b1 = Backend::new("1.1.1.1:80").unwrap();
        let mut b2 = Backend::new("1.0.0.1:80").unwrap();
        b2.weight = 8; // 8x than the rest
        let b3 = Backend::new("1.0.0.255:80").unwrap();
        // sorted with: [b2, b3, b1]
        // weighted: [0, 0, 0, 0, 0, 0, 0, 0, 1, 2]
        let backends = BTreeSet::from_iter([b1.clone(), b2.clone(), b3.clone()]);
        let hash: Arc<Weighted<RoundRobin>> = Arc::new(Weighted::build(&backends));
        // same hash iter over
        let mut iter = hash.iter(b"test");
        // first, should be weighted
        // weighted: [0, 0, 0, 0, 0, 0, 0, 0, 1, 2]
        //            ^
        assert_eq!(iter.next(), Some(&b2));
        // fallbacks, should be round robin
        assert_eq!(iter.next(), Some(&b3));
        assert_eq!(iter.next(), Some(&b1));
        assert_eq!(iter.next(), Some(&b2));
        assert_eq!(iter.next(), Some(&b3));
        // round robin, ignoring the hash key
        // index advanced 5 steps
        // weighted: [0, 0, 0, 0, 0, 0, 0, 0, 1, 2]
        //                           ^
        let mut iter = hash.iter(b"test1");
        assert_eq!(iter.next(), Some(&b2));
        let mut iter = hash.iter(b"test1");
        assert_eq!(iter.next(), Some(&b2));
        let mut iter = hash.iter(b"test1");
        assert_eq!(iter.next(), Some(&b2));
        let mut iter = hash.iter(b"test1");
        assert_eq!(iter.next(), Some(&b3));
        let mut iter = hash.iter(b"test1");
        assert_eq!(iter.next(), Some(&b1));
        let mut iter = hash.iter(b"test1");
        // rounded
        assert_eq!(iter.next(), Some(&b2));
        let mut iter = hash.iter(b"test1");
        assert_eq!(iter.next(), Some(&b2));
    }
    #[test]
    fn test_random() {
        let b1 = Backend::new("1.1.1.1:80").unwrap();
        let mut b2 = Backend::new("1.0.0.1:80").unwrap();
        b2.weight = 8; // 8x than the rest
        let b3 = Backend::new("1.0.0.255:80").unwrap();
        let backends = BTreeSet::from_iter([b1.clone(), b2.clone(), b3.clone()]);
        let hash: Arc<Weighted<Random>> = Arc::new(Weighted::build(&backends));
        let mut count = HashMap::new();
        count.insert(b1.clone(), 0);
        count.insert(b2.clone(), 0);
        count.insert(b3.clone(), 0);
        for _ in 0..10000 {
            let mut iter = hash.iter(b"test");
            *count.get_mut(iter.next().unwrap()).unwrap() += 1;
        }
        // b2 holds 8 of 10 weight slots; allow statistical slack around 8000
        let b2_count = *count.get(&b2).unwrap();
        assert!((7000..=9000).contains(&b2_count));
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-load-balancing/src/selection/algorithms.rs | pingora-load-balancing/src/selection/algorithms.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Implementation of algorithms for weighted selection
//!
//! All [std::hash::Hasher] + [Default] can be used directly as a selection algorithm.
use super::*;
use std::hash::Hasher;
use std::sync::atomic::{AtomicUsize, Ordering};
impl<H> SelectionAlgorithm for H
where
    H: Default + Hasher,
{
    fn new() -> Self {
        H::default()
    }
    /// Hash the key with a fresh hasher so the result depends only on `key`,
    /// not on anything previously written to `self`.
    fn next(&self, key: &[u8]) -> u64 {
        let mut hasher = H::default();
        hasher.write(key);
        hasher.finish()
    }
}
/// Round Robin selection
pub struct RoundRobin(AtomicUsize);
impl SelectionAlgorithm for RoundRobin {
    fn new() -> Self {
        RoundRobin(AtomicUsize::new(0))
    }
    /// Ignore the key and hand out monotonically increasing values.
    fn next(&self, _key: &[u8]) -> u64 {
        // Relaxed suffices: only the counter value itself matters, there is
        // no ordering relationship with other memory operations.
        let ticket = self.0.fetch_add(1, Ordering::Relaxed);
        ticket as u64
    }
}
/// Random selection
pub struct Random;
impl SelectionAlgorithm for Random {
    fn new() -> Self {
        Self
    }
    /// Ignore the key and return a uniformly random value.
    fn next(&self, _key: &[u8]) -> u64 {
        use rand::Rng;
        rand::thread_rng().gen()
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-load-balancing/src/selection/mod.rs | pingora-load-balancing/src/selection/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Backend selection interfaces and algorithms
pub mod algorithms;
pub mod consistent;
pub mod weighted;
use super::Backend;
use std::collections::{BTreeSet, HashSet};
use std::sync::Arc;
use weighted::Weighted;
/// [BackendSelection] is the interface to implement backend selection mechanisms.
pub trait BackendSelection: Sized {
    /// The [BackendIter] returned from iter() below.
    type Iter;
    /// The configuration type constructing [BackendSelection]
    type Config;
    /// Create a [BackendSelection] from a set of backends and the given configuration. The
    /// default implementation ignores the configuration and simply calls [Self::build]
    fn build_with_config(backends: &BTreeSet<Backend>, _config: &Self::Config) -> Self {
        Self::build(backends)
    }
    /// The function to create a [BackendSelection] implementation.
    fn build(backends: &BTreeSet<Backend>) -> Self;
    /// Select backends for a given key.
    ///
    /// A [BackendIter] should be returned. The first item in the iter is the first
    /// choice backend. The user should continue to iterate over it if the first backend
    /// cannot be used due to its health or other reasons.
    fn iter(self: &Arc<Self>, key: &[u8]) -> Self::Iter
    where
        Self::Iter: BackendIter;
}
/// An iterator to find the suitable backend
///
/// Similar to [Iterator] but allows self-referencing.
pub trait BackendIter {
    /// Return `Some(&Backend)` when there are more backends left to choose from.
    fn next(&mut self) -> Option<&Backend>;
}
/// [SelectionAlgorithm] is the interface to implement selection algorithms.
///
/// All [std::hash::Hasher] + [Default] can be used directly as a selection algorithm.
pub trait SelectionAlgorithm {
    /// Create a new implementation
    fn new() -> Self;
    /// Return the next index of backend. The caller should perform modulo to get
    /// the valid index of the backend. The value may be derived from `key` or
    /// from internal state (e.g. a counter), depending on the implementation.
    fn next(&self, key: &[u8]) -> u64;
}
/// [FNV](https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function) hashing
/// on weighted backends
pub type FNVHash = Weighted<fnv::FnvHasher>;
/// Alias of [`FNVHash`] for backwards compatibility until the next breaking change
/// (the name is a historical typo; prefer [`FNVHash`])
#[doc(hidden)]
pub type FVNHash = Weighted<fnv::FnvHasher>;
/// Random selection on weighted backends
pub type Random = Weighted<algorithms::Random>;
/// Round robin selection on weighted backends
pub type RoundRobin = Weighted<algorithms::RoundRobin>;
/// Consistent Ketama hashing on weighted backends
pub type Consistent = consistent::KetamaHashing;
// TODO: least conn
/// An iterator which wraps another iterator and yields unique items. It optionally takes a max
/// number of iterations if the wrapped iterator never returns.
pub struct UniqueIterator<I>
where
    I: BackendIter,
{
    /// The wrapped iterator to deduplicate.
    iter: I,
    /// Hash keys of the backends already yielded.
    seen: HashSet<u64>,
    /// Upper bound on how many items may be pulled from `iter`.
    max_iterations: usize,
    /// How many items have been pulled so far.
    steps: usize,
}
impl<I> UniqueIterator<I>
where
    I: BackendIter,
{
    /// Wrap a new iterator and specify the maximum number of times we want to iterate.
    pub fn new(iter: I, max_iterations: usize) -> Self {
        Self {
            iter,
            max_iterations,
            seen: HashSet::new(),
            steps: 0,
        }
    }

    /// Return the next not-yet-seen backend, or `None` when the wrapped
    /// iterator is exhausted or `max_iterations` items have been pulled.
    pub fn get_next(&mut self) -> Option<Backend> {
        while let Some(item) = self.iter.next() {
            if self.steps >= self.max_iterations {
                return None;
            }
            self.steps += 1;
            // `insert` returns true only for unseen keys, which avoids the
            // second hash lookup the previous `contains` + `insert` did.
            if self.seen.insert(item.hash_key()) {
                return Some(item.clone());
            }
        }
        None
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// A scripted [BackendIter] that replays a fixed sequence of backends.
    struct TestIter {
        seq: Vec<Backend>,
        idx: usize,
    }
    impl TestIter {
        fn new(input: &[&Backend]) -> Self {
            Self {
                seq: input.iter().cloned().cloned().collect(),
                idx: 0,
            }
        }
    }
    impl BackendIter for TestIter {
        fn next(&mut self) -> Option<&Backend> {
            let idx = self.idx;
            self.idx += 1;
            self.seq.get(idx)
        }
    }
    #[test]
    fn unique_iter_max_iterations_is_correct() {
        let b1 = Backend::new("1.1.1.1:80").unwrap();
        let b2 = Backend::new("1.0.0.1:80").unwrap();
        let b3 = Backend::new("1.0.0.255:80").unwrap();
        let items = [&b1, &b2, &b3];
        // max_iterations == len: all items come through, then None
        let mut all = UniqueIterator::new(TestIter::new(&items), 3);
        assert_eq!(all.get_next(), Some(b1.clone()));
        assert_eq!(all.get_next(), Some(b2.clone()));
        assert_eq!(all.get_next(), Some(b3.clone()));
        assert_eq!(all.get_next(), None);
        // max_iterations == 1: stops after one item even though more exist
        let mut stop = UniqueIterator::new(TestIter::new(&items), 1);
        assert_eq!(stop.get_next(), Some(b1));
        assert_eq!(stop.get_next(), None);
    }
    #[test]
    fn unique_iter_duplicate_items_are_filtered() {
        let b1 = Backend::new("1.1.1.1:80").unwrap();
        let b2 = Backend::new("1.0.0.1:80").unwrap();
        let b3 = Backend::new("1.0.0.255:80").unwrap();
        // consecutive duplicates must be collapsed to one occurrence each
        let items = [&b1, &b1, &b2, &b2, &b2, &b3];
        let mut uniq = UniqueIterator::new(TestIter::new(&items), 10);
        assert_eq!(uniq.get_next(), Some(b1));
        assert_eq!(uniq.get_next(), Some(b2));
        assert_eq!(uniq.get_next(), Some(b3));
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-proxy/src/lib.rs | pingora-proxy/src/lib.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # pingora-proxy
//!
//! Programmable HTTP proxy built on top of [pingora_core].
//!
//! # Features
//! - HTTP/1.x and HTTP/2 for both downstream and upstream
//! - Connection pooling
//! - TLSv1.3, mutual TLS, customizable CA
//! - Request/Response scanning, modification or rejection
//! - Dynamic upstream selection
//! - Configurable retry and failover
//! - Fully programmable and customizable at any stage of a HTTP request
//!
//! # How to use
//!
//! Users of this crate defines their proxy by implementing [ProxyHttp] trait, which contains the
//! callbacks to be invoked at each stage of a HTTP request.
//!
//! Then the service can be passed into [`http_proxy_service()`] for a [pingora_core::server::Server] to
//! run it.
//!
//! See `examples/load_balancer.rs` for a detailed example.
use async_trait::async_trait;
use bytes::Bytes;
use futures::future::BoxFuture;
use futures::future::FutureExt;
use http::{header, version::Version};
use log::{debug, error, trace, warn};
use once_cell::sync::Lazy;
use pingora_http::{RequestHeader, ResponseHeader};
use std::fmt::Debug;
use std::str;
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
use std::time::Duration;
use tokio::sync::{mpsc, Notify};
use tokio::time;
use pingora_cache::NoCacheReason;
use pingora_core::apps::{
HttpPersistentSettings, HttpServerApp, HttpServerOptions, ReusedHttpStream,
};
use pingora_core::connectors::http::custom;
use pingora_core::connectors::{http::Connector, ConnectorOptions};
use pingora_core::modules::http::compression::ResponseCompressionBuilder;
use pingora_core::modules::http::{HttpModuleCtx, HttpModules};
use pingora_core::protocols::http::client::HttpSession as ClientSession;
use pingora_core::protocols::http::custom::CustomMessageWrite;
use pingora_core::protocols::http::v1::client::HttpSession as HttpSessionV1;
use pingora_core::protocols::http::v2::server::H2Options;
use pingora_core::protocols::http::HttpTask;
use pingora_core::protocols::http::ServerSession as HttpSession;
use pingora_core::protocols::http::SERVER_NAME;
use pingora_core::protocols::Stream;
use pingora_core::protocols::{Digest, UniqueID};
use pingora_core::server::configuration::ServerConf;
use pingora_core::server::ShutdownWatch;
use pingora_core::upstreams::peer::{HttpPeer, Peer};
use pingora_error::{Error, ErrorSource, ErrorType::*, OrErr, Result};
// Buffer size for the internal HTTP task channels (presumably the `mpsc`
// channels used by the proxy_* modules — NOTE(review): confirm usage).
const TASK_BUFFER_SIZE: usize = 4;
mod proxy_cache;
mod proxy_common;
mod proxy_custom;
mod proxy_h1;
mod proxy_h2;
mod proxy_purge;
mod proxy_trait;
pub mod subrequest;
use subrequest::{BodyMode, Ctx as SubrequestCtx};
pub use proxy_cache::range_filter::{range_header_filter, MultiRangeInfo, RangeType};
pub use proxy_purge::PurgeStatus;
pub use proxy_trait::{FailToProxy, ProxyHttp};
/// Commonly used types and functions for building an HTTP proxy.
pub mod prelude {
    pub use crate::{http_proxy, http_proxy_service, ProxyHttp, Session};
}
/// Callback invoked to process a custom (non-HTTP) downstream session.
///
/// Receives the proxy, the accepted stream, and the shutdown watch; it
/// resolves to an `Option<Stream>` — presumably the stream handed back for
/// further processing, or `None` when the session is done
/// (NOTE(review): inferred from the signature; confirm against the callers
/// of `process_custom_session`).
pub type ProcessCustomSession<SV, C> = Arc<
    dyn Fn(Arc<HttpProxy<SV, C>>, Stream, &ShutdownWatch) -> BoxFuture<'static, Option<Stream>>
        + Send
        + Sync
        + Unpin
        + 'static,
>;
/// The concrete type that holds the user defined HTTP proxy.
///
/// Users don't need to interact with this object directly.
pub struct HttpProxy<SV, C = ()>
where
    C: custom::Connector, // Upstream custom connector
{
    inner: SV, // TODO: name it better than inner
    /// Connector used to establish/reuse upstream HTTP sessions.
    client_upstream: Connector<C>,
    /// Notified on shutdown so in-flight sessions stop accepting new requests.
    shutdown: Notify,
    // set on shutdown — NOTE(review): readers of this flag are outside this view
    shutdown_flag: Arc<AtomicBool>,
    /// Optional downstream HTTP server behavior overrides.
    pub server_options: Option<HttpServerOptions>,
    /// Optional HTTP/2 server settings for downstream connections.
    pub h2_options: Option<H2Options>,
    /// The modules (e.g. compression) applied to downstream requests/responses.
    pub downstream_modules: HttpModules,
    /// Max upstream retry attempts, taken from [ServerConf::max_retries].
    max_retries: usize,
    /// Optional handler for custom (non-HTTP) downstream sessions.
    process_custom_session: Option<ProcessCustomSession<SV, C>>,
}
impl<SV> HttpProxy<SV, ()> {
    /// Create a new [`HttpProxy`] with the given [`ProxyHttp`] implementation and [`ServerConf`].
    ///
    /// After construction, call [`HttpProxy::handle_init_modules()`] to initialize the
    /// downstream modules before processing any requests.
    ///
    /// Most callers should prefer [`http_proxy_service()`], which wraps the `HttpProxy`
    /// in a [`Service`]. Use this constructor when integrating `HttpProxy` into a custom
    /// accept loop (e.g. for SNI-based routing decisions before TLS termination).
    ///
    /// # Example
    ///
    /// ```ignore
    /// use pingora_proxy::HttpProxy;
    /// use std::sync::Arc;
    ///
    /// let mut proxy = HttpProxy::new(my_proxy_app, server_conf);
    /// proxy.handle_init_modules();
    /// let proxy = Arc::new(proxy);
    /// // Use proxy.process_new_http() in your custom accept loop
    /// ```
    pub fn new(inner: SV, conf: Arc<ServerConf>) -> Self {
        // Build the upstream connector from the server configuration first,
        // then assemble the proxy with every optional knob unset.
        let client_upstream =
            Connector::new(Some(ConnectorOptions::from_server_conf(&conf)));
        HttpProxy {
            inner,
            client_upstream,
            shutdown: Notify::new(),
            shutdown_flag: Arc::new(AtomicBool::new(false)),
            server_options: None,
            h2_options: None,
            downstream_modules: HttpModules::new(),
            max_retries: conf.max_retries,
            process_custom_session: None,
        }
    }
}
impl<SV, C> HttpProxy<SV, C>
where
C: custom::Connector,
{
fn new_custom(
inner: SV,
conf: Arc<ServerConf>,
connector: C,
on_custom: ProcessCustomSession<SV, C>,
) -> Self
where
SV: ProxyHttp + Send + Sync + 'static,
SV::CTX: Send + Sync,
{
let client_upstream =
Connector::new_custom(Some(ConnectorOptions::from_server_conf(&conf)), connector);
HttpProxy {
inner,
client_upstream,
shutdown: Notify::new(),
shutdown_flag: Arc::new(AtomicBool::new(false)),
server_options: None,
downstream_modules: HttpModules::new(),
max_retries: conf.max_retries,
process_custom_session: Some(on_custom),
h2_options: None,
}
}
    /// Initialize the downstream modules for this proxy.
    ///
    /// This method must be called after creating an [`HttpProxy`] with [`HttpProxy::new()`]
    /// and before processing any requests. It invokes [`ProxyHttp::init_downstream_modules()`]
    /// to set up any HTTP modules configured by the user's proxy implementation.
    ///
    /// Note: When using [`http_proxy_service()`] or [`http_proxy_service_with_name()`],
    /// this method is called automatically.
    pub fn handle_init_modules(&mut self)
    where
        SV: ProxyHttp,
    {
        // delegate to the user's ProxyHttp implementation
        self.inner
            .init_downstream_modules(&mut self.downstream_modules);
    }
    /// Read the next request header from the downstream session (phase 1).
    ///
    /// Returns the session back when a request header was successfully read,
    /// or `None` when the connection should be dropped: the service is
    /// shutting down, there are no more requests, or the read failed.
    async fn handle_new_request(
        &self,
        mut downstream_session: Box<HttpSession>,
    ) -> Option<Box<HttpSession>>
    where
        SV: ProxyHttp + Send + Sync,
        SV::CTX: Send + Sync,
    {
        // phase 1 read request header
        let res = tokio::select! {
            biased; // biased select is cheaper, and we don't want to drop already buffered requests
            res = downstream_session.read_request() => { res }
            _ = self.shutdown.notified() => {
                // service shutting down, dropping the connection to stop more req from coming in
                return None;
            }
        };
        match res {
            Ok(true) => {
                // TODO: check n==0
                debug!("Successfully get a new request");
            }
            Ok(false) => {
                // no request was read (e.g. downstream closed)
                return None; // TODO: close connection?
            }
            Err(mut e) => {
                // mark the error as downstream-caused before logging
                e.as_down();
                error!("Fail to proxy: {e}");
                if matches!(e.etype, InvalidHTTPHeader) {
                    // a malformed request deserves a 400 before closing
                    downstream_session
                        .respond_error(400)
                        .await
                        .unwrap_or_else(|e| {
                            error!("failed to send error response to downstream: {e}");
                        });
                } // otherwise the connection must be broken, no need to send anything
                downstream_session.shutdown().await;
                return None;
            }
        }
        trace!(
            "Request header: {:?}",
            downstream_session.req_header().as_ref()
        );
        Some(downstream_session)
    }
// return bool: server_session can be reused, and error if any
async fn proxy_to_upstream(
&self,
session: &mut Session,
ctx: &mut SV::CTX,
) -> (bool, Option<Box<Error>>)
where
SV: ProxyHttp + Send + Sync,
SV::CTX: Send + Sync,
{
let peer = match self.inner.upstream_peer(session, ctx).await {
Ok(p) => p,
Err(e) => return (false, Some(e)),
};
let client_session = self.client_upstream.get_http_session(&*peer).await;
match client_session {
Ok((client_session, client_reused)) => {
let (server_reused, error) = match client_session {
ClientSession::H1(mut h1) => {
let (server_reused, client_reuse, error) = self
.proxy_to_h1_upstream(session, &mut h1, client_reused, &peer, ctx)
.await;
if client_reuse {
let session = ClientSession::H1(h1);
self.client_upstream
.release_http_session(session, &*peer, peer.idle_timeout())
.await;
}
(server_reused, error)
}
ClientSession::H2(mut h2) => {
let (server_reused, mut error) = self
.proxy_to_h2_upstream(session, &mut h2, client_reused, &peer, ctx)
.await;
let session = ClientSession::H2(h2);
self.client_upstream
.release_http_session(session, &*peer, peer.idle_timeout())
.await;
if let Some(e) = error.as_mut() {
// try to downgrade if A. origin says so or B. origin sends an invalid
// response, which usually means origin h2 is not production ready
if matches!(e.etype, H2Downgrade | InvalidH2) {
if peer
.get_alpn()
.is_none_or(|alpn| alpn.get_min_http_version() == 1)
{
// Add the peer to prefer h1 so that all following requests
// will use h1
self.client_upstream.prefer_h1(&*peer);
} else {
// the peer doesn't allow downgrading to h1 (e.g. gRPC)
e.retry = false.into();
}
}
}
(server_reused, error)
}
ClientSession::Custom(mut c) => {
let (server_reused, error) = self
.proxy_to_custom_upstream(session, &mut c, client_reused, &peer, ctx)
.await;
let session = ClientSession::Custom(c);
self.client_upstream
.release_http_session(session, &*peer, peer.idle_timeout())
.await;
(server_reused, error)
}
};
(
server_reused,
error.map(|e| {
self.inner
.error_while_proxy(&peer, session, e, ctx, client_reused)
}),
)
}
Err(mut e) => {
e.as_up();
let new_err = self.inner.fail_to_connect(session, &peer, ctx, e);
(false, Some(new_err.into_up()))
}
}
}
async fn upstream_filter(
&self,
session: &mut Session,
task: &mut HttpTask,
ctx: &mut SV::CTX,
) -> Result<Option<Duration>>
where
SV: ProxyHttp + Send + Sync,
SV::CTX: Send + Sync,
{
let duration = match task {
HttpTask::Header(header, _eos) => {
self.inner
.upstream_response_filter(session, header, ctx)
.await?;
None
}
HttpTask::Body(data, eos) => self
.inner
.upstream_response_body_filter(session, data, *eos, ctx)?,
HttpTask::Trailer(Some(trailers)) => {
self.inner
.upstream_response_trailer_filter(session, trailers, ctx)?;
None
}
_ => {
// task does not support a filter
None
}
};
Ok(duration)
}
async fn finish(
&self,
mut session: Session,
ctx: &mut SV::CTX,
reuse: bool,
error: Option<&Error>,
) -> Option<ReusedHttpStream>
where
SV: ProxyHttp + Send + Sync,
SV::CTX: Send + Sync,
{
self.inner.logging(&mut session, error, ctx).await;
if reuse {
// TODO: log error
let persistent_settings = HttpPersistentSettings::for_session(&session);
session
.downstream_session
.finish()
.await
.ok()
.flatten()
.map(|s| ReusedHttpStream::new(s, Some(persistent_settings)))
} else {
None
}
}
fn cleanup_sub_req(&self, session: &mut Session) {
if let Some(ctx) = session.subrequest_ctx.as_mut() {
ctx.release_write_lock();
}
}
}
use pingora_cache::HttpCache;
use pingora_core::protocols::http::compression::ResponseCompressionCtx;
/// The established HTTP session
///
/// This object is what users interact with in order to access the request itself or change the proxy
/// behavior.
pub struct Session {
    /// the HTTP session to downstream (the client)
    pub downstream_session: Box<HttpSession>,
    /// The interface to control HTTP caching
    pub cache: HttpCache,
    /// (de)compress responses coming into the proxy (from upstream)
    pub upstream_compression: ResponseCompressionCtx,
    /// ignore downstream range (skip downstream range filters)
    pub ignore_downstream_range: bool,
    /// Were the upstream request headers modified?
    pub upstream_headers_mutated_for_cache: bool,
    /// The context from parent request, if this is a subrequest.
    pub subrequest_ctx: Option<Box<SubrequestCtx>>,
    /// Handle to allow spawning subrequests, assigned by the `Subrequest` app logic.
    pub subrequest_spawner: Option<SubrequestSpawner>,
    /// Per-session context for the downstream HTTP filter modules.
    pub downstream_modules_ctx: HttpModuleCtx,
    /// Upstream response body bytes received (payload only). Set by proxy layer.
    /// TODO: move this into an upstream session digest for future fields.
    upstream_body_bytes_received: usize,
    /// Flag that is set when the shutdown process has begun.
    shutdown_flag: Arc<AtomicBool>,
}
impl Session {
    /// Construct a [`Session`]: caching starts disabled, compression starts
    /// disabled in both directions, and module contexts are built from
    /// `downstream_modules`.
    fn new(
        downstream_session: impl Into<Box<HttpSession>>,
        downstream_modules: &HttpModules,
        shutdown_flag: Arc<AtomicBool>,
    ) -> Self {
        Session {
            downstream_session: downstream_session.into(),
            cache: HttpCache::new(),
            // disable both upstream and downstream compression
            upstream_compression: ResponseCompressionCtx::new(0, false, false),
            ignore_downstream_range: false,
            upstream_headers_mutated_for_cache: false,
            subrequest_ctx: None,
            subrequest_spawner: None, // optionally set later on
            downstream_modules_ctx: downstream_modules.build_ctx(),
            upstream_body_bytes_received: 0,
            shutdown_flag,
        }
    }
    /// Create a new [Session] from the given [Stream]
    ///
    /// This function is mostly used for testing and mocking, given the downstream modules and
    /// shutdown flags will never be set.
    pub fn new_h1(stream: Stream) -> Self {
        let modules = HttpModules::new();
        Self::new(
            Box::new(HttpSession::new_http1(stream)),
            &modules,
            Arc::new(AtomicBool::new(false)),
        )
    }
    /// Create a new [Session] from the given [Stream] with modules
    ///
    /// This function is mostly used for testing and mocking, given the shutdown flag will never be
    /// set.
    pub fn new_h1_with_modules(stream: Stream, downstream_modules: &HttpModules) -> Self {
        Self::new(
            Box::new(HttpSession::new_http1(stream)),
            downstream_modules,
            Arc::new(AtomicBool::new(false)),
        )
    }
    /// Mutably borrow the downstream [`HttpSession`].
    pub fn as_downstream_mut(&mut self) -> &mut HttpSession {
        &mut self.downstream_session
    }
    /// Borrow the downstream [`HttpSession`].
    pub fn as_downstream(&self) -> &HttpSession {
        &self.downstream_session
    }
    /// Write HTTP response with the given error code to the downstream.
    pub async fn respond_error(&mut self, error: u16) -> Result<()> {
        self.as_downstream_mut().respond_error(error).await
    }
    /// Write HTTP response with the given error code to the downstream with a body.
    pub async fn respond_error_with_body(&mut self, error: u16, body: Bytes) -> Result<()> {
        self.as_downstream_mut()
            .respond_error_with_body(error, body)
            .await
    }
    /// Write the given HTTP response header to the downstream
    ///
    /// Different from directly calling [HttpSession::write_response_header], this function also
    /// invokes the filter modules.
    pub async fn write_response_header(
        &mut self,
        mut resp: Box<ResponseHeader>,
        end_of_stream: bool,
    ) -> Result<()> {
        self.downstream_modules_ctx
            .response_header_filter(&mut resp, end_of_stream)
            .await?;
        self.downstream_session.write_response_header(resp).await
    }
    /// Similar to `write_response_header()`, this fn will clone the `resp` internally
    pub async fn write_response_header_ref(
        &mut self,
        resp: &ResponseHeader,
        end_of_stream: bool,
    ) -> Result<(), Box<Error>> {
        self.write_response_header(Box::new(resp.clone()), end_of_stream)
            .await
    }
    /// Write the given HTTP response body chunk to the downstream
    ///
    /// Different from directly calling [HttpSession::write_response_body], this function also
    /// invokes the filter modules.
    pub async fn write_response_body(
        &mut self,
        mut body: Option<Bytes>,
        end_of_stream: bool,
    ) -> Result<()> {
        self.downstream_modules_ctx
            .response_body_filter(&mut body, end_of_stream)?;
        // a filter may consume the chunk entirely; nothing to write mid-stream
        if body.is_none() && !end_of_stream {
            return Ok(());
        }
        let data = body.unwrap_or_default();
        self.downstream_session
            .write_response_body(data, end_of_stream)
            .await
    }
    /// Run the downstream filter modules over each task, then write the batch
    /// to the downstream session. Returns whether the response is finished.
    pub async fn write_response_tasks(&mut self, mut tasks: Vec<HttpTask>) -> Result<bool> {
        for task in tasks.iter_mut() {
            match task {
                HttpTask::Header(resp, end) => {
                    self.downstream_modules_ctx
                        .response_header_filter(resp, *end)
                        .await?;
                }
                HttpTask::Body(data, end) => {
                    self.downstream_modules_ctx
                        .response_body_filter(data, *end)?;
                }
                HttpTask::Trailer(trailers) => {
                    if let Some(buf) = self
                        .downstream_modules_ctx
                        .response_trailer_filter(trailers)?
                    {
                        // Write the trailers into the body if the filter
                        // returns a buffer.
                        //
                        // Note, this will not work if end of stream has already
                        // been seen or we've written content-length bytes.
                        *task = HttpTask::Body(Some(buf), true);
                    }
                }
                HttpTask::Done => {
                    // `Done` can be sent in certain response paths to mark end
                    // of response if not already done via trailers or body with
                    // end flag set.
                    // If the filter returns body bytes on Done,
                    // write them into the response.
                    //
                    // Note, this will not work if end of stream has already
                    // been seen or we've written content-length bytes.
                    if let Some(buf) = self.downstream_modules_ctx.response_done_filter()? {
                        *task = HttpTask::Body(Some(buf), true);
                    }
                }
                _ => { /* Failed */ }
            }
        }
        self.downstream_session.response_duplex_vec(tasks).await
    }
    /// Mark the upstream headers as modified by caching. This should lead to range filters being
    /// skipped when responding to the downstream.
    pub fn mark_upstream_headers_mutated_for_cache(&mut self) {
        self.upstream_headers_mutated_for_cache = true;
    }
    /// Check whether the upstream headers were marked as mutated during the request.
    pub fn upstream_headers_mutated_for_cache(&self) -> bool {
        self.upstream_headers_mutated_for_cache
    }
    /// Get the total upstream response body bytes received (payload only) recorded by the proxy layer.
    pub fn upstream_body_bytes_received(&self) -> usize {
        self.upstream_body_bytes_received
    }
    /// Set the total upstream response body bytes received (payload only). Intended for internal use by proxy layer.
    pub(crate) fn set_upstream_body_bytes_received(&mut self, n: usize) {
        self.upstream_body_bytes_received = n;
    }
    /// Is the proxy process in the process of shutting down (e.g. due to graceful upgrade)?
    pub fn is_process_shutting_down(&self) -> bool {
        self.shutdown_flag.load(Ordering::Acquire)
    }
    /// Take the custom message reader from a custom downstream session.
    ///
    /// Returns `Ok(None)` when the downstream is not a custom session, and an
    /// error when the session is custom but the reader was already taken.
    pub fn downstream_custom_message(
        &mut self,
    ) -> Result<
        Option<Box<dyn futures::Stream<Item = Result<Bytes>> + Unpin + Send + Sync + 'static>>,
    > {
        if let Some(custom_session) = self.downstream_session.as_custom_mut() {
            custom_session
                .take_custom_message_reader()
                .map(Some)
                // NOTE(review): `ok_or` builds this error eagerly even on the
                // success path; `ok_or_else` would defer the allocation.
                .ok_or(Error::explain(
                    ReadError,
                    "can't extract custom reader from downstream",
                ))
        } else {
            Ok(None)
        }
    }
}
impl AsRef<HttpSession> for Session {
    /// Borrow the wrapped downstream [`HttpSession`].
    fn as_ref(&self) -> &HttpSession {
        &*self.downstream_session
    }
}
impl AsMut<HttpSession> for Session {
    /// Mutably borrow the wrapped downstream [`HttpSession`].
    fn as_mut(&mut self) -> &mut HttpSession {
        &mut *self.downstream_session
    }
}
use std::ops::{Deref, DerefMut};
impl Deref for Session {
    type Target = HttpSession;

    /// Delegate to the downstream session so a `Session` can be used wherever
    /// an `HttpSession` reference is expected.
    fn deref(&self) -> &Self::Target {
        self.downstream_session.as_ref()
    }
}
impl DerefMut for Session {
    /// Mutable counterpart of [`Deref::deref`].
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.downstream_session.as_mut()
    }
}
// Canned, generic HTTP 502 response used when proxy_upstream_filter declines
// to connect to upstream and the hook did not write its own response.
static BAD_GATEWAY: Lazy<ResponseHeader> = Lazy::new(|| {
    // Capacity of 3: exactly the headers inserted below.
    let mut bad_gateway = ResponseHeader::build(http::StatusCode::BAD_GATEWAY, Some(3)).unwrap();
    // Insertion order is preserved on the wire: Server, Content-Length, Cache-Control.
    bad_gateway
        .insert_header(header::SERVER, &SERVER_NAME[..])
        .unwrap();
    bad_gateway
        .insert_header(header::CONTENT_LENGTH, 0)
        .unwrap();
    bad_gateway
        .insert_header(header::CACHE_CONTROL, "private, no-store")
        .unwrap();
    bad_gateway
});
impl<SV, C> HttpProxy<SV, C>
where
C: custom::Connector,
{
    /// Drive a single downstream request through the full proxy pipeline:
    /// early filter -> downstream module filters -> request filter -> cache
    /// lookup -> proxy_upstream_filter -> upstream proxying with retries ->
    /// serve-stale fallback -> error handling -> finish/keep-alive.
    async fn process_request(
        self: &Arc<Self>,
        mut session: Session,
        mut ctx: <SV as ProxyHttp>::CTX,
    ) -> Option<ReusedHttpStream>
    where
        SV: ProxyHttp + Send + Sync + 'static,
        <SV as ProxyHttp>::CTX: Send + Sync,
    {
        if let Err(e) = self
            .inner
            .early_request_filter(&mut session, &mut ctx)
            .await
        {
            return self
                .handle_error(session, &mut ctx, e, "Fail to early filter request:")
                .await;
        }
        if self.inner.allow_spawning_subrequest(&session, &ctx) {
            session.subrequest_spawner = Some(SubrequestSpawner::new(self.clone()));
        }
        let req = session.downstream_session.req_header_mut();
        // Built-in downstream request filters go first
        if let Err(e) = session
            .downstream_modules_ctx
            .request_header_filter(req)
            .await
        {
            return self
                .handle_error(
                    session,
                    &mut ctx,
                    e,
                    "Failed in downstream modules request filter:",
                )
                .await;
        }
        match self.inner.request_filter(&mut session, &mut ctx).await {
            Ok(response_sent) => {
                if response_sent {
                    // the filter already answered the request: log, release the
                    // subrequest lock, and try to keep the connection alive
                    // TODO: log error
                    self.inner.logging(&mut session, None, &mut ctx).await;
                    self.cleanup_sub_req(&mut session);
                    let persistent_settings = HttpPersistentSettings::for_session(&session);
                    return session
                        .downstream_session
                        .finish()
                        .await
                        .ok()
                        .flatten()
                        .map(|s| ReusedHttpStream::new(s, Some(persistent_settings)));
                }
                /* else continue */
            }
            Err(e) => {
                return self
                    .handle_error(session, &mut ctx, e, "Fail to filter request:")
                    .await;
            }
        }
        if let Some((reuse, err)) = self.proxy_cache(&mut session, &mut ctx).await {
            // cache hit
            return self.finish(session, &mut ctx, reuse, err.as_deref()).await;
        }
        // either uncacheable, or cache miss
        // there should not be a write lock in the sub req ctx after this point
        self.cleanup_sub_req(&mut session);
        // decide if the request is allowed to go to upstream
        match self
            .inner
            .proxy_upstream_filter(&mut session, &mut ctx)
            .await
        {
            Ok(proxy_to_upstream) => {
                if !proxy_to_upstream {
                    // The hook can choose to write its own response, but if it doesn't, we respond
                    // with a generic 502
                    if session.cache.enabled() {
                        // drop the cache lock that this request may be holding onto
                        session.cache.disable(NoCacheReason::DeclinedToUpstream);
                    }
                    if session.response_written().is_none() {
                        match session.write_response_header_ref(&BAD_GATEWAY, true).await {
                            Ok(()) => {}
                            Err(e) => {
                                return self
                                    .handle_error(
                                        session,
                                        &mut ctx,
                                        e,
                                        "Error responding with Bad Gateway:",
                                    )
                                    .await;
                            }
                        }
                    }
                    return self.finish(session, &mut ctx, true, None).await;
                }
                /* else continue */
            }
            Err(e) => {
                if session.cache.enabled() {
                    session.cache.disable(NoCacheReason::InternalError);
                }
                return self
                    .handle_error(
                        session,
                        &mut ctx,
                        e,
                        "Error deciding if we should proxy to upstream:",
                    )
                    .await;
            }
        }
        // proxy to upstream, retrying while the error is marked retryable and
        // the attempt budget (max_retries) is not exhausted
        let mut retries: usize = 0;
        let mut server_reuse = false;
        let mut proxy_error: Option<Box<Error>> = None;
        while retries < self.max_retries {
            retries += 1;
            let (reuse, e) = self.proxy_to_upstream(&mut session, &mut ctx).await;
            server_reuse = reuse;
            match e {
                Some(error) => {
                    let retry = error.retry();
                    proxy_error = Some(error);
                    if !retry {
                        break;
                    }
                    // only log error that will be retried here, the final error will be logged below
                    warn!(
                        "Fail to proxy: {}, tries: {}, retry: {}, {}",
                        proxy_error.as_ref().unwrap(),
                        retries,
                        retry,
                        self.inner.request_summary(&session, &ctx)
                    );
                }
                None => {
                    proxy_error = None;
                    break;
                }
            };
        }
        // serve stale if error
        // Check both error and cache before calling the function because await is not cheap
        // allow unwrap until if let chains
        #[allow(clippy::unnecessary_unwrap)]
        let serve_stale_result = if proxy_error.is_some() && session.cache.can_serve_stale_error() {
            self.handle_stale_if_error(&mut session, &mut ctx, proxy_error.as_ref().unwrap())
                .await
        } else {
            None
        };
        let final_error = if let Some((reuse, stale_cache_error)) = serve_stale_result {
            // don't reuse server conn if serve stale polluted it
            server_reuse = server_reuse && reuse;
            stale_cache_error
        } else {
            proxy_error
        };
        if let Some(e) = final_error.as_ref() {
            // If we have errored and are still holding a cache lock, release it.
            if session.cache.enabled() {
                let reason = if *e.esource() == ErrorSource::Upstream {
                    NoCacheReason::UpstreamError
                } else {
                    NoCacheReason::InternalError
                };
                session.cache.disable(reason);
            }
            let res = self.inner.fail_to_proxy(&mut session, e, &mut ctx).await;
            // final error will have > 0 status unless downstream connection is dead
            if !self.inner.suppress_error_log(&session, &ctx, e) {
                error!(
                    "Fail to proxy: {}, status: {}, tries: {}, retry: {}, {}",
                    final_error.as_ref().unwrap(),
                    res.error_code,
                    retries,
                    false, // we never retry here
                    self.inner.request_summary(&session, &ctx),
                );
            }
        }
        // logging() will be called in finish()
        self.finish(session, &mut ctx, server_reuse, final_error.as_deref())
            .await
    }
async fn handle_error(
&self,
mut session: Session,
ctx: &mut <SV as ProxyHttp>::CTX,
e: Box<Error>,
context: &str,
) -> Option<ReusedHttpStream>
where
SV: ProxyHttp + Send + Sync + 'static,
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | true |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-proxy/src/proxy_h1.rs | pingora-proxy/src/proxy_h1.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use futures::future::OptionFuture;
use futures::StreamExt;
use super::*;
use crate::proxy_cache::{range_filter::RangeBodyFilter, ServeFromCache};
use crate::proxy_common::*;
use pingora_cache::CachePhase;
use pingora_core::protocols::http::custom::CUSTOM_MESSAGE_QUEUE_SIZE;
impl<SV, C> HttpProxy<SV, C>
where
C: custom::Connector,
{
    /// Proxy one downstream request over one HTTP/1.x upstream session.
    ///
    /// Sends the (possibly H2->H1 converted) request header, then runs the
    /// downstream and upstream halves of the duplex concurrently.
    ///
    /// Returns `(reuse_server, reuse_client, error)`.
    pub(crate) async fn proxy_1to1(
        &self,
        session: &mut Session,
        client_session: &mut HttpSessionV1,
        peer: &HttpPeer,
        ctx: &mut SV::CTX,
    ) -> (bool, bool, Option<Box<Error>>)
    where
        SV: ProxyHttp + Send + Sync,
        SV::CTX: Send + Sync,
    {
        client_session.read_timeout = peer.options.read_timeout;
        client_session.write_timeout = peer.options.write_timeout;
        // phase 2 send to upstream
        let mut req = session.req_header().clone();
        // Convert HTTP2 headers to H1
        if req.version == Version::HTTP_2 {
            req.set_version(Version::HTTP_11);
            // if client has body but has no content length, add chunked encoding
            // https://datatracker.ietf.org/doc/html/rfc9112#name-message-body
            // "The presence of a message body in a request is signaled by a Content-Length or Transfer-Encoding header field."
            if !session.is_body_empty() && session.get_header(header::CONTENT_LENGTH).is_none() {
                req.insert_header(header::TRANSFER_ENCODING, "chunked")
                    .unwrap();
            }
            if session.get_header(header::HOST).is_none() {
                // H2 is required to set :authority, but no necessarily header
                // most H1 server expect host header, so convert
                let host = req.uri.authority().map_or("", |a| a.as_str()).to_owned();
                req.insert_header(header::HOST, host).unwrap();
            }
            // TODO: Add keepalive header for connection reuse, but this is not required per RFC
        }
        if session.cache.enabled() {
            pingora_cache::filters::upstream::request_filter(
                &mut req,
                session.cache.maybe_cache_meta(),
            );
            session.mark_upstream_headers_mutated_for_cache();
        }
        match self
            .inner
            .upstream_request_filter(session, &mut req, ctx)
            .await
        {
            Ok(_) => { /* continue */ }
            Err(e) => {
                // request never sent: the client session stays clean and reusable
                return (false, true, Some(e));
            }
        }
        session.upstream_compression.request_filter(&req);
        debug!("Sending header to upstream {:?}", req);
        match client_session.write_request_header(Box::new(req)).await {
            Ok(_) => { /* Continue */ }
            Err(e) => {
                return (false, false, Some(e.into_up()));
            }
        }
        let mut downstream_custom_message_writer = session
            .downstream_session
            .as_custom_mut()
            .and_then(|c| c.take_custom_message_writer());
        let (tx_upstream, rx_upstream) = mpsc::channel::<HttpTask>(TASK_BUFFER_SIZE);
        let (tx_downstream, rx_downstream) = mpsc::channel::<HttpTask>(TASK_BUFFER_SIZE);
        session.as_mut().enable_retry_buffering();
        // start bi-directional streaming
        let ret = tokio::try_join!(
            self.proxy_handle_downstream(
                session,
                tx_downstream,
                rx_upstream,
                ctx,
                &mut downstream_custom_message_writer
            ),
            self.proxy_handle_upstream(client_session, tx_upstream, rx_downstream),
        );
        if let Some(custom_session) = session.downstream_session.as_custom_mut() {
            // NOTE(review): this `expect` panics if the custom session existed but
            // `take_custom_message_writer()` returned None above — confirm the
            // writer is always present for custom sessions.
            match custom_session.restore_custom_message_writer(
                downstream_custom_message_writer.expect("downstream be present"),
            ) {
                Ok(_) => { /* continue */ }
                Err(e) => {
                    return (false, false, Some(e));
                }
            }
        }
        match ret {
            Ok((downstream_can_reuse, _upstream)) => (downstream_can_reuse, true, None),
            Err(e) => (false, false, Some(e)),
        }
    }
pub(crate) async fn proxy_to_h1_upstream(
&self,
session: &mut Session,
client_session: &mut HttpSessionV1,
reused: bool,
peer: &HttpPeer,
ctx: &mut SV::CTX,
) -> (bool, bool, Option<Box<Error>>)
// (reuse_server, reuse_client, error)
where
SV: ProxyHttp + Send + Sync,
SV::CTX: Send + Sync,
{
#[cfg(windows)]
let raw = client_session.id() as std::os::windows::io::RawSocket;
#[cfg(unix)]
let raw = client_session.id();
if let Err(e) = self
.inner
.connected_to_upstream(
session,
reused,
peer,
raw,
Some(client_session.digest()),
ctx,
)
.await
{
return (false, false, Some(e));
}
let (server_session_reuse, client_session_reuse, error) =
self.proxy_1to1(session, client_session, peer, ctx).await;
// Record upstream response body bytes received (payload only) for logging consumers.
let upstream_bytes_total = client_session.body_bytes_received();
session.set_upstream_body_bytes_received(upstream_bytes_total);
(server_session_reuse, client_session_reuse, error)
}
    /// Upstream half of the H1 duplex: forwards response tasks read from the
    /// upstream connection into `tx`, while writing request body chunks
    /// received on `rx` to the upstream.
    async fn proxy_handle_upstream(
        &self,
        client_session: &mut HttpSessionV1,
        tx: mpsc::Sender<HttpTask>,
        mut rx: mpsc::Receiver<HttpTask>,
    ) -> Result<()>
    where
        SV: ProxyHttp + Send + Sync,
        SV::CTX: Send + Sync,
    {
        let mut request_done = false;
        let mut response_done = false;
        // a body-send error is held here so a later read error reports it instead
        let mut send_error = None;
        /* duplex mode, wait for either to complete */
        while !request_done || !response_done {
            tokio::select! {
                res = client_session.read_response_task(), if !response_done => {
                    match res {
                        Ok(task) => {
                            response_done = task.is_end();
                            let type_str = task.type_str();
                            let result = tx.send(task)
                                .await.or_err_with(
                                    InternalError,
                                    || format!("Failed to send upstream task {type_str}{} to pipe",
                                        if response_done { " (end)" } else {""})
                                );
                            // If the request is upgraded, the downstream pipe can early exit
                            // when the downstream connection is closed.
                            // In that case, this function should ignore that the pipe is closed.
                            // So that this function could read the rest events from rx including
                            // the closure, then exit.
                            if result.is_err() && !client_session.is_upgrade_req() {
                                return result;
                            }
                        },
                        Err(e) => {
                            // Push the error to downstream and then quit
                            // Don't care if send fails: downstream already gone
                            let _ = tx.send(HttpTask::Failed(send_error.unwrap_or(e).into_up())).await;
                            // Downstream should consume all remaining data and handle the error
                            return Ok(())
                        }
                    }
                },
                body = rx.recv(), if !request_done => {
                    match send_body_to1(client_session, body).await {
                        Ok(send_done) => {
                            request_done = send_done;
                            // An upgraded request is terminated when either side is done
                            if request_done && client_session.is_upgrade_req() {
                                response_done = true;
                            }
                        },
                        Err(e) => {
                            warn!("send error, draining read buf: {e}");
                            request_done = true;
                            send_error = Some(e);
                            continue
                        }
                    }
                },
                else => {
                    // this shouldn't be reached as the while loop would already exit
                    break;
                }
            }
        }
        Ok(())
    }
    // todo use this function to replace bidirection_1to2()
    // returns whether this server (downstream) session can be reused
    //
    // Downstream half of the H1 duplex: reads the request body from downstream
    // into `tx`, applies response filters to tasks received on `rx` and writes
    // them to downstream, optionally serving from / writing to cache, and
    // forwarding custom messages in both directions for custom sessions.
    async fn proxy_handle_downstream(
        &self,
        session: &mut Session,
        tx: mpsc::Sender<HttpTask>,
        mut rx: mpsc::Receiver<HttpTask>,
        ctx: &mut SV::CTX,
        downstream_custom_message_writer: &mut Option<Box<dyn CustomMessageWrite>>,
    ) -> Result<bool>
    where
        SV: ProxyHttp + Send + Sync,
        SV::CTX: Send + Sync,
    {
        // setup custom message forwarding, if downstream supports it
        let (
            mut downstream_custom_read,
            mut downstream_custom_write,
            downstream_custom_message_custom_forwarding,
            mut downstream_custom_message_inject_rx,
            mut downstream_custom_message_reader,
        ) = if downstream_custom_message_writer.is_some() {
            let reader = session.downstream_custom_message()?;
            let (inject_tx, inject_rx) = mpsc::channel::<Bytes>(CUSTOM_MESSAGE_QUEUE_SIZE);
            (true, true, Some(inject_tx), Some(inject_rx), reader)
        } else {
            (false, false, None, None, None)
        };
        if let Some(custom_forwarding) = downstream_custom_message_custom_forwarding {
            self.inner
                .custom_forwarding(session, ctx, None, custom_forwarding)
                .await?;
        }
        let mut downstream_state = DownstreamStateMachine::new(session.as_mut().is_body_done());
        let buffer = session.as_ref().get_retry_buffer();
        // retry, send buffer if it exists or body empty
        if buffer.is_some() || session.as_mut().is_body_empty() {
            let send_permit = tx
                .reserve()
                .await
                .or_err(InternalError, "reserving body pipe")?;
            self.send_body_to_pipe(
                session,
                buffer,
                downstream_state.is_done(),
                send_permit,
                ctx,
            )
            .await?;
        }
        let mut response_state = ResponseStateMachine::new();
        // these two below can be wrapped into an internal ctx
        // use cache when upstream revalidates (or TODO: error)
        let mut serve_from_cache = proxy_cache::ServeFromCache::new();
        let mut range_body_filter = proxy_cache::range_filter::RangeBodyFilter::new();
        /* duplex mode without caching
         * Read body from downstream while reading response from upstream
         * If response is done, only read body from downstream
         * If request is done, read response from upstream while idling downstream (to close quickly)
         * If both are done, quit the loop
         *
         * With caching + but without partial read support
         * Similar to above, cache admission write happen when the data is write to downstream
         *
         * With caching + partial read support
         * A. Read upstream response and write to cache
         * B. Read data from cache and send to downstream
         * If B fails (usually downstream close), continue A.
         * If A fails, exit with error.
         * If both are done, quit the loop
         * Usually there is no request body to read for cacheable request
         */
        while !downstream_state.is_done()
            || !response_state.is_done()
            || downstream_custom_read && !downstream_state.is_errored()
            || downstream_custom_write
        {
            // reserve tx capacity ahead to avoid deadlock, see below
            let send_permit = tx
                .try_reserve()
                .or_err(InternalError, "try_reserve() body pipe for upstream");
            // Use optional futures to allow using optional channels in select branches
            let custom_inject_rx_recv: OptionFuture<_> = downstream_custom_message_inject_rx
                .as_mut()
                .map(|rx| rx.recv())
                .into();
            let custom_reader_next: OptionFuture<_> = downstream_custom_message_reader
                .as_mut()
                .map(|reader| reader.next())
                .into();
            // partial read support, this check will also be false if cache is disabled.
            let support_cache_partial_read =
                session.cache.support_streaming_partial_write() == Some(true);
            tokio::select! {
                // only try to send to pipe if there is capacity to avoid deadlock
                // Otherwise deadlock could happen if both upstream and downstream are blocked
                // on sending to their corresponding pipes which are both full.
                body = session.downstream_session.read_body_or_idle(downstream_state.is_done()),
                    if downstream_state.can_poll() && send_permit.is_ok() => {
                    debug!("downstream event");
                    let body = match body {
                        Ok(b) => b,
                        Err(e) => {
                            let wait_for_cache_fill = (!serve_from_cache.is_on() && support_cache_partial_read)
                                || serve_from_cache.is_miss();
                            if wait_for_cache_fill {
                                // ignore downstream error so that upstream can continue to write cache
                                downstream_state.to_errored();
                                warn!(
                                    "Downstream Error ignored during caching: {}, {}",
                                    e,
                                    self.inner.request_summary(session, ctx)
                                );
                                continue;
                            } else {
                                return Err(e.into_down());
                            }
                        }
                    };
                    // If the request is websocket, `None` body means the request is closed.
                    // Set the response to be done as well so that the request completes normally.
                    if body.is_none() && session.is_upgrade_req() {
                        response_state.maybe_set_upstream_done(true);
                    }
                    // TODO: consider just drain this if serve_from_cache is set
                    let is_body_done = session.is_body_done();
                    let request_done = self.send_body_to_pipe(
                        session,
                        body,
                        is_body_done,
                        send_permit.unwrap(), // safe because we checked is_ok()
                        ctx,
                    )
                    .await?;
                    downstream_state.maybe_finished(request_done);
                },
                _ = tx.reserve(), if downstream_state.is_reading() && send_permit.is_err() => {
                    // If tx is closed, the upstream has already finished its job.
                    downstream_state.maybe_finished(tx.is_closed());
                    debug!("waiting for permit {send_permit:?}, upstream closed {}", tx.is_closed());
                    /* No permit, wait on more capacity to avoid starving.
                     * Otherwise this select only blocks on rx, which might send no data
                     * before the entire body is uploaded.
                     * once more capacity arrives we just loop back
                     */
                },
                task = rx.recv(), if !response_state.upstream_done() => {
                    debug!("upstream event: {:?}", task);
                    if let Some(t) = task {
                        if serve_from_cache.should_discard_upstream() {
                            // just drain, do we need to do anything else?
                            continue;
                        }
                        // pull as many tasks as we can
                        let mut tasks = Vec::with_capacity(TASK_BUFFER_SIZE);
                        tasks.push(t);
                        // tokio::task::unconstrained because now_or_never may yield None when the future is ready
                        while let Some(maybe_task) = tokio::task::unconstrained(rx.recv()).now_or_never() {
                            debug!("upstream event now: {:?}", maybe_task);
                            if let Some(t) = maybe_task {
                                tasks.push(t);
                            } else {
                                break; // upstream closed
                            }
                        }
                        /* run filters before sending to downstream */
                        let mut filtered_tasks = Vec::with_capacity(TASK_BUFFER_SIZE);
                        for mut t in tasks {
                            if self.revalidate_or_stale(session, &mut t, ctx).await {
                                serve_from_cache.enable();
                                response_state.enable_cached_response();
                                // skip downstream filtering entirely as the 304 will not be sent
                                break;
                            }
                            session.upstream_compression.response_filter(&mut t);
                            let task = self.h1_response_filter(session, t, ctx,
                                &mut serve_from_cache,
                                &mut range_body_filter, false).await?;
                            if serve_from_cache.is_miss_header() {
                                response_state.enable_cached_response();
                            }
                            // check error and abort
                            // otherwise the error is surfaced via write_response_tasks()
                            if !serve_from_cache.should_send_to_downstream() {
                                if let HttpTask::Failed(e) = task {
                                    return Err(e);
                                }
                            }
                            filtered_tasks.push(task);
                        }
                        if !serve_from_cache.should_send_to_downstream() {
                            // TODO: need to derive response_done from filtered_tasks in case downstream failed already
                            continue;
                        }
                        // set to downstream
                        let response_done = session.write_response_tasks(filtered_tasks).await?;
                        response_state.maybe_set_upstream_done(response_done);
                        // unsuccessful upgrade response may force the request done
                        downstream_state.maybe_finished(session.is_body_done());
                    } else {
                        debug!("empty upstream event");
                        response_state.maybe_set_upstream_done(true);
                    }
                },
                task = serve_from_cache.next_http_task(&mut session.cache, &mut range_body_filter),
                    if !response_state.cached_done() && !downstream_state.is_errored() && serve_from_cache.is_on() => {
                    let task = self.h1_response_filter(session, task?, ctx,
                        &mut serve_from_cache,
                        &mut range_body_filter, true).await?;
                    debug!("serve_from_cache task {task:?}");
                    match session.write_response_tasks(vec![task]).await {
                        Ok(b) => response_state.maybe_set_cache_done(b),
                        Err(e) => if serve_from_cache.is_miss() {
                            // give up writing to downstream but wait for upstream cache write to finish
                            downstream_state.to_errored();
                            response_state.maybe_set_cache_done(true);
                            warn!(
                                "Downstream Error ignored during caching: {}, {}",
                                e,
                                self.inner.request_summary(session, ctx)
                            );
                            continue;
                        } else {
                            return Err(e);
                        }
                    }
                    if response_state.cached_done() {
                        if let Err(e) = session.cache.finish_hit_handler().await {
                            warn!("Error during finish_hit_handler: {}", e);
                        }
                    }
                }
                data = custom_reader_next, if downstream_custom_read && !downstream_state.is_errored() => {
                    // custom message arriving from the downstream; stop reading on EOF or error
                    let Some(data) = data.flatten() else {
                        downstream_custom_read = false;
                        continue;
                    };
                    let data = match data {
                        Ok(data) => data,
                        Err(err) => {
                            warn!("downstream_custom_message_reader got error: {err}");
                            downstream_custom_read = false;
                            continue;
                        },
                    };
                    self.inner
                        .downstream_custom_message_proxy_filter(session, data, ctx, true) // true, because it's the last hop for downstream proxying
                        .await?;
                },
                data = custom_inject_rx_recv, if downstream_custom_write => {
                    // custom message to inject toward the downstream; None closes the writer
                    match data.flatten() {
                        Some(data) => {
                            if let Some(ref mut custom_writer) = downstream_custom_message_writer {
                                custom_writer.write_custom_message(data).await?
                            }
                        },
                        None => {
                            downstream_custom_write = false;
                            if let Some(ref mut custom_writer) = downstream_custom_message_writer {
                                custom_writer.finish_custom().await?;
                            }
                        },
                    }
                },
                else => {
                    break;
                }
            }
        }
        let mut reuse_downstream = !downstream_state.is_errored();
        if reuse_downstream {
            match session.as_mut().finish_body().await {
                Ok(_) => {
                    debug!("finished sending body to downstream");
                }
                Err(e) => {
                    error!("Error finish sending body to downstream: {}", e);
                    reuse_downstream = false;
                }
            }
        }
        Ok(reuse_downstream)
    }
/// Filter one [`HttpTask`] from upstream (or from cache) before it is written
/// to an HTTP/1.x downstream.
///
/// For tasks not already served from cache this: runs the upstream filter
/// (optionally delaying), feeds the task to the cache when caching is
/// enabled/bypassing, then applies downstream-facing transforms — conditional
/// (304)/range handling, chunked-encoding fixups, response header/body filters.
/// Returns the (possibly rewritten) task to send downstream, or the error to
/// surface.
async fn h1_response_filter(
    &self,
    session: &mut Session,
    mut task: HttpTask,
    ctx: &mut SV::CTX,
    serve_from_cache: &mut ServeFromCache,
    range_body_filter: &mut RangeBodyFilter,
    from_cache: bool, // are the task from cache already
) -> Result<HttpTask>
where
    SV: ProxyHttp + Send + Sync,
    SV::CTX: Send + Sync,
{
    // skip caching if already served from cache
    if !from_cache {
        if let Some(duration) = self.upstream_filter(session, &mut task, ctx).await? {
            trace!("delaying upstream response for {duration:?}");
            time::sleep(duration).await;
        }
        // cache the original response before any downstream transformation
        // requests that bypassed cache still need to run filters to see if the response has become cacheable
        if session.cache.enabled() || session.cache.bypassing() {
            if let Err(e) = self
                .cache_http_task(session, &task, ctx, serve_from_cache)
                .await
            {
                session.cache.disable(NoCacheReason::StorageError);
                if serve_from_cache.is_miss_body() {
                    // if the response stream cache body during miss but write fails, it has to
                    // give up the entire request
                    return Err(e);
                } else {
                    // otherwise, continue processing the response
                    warn!(
                        "Fail to cache response: {}, {}",
                        e,
                        self.inner.request_summary(session, ctx)
                    );
                }
            }
        }
        // tasks that exist only for cache admission skip downstream filtering
        if !serve_from_cache.should_send_to_downstream() {
            return Ok(task);
        }
    } // else: cached/local response, no need to trigger upstream filters and caching

    // normally max file size is tracked in cache_http_task filters (when cache enabled),
    // we will track it in these filters before sending to downstream on specific conditions
    // when cache is disabled
    let track_max_cache_size = matches!(
        session.cache.phase(),
        CachePhase::Disabled(NoCacheReason::PredictedResponseTooLarge)
    );
    let res = match task {
        HttpTask::Header(mut header, end) => {
            /* Downstream revalidation/range, only needed when cache modified headers because otherwise origin
             * will handle it */
            if session.upstream_headers_mutated_for_cache() {
                self.downstream_response_conditional_filter(
                    serve_from_cache,
                    session,
                    &mut header,
                    ctx,
                );
                if !session.ignore_downstream_range {
                    let range_type = self.inner.range_header_filter(session, &mut header, ctx);
                    range_body_filter.set(range_type);
                }
            }
            // TODO: just set version to Version::HTTP_11 unconditionally here,
            // (with another todo being an option to faithfully proxy the <1.1 responses)
            // as we are already trying to mutate this for HTTP/1.1 downstream reuse
            /* Convert HTTP 1.0 style response to chunked encoding so that we don't
             * have to close the downstream connection */
            // these status codes / method cannot have body, so no need to add chunked encoding
            let no_body = session.req_header().method == http::method::Method::HEAD
                || matches!(header.status.as_u16(), 204 | 304);
            if !no_body
                && !header.status.is_informational()
                && header
                    .headers
                    .get(http::header::TRANSFER_ENCODING)
                    .is_none()
                && header.headers.get(http::header::CONTENT_LENGTH).is_none()
                && !end
            {
                // Upgrade the http version to 1.1 because 1.0/0.9 doesn't support chunked
                header.set_version(Version::HTTP_11);
                header.insert_header(http::header::TRANSFER_ENCODING, "chunked")?;
            }
            match self.inner.response_filter(session, &mut header, ctx).await {
                Ok(_) => Ok(HttpTask::Header(header, end)),
                Err(e) => Err(e),
            }
        }
        HttpTask::Body(data, end) => {
            if track_max_cache_size {
                session
                    .cache
                    .track_body_bytes_for_max_file_size(data.as_ref().map_or(0, |d| d.len()));
            }
            // apply range filtering before the user body filter sees the data
            let mut data = range_body_filter.filter_body(data);
            if let Some(duration) = self
                .inner
                .response_body_filter(session, &mut data, end, ctx)?
            {
                trace!("delaying downstream response for {:?}", duration);
                time::sleep(duration).await;
            }
            Ok(HttpTask::Body(data, end))
        }
        HttpTask::Trailer(h) => Ok(HttpTask::Trailer(h)), // TODO: support trailers for h1
        HttpTask::Done => Ok(task),
        HttpTask::Failed(_) => Ok(task), // Do nothing just pass the error down
    };
    // On end, check if the response (based on file size) can be considered cacheable again
    if let Ok(task) = res.as_ref() {
        if track_max_cache_size
            && task.is_end()
            && !matches!(task, HttpTask::Failed(_))
            && !session.cache.exceeded_max_file_size()
        {
            session.cache.response_became_cacheable();
        }
    }
    res
}
// TODO:: use this function to replace send_body_to2
/// Run the downstream-module and user request-body filters over one chunk of
/// downstream request body, then forward the (possibly rewritten) chunk to the
/// upstream pipe via the reserved `tx` permit.
///
/// Returns whether the *downstream* finished sending its body — deliberately
/// unaffected by what the filters do to the data.
async fn send_body_to_pipe(
    &self,
    session: &mut Session,
    mut data: Option<Bytes>,
    end_of_body: bool,
    tx: mpsc::Permit<'_, HttpTask>,
    ctx: &mut SV::CTX,
) -> Result<bool>
where
    SV: ProxyHttp + Send + Sync,
    SV::CTX: Send + Sync,
{
    // `None` data also signals end of body. Capture the downstream-side flag
    // before the filters run so their rewrites cannot change it.
    let end_of_body = end_of_body || data.is_none();
    session
        .downstream_modules_ctx
        .request_body_filter(&mut data, end_of_body)
        .await?;
    self.inner
        .request_body_filter(session, &mut data, end_of_body, ctx)
        .await?;
    // Filters may have consumed the chunk entirely; recompute the flag that
    // is actually signaled to the upstream.
    let upstream_end_of_body = end_of_body || data.is_none();
    /* It is normal to get 0 bytes because of multi-chunk or request_body_filter decides not to
     * output anything yet.
     * Don't write 0 bytes to the network since it will be
     * treated as the terminating chunk */
    let is_empty_chunk = data.as_ref().is_some_and(|d| d.is_empty());
    if is_empty_chunk && !upstream_end_of_body {
        return Ok(false);
    }
    debug!(
        "Read {} bytes body from downstream",
        data.as_ref().map_or(-1, |d| d.len() as isize)
    );
    tx.send(HttpTask::Body(data, upstream_end_of_body));
    Ok(end_of_body)
}
}
pub(crate) async fn send_body_to1(
client_session: &mut HttpSessionV1,
recv_task: Option<HttpTask>,
) -> Result<bool> {
let body_done;
if let Some(task) = recv_task {
match task {
HttpTask::Body(data, end) => {
body_done = end;
if let Some(d) = data {
let m = client_session.write_body(&d).await;
match m {
Ok(m) => match m {
Some(n) => {
debug!("Write {} bytes body to upstream", n);
}
None => {
warn!("Upstream body is already finished. Nothing to write");
}
},
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | true |
// Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use futures::StreamExt;
use pingora_core::{
protocols::http::custom::{
client::Session as CustomSession, is_informational_except_101, BodyWrite,
CustomMessageWrite, CUSTOM_MESSAGE_QUEUE_SIZE,
},
ImmutStr,
};
use proxy_cache::{range_filter::RangeBodyFilter, ServeFromCache};
use proxy_common::{DownstreamStateMachine, ResponseStateMachine};
use tokio::sync::oneshot;
use super::*;
impl<SV, C> HttpProxy<SV, C>
where
C: custom::Connector,
{
/// Proxy to a custom protocol upstream.
/// Returns (reuse_server, error)
pub(crate) async fn proxy_to_custom_upstream(
    &self,
    session: &mut Session,
    client_session: &mut C::Session,
    reused: bool,
    peer: &HttpPeer,
    ctx: &mut SV::CTX,
) -> (bool, Option<Box<Error>>)
where
    SV: ProxyHttp + Send + Sync,
    SV::CTX: Send + Sync,
{
    // Expose the raw socket handle to the user callback; the type of the raw
    // handle differs per platform.
    #[cfg(windows)]
    let raw = client_session.fd() as std::os::windows::io::RawSocket;
    #[cfg(unix)]
    let raw = client_session.fd();
    let connected = self
        .inner
        .connected_to_upstream(session, reused, peer, raw, client_session.digest(), ctx)
        .await;
    match connected {
        Ok(_) => {}
        Err(e) => return (false, Some(e)),
    }
    let result = self
        .custom_proxy_down_to_up(session, client_session, peer, ctx)
        .await;
    // Parity with H1/H2: custom upstreams don't report payload bytes; record 0.
    session.set_upstream_body_bytes_received(0);
    result
}
/// Handle custom protocol proxying from downstream to upstream.
/// Returns (reuse_server, error)
///
/// Sets up the request header (cache + user filters), the request body writer,
/// the custom-message readers/writers for both directions plus their inject
/// and filter channels, then drives all four coroutines concurrently with
/// `try_join!`.
async fn custom_proxy_down_to_up(
    &self,
    session: &mut Session,
    client_session: &mut C::Session,
    peer: &HttpPeer,
    ctx: &mut SV::CTX,
) -> (bool, Option<Box<Error>>)
where
    SV: ProxyHttp + Send + Sync,
    SV::CTX: Send + Sync,
{
    let mut req = session.req_header().clone();
    if session.cache.enabled() {
        // let the cache layer adjust the upstream request (e.g. conditional headers)
        pingora_cache::filters::upstream::request_filter(
            &mut req,
            session.cache.maybe_cache_meta(),
        );
        session.mark_upstream_headers_mutated_for_cache();
    }
    match self
        .inner
        .upstream_request_filter(session, &mut req, ctx)
        .await
    {
        Ok(_) => { /* continue */ }
        Err(e) => {
            return (false, Some(e));
        }
    }
    session.upstream_compression.request_filter(&req);
    let body_empty = session.as_mut().is_body_empty();
    debug!("Request to custom: {req:?}");
    let req = Box::new(req);
    if let Err(e) = client_session.write_request_header(req, body_empty).await {
        return (false, Some(e.into_up()));
    }
    client_session.set_read_timeout(peer.options.read_timeout);
    client_session.set_write_timeout(peer.options.write_timeout);
    // take the body writer out of the client for easy duplex
    let mut client_body = client_session
        .take_request_body_writer()
        .expect("already send request header");
    // pipe for upstream response tasks -> downstream writer
    let (tx, rx) = mpsc::channel::<HttpTask>(TASK_BUFFER_SIZE);
    session.as_mut().enable_retry_buffering();
    // Custom message logic
    let Some(upstream_custom_message_reader) = client_session.take_custom_message_reader()
    else {
        return (
            false,
            Some(Error::explain(
                ReadError,
                "can't extract custom reader from upstream",
            )),
        );
    };
    let Some(mut upstream_custom_message_writer) = client_session.take_custom_message_writer()
    else {
        return (
            false,
            Some(Error::explain(
                WriteError,
                "custom upstream must have a custom message writer",
            )),
        );
    };
    // A channel to inject custom messages to upstream from server logic.
    let (upstream_custom_message_inject_tx, upstream_custom_message_inject_rx) =
        mpsc::channel(CUSTOM_MESSAGE_QUEUE_SIZE);
    // Downstream reader; a non-custom downstream yields an empty stream.
    let downstream_custom_message_reader = match session.downstream_custom_message() {
        Ok(Some(rx)) => rx,
        Ok(None) => Box::new(futures::stream::empty::<Result<Bytes>>()),
        Err(err) => return (false, Some(err)),
    };
    // Downstream writer; a non-custom downstream gets a no-op `()` writer and
    // this hop becomes the final hop for downstream custom messages.
    let (mut downstream_custom_message_writer, downstream_custom_final_hop): (
        Box<dyn CustomMessageWrite>,
        bool, // if this hop is final
    ) = if let Some(custom_session) = session.downstream_session.as_custom_mut() {
        (
            custom_session
                .take_custom_message_writer()
                .expect("custom downstream must have a custom message writer"),
            false,
        )
    } else {
        (Box::new(()), true)
    };
    // A channel to inject custom messages to downstream from server logic.
    let (downstream_custom_message_inject_tx, downstream_custom_message_inject_rx) =
        mpsc::channel(CUSTOM_MESSAGE_QUEUE_SIZE);
    // Filters for ProxyHttp trait
    let (upstream_custom_message_filter_tx, upstream_custom_message_filter_rx) =
        mpsc::channel(CUSTOM_MESSAGE_QUEUE_SIZE);
    let (downstream_custom_message_filter_tx, downstream_custom_message_filter_rx) =
        mpsc::channel(CUSTOM_MESSAGE_QUEUE_SIZE);
    // Cancellation channels for custom coroutines
    // The transmitters act as guards: when dropped, they signal the receivers to cancel.
    // `cancel_downstream_reader_tx` is held and later used to explicitly cancel.
    // `_cancel_upstream_reader_tx` is unused (prefixed with _) - it will be dropped at the
    // end of this scope, which automatically signals cancellation to the upstream reader.
    let (cancel_downstream_reader_tx, cancel_downstream_reader_rx) = oneshot::channel();
    let (_cancel_upstream_reader_tx, cancel_upstream_reader_rx) = oneshot::channel();
    let upstream_custom_message_forwarder = CustomMessageForwarder {
        ctx: "down_to_up".into(),
        reader: downstream_custom_message_reader,
        writer: &mut upstream_custom_message_writer,
        filter: upstream_custom_message_filter_tx,
        inject: upstream_custom_message_inject_rx,
        cancel: cancel_downstream_reader_rx,
    };
    let downstream_custom_message_forwarder = CustomMessageForwarder {
        ctx: "up_to_down".into(),
        reader: upstream_custom_message_reader,
        writer: &mut downstream_custom_message_writer,
        filter: downstream_custom_message_filter_tx,
        inject: downstream_custom_message_inject_rx,
        cancel: cancel_upstream_reader_rx,
    };
    // hand the inject channel endpoints to user logic before forwarding starts
    if let Err(e) = self
        .inner
        .custom_forwarding(
            session,
            ctx,
            Some(upstream_custom_message_inject_tx),
            downstream_custom_message_inject_tx,
        )
        .await
    {
        return (false, Some(e));
    }
    /* read downstream body and upstream response at the same time */
    let ret = tokio::try_join!(
        self.custom_bidirection_down_to_up(
            session,
            &mut client_body,
            rx,
            ctx,
            upstream_custom_message_filter_rx,
            downstream_custom_message_filter_rx,
            downstream_custom_final_hop,
            cancel_downstream_reader_tx,
        ),
        custom_pipe_up_to_down_response(client_session, tx),
        upstream_custom_message_forwarder.proxy(),
        downstream_custom_message_forwarder.proxy(),
    );
    // put the writer back so the downstream session can keep using it
    if let Some(custom_session) = session.downstream_session.as_custom_mut() {
        custom_session
            .restore_custom_message_writer(downstream_custom_message_writer)
            .expect("downstream restore_custom_message_writer should be empty");
    }
    match ret {
        Ok((downstream_can_reuse, _upstream, _custom_up_down, _custom_down_up)) => {
            (downstream_can_reuse, None)
        }
        Err(e) => (false, Some(e)),
    }
}
// returns whether server (downstream) session can be reused
//
// The duplex event loop for custom-protocol proxying: forwards the downstream
// request body to the upstream, writes upstream/cached response tasks back to
// the downstream, and runs the user's custom-message filters in both
// directions — all within one `tokio::select!` loop.
#[allow(clippy::too_many_arguments)]
async fn custom_bidirection_down_to_up(
    &self,
    session: &mut Session,
    client_body: &mut Box<dyn BodyWrite>,
    mut rx: mpsc::Receiver<HttpTask>,
    ctx: &mut SV::CTX,
    mut upstream_custom_message_filter_rx: mpsc::Receiver<(
        Bytes,
        oneshot::Sender<Option<Bytes>>,
    )>,
    mut downstream_custom_message_filter_rx: mpsc::Receiver<(
        Bytes,
        oneshot::Sender<Option<Bytes>>,
    )>,
    downstream_custom_final_hop: bool,
    cancel_downstream_reader_tx: oneshot::Sender<()>,
) -> Result<bool>
where
    SV: ProxyHttp + Send + Sync,
    SV::CTX: Send + Sync,
{
    // Option so the cancel sender can be consumed exactly once on error.
    let mut cancel_downstream_reader_tx = Some(cancel_downstream_reader_tx);
    let mut downstream_state = DownstreamStateMachine::new(session.as_mut().is_body_done());
    // retry, send buffer if it exists
    if let Some(buffer) = session.as_mut().get_retry_buffer() {
        self.send_body_to_custom(
            session,
            Some(buffer),
            downstream_state.is_done(),
            client_body,
            ctx,
        )
        .await?;
    }
    let mut response_state = ResponseStateMachine::new();
    // these two below can be wrapped into an internal ctx
    // use cache when upstream revalidates (or TODO: error)
    let mut serve_from_cache = ServeFromCache::new();
    let mut range_body_filter = proxy_cache::range_filter::RangeBodyFilter::new();
    // whether the custom-message filter channels are still open (each direction)
    let mut upstream_custom = true;
    let mut downstream_custom = true;
    /* duplex mode
     * see the same function for h1 for more comments
     */
    while !downstream_state.is_done()
        || !response_state.is_done()
        || upstream_custom
        || downstream_custom
    {
        // partial read support, this check will also be false if cache is disabled.
        let support_cache_partial_read =
            session.cache.support_streaming_partial_write() == Some(true);
        tokio::select! {
            body = session.downstream_session.read_body_or_idle(downstream_state.is_done()), if downstream_state.can_poll() => {
                let body = match body {
                    Ok(b) => b,
                    Err(e) => {
                        let wait_for_cache_fill = (!serve_from_cache.is_on() && support_cache_partial_read)
                            || serve_from_cache.is_miss();
                        if wait_for_cache_fill {
                            // ignore downstream error so that upstream can continue to write cache
                            downstream_state.to_errored();
                            warn!(
                                "Downstream Error ignored during caching: {}, {}",
                                e,
                                self.inner.request_summary(session, ctx)
                            );
                            continue;
                        } else {
                            return Err(e.into_down());
                        }
                    }
                };
                let is_body_done = session.is_body_done();
                match self.send_body_to_custom(session, body, is_body_done, client_body, ctx).await {
                    Ok(request_done) => {
                        downstream_state.maybe_finished(request_done);
                    },
                    Err(e) => {
                        // mark request done, attempt to drain receive
                        warn!("body send error: {e}");
                        // upstream is what actually errored but we don't want to continue
                        // polling the downstream body
                        downstream_state.to_errored();
                        // downstream still trying to send something, but the upstream is already stopped
                        // cancel the custom downstream to upstream coroutine, because the proxy will not see EOS.
                        let _ = cancel_downstream_reader_tx.take().expect("cancel must be set and called once").send(());
                    }
                };
            },
            task = rx.recv(), if !response_state.upstream_done() => {
                debug!("upstream event");
                if let Some(t) = task {
                    debug!("upstream event custom: {:?}", t);
                    if serve_from_cache.should_discard_upstream() {
                        // just drain, do we need to do anything else?
                        continue;
                    }
                    // pull as many tasks as we can
                    let mut tasks = Vec::with_capacity(TASK_BUFFER_SIZE);
                    tasks.push(t);
                    while let Ok(task) = rx.try_recv() {
                        tasks.push(task);
                    }
                    /* run filters before sending to downstream */
                    let mut filtered_tasks = Vec::with_capacity(TASK_BUFFER_SIZE);
                    for mut t in tasks {
                        if self.revalidate_or_stale(session, &mut t, ctx).await {
                            serve_from_cache.enable();
                            response_state.enable_cached_response();
                            // skip downstream filtering entirely as the 304 will not be sent
                            break;
                        }
                        session.upstream_compression.response_filter(&mut t);
                        // check error and abort
                        // otherwise the error is surfaced via write_response_tasks()
                        if !serve_from_cache.should_send_to_downstream() {
                            if let HttpTask::Failed(e) = t {
                                return Err(e);
                            }
                        }
                        filtered_tasks.push(
                            self.custom_response_filter(session, t, ctx,
                                &mut serve_from_cache,
                                &mut range_body_filter, false).await?);
                        if serve_from_cache.is_miss_header() {
                            response_state.enable_cached_response();
                        }
                    }
                    if !serve_from_cache.should_send_to_downstream() {
                        // TODO: need to derive response_done from filtered_tasks in case downstream failed already
                        continue;
                    }
                    let response_done = session.write_response_tasks(filtered_tasks).await?;
                    response_state.maybe_set_upstream_done(response_done);
                } else {
                    debug!("empty upstream event");
                    response_state.maybe_set_upstream_done(true);
                }
            }
            task = serve_from_cache.next_http_task(&mut session.cache, &mut range_body_filter),
                if !response_state.cached_done() && !downstream_state.is_errored() && serve_from_cache.is_on() => {
                let task = self.custom_response_filter(session, task?, ctx,
                    &mut serve_from_cache,
                    &mut range_body_filter, true).await?;
                match session.write_response_tasks(vec![task]).await {
                    Ok(b) => response_state.maybe_set_cache_done(b),
                    Err(e) => if serve_from_cache.is_miss() {
                        // give up writing to downstream but wait for upstream cache write to finish
                        downstream_state.to_errored();
                        response_state.maybe_set_cache_done(true);
                        warn!(
                            "Downstream Error ignored during caching: {}, {}",
                            e,
                            self.inner.request_summary(session, ctx)
                        );
                        continue;
                    } else {
                        return Err(e);
                    }
                }
                if response_state.cached_done() {
                    if let Err(e) = session.cache.finish_hit_handler().await {
                        warn!("Error during finish_hit_handler: {}", e);
                    }
                }
            }
            ret = upstream_custom_message_filter_rx.recv(), if upstream_custom => {
                let Some(msg) = ret else {
                    debug!("upstream_custom_message_filter_rx: custom downstream to upstream exited on reading");
                    upstream_custom = false;
                    continue;
                };
                let (data, callback) = msg;
                let new_msg = self.inner
                    .downstream_custom_message_proxy_filter(session, data, ctx, false) // false because the upstream is custom
                    .await?;
                if callback.send(new_msg).is_err() {
                    debug!("upstream_custom_message_incoming_rx: custom downstream to upstream exited on callback");
                    upstream_custom = false;
                    continue;
                };
            },
            ret = downstream_custom_message_filter_rx.recv(), if downstream_custom => {
                let Some(msg) = ret else {
                    debug!("downstream_custom_message_filter_rx: custom upstream to downstream exited on reading");
                    downstream_custom = false;
                    continue;
                };
                let (data, callback) = msg;
                let new_msg = self.inner
                    .upstream_custom_message_proxy_filter(session, data, ctx, downstream_custom_final_hop)
                    .await?;
                if callback.send(new_msg).is_err() {
                    debug!("downstream_custom_message_filter_rx: custom upstream to downstream exited on callback");
                    downstream_custom = false;
                    continue
                };
            },
            else => {
                break;
            }
        }
    }
    // Re-raise the error once the loop is finished.
    if downstream_state.is_errored() {
        let err = Error::e_explain(WriteError, "downstream_state is_errored");
        error!("custom_bidirection_down_to_up: downstream_state.is_errored",);
        return err;
    }
    // NOTE(review): given the early return above, `reuse_downstream` is always
    // true here; the re-check is redundant but kept for parity with the h1 path.
    let mut reuse_downstream = !downstream_state.is_errored();
    if reuse_downstream {
        match session.as_mut().finish_body().await {
            Ok(_) => {
                debug!("finished sending body to downstream");
            }
            Err(e) => {
                error!("Error finish sending body to downstream: {}", e);
                reuse_downstream = false;
            }
        }
    }
    Ok(reuse_downstream)
}
/// Filter one [`HttpTask`] from a custom-protocol upstream (or from cache)
/// before it is written to the downstream.
///
/// Mirrors `h1_response_filter`: upstream filter + caching for non-cached
/// tasks, then conditional/range handling, response filters, chunked-encoding
/// fixups, body filters and trailer handling.
async fn custom_response_filter(
    &self,
    session: &mut Session,
    mut task: HttpTask,
    ctx: &mut SV::CTX,
    serve_from_cache: &mut ServeFromCache,
    range_body_filter: &mut RangeBodyFilter,
    from_cache: bool, // are the task from cache already
) -> Result<HttpTask>
where
    SV: ProxyHttp + Send + Sync,
    SV::CTX: Send + Sync,
{
    if !from_cache {
        // NOTE(review): unlike h1_response_filter, any delay duration returned
        // by upstream_filter() is ignored here — confirm this is intentional.
        self.upstream_filter(session, &mut task, ctx).await?;
        // cache the original response before any downstream transformation
        // requests that bypassed cache still need to run filters to see if the response has become cacheable
        if session.cache.enabled() || session.cache.bypassing() {
            if let Err(e) = self
                .cache_http_task(session, &task, ctx, serve_from_cache)
                .await
            {
                session.cache.disable(NoCacheReason::StorageError);
                if serve_from_cache.is_miss_body() {
                    // if the response stream cache body during miss but write fails, it has to
                    // give up the entire request
                    return Err(e);
                } else {
                    // otherwise, continue processing the response
                    warn!(
                        "Fail to cache response: {}, {}",
                        e,
                        self.inner.request_summary(session, ctx)
                    );
                }
            }
        }
        // skip the downstream filtering if these tasks are just for cache admission
        if !serve_from_cache.should_send_to_downstream() {
            return Ok(task);
        }
    } // else: cached/local response, no need to trigger upstream filters and caching
    match task {
        HttpTask::Header(mut header, eos) => {
            /* Downstream revalidation, only needed when cache is on because otherwise origin
             * will handle it */
            // TODO: if cache is disabled during response phase, we should still do the filter
            if session.cache.enabled() {
                self.downstream_response_conditional_filter(
                    serve_from_cache,
                    session,
                    &mut header,
                    ctx,
                );
                if !session.ignore_downstream_range {
                    let range_type = self.inner.range_header_filter(session, &mut header, ctx);
                    range_body_filter.set(range_type);
                }
            }
            self.inner
                .response_filter(session, &mut header, ctx)
                .await?;
            /* Downgrade the version so that write_response_header won't panic */
            header.set_version(Version::HTTP_11);
            // these status codes / method cannot have body, so no need to add chunked encoding
            let no_body = session.req_header().method == "HEAD"
                || matches!(header.status.as_u16(), 204 | 304);
            /* Add chunked header to tell downstream to use chunked encoding
             * during the absent of content-length */
            if !no_body
                && !header.status.is_informational()
                && header.headers.get(http::header::CONTENT_LENGTH).is_none()
            {
                header.insert_header(http::header::TRANSFER_ENCODING, "chunked")?;
            }
            Ok(HttpTask::Header(header, eos))
        }
        HttpTask::Body(data, eos) => {
            // apply range filtering before the user body filter sees the data
            let mut data = range_body_filter.filter_body(data);
            if let Some(duration) = self
                .inner
                .response_body_filter(session, &mut data, eos, ctx)?
            {
                trace!("delaying response for {duration:?}");
                time::sleep(duration).await;
            }
            Ok(HttpTask::Body(data, eos))
        }
        HttpTask::Trailer(mut trailers) => {
            let trailer_buffer = match trailers.as_mut() {
                Some(trailers) => {
                    debug!("Parsing response trailers..");
                    match self
                        .inner
                        .response_trailer_filter(session, trailers, ctx)
                        .await
                    {
                        Ok(buf) => buf,
                        Err(e) => {
                            // trailer filter errors are logged, not fatal
                            error!(
                                "Encountered error while filtering upstream trailers {:?}",
                                e
                            );
                            None
                        }
                    }
                }
                _ => None,
            };
            // if we have a trailer buffer write it to the downstream response body
            if let Some(buffer) = trailer_buffer {
                // write_body will not write additional bytes after reaching the content-length
                // for gRPC H2 -> H1 this is not a problem but may be a problem for non gRPC code
                // https://http2.github.io/http2-spec/#malformed
                Ok(HttpTask::Body(Some(buffer), true))
            } else {
                Ok(HttpTask::Trailer(trailers))
            }
        }
        HttpTask::Done => Ok(task),
        HttpTask::Failed(_) => Ok(task), // Do nothing just pass the error down
    }
}
/// Run the request-body filters over one chunk of downstream body and write
/// the result to the custom upstream's body writer, finishing the stream on
/// end of body. Returns whether the body has ended.
async fn send_body_to_custom(
    &self,
    session: &mut Session,
    mut data: Option<Bytes>,
    end_of_body: bool,
    client_body: &mut Box<dyn BodyWrite>,
    ctx: &mut SV::CTX,
) -> Result<bool>
where
    SV: ProxyHttp + Send + Sync,
    SV::CTX: Send + Sync,
{
    session
        .downstream_modules_ctx
        .request_body_filter(&mut data, end_of_body)
        .await?;
    self.inner
        .request_body_filter(session, &mut data, end_of_body, ctx)
        .await?;
    /* it is normal to get 0 bytes because of multi-chunk parsing or request_body_filter.
     * Although there is no harm writing empty byte to custom, unlike h1, we ignore it
     * for consistency */
    let is_empty_chunk = data.as_ref().is_some_and(|d| d.is_empty());
    if is_empty_chunk && !end_of_body {
        return Ok(false);
    }
    match data {
        Some(mut buf) => {
            client_body
                .write_all_buf(&mut buf)
                .await
                .map_err(|e| e.into_up())?;
            if end_of_body {
                client_body.finish().await.map_err(|e| e.into_up())?;
            }
        }
        None => {
            // no more data: close the upstream body stream
            debug!("Read downstream body done");
            client_body
                .finish()
                .await
                .map_err(|e| {
                    Error::because(WriteError, "while shutdown send data stream on no data", e)
                })
                .map_err(|e| e.into_up())?;
        }
    }
    Ok(end_of_body)
}
}
/* Read response header, body and trailer from custom upstream and send them to tx */
// On upstream read errors, an HttpTask::Failed is pushed to `tx` and the
// function returns Ok(()) — the downstream side surfaces the error.
async fn custom_pipe_up_to_down_response<S: CustomSession>(
    client: &mut S,
    tx: mpsc::Sender<HttpTask>,
) -> Result<()> {
    // loop to drain 1xx informational responses before the final header
    let mut is_informational = true;
    while is_informational {
        client
            .read_response_header()
            .await
            .map_err(|e| e.into_up())?;
        let resp_header = Box::new(client.response_header().expect("just read").clone());
        // `101 Switching Protocols` is a response to the http1 Upgrade header and it's final response.
        // The WebSocket Protocol https://datatracker.ietf.org/doc/html/rfc6455
        is_informational = is_informational_except_101(resp_header.status.as_u16() as u32);
        match client.check_response_end_or_error(true).await {
            Ok(eos) => {
                tx.send(HttpTask::Header(resp_header, eos))
                    .await
                    .or_err(InternalError, "sending custom headers to pipe")?;
            }
            Err(e) => {
                // If upstream errored, then push error to downstream and then quit
                // Don't care if send fails (which means downstream already gone)
                // we were still able to retrieve the headers, so try sending
                let _ = tx.send(HttpTask::Header(resp_header, false)).await;
                let _ = tx.send(HttpTask::Failed(e.into_up())).await;
                return Ok(());
            }
        }
    }
    // body loop: forward each chunk, tracking end-of-stream per chunk
    while let Some(chunk) = client
        .read_response_body()
        .await
        .map_err(|e| e.into_up())
        .transpose()
    {
        let data = match chunk {
            Ok(d) => d,
            Err(e) => {
                // Push the error to downstream and then quit
                let _ = tx.send(HttpTask::Failed(e.into_up())).await;
                // Downstream should consume all remaining data and handle the error
                return Ok(());
            }
        };
        match client.check_response_end_or_error(false).await {
            Ok(eos) => {
                let empty = data.is_empty();
                if empty && !eos {
                    /* it is normal to get 0 bytes because of multi-chunk
                     * don't write 0 bytes to downstream since it will be
                     * misread as the terminating chunk */
                    continue;
                }
                let sent = tx
                    .send(HttpTask::Body(Some(data), eos))
                    .await
                    .or_err(InternalError, "sending custom body to pipe");
                // If the response with content-length is sent to an HTTP1 downstream,
                // custom_bidirection_down_to_up() could decide that the body has finished and exit without
                // waiting for this function to signal the eos. In this case tx being closed is not
                // a sign of error. It should happen if the only thing left for the custom to send is
                // an empty data frame with eos set.
                if sent.is_err() && eos && empty {
                    return Ok(());
                }
                sent?;
            }
            Err(e) => {
                // Similar to above, push the error to downstream and then quit
                let _ = tx.send(HttpTask::Failed(e.into_up())).await;
                return Ok(());
            }
        }
    }
    // attempt to get trailers
    let trailers = match client.read_trailers().await {
        Ok(t) => t,
        Err(e) => {
            // Similar to above, push the error to downstream and then quit
            let _ = tx.send(HttpTask::Failed(e.into_up())).await;
            return Ok(());
        }
    };
    let trailers = trailers.map(Box::new);
    if trailers.is_some() {
        tx.send(HttpTask::Trailer(trailers))
            .await
            .or_err(InternalError, "sending custom trailer to pipe")?;
    }
    tx.send(HttpTask::Done)
        .await
        .unwrap_or_else(|_| debug!("custom channel closed!"));
    Ok(())
}
/// One-directional forwarder of custom protocol messages, pumping messages
/// from `reader` through the user `filter` (and merged with `inject`ed
/// messages) into `writer`, until `cancel` fires or the stream ends.
struct CustomMessageForwarder<'a> {
    // label used in logs to identify the direction ("down_to_up" / "up_to_down")
    ctx: ImmutStr,
    // sink for the forwarded messages
    writer: &'a mut Box<dyn CustomMessageWrite>,
    // source stream of incoming custom messages
    reader: Box<dyn futures::Stream<Item = Result<Bytes, Box<Error>>> + Send + Sync + Unpin>,
    // messages injected by server logic, merged into the forwarded stream
    inject: mpsc::Receiver<Bytes>,
    // each message is sent here with a callback; the filter may rewrite or drop it
    filter: mpsc::Sender<(Bytes, oneshot::Sender<Option<Bytes>>)>,
    // cancellation signal (sender dropped or explicitly fired)
    cancel: oneshot::Receiver<()>,
}
impl CustomMessageForwarder<'_> {
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | true |
// Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::*;
use pingora_cache::{
key::HashBinary,
CacheKey, CacheMeta, ForcedFreshness, HitHandler,
RespCacheable::{self, *},
};
use proxy_cache::range_filter::{self};
use std::time::Duration;
/// The interface to control the HTTP proxy
///
/// The methods in [ProxyHttp] are filters/callbacks which will be performed on all requests at their
/// particular stage (if applicable).
///
/// If any of the filters returns [Result::Err], the request will fail, and the error will be logged.
#[cfg_attr(not(doc_async_trait), async_trait)]
pub trait ProxyHttp {
/// The per request object to share state across the different filters
type CTX;
/// Define how the `ctx` should be created.
fn new_ctx(&self) -> Self::CTX;
/// Define where the proxy should send the request to.
///
/// The returned [HttpPeer] contains the information regarding where and how this request should
/// be forwarded to.
async fn upstream_peer(
&self,
session: &mut Session,
ctx: &mut Self::CTX,
) -> Result<Box<HttpPeer>>;
/// Set up downstream modules.
///
/// In this phase, users can add or configure [HttpModules] before the server starts up.
///
/// In the default implementation of this method, [ResponseCompressionBuilder] is added
/// and disabled.
fn init_downstream_modules(&self, modules: &mut HttpModules) {
    // Add disabled downstream compression module by default
    // (level 0 == disabled); it can be enabled per request later.
    modules.add_module(ResponseCompressionBuilder::enable(0));
}
/// Handle the incoming request.
///
/// In this phase, users can parse, validate, rate limit, perform access control and/or
/// return a response for this request.
///
/// If the user already sent a response to this request, an `Ok(true)` should be returned so that
/// the proxy would exit. The proxy continues to the next phases when `Ok(false)` is returned.
///
/// By default this filter does nothing and returns `Ok(false)`.
async fn request_filter(&self, _session: &mut Session, _ctx: &mut Self::CTX) -> Result<bool>
where
Self::CTX: Send + Sync,
{
Ok(false)
}
/// Handle the incoming request before any downstream module is executed.
///
/// This function is similar to [Self::request_filter()] but executes before any other logic,
/// including downstream module logic. The main purpose of this function is to provide finer
/// grained control of the behavior of the modules.
///
/// Note that because this function is executed before any module that might provide access
/// control or rate limiting, logic should stay in request_filter() if it can in order to be
/// protected by said modules.
async fn early_request_filter(&self, _session: &mut Session, _ctx: &mut Self::CTX) -> Result<()>
where
Self::CTX: Send + Sync,
{
Ok(())
}
/// Returns whether this session is allowed to spawn subrequests.
///
/// This function is checked after [Self::early_request_filter] to allow that filter to configure
/// this if required. This will also run for subrequests themselves, which may allowed to spawn
/// their own subrequests.
///
/// Note that this doesn't prevent subrequests from being spawned based on the session by proxy
/// core functionality, e.g. background cache revalidation requires spawning subrequests.
fn allow_spawning_subrequest(&self, _session: &Session, _ctx: &Self::CTX) -> bool
where
Self::CTX: Send + Sync,
{
false
}
/// Handle the incoming request body.
///
/// This function will be called every time a piece of request body is received. The `body` is
/// **not the entire request body**.
///
/// The async nature of this function allows to throttle the upload speed and/or executing
/// heavy computation logic such as WAF rules on offloaded threads without blocking the threads
/// who process the requests themselves.
async fn request_body_filter(
&self,
_session: &mut Session,
_body: &mut Option<Bytes>,
_end_of_stream: bool,
_ctx: &mut Self::CTX,
) -> Result<()>
where
Self::CTX: Send + Sync,
{
Ok(())
}
/// This filter decides if the request is cacheable and what cache backend to use
///
/// The caller can interact with `Session.cache` to enable caching.
///
/// By default this filter does nothing which effectively disables caching.
// Ideally only session.cache should be modified, TODO: reflect that in this interface
fn request_cache_filter(&self, _session: &mut Session, _ctx: &mut Self::CTX) -> Result<()> {
Ok(())
}
/// This callback generates the cache key
///
/// This callback is called only when cache is enabled for this request
///
/// By default this callback returns a default cache key generated from the request.
fn cache_key_callback(&self, session: &Session, _ctx: &mut Self::CTX) -> Result<CacheKey> {
let req_header = session.req_header();
Ok(CacheKey::default(req_header))
}
/// This callback is invoked when a cacheable response is ready to be admitted to cache.
fn cache_miss(&self, session: &mut Session, _ctx: &mut Self::CTX) {
session.cache.cache_miss();
}
/// This filter is called after a successful cache lookup and before the
/// cache asset is ready to be used.
///
/// This filter allows the user to log or force invalidate the asset, or
/// to adjust the body reader associated with the cache hit.
/// This also runs on stale hit assets (for which `is_fresh` is false).
///
/// The value returned indicates if the force invalidation should be used,
/// and which kind. Returning `None` indicates no forced invalidation
async fn cache_hit_filter(
&self,
_session: &mut Session,
_meta: &CacheMeta,
_hit_handler: &mut HitHandler,
_is_fresh: bool,
_ctx: &mut Self::CTX,
) -> Result<Option<ForcedFreshness>>
where
Self::CTX: Send + Sync,
{
Ok(None)
}
/// Decide if a request should continue to upstream after not being served from cache.
///
/// returns: Ok(true) if the request should continue, Ok(false) if a response was written by the
/// callback and the session should be finished, or an error
///
/// This filter can be used for deferring checks like rate limiting or access control to when they
/// actually needed after cache miss.
///
/// By default the session will attempt to be reused after returning Ok(false). It is the
/// caller's responsibility to disable keepalive or drain the request body if needed.
async fn proxy_upstream_filter(
&self,
_session: &mut Session,
_ctx: &mut Self::CTX,
) -> Result<bool>
where
Self::CTX: Send + Sync,
{
Ok(true)
}
/// Decide if the response is cacheable
fn response_cache_filter(
&self,
_session: &Session,
_resp: &ResponseHeader,
_ctx: &mut Self::CTX,
) -> Result<RespCacheable> {
Ok(Uncacheable(NoCacheReason::Custom("default")))
}
/// Decide how to generate cache vary key from both request and response
///
/// None means no variance is needed.
fn cache_vary_filter(
&self,
_meta: &CacheMeta,
_ctx: &mut Self::CTX,
_req: &RequestHeader,
) -> Option<HashBinary> {
// default to None for now to disable vary feature
None
}
/// Decide if the incoming request's condition _fails_ against the cached response.
///
/// Returning `Ok(true)` means that the response does _not_ match against the condition, and
/// that the proxy can return `304 Not Modified` downstream.
///
/// An example is a conditional GET request with `If-None-Match: "foobar"`. If the cached
/// response contains the `ETag: "foobar"`, then the condition fails, and `304 Not Modified`
/// should be returned. Else, the condition passes which means the full `200 OK` response must
/// be sent.
fn cache_not_modified_filter(
&self,
session: &Session,
resp: &ResponseHeader,
_ctx: &mut Self::CTX,
) -> Result<bool> {
Ok(
pingora_core::protocols::http::conditional_filter::not_modified_filter(
session.req_header(),
resp,
),
)
}
/// This filter is called when cache is enabled to determine what byte range to return (in both
/// cache hit and miss cases) from the response body. It is only used when caching is enabled,
/// otherwise the upstream is responsible for any filtering. It allows users to define the range
/// this request is for via its return type `range_filter::RangeType`.
///
/// It also allow users to modify the response header accordingly.
///
/// The default implementation can handle a single-range as per [RFC7232].
///
/// [RFC7232]: https://www.rfc-editor.org/rfc/rfc7232
fn range_header_filter(
&self,
session: &mut Session,
resp: &mut ResponseHeader,
_ctx: &mut Self::CTX,
) -> range_filter::RangeType {
const DEFAULT_MAX_RANGES: Option<usize> = Some(200);
proxy_cache::range_filter::range_header_filter(
session.req_header(),
resp,
DEFAULT_MAX_RANGES,
)
}
/// Modify the request before it is sent to the upstream
///
/// Unlike [Self::request_filter()], this filter allows to change the request headers to send
/// to the upstream.
async fn upstream_request_filter(
&self,
_session: &mut Session,
_upstream_request: &mut RequestHeader,
_ctx: &mut Self::CTX,
) -> Result<()>
where
Self::CTX: Send + Sync,
{
Ok(())
}
/// Modify the response header from the upstream
///
/// The modification is before caching, so any change here will be stored in the cache if enabled.
///
/// Responses served from cache won't trigger this filter. If the cache needed revalidation,
/// only the 304 from upstream will trigger the filter (though it will be merged into the
/// cached header, not served directly to downstream).
async fn upstream_response_filter(
&self,
_session: &mut Session,
_upstream_response: &mut ResponseHeader,
_ctx: &mut Self::CTX,
) -> Result<()>
where
Self::CTX: Send + Sync,
{
Ok(())
}
/// Modify the response header before it is send to the downstream
///
/// The modification is after caching. This filter is called for all responses including
/// responses served from cache.
async fn response_filter(
&self,
_session: &mut Session,
_upstream_response: &mut ResponseHeader,
_ctx: &mut Self::CTX,
) -> Result<()>
where
Self::CTX: Send + Sync,
{
Ok(())
}
// custom_forwarding is called when downstream and upstream connections are successfully established.
#[doc(hidden)]
async fn custom_forwarding(
&self,
_session: &mut Session,
_ctx: &mut Self::CTX,
_custom_message_to_upstream: Option<mpsc::Sender<Bytes>>,
_custom_message_to_downstream: mpsc::Sender<Bytes>,
) -> Result<()>
where
Self::CTX: Send + Sync,
{
Ok(())
}
// received a custom message from the downstream before sending it to the upstream.
#[doc(hidden)]
async fn downstream_custom_message_proxy_filter(
&self,
_session: &mut Session,
custom_message: Bytes,
_ctx: &mut Self::CTX,
_final_hop: bool,
) -> Result<Option<Bytes>>
where
Self::CTX: Send + Sync,
{
Ok(Some(custom_message))
}
// received a custom message from the upstream before sending it to the downstream.
#[doc(hidden)]
async fn upstream_custom_message_proxy_filter(
&self,
_session: &mut Session,
custom_message: Bytes,
_ctx: &mut Self::CTX,
_final_hop: bool,
) -> Result<Option<Bytes>>
where
Self::CTX: Send + Sync,
{
Ok(Some(custom_message))
}
/// Similar to [Self::upstream_response_filter()] but for response body
///
/// This function will be called every time a piece of response body is received. The `body` is
/// **not the entire response body**.
fn upstream_response_body_filter(
&self,
_session: &mut Session,
_body: &mut Option<Bytes>,
_end_of_stream: bool,
_ctx: &mut Self::CTX,
) -> Result<Option<Duration>> {
Ok(None)
}
/// Similar to [Self::upstream_response_filter()] but for response trailers
fn upstream_response_trailer_filter(
&self,
_session: &mut Session,
_upstream_trailers: &mut header::HeaderMap,
_ctx: &mut Self::CTX,
) -> Result<()> {
Ok(())
}
/// Similar to [Self::response_filter()] but for response body chunks
fn response_body_filter(
&self,
_session: &mut Session,
_body: &mut Option<Bytes>,
_end_of_stream: bool,
_ctx: &mut Self::CTX,
) -> Result<Option<Duration>>
where
Self::CTX: Send + Sync,
{
Ok(None)
}
/// Similar to [Self::response_filter()] but for response trailers.
/// Note, returning an Ok(Some(Bytes)) will result in the downstream response
/// trailers being written to the response body.
///
/// TODO: make this interface more intuitive
async fn response_trailer_filter(
&self,
_session: &mut Session,
_upstream_trailers: &mut header::HeaderMap,
_ctx: &mut Self::CTX,
) -> Result<Option<Bytes>>
where
Self::CTX: Send + Sync,
{
Ok(None)
}
/// This filter is called when the entire response is sent to the downstream successfully or
/// there is a fatal error that terminate the request.
///
/// An error log is already emitted if there is any error. This phase is used for collecting
/// metrics and sending access logs.
async fn logging(&self, _session: &mut Session, _e: Option<&Error>, _ctx: &mut Self::CTX)
where
Self::CTX: Send + Sync,
{
}
/// A value of true means that the log message will be suppressed. The default value is false.
fn suppress_error_log(&self, _session: &Session, _ctx: &Self::CTX, _error: &Error) -> bool {
false
}
/// This filter is called when there is an error **after** a connection is established (or reused)
/// to the upstream.
fn error_while_proxy(
&self,
peer: &HttpPeer,
session: &mut Session,
e: Box<Error>,
_ctx: &mut Self::CTX,
client_reused: bool,
) -> Box<Error> {
let mut e = e.more_context(format!("Peer: {}", peer));
// only reused client connections where retry buffer is not truncated
e.retry
.decide_reuse(client_reused && !session.as_ref().retry_buffer_truncated());
e
}
/// This filter is called when there is an error in the process of establishing a connection
/// to the upstream.
///
/// In this filter the user can decide whether the error is retry-able by marking the error `e`.
///
/// If the error can be retried, [Self::upstream_peer()] will be called again so that the user
/// can decide whether to send the request to the same upstream or another upstream that is possibly
/// available.
fn fail_to_connect(
&self,
_session: &mut Session,
_peer: &HttpPeer,
_ctx: &mut Self::CTX,
e: Box<Error>,
) -> Box<Error> {
e
}
/// This filter is called when the request encounters a fatal error.
///
/// Users may write an error response to the downstream if the downstream is still writable.
///
/// The response status code of the error response may be returned for logging purposes.
/// Additionally, the user can return whether this session may be reused in spite of the error.
/// Today this reuse status is only respected for errors that occur prior to upstream peer
/// selection, and the keepalive configured on the `Session` itself still takes precedent.
async fn fail_to_proxy(
&self,
session: &mut Session,
e: &Error,
_ctx: &mut Self::CTX,
) -> FailToProxy
where
Self::CTX: Send + Sync,
{
let code = match e.etype() {
HTTPStatus(code) => *code,
_ => {
match e.esource() {
ErrorSource::Upstream => 502,
ErrorSource::Downstream => {
match e.etype() {
WriteError | ReadError | ConnectionClosed => {
/* conn already dead */
0
}
_ => 400,
}
}
ErrorSource::Internal | ErrorSource::Unset => 500,
}
}
};
if code > 0 {
session.respond_error(code).await.unwrap_or_else(|e| {
error!("failed to send error response to downstream: {e}");
});
}
FailToProxy {
error_code: code,
// default to no reuse, which is safest
can_reuse_downstream: false,
}
}
/// Decide whether should serve stale when encountering an error or during revalidation
///
/// An implementation should follow
/// <https://datatracker.ietf.org/doc/html/rfc9111#section-4.2.4>
/// <https://www.rfc-editor.org/rfc/rfc5861#section-4>
///
/// This filter is only called if cache is enabled.
// 5xx HTTP status will be encoded as ErrorType::HTTPStatus(code)
fn should_serve_stale(
&self,
_session: &mut Session,
_ctx: &mut Self::CTX,
error: Option<&Error>, // None when it is called during stale while revalidate
) -> bool {
// A cache MUST NOT generate a stale response unless
// it is disconnected
// or doing so is explicitly permitted by the client or origin server
// (e.g. headers or an out-of-band contract)
error.is_some_and(|e| e.esource() == &ErrorSource::Upstream)
}
/// This filter is called when the request just established or reused a connection to the upstream
///
/// This filter allows user to log timing and connection related info.
async fn connected_to_upstream(
&self,
_session: &mut Session,
_reused: bool,
_peer: &HttpPeer,
#[cfg(unix)] _fd: std::os::unix::io::RawFd,
#[cfg(windows)] _sock: std::os::windows::io::RawSocket,
_digest: Option<&Digest>,
_ctx: &mut Self::CTX,
) -> Result<()>
where
Self::CTX: Send + Sync,
{
Ok(())
}
/// This callback is invoked every time request related error log needs to be generated
///
/// Users can define what is important to be written about this request via the returned string.
fn request_summary(&self, session: &Session, _ctx: &Self::CTX) -> String {
session.as_ref().request_summary()
}
/// Whether the request should be used to invalidate(delete) the HTTP cache
///
/// - `true`: this request will be used to invalidate the cache.
/// - `false`: this request is a treated as a normal request
fn is_purge(&self, _session: &Session, _ctx: &Self::CTX) -> bool {
false
}
/// This filter is called after the proxy cache generates the downstream response to the purge
/// request (to invalidate or delete from the HTTP cache), based on the purge status, which
/// indicates whether the request succeeded or failed.
///
/// The filter allows the user to modify or replace the generated downstream response.
/// If the filter returns `Err`, the proxy will instead send a 500 response.
fn purge_response_filter(
&self,
_session: &Session,
_ctx: &mut Self::CTX,
_purge_status: PurgeStatus,
_purge_response: &mut std::borrow::Cow<'static, ResponseHeader>,
) -> Result<()> {
Ok(())
}
}
/// Context struct returned by `fail_to_proxy`.
///
/// Carries the status code used for the generated error response (for logging)
/// and whether the downstream connection may still be reused despite the error.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct FailToProxy {
    /// The HTTP status code of the error response sent downstream.
    ///
    /// `0` indicates that the downstream connection was already dead and no
    /// response was written (see the default `fail_to_proxy` implementation).
    pub error_code: u16,
    /// Whether the downstream session may be reused in spite of the error.
    ///
    /// Today this is only respected for errors that occur prior to upstream
    /// peer selection.
    pub can_reuse_downstream: bool,
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-proxy/src/proxy_cache.rs | pingora-proxy/src/proxy_cache.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::*;
use http::header::{CONTENT_LENGTH, CONTENT_TYPE};
use http::{Method, StatusCode};
use pingora_cache::key::CacheHashKey;
use pingora_cache::lock::LockStatus;
use pingora_cache::max_file_size::ERR_RESPONSE_TOO_LARGE;
use pingora_cache::{ForcedFreshness, HitHandler, HitStatus, RespCacheable::*};
use pingora_core::protocols::http::conditional_filter::to_304;
use pingora_core::protocols::http::v1::common::header_value_content_length;
use pingora_core::ErrorType;
use range_filter::RangeBodyFilter;
use std::time::SystemTime;
impl<SV, C> HttpProxy<SV, C>
where
C: custom::Connector,
{
    // return bool: server_session can be reused, and error if any
    //
    // Runs the cache phase for a request: decides cacheability, computes the
    // cache key, handles PURGE, and performs the (possibly repeated) cache
    // lookup including vary resolution, cache-lock waiting, and
    // stale-while-revalidate handling.
    pub(crate) async fn proxy_cache(
        self: &Arc<Self>,
        session: &mut Session,
        ctx: &mut SV::CTX,
    ) -> Option<(bool, Option<Box<Error>>)>
    // None: continue to proxy, Some: return
    where
        SV: ProxyHttp + Send + Sync + 'static,
        SV::CTX: Send + Sync,
    {
        // Cache logic request phase
        if let Err(e) = self.inner.request_cache_filter(session, ctx) {
            // TODO: handle this error
            warn!(
                "Fail to request_cache_filter: {e}, {}",
                self.inner.request_summary(session, ctx)
            );
        }
        // cache key logic, should this be part of request_cache_filter?
        if session.cache.enabled() {
            match self.inner.cache_key_callback(session, ctx) {
                Ok(key) => {
                    session.cache.set_cache_key(key);
                }
                Err(e) => {
                    // TODO: handle this error
                    session.cache.disable(NoCacheReason::StorageError);
                    warn!(
                        "Fail to cache_key_callback: {e}, {}",
                        self.inner.request_summary(session, ctx)
                    );
                }
            }
        }
        // cache purge logic: PURGE short-circuits rest of request
        if self.inner.is_purge(session, ctx) {
            return self.proxy_purge(session, ctx).await;
        }
        // bypass cache lookup if we predict to be uncacheable
        if session.cache.enabled() && !session.cache.cacheable_prediction() {
            session.cache.bypass();
        }
        if !session.cache.enabled() {
            return None;
        }
        // cache lookup logic
        // The loop re-runs the lookup after a vary re-key or after waiting on
        // a cache lock held by another request filling the same asset.
        loop {
            // for cache lock, TODO: cap the max number of loops
            match session.cache.cache_lookup().await {
                Ok(res) => {
                    // None until a hit (fresh or stale) is found below
                    let mut hit_status_opt = None;
                    if let Some((mut meta, mut handler)) = res {
                        // Vary logic
                        // Because this branch can be called multiple times in a loop, and we only
                        // need to update the vary once, check if variance is already set to
                        // prevent unnecessary vary lookups.
                        let cache_key = session.cache.cache_key();
                        if let Some(variance) = cache_key.variance_bin() {
                            // We've looked up a secondary slot.
                            // Adhoc double check that the variance found is the variance we want.
                            if Some(variance) != meta.variance() {
                                warn!("Cache variance mismatch, {variance:?}, {cache_key:?}");
                                session.cache.disable(NoCacheReason::InternalError);
                                break None;
                            }
                        } else {
                            // Basic cache key; either variance is off, or this is the primary slot.
                            let req_header = session.req_header();
                            let variance = self.inner.cache_vary_filter(&meta, ctx, req_header);
                            if let Some(variance) = variance {
                                // Variance is on. This is the primary slot.
                                if !session.cache.cache_vary_lookup(variance, &meta) {
                                    // This wasn't the desired variant. Updated cache key variance, cause another
                                    // lookup to get the desired variant, which would be in a secondary slot.
                                    continue;
                                }
                            } // else: vary is not in use
                        }
                        // Either no variance, or the current handler targets the correct variant.
                        // hit
                        // TODO: maybe round and/or cache now()
                        let is_fresh = meta.is_fresh(SystemTime::now());
                        // check if we should force expire or force miss
                        let hit_status = match self
                            .inner
                            .cache_hit_filter(session, &meta, &mut handler, is_fresh, ctx)
                            .await
                        {
                            Err(e) => {
                                error!(
                                    "Failed to filter cache hit: {e}, {}",
                                    self.inner.request_summary(session, ctx)
                                );
                                // this return value will cause us to fetch from upstream
                                HitStatus::FailedHitFilter
                            }
                            Ok(None) => {
                                if is_fresh {
                                    HitStatus::Fresh
                                } else {
                                    HitStatus::Expired
                                }
                            }
                            Ok(Some(ForcedFreshness::ForceExpired)) => {
                                // force expired asset should not be serve as stale
                                // because force expire is usually to remove data
                                meta.disable_serve_stale();
                                HitStatus::ForceExpired
                            }
                            Ok(Some(ForcedFreshness::ForceMiss)) => HitStatus::ForceMiss,
                            Ok(Some(ForcedFreshness::ForceFresh)) => HitStatus::Fresh,
                        };
                        hit_status_opt = Some(hit_status);
                        // init cache for hit / stale
                        session.cache.cache_found(meta, handler, hit_status);
                    }
                    if hit_status_opt.is_none_or(HitStatus::is_treated_as_miss) {
                        // cache miss
                        if session.cache.is_cache_locked() {
                            // Another request is filling the cache; try waiting til that's done and retry.
                            let lock_status = session.cache.cache_lock_wait().await;
                            if self.handle_lock_status(session, ctx, lock_status) {
                                continue;
                            } else {
                                break None;
                            }
                        } else {
                            self.inner.cache_miss(session, ctx);
                            break None;
                        }
                    }
                    // Safe because an empty hit status would have broken out
                    // in the block above
                    let hit_status = hit_status_opt.expect("None case handled as miss");
                    if !hit_status.is_fresh() {
                        // expired or force expired asset
                        if session.cache.is_cache_locked() {
                            // first if this is the sub request for the background cache update
                            if let Some(write_lock) = session
                                .subrequest_ctx
                                .as_mut()
                                .and_then(|ctx| ctx.take_write_lock())
                            {
                                // Put the write lock in the request
                                session.cache.set_write_lock(write_lock);
                                session.cache.tag_as_subrequest();
                                // and then let it go to upstream
                                break None;
                            }
                            let will_serve_stale = session.cache.can_serve_stale_updating()
                                && self.inner.should_serve_stale(session, ctx, None);
                            if !will_serve_stale {
                                let lock_status = session.cache.cache_lock_wait().await;
                                if self.handle_lock_status(session, ctx, lock_status) {
                                    continue;
                                } else {
                                    break None;
                                }
                            }
                            // else continue to serve stale
                            session.cache.set_stale_updating();
                        } else if session.cache.is_cache_lock_writer() {
                            // stale while revalidate logic for the writer
                            let will_serve_stale = session.cache.can_serve_stale_updating()
                                && self.inner.should_serve_stale(session, ctx, None);
                            if will_serve_stale {
                                // create a background thread to do the actual update
                                // the subrequest handle is only None by this phase in unit tests
                                // that don't go through process_new_http
                                let (permit, cache_lock) = session.cache.take_write_lock();
                                SubrequestSpawner::new(self.clone()).spawn_background_subrequest(
                                    session.as_ref(),
                                    subrequest::Ctx::builder()
                                        .cache_write_lock(
                                            cache_lock,
                                            session.cache.cache_key().clone(),
                                            permit,
                                        )
                                        .build(),
                                );
                                // continue to serve stale for this request
                                session.cache.set_stale_updating();
                            } else {
                                // return to fetch from upstream
                                break None;
                            }
                        } else {
                            // return to fetch from upstream
                            break None;
                        }
                    }
                    let (reuse, err) = self.proxy_cache_hit(session, ctx).await;
                    if let Some(e) = err.as_ref() {
                        error!(
                            "Fail to serve cache: {e}, {}",
                            self.inner.request_summary(session, ctx)
                        );
                    }
                    // response is served from cache, exit
                    break Some((reuse, err));
                }
                Err(e) => {
                    // Allow cache miss to fill cache even if cache lookup errors
                    // this is mostly to support backward incompatible metadata update
                    // TODO: check error types
                    // session.cache.disable();
                    self.inner.cache_miss(session, ctx);
                    warn!(
                        "Fail to cache lookup: {e}, {}",
                        self.inner.request_summary(session, ctx)
                    );
                    break None;
                }
            }
        }
    }
    // return bool: server_session can be reused, and error if any
    //
    // Serves a cache hit (fresh or stale) to the downstream: runs the
    // conditional (304) filter, the range filter, the response filters, then
    // streams the body from the hit handler, applying range/multipart
    // transformations as needed.
    pub(crate) async fn proxy_cache_hit(
        &self,
        session: &mut Session,
        ctx: &mut SV::CTX,
    ) -> (bool, Option<Box<Error>>)
    where
        SV: ProxyHttp + Send + Sync,
        SV::CTX: Send + Sync,
    {
        use range_filter::*;
        // whether the hit handler supports seeking; this decides if range
        // requests are processed below
        let seekable = session.cache.hit_handler().can_seek();
        let mut header = cache_hit_header(&session.cache);
        let req = session.req_header();
        let not_modified = match self.inner.cache_not_modified_filter(session, &header, ctx) {
            Ok(not_modified) => not_modified,
            Err(e) => {
                // fail open if cache_not_modified_filter errors,
                // just return the whole original response
                warn!(
                    "Failed to run cache not modified filter: {e}, {}",
                    self.inner.request_summary(session, ctx)
                );
                false
            }
        };
        if not_modified {
            to_304(&mut header);
        }
        // 304 and HEAD responses carry no body
        let header_only = not_modified || req.method == http::method::Method::HEAD;
        // process range header if the cache storage supports seek
        let range_type = if seekable && !session.ignore_downstream_range {
            self.inner.range_header_filter(session, &mut header, ctx)
        } else {
            RangeType::None
        };
        // return a 416 with an empty body for simplicity
        let header_only = header_only || matches!(range_type, RangeType::Invalid);
        debug!("header: {header:?}");
        // TODO: use ProxyUseCache to replace the logic below
        match self.inner.response_filter(session, &mut header, ctx).await {
            Ok(_) => {
                if let Err(e) = session
                    .downstream_modules_ctx
                    .response_header_filter(&mut header, header_only)
                    .await
                {
                    error!(
                        "Failed to run downstream modules response header filter in hit: {e}, {}",
                        self.inner.request_summary(session, ctx)
                    );
                    session
                        .as_mut()
                        .respond_error(500)
                        .await
                        .unwrap_or_else(|e| {
                            error!("failed to send error response to downstream: {e}");
                        });
                    // we have not write anything dirty to downstream, it is still reusable
                    return (true, Some(e));
                }
                if let Err(e) = session
                    .as_mut()
                    .write_response_header(header)
                    .await
                    .map_err(|e| e.into_down())
                {
                    // downstream connection is bad already
                    return (false, Some(e));
                }
            }
            Err(e) => {
                error!(
                    "Failed to run response filter in hit: {e}, {}",
                    self.inner.request_summary(session, ctx)
                );
                session
                    .as_mut()
                    .respond_error(500)
                    .await
                    .unwrap_or_else(|e| {
                        error!("failed to send error response to downstream: {e}");
                    });
                // we have not write anything dirty to downstream, it is still reusable
                return (true, Some(e));
            }
        }
        debug!("finished sending cached header to downstream");
        // If the function returns an Err, there was an issue seeking from the hit handler.
        //
        // Returning false means that no seeking or state change was done, either because the
        // hit handler doesn't support the seek or because multipart doesn't apply.
        fn seek_multipart(
            hit_handler: &mut HitHandler,
            range_filter: &mut RangeBodyFilter,
        ) -> Result<bool> {
            if !range_filter.is_multipart_range() || !hit_handler.can_seek_multipart() {
                return Ok(false);
            }
            let r = range_filter.next_cache_multipart_range();
            hit_handler.seek_multipart(r.start, Some(r.end))?;
            // we still need RangeBodyFilter's help to transform the byte
            // range into a multipart response.
            range_filter.set_current_cursor(r.start);
            Ok(true)
        }
        if !header_only {
            // Set up the range handling strategy: seek directly in the hit
            // handler when possible, otherwise filter the body in-flight.
            let mut maybe_range_filter = match &range_type {
                RangeType::Single(r) => {
                    if session.cache.hit_handler().can_seek() {
                        if let Err(e) = session.cache.hit_handler().seek(r.start, Some(r.end)) {
                            return (false, Some(e));
                        }
                        None
                    } else {
                        Some(RangeBodyFilter::new_range(range_type.clone()))
                    }
                }
                RangeType::Multi(_) => {
                    let mut range_filter = RangeBodyFilter::new_range(range_type.clone());
                    if let Err(e) = seek_multipart(session.cache.hit_handler(), &mut range_filter) {
                        return (false, Some(e));
                    }
                    Some(range_filter)
                }
                RangeType::Invalid => unreachable!(),
                RangeType::None => None,
            };
            loop {
                match session.cache.hit_handler().read_body().await {
                    Ok(raw_body) => {
                        // None from the hit handler marks the end of the body
                        let end = raw_body.is_none();
                        if end {
                            if let Some(range_filter) = maybe_range_filter.as_mut() {
                                if range_filter.should_cache_seek_again() {
                                    let e = match seek_multipart(
                                        session.cache.hit_handler(),
                                        range_filter,
                                    ) {
                                        Ok(true) => {
                                            // called seek(), read again
                                            continue;
                                        }
                                        Ok(false) => {
                                            // body reader can no longer seek multipart,
                                            // but cache wants to continue seeking
                                            // the body will just end in this case if we pass the
                                            // None through
                                            // (TODO: how might hit handlers want to recover from
                                            // this situation)?
                                            Error::explain(
                                                InternalError,
                                                "hit handler cannot seek for multipart again",
                                            )
                                            // the body will just end in this case.
                                        }
                                        Err(e) => e,
                                    };
                                    return (false, Some(e));
                                }
                            }
                        }
                        let mut body = if let Some(range_filter) = maybe_range_filter.as_mut() {
                            range_filter.filter_body(raw_body)
                        } else {
                            raw_body
                        };
                        match self
                            .inner
                            .response_body_filter(session, &mut body, end, ctx)
                        {
                            Ok(Some(duration)) => {
                                trace!("delaying response for {duration:?}");
                                time::sleep(duration).await;
                            }
                            Ok(None) => { /* continue */ }
                            Err(e) => {
                                // body is being sent, don't treat downstream as reusable
                                return (false, Some(e));
                            }
                        }
                        if let Err(e) = session
                            .downstream_modules_ctx
                            .response_body_filter(&mut body, end)
                        {
                            // body is being sent, don't treat downstream as reusable
                            return (false, Some(e));
                        }
                        if !end && body.as_ref().is_none_or(|b| b.is_empty()) {
                            // Don't write empty body which will end session,
                            // still more hit handler bytes to read
                            continue;
                        }
                        // write to downstream
                        let b = body.unwrap_or_default();
                        if let Err(e) = session
                            .as_mut()
                            .write_response_body(b, end)
                            .await
                            .map_err(|e| e.into_down())
                        {
                            return (false, Some(e));
                        }
                        if end {
                            break;
                        }
                    }
                    Err(e) => return (false, Some(e)),
                }
            }
        }
        if let Err(e) = session.cache.finish_hit_handler().await {
            warn!("Error during finish_hit_handler: {}", e);
        }
        match session.as_mut().finish_body().await {
            Ok(_) => {
                debug!("finished sending cached body to downstream");
                (true, None)
            }
            Err(e) => (false, Some(e)),
        }
    }
/* Downstream revalidation, only needed when cache is on because otherwise origin
* will handle it */
pub(crate) fn downstream_response_conditional_filter(
&self,
use_cache: &mut ServeFromCache,
session: &Session,
resp: &mut ResponseHeader,
ctx: &mut SV::CTX,
) where
SV: ProxyHttp,
{
// TODO: range
let req = session.req_header();
let not_modified = match self.inner.cache_not_modified_filter(session, resp, ctx) {
Ok(not_modified) => not_modified,
Err(e) => {
// fail open if cache_not_modified_filter errors,
// just return the whole original response
warn!(
"Failed to run cache not modified filter: {e}, {}",
self.inner.request_summary(session, ctx)
);
false
}
};
if not_modified {
to_304(resp);
}
let header_only = not_modified || req.method == http::method::Method::HEAD;
if header_only && use_cache.is_on() {
// tell cache to stop serving downstream after yielding header
// (misses will continue to allow admitting upstream into cache)
use_cache.enable_header_only();
}
}
// TODO: cache upstream header filter to add/remove headers
pub(crate) async fn cache_http_task(
&self,
session: &mut Session,
task: &HttpTask,
ctx: &mut SV::CTX,
serve_from_cache: &mut ServeFromCache,
) -> Result<()>
where
SV: ProxyHttp + Send + Sync,
SV::CTX: Send + Sync,
{
if !session.cache.enabled() && !session.cache.bypassing() {
return Ok(());
}
match task {
HttpTask::Header(header, end_stream) => {
// decide if cacheable and create cache meta
// for now, skip 1xxs (should not affect response cache decisions)
// However 101 is an exception because it is the final response header
if header.status.is_informational()
&& header.status != StatusCode::SWITCHING_PROTOCOLS
{
return Ok(());
}
match self.inner.response_cache_filter(session, header, ctx)? {
Cacheable(meta) => {
let mut fill_cache = true;
if session.cache.bypassing() {
// The cache might have been bypassed because the response exceeded the
// maximum cacheable asset size. If that looks like the case (there
// is a maximum file size configured and we don't know the content
// length up front), attempting to re-enable the cache now would cause
// the request to fail when the chunked response exceeds the maximum
// file size again.
if session.cache.max_file_size_bytes().is_some()
&& !meta.headers().contains_key(header::CONTENT_LENGTH)
{
session
.cache
.disable(NoCacheReason::PredictedResponseTooLarge);
return Ok(());
}
session.cache.response_became_cacheable();
if session.req_header().method == Method::GET
&& meta.response_header().status == StatusCode::OK
{
self.inner.cache_miss(session, ctx);
if !session.cache.enabled() {
fill_cache = false;
}
} else {
// we've allowed caching on the next request,
// but do not cache _this_ request if bypassed and not 200
// (We didn't run upstream request cache filters to strip range or condition headers,
// so this could be an uncacheable response e.g. 206 or 304 or HEAD.
// Exclude all non-200/GET for simplicity, may expand allowable codes in the future.)
fill_cache = false;
session.cache.disable(NoCacheReason::Deferred);
}
}
// If the Content-Length is known, and a maximum asset size has been configured
// on the cache, validate that the response does not exceed the maximum asset size.
if session.cache.enabled() {
if let Some(max_file_size) = session.cache.max_file_size_bytes() {
let content_length_hdr = meta.headers().get(header::CONTENT_LENGTH);
if let Some(content_length) =
header_value_content_length(content_length_hdr)
{
if content_length > max_file_size {
fill_cache = false;
session.cache.response_became_uncacheable(
NoCacheReason::ResponseTooLarge,
);
session.cache.disable(NoCacheReason::ResponseTooLarge);
// too large to cache, disable ranging
session.ignore_downstream_range = true;
}
}
// if the content-length header is not specified, the miss handler
// will count the response size on the fly, aborting the request
// mid-transfer if the max file size is exceeded
}
}
if fill_cache {
let req_header = session.req_header();
// Update the variance in the meta via the same callback,
// cache_vary_filter(), used in cache lookup for consistency.
// Future cache lookups need a matching variance in the meta
// with the cache key to pick up the correct variance
let variance = self.inner.cache_vary_filter(&meta, ctx, req_header);
session.cache.set_cache_meta(meta);
session.cache.update_variance(variance);
// this sends the meta and header
session.cache.set_miss_handler().await?;
if session.cache.miss_body_reader().is_some() {
serve_from_cache.enable_miss();
}
if *end_stream {
session
.cache
.miss_handler()
.unwrap() // safe, it is set above
.write_body(Bytes::new(), true)
.await?;
session.cache.finish_miss_handler().await?;
}
}
}
Uncacheable(reason) => {
if !session.cache.bypassing() {
// mark as uncacheable, so we bypass cache next time
session.cache.response_became_uncacheable(reason);
}
session.cache.disable(reason);
}
}
}
HttpTask::Body(data, end_stream) => match data {
Some(d) => {
if session.cache.enabled() {
// TODO: do this async
// fail if writing the body would exceed the max_file_size_bytes
let body_size_allowed =
session.cache.track_body_bytes_for_max_file_size(d.len());
if !body_size_allowed {
debug!("chunked response exceeded max cache size, remembering that it is uncacheable");
session
.cache
.response_became_uncacheable(NoCacheReason::ResponseTooLarge);
return Error::e_explain(
ERR_RESPONSE_TOO_LARGE,
format!(
"writing data of size {} bytes would exceed max file size of {} bytes",
d.len(),
session.cache.max_file_size_bytes().expect("max file size bytes must be set to exceed size")
),
);
}
// this will panic if more data is sent after we see end_stream
// but should be impossible in real world
let miss_handler = session.cache.miss_handler().unwrap();
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | true |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-proxy/src/proxy_common.rs | pingora-proxy/src/proxy_common.rs | /// Possible downstream states during request multiplexing
#[derive(Debug, Clone, Copy)]
pub(crate) enum DownstreamStateMachine {
    /// more request (body) to read
    Reading,
    /// no more data to read
    ReadingFinished,
    /// downstream is already errored or closed
    Errored,
}
// `to_errored` consumes no `self` by convention; silence the clippy naming lint.
#[allow(clippy::wrong_self_convention)]
impl DownstreamStateMachine {
    /// Initial state: finished if the request body is already fully read.
    pub fn new(finished: bool) -> Self {
        match finished {
            true => Self::ReadingFinished,
            false => Self::Reading,
        }
    }
    /// Whether it is still OK to call read() for more data or wait on closing.
    pub fn can_poll(&self) -> bool {
        !self.is_errored()
    }
    /// True while more request body may still arrive.
    pub fn is_reading(&self) -> bool {
        matches!(self, Self::Reading)
    }
    /// True once no more request body will be read (finished or errored).
    pub fn is_done(&self) -> bool {
        !self.is_reading()
    }
    /// True if downstream already errored or closed.
    pub fn is_errored(&self) -> bool {
        matches!(self, Self::Errored)
    }
    /// Move the state machine to Finished state if `set` is true.
    pub fn maybe_finished(&mut self, set: bool) {
        if set {
            *self = Self::ReadingFinished
        }
    }
    /// Mark downstream as errored/closed.
    pub fn to_errored(&mut self) {
        *self = Self::Errored
    }
}
/// Tracks completion of the upstream response and the cached response
/// streams during request multiplexing.
#[derive(Debug, Clone, Copy)]
pub(crate) struct ResponseStateMachine {
    upstream_response_done: bool,
    cached_response_done: bool,
}
impl ResponseStateMachine {
    /// Start with only the upstream response pending (no cached response by
    /// default, so the cached side begins as done).
    pub fn new() -> Self {
        ResponseStateMachine {
            upstream_response_done: false,
            cached_response_done: true, // no cached response by default
        }
    }
    /// Both the upstream and the (optional) cached responses are complete.
    pub fn is_done(&self) -> bool {
        self.upstream_done() && self.cached_done()
    }
    pub fn upstream_done(&self) -> bool {
        self.upstream_response_done
    }
    pub fn cached_done(&self) -> bool {
        self.cached_response_done
    }
    /// A cached response is now expected; mark it pending.
    pub fn enable_cached_response(&mut self) {
        self.cached_response_done = false;
    }
    /// Latch the upstream response as done when `done` is true.
    pub fn maybe_set_upstream_done(&mut self, done: bool) {
        self.upstream_response_done |= done;
    }
    /// Latch the cached response as done when `done` is true.
    pub fn maybe_set_cache_done(&mut self, done: bool) {
        self.cached_response_done |= done;
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-proxy/src/proxy_purge.rs | pingora-proxy/src/proxy_purge.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::*;
use pingora_core::protocols::http::error_resp;
use std::borrow::Cow;
/// Outcome of attempting to purge an asset from the cache.
#[derive(Debug)]
pub enum PurgeStatus {
    /// Cache was not enabled, purge ineffectual.
    NoCache,
    /// Asset was found in cache (and presumably purged or being purged).
    Found,
    /// Asset was not found in cache.
    NotFound,
    /// Cache returned a purge error.
    /// Contains causing error in case it should affect the downstream response.
    Error(Box<Error>),
}
// Return a canned response to a purge request, based on whether the cache had the asset or not
// (or otherwise returned an error).
fn purge_response(purge_status: &PurgeStatus) -> Cow<'static, ResponseHeader> {
let resp = match purge_status {
PurgeStatus::NoCache => &*NOT_PURGEABLE,
PurgeStatus::Found => &*OK,
PurgeStatus::NotFound => &*NOT_FOUND,
PurgeStatus::Error(ref _e) => &*INTERNAL_ERROR,
};
Cow::Borrowed(resp)
}
// Build an empty-bodied purge response with the given status code.
fn gen_purge_response(code: u16) -> ResponseHeader {
    // `Some(3)` matches the three headers inserted below — presumably a
    // capacity hint for the header map; confirm against ResponseHeader::build.
    let mut resp_header = ResponseHeader::build(code, Some(3)).unwrap();
    resp_header
        .insert_header(header::SERVER, &SERVER_NAME[..])
        .unwrap();
    resp_header.insert_header(header::CONTENT_LENGTH, 0).unwrap();
    resp_header
        .insert_header(header::CACHE_CONTROL, "private, no-store")
        .unwrap();
    // TODO more headers?
    resp_header
}
// Canned responses shared by all purge requests; built lazily on first use.
static OK: Lazy<ResponseHeader> = Lazy::new(|| gen_purge_response(200));
static NOT_FOUND: Lazy<ResponseHeader> = Lazy::new(|| gen_purge_response(404));
// for when purge is sent to uncacheable assets
static NOT_PURGEABLE: Lazy<ResponseHeader> = Lazy::new(|| gen_purge_response(405));
// on cache storage or proxy error
static INTERNAL_ERROR: Lazy<ResponseHeader> = Lazy::new(|| error_resp::gen_error_response(500));
impl<SV, C> HttpProxy<SV, C>
where
    C: custom::Connector,
{
    /// Handle a purge request end to end: purge the cache entry, run the
    /// user's `purge_response_filter`, and write the response downstream.
    ///
    /// Returns `Some((downstream_reusable, error))`.
    pub(crate) async fn proxy_purge(
        &self,
        session: &mut Session,
        ctx: &mut SV::CTX,
    ) -> Option<(bool, Option<Box<Error>>)>
    where
        SV: ProxyHttp + Send + Sync,
        SV::CTX: Send + Sync,
    {
        let status = if !session.cache.enabled() {
            // cache was not enabled
            PurgeStatus::NoCache
        } else {
            match session.cache.purge().await {
                Ok(true) => PurgeStatus::Found,
                Ok(false) => PurgeStatus::NotFound,
                Err(e) => {
                    session.cache.disable(NoCacheReason::StorageError);
                    warn!(
                        "Fail to purge cache: {e}, {}",
                        self.inner.request_summary(session, ctx)
                    );
                    PurgeStatus::Error(e)
                }
            }
        };
        // Let the user filter adjust (or replace) the canned response.
        let mut resp = purge_response(&status);
        if let Err(e) = self
            .inner
            .purge_response_filter(session, ctx, status, &mut resp)
        {
            error!(
                "Failed purge response filter: {e}, {}",
                self.inner.request_summary(session, ctx)
            );
            resp = Cow::Borrowed(&*INTERNAL_ERROR)
        }
        let written = match resp {
            Cow::Borrowed(r) => session.as_mut().write_response_header_ref(r).await,
            Cow::Owned(r) => session.as_mut().write_response_header(Box::new(r)).await,
        };
        Some(match written {
            Ok(_) => (true, None),
            // dirty, not reusable
            Err(e) => {
                let e = e.into_down();
                error!(
                    "Failed to send purge response: {e}, {}",
                    self.inner.request_summary(session, ctx)
                );
                (false, Some(e))
            }
        })
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-proxy/src/proxy_h2.rs | pingora-proxy/src/proxy_h2.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use futures::future::OptionFuture;
use futures::StreamExt;
use super::*;
use crate::proxy_cache::{range_filter::RangeBodyFilter, ServeFromCache};
use crate::proxy_common::*;
use http::{header::CONTENT_LENGTH, Method, StatusCode};
use pingora_cache::CachePhase;
use pingora_core::protocols::http::custom::CUSTOM_MESSAGE_QUEUE_SIZE;
use pingora_core::protocols::http::v2::{client::Http2Session, write_body};
// add scheme and authority as required by h2 lib
fn update_h2_scheme_authority(
    header: &mut http::request::Parts,
    raw_host: &[u8],
    tls: bool,
) -> Result<()> {
    // Drop anything after a second port separator; IPv6 literals are left
    // untouched.
    fn sanitize_authority(s: &str) -> &str {
        if s.starts_with('[') {
            // don't mess with ipv6 host
            return s;
        }
        let Some(colon) = s.find(':') else {
            return s;
        };
        if s.len() == colon + 1 {
            // colon is the last char, ignore
            return s;
        }
        match s[colon + 1..].find(':') {
            // try to get rid of extra port numbers
            Some(extra) => &s[..colon + 1 + extra],
            None => s,
        }
    }
    let Ok(host) = std::str::from_utf8(raw_host) else {
        return Error::e_explain(
            InvalidHTTPHeader,
            format!("invalid authority from host {:?}", raw_host),
        );
    };
    let authority = sanitize_authority(host);
    let scheme = if tls { "https" } else { "http" };
    let uri = http::uri::Builder::new()
        .scheme(scheme)
        .authority(authority)
        .path_and_query(header.uri.path_and_query().as_ref().unwrap().as_str())
        .build();
    match uri {
        Ok(uri) => {
            header.uri = uri;
            Ok(())
        }
        Err(_) => Error::e_explain(
            InvalidHTTPHeader,
            format!("invalid authority from host {}", authority),
        ),
    }
}
impl<SV, C> HttpProxy<SV, C>
where
C: custom::Connector,
{
    /// Forward the downstream request to the h2 upstream and drive the
    /// request-body/response duplex until both directions complete.
    pub(crate) async fn proxy_down_to_up(
        &self,
        session: &mut Session,
        client_session: &mut Http2Session,
        peer: &HttpPeer,
        ctx: &mut SV::CTX,
    ) -> (bool, Option<Box<Error>>)
    // (reuse_server, error)
    where
        SV: ProxyHttp + Send + Sync,
        SV::CTX: Send + Sync,
    {
        let mut req = session.req_header().clone();
        if req.version != Version::HTTP_2 {
            /* remove H1 specific headers */
            // https://github.com/hyperium/h2/blob/d3b9f1e36aadc1a7a6804e2f8e86d3fe4a244b4f/src/proto/streams/send.rs#L72
            req.remove_header(&http::header::TRANSFER_ENCODING);
            req.remove_header(&http::header::CONNECTION);
            req.remove_header(&http::header::UPGRADE);
            req.remove_header("keep-alive");
            req.remove_header("proxy-connection");
        }
        /* turn it into h2 */
        req.set_version(Version::HTTP_2);
        if session.cache.enabled() {
            // strip conditional/range headers etc. so the cached copy is the full asset
            pingora_cache::filters::upstream::request_filter(
                &mut req,
                session.cache.maybe_cache_meta(),
            );
            session.mark_upstream_headers_mutated_for_cache();
        }
        match self
            .inner
            .upstream_request_filter(session, &mut req, ctx)
            .await
        {
            Ok(_) => { /* continue */ }
            Err(e) => {
                return (false, Some(e));
            }
        }
        // Remove H1 `Host` header, save it in order to add to :authority
        // We do this because certain H2 servers expect request not to have a host header.
        // The `Host` is removed after the upstream filters above for 2 reasons
        // 1. there is no API to change the :authority header
        // 2. the filter code needs to be aware of the host vs :authority across http versions otherwise
        let host = req.remove_header(&http::header::HOST);
        session.upstream_compression.request_filter(&req);
        let body_empty = session.as_mut().is_body_empty();
        // whether we support sending END_STREAM on HEADERS if body is empty
        let send_end_stream = req.send_end_stream().expect("req must be h2");
        let mut req: http::request::Parts = req.into();
        // H2 requires authority to be set, so copy that from H1 host if that is set
        if let Some(host) = host {
            if let Err(e) = update_h2_scheme_authority(&mut req, host.as_bytes(), peer.is_tls()) {
                return (false, Some(e));
            }
        }
        debug!("Request to h2: {req:?}");
        // send END_STREAM on HEADERS
        let send_header_eos = send_end_stream && body_empty;
        debug!("send END_STREAM on HEADERS: {send_end_stream}");
        let req = Box::new(RequestHeader::from(req));
        if let Err(e) = client_session.write_request_header(req, send_header_eos) {
            return (false, Some(e.into_up()));
        }
        if !send_end_stream && body_empty {
            // send END_STREAM on empty DATA frame
            match client_session.write_request_body(Bytes::new(), true).await {
                Ok(()) => debug!("sent empty DATA frame to h2"),
                Err(e) => {
                    return (false, Some(e.into_up()));
                }
            }
        }
        client_session.read_timeout = peer.options.read_timeout;
        // Temporarily take the custom message writer; it is restored after the
        // duplex loop below finishes.
        let mut downstream_custom_message_writer = session
            .downstream_session
            .as_custom_mut()
            .and_then(|c| c.take_custom_message_writer());
        // take the body writer out of the client for easy duplex
        let mut client_body = client_session
            .take_request_body_writer()
            .expect("already send request header");
        // need to get the write_timeout here since we pass the h2 SendStream
        // directly to bidirection_down_to_up
        let write_timeout = peer.options.write_timeout;
        let (tx, rx) = mpsc::channel::<HttpTask>(TASK_BUFFER_SIZE);
        session.as_mut().enable_retry_buffering();
        /* read downstream body and upstream response at the same time */
        let ret = tokio::try_join!(
            self.bidirection_down_to_up(
                session,
                &mut client_body,
                rx,
                ctx,
                write_timeout,
                &mut downstream_custom_message_writer
            ),
            pipe_up_to_down_response(client_session, tx)
        );
        if let Some(custom_session) = session.downstream_session.as_custom_mut() {
            // put the writer back so later phases of the session can use it
            match custom_session.restore_custom_message_writer(
                downstream_custom_message_writer.expect("downstream be present"),
            ) {
                Ok(_) => { /* continue */ }
                Err(e) => {
                    return (false, Some(e));
                }
            }
        }
        match ret {
            Ok((downstream_can_reuse, _upstream)) => (downstream_can_reuse, None),
            Err(e) => (false, Some(e)),
        }
    }
pub(crate) async fn proxy_to_h2_upstream(
&self,
session: &mut Session,
client_session: &mut Http2Session,
reused: bool,
peer: &HttpPeer,
ctx: &mut SV::CTX,
) -> (bool, Option<Box<Error>>)
where
SV: ProxyHttp + Send + Sync,
SV::CTX: Send + Sync,
{
#[cfg(windows)]
let raw = client_session.fd() as std::os::windows::io::RawSocket;
#[cfg(unix)]
let raw = client_session.fd();
if let Err(e) = self
.inner
.connected_to_upstream(session, reused, peer, raw, client_session.digest(), ctx)
.await
{
return (false, Some(e));
}
let (server_session_reuse, error) = self
.proxy_down_to_up(session, client_session, peer, ctx)
.await;
// Record upstream response body bytes received (HTTP/2 DATA payload).
let upstream_bytes_total = client_session.body_bytes_received();
session.set_upstream_body_bytes_received(upstream_bytes_total);
(server_session_reuse, error)
}
    // returns whether server (downstream) session can be reused
    //
    // The h2 duplex loop. Concurrently:
    //  - reads the downstream request body and forwards it upstream,
    //  - drains upstream response tasks from `rx` and writes them downstream,
    //  - serves/fills cache via `serve_from_cache`, and
    //  - shuttles custom messages in both directions when supported.
    async fn bidirection_down_to_up(
        &self,
        session: &mut Session,
        client_body: &mut h2::SendStream<bytes::Bytes>,
        mut rx: mpsc::Receiver<HttpTask>,
        ctx: &mut SV::CTX,
        write_timeout: Option<Duration>,
        downstream_custom_message_writer: &mut Option<Box<dyn CustomMessageWrite>>,
    ) -> Result<bool>
    where
        SV: ProxyHttp + Send + Sync,
        SV::CTX: Send + Sync,
    {
        // setup custom message forwarding, if downstream supports it
        let (
            mut downstream_custom_read,
            mut downstream_custom_write,
            downstream_custom_message_custom_forwarding,
            mut downstream_custom_message_inject_rx,
            mut downstream_custom_message_reader,
        ) = if downstream_custom_message_writer.is_some() {
            let reader = session.downstream_custom_message()?;
            let (inject_tx, inject_rx) = mpsc::channel::<Bytes>(CUSTOM_MESSAGE_QUEUE_SIZE);
            (true, true, Some(inject_tx), Some(inject_rx), reader)
        } else {
            (false, false, None, None, None)
        };
        if let Some(custom_forwarding) = downstream_custom_message_custom_forwarding {
            self.inner
                .custom_forwarding(session, ctx, None, custom_forwarding)
                .await?;
        }
        let mut downstream_state = DownstreamStateMachine::new(session.as_mut().is_body_done());
        // retry, send buffer if it exists
        if let Some(buffer) = session.as_mut().get_retry_buffer() {
            self.send_body_to2(
                session,
                Some(buffer),
                downstream_state.is_done(),
                client_body,
                ctx,
                write_timeout,
            )
            .await?;
        }
        let mut response_state = ResponseStateMachine::new();
        // these two below can be wrapped into an internal ctx
        // use cache when upstream revalidates (or TODO: error)
        let mut serve_from_cache = ServeFromCache::new();
        let mut range_body_filter = proxy_cache::range_filter::RangeBodyFilter::new();
        /* duplex mode
         * see the Same function for h1 for more comments
         */
        // Loop until the request body, the response(s), and any custom message
        // streams are all finished.
        while !downstream_state.is_done()
            || !response_state.is_done()
            || downstream_custom_read && !downstream_state.is_errored()
            || downstream_custom_write
        {
            // Use optional futures to allow using optional channels in select branches
            let custom_inject_rx_recv: OptionFuture<_> = downstream_custom_message_inject_rx
                .as_mut()
                .map(|rx| rx.recv())
                .into();
            let custom_reader_next: OptionFuture<_> = downstream_custom_message_reader
                .as_mut()
                .map(|reader| reader.next())
                .into();
            // partial read support, this check will also be false if cache is disabled.
            let support_cache_partial_read =
                session.cache.support_streaming_partial_write() == Some(true);
            // Similar logic in h1 need to reserve capacity first to avoid deadlock
            // But we don't need to do the same because the h2 client_body pipe is unbounded (never block)
            tokio::select! {
                // NOTE: cannot avoid this copy since h2 owns the buf
                body = session.downstream_session.read_body_or_idle(downstream_state.is_done()), if downstream_state.can_poll() => {
                    debug!("downstream event");
                    let body = match body {
                        Ok(b) => b,
                        Err(e) => {
                            let wait_for_cache_fill = (!serve_from_cache.is_on() && support_cache_partial_read)
                                || serve_from_cache.is_miss();
                            if wait_for_cache_fill {
                                // ignore downstream error so that upstream can continue to write cache
                                downstream_state.to_errored();
                                warn!(
                                    "Downstream Error ignored during caching: {}, {}",
                                    e,
                                    self.inner.request_summary(session, ctx)
                                );
                                continue;
                            } else {
                                return Err(e.into_down());
                            }
                        }
                    };
                    let is_body_done = session.is_body_done();
                    match self.send_body_to2(session, body, is_body_done, client_body, ctx, write_timeout).await {
                        Ok(request_done) => {
                            downstream_state.maybe_finished(request_done);
                        },
                        Err(e) => {
                            // mark request done, attempt to drain receive
                            warn!("Upstream h2 body send error: {e}");
                            // upstream is what actually errored but we don't want to continue
                            // polling the downstream body
                            downstream_state.to_errored();
                        }
                    };
                },
                task = rx.recv(), if !response_state.upstream_done() => {
                    if let Some(t) = task {
                        debug!("upstream event: {:?}", t);
                        if serve_from_cache.should_discard_upstream() {
                            // just drain, do we need to do anything else?
                            continue;
                        }
                        // pull as many tasks as we can
                        let mut tasks = Vec::with_capacity(TASK_BUFFER_SIZE);
                        tasks.push(t);
                        // tokio::task::unconstrained because now_or_never may yield None when the future is ready
                        while let Some(maybe_task) = tokio::task::unconstrained(rx.recv()).now_or_never() {
                            if let Some(t) = maybe_task {
                                tasks.push(t);
                            } else {
                                break
                            }
                        }
                        /* run filters before sending to downstream */
                        let mut filtered_tasks = Vec::with_capacity(TASK_BUFFER_SIZE);
                        for mut t in tasks {
                            if self.revalidate_or_stale(session, &mut t, ctx).await {
                                serve_from_cache.enable();
                                response_state.enable_cached_response();
                                // skip downstream filtering entirely as the 304 will not be sent
                                break;
                            }
                            session.upstream_compression.response_filter(&mut t);
                            // check error and abort
                            // otherwise the error is surfaced via write_response_tasks()
                            if !serve_from_cache.should_send_to_downstream() {
                                if let HttpTask::Failed(e) = t {
                                    return Err(e);
                                }
                            }
                            filtered_tasks.push(
                                self.h2_response_filter(session, t, ctx,
                                    &mut serve_from_cache,
                                    &mut range_body_filter, false).await?);
                            if serve_from_cache.is_miss_header() {
                                response_state.enable_cached_response();
                            }
                        }
                        if !serve_from_cache.should_send_to_downstream() {
                            // TODO: need to derive response_done from filtered_tasks in case downstream failed already
                            continue;
                        }
                        let response_done = session.write_response_tasks(filtered_tasks).await?;
                        response_state.maybe_set_upstream_done(response_done);
                    } else {
                        // sender dropped: upstream pipe is finished
                        debug!("empty upstream event");
                        response_state.maybe_set_upstream_done(true);
                    }
                }
                task = serve_from_cache.next_http_task(&mut session.cache, &mut range_body_filter),
                    if !response_state.cached_done() && !downstream_state.is_errored() && serve_from_cache.is_on() => {
                    let task = self.h2_response_filter(session, task?, ctx,
                        &mut serve_from_cache,
                        &mut range_body_filter, true).await?;
                    debug!("serve_from_cache task {task:?}");
                    match session.write_response_tasks(vec![task]).await {
                        Ok(b) => response_state.maybe_set_cache_done(b),
                        Err(e) => if serve_from_cache.is_miss() {
                            // give up writing to downstream but wait for upstream cache write to finish
                            downstream_state.to_errored();
                            response_state.maybe_set_cache_done(true);
                            warn!(
                                "Downstream Error ignored during caching: {}, {}",
                                e,
                                self.inner.request_summary(session, ctx)
                            );
                            continue;
                        } else {
                            return Err(e);
                        }
                    }
                    if response_state.cached_done() {
                        if let Err(e) = session.cache.finish_hit_handler().await {
                            warn!("Error during finish_hit_handler: {}", e);
                        }
                    }
                }
                data = custom_reader_next, if downstream_custom_read && !downstream_state.is_errored() => {
                    // outer None: reader was not installed / stream exhausted
                    let Some(data) = data.flatten() else {
                        downstream_custom_read = false;
                        continue;
                    };
                    let data = match data {
                        Ok(data) => data,
                        Err(err) => {
                            warn!("downstream_custom_message_reader got error: {err}");
                            downstream_custom_read = false;
                            continue;
                        },
                    };
                    self.inner
                        .downstream_custom_message_proxy_filter(session, data, ctx, true) // true, because it's the last hop for downstream proxying
                        .await?;
                },
                data = custom_inject_rx_recv, if downstream_custom_write => {
                    match data.flatten() {
                        Some(data) => {
                            if let Some(ref mut custom_writer) = downstream_custom_message_writer {
                                custom_writer.write_custom_message(data).await?
                            }
                        },
                        None => {
                            // injector closed: flush/finish the custom stream
                            downstream_custom_write = false;
                            if let Some(ref mut custom_writer) = downstream_custom_message_writer {
                                custom_writer.finish_custom().await?;
                            }
                        },
                    }
                },
                else => {
                    break;
                }
            }
        }
        let mut reuse_downstream = !downstream_state.is_errored();
        if reuse_downstream {
            match session.as_mut().finish_body().await {
                Ok(_) => {
                    debug!("finished sending body to downstream");
                }
                Err(e) => {
                    error!("Error finish sending body to downstream: {}", e);
                    reuse_downstream = false;
                }
            }
        }
        Ok(reuse_downstream)
    }
    /// Run upstream / caching / downstream filters over a single `HttpTask`
    /// before it is written to downstream.
    ///
    /// `from_cache` marks tasks already produced from cache; those skip the
    /// upstream filter and cache admission steps.
    async fn h2_response_filter(
        &self,
        session: &mut Session,
        mut task: HttpTask,
        ctx: &mut SV::CTX,
        serve_from_cache: &mut ServeFromCache,
        range_body_filter: &mut RangeBodyFilter,
        from_cache: bool, // are the task from cache already
    ) -> Result<HttpTask>
    where
        SV: ProxyHttp + Send + Sync,
        SV::CTX: Send + Sync,
    {
        if !from_cache {
            if let Some(duration) = self.upstream_filter(session, &mut task, ctx).await? {
                trace!("delaying upstream response for {duration:?}");
                time::sleep(duration).await;
            }
            // cache the original response before any downstream transformation
            // requests that bypassed cache still need to run filters to see if the response has become cacheable
            if session.cache.enabled() || session.cache.bypassing() {
                if let Err(e) = self
                    .cache_http_task(session, &task, ctx, serve_from_cache)
                    .await
                {
                    session.cache.disable(NoCacheReason::StorageError);
                    if serve_from_cache.is_miss_body() {
                        // if the response stream cache body during miss but write fails, it has to
                        // give up the entire request
                        return Err(e);
                    } else {
                        // otherwise, continue processing the response
                        warn!(
                            "Fail to cache response: {}, {}",
                            e,
                            self.inner.request_summary(session, ctx)
                        );
                    }
                }
            }
            // skip the downstream filtering if these tasks are just for cache admission
            if !serve_from_cache.should_send_to_downstream() {
                return Ok(task);
            }
        } // else: cached/local response, no need to trigger upstream filters and caching
        // normally max file size is tracked in cache_http_task filters (when cache enabled),
        // we will track it in these filters before sending to downstream on specific conditions
        // when cache is disabled
        let track_max_cache_size = matches!(
            session.cache.phase(),
            CachePhase::Disabled(NoCacheReason::PredictedResponseTooLarge)
        );
        let res = match task {
            HttpTask::Header(mut header, eos) => {
                /* Downstream revalidation, only needed when cache is on because otherwise origin
                 * will handle it */
                if session.upstream_headers_mutated_for_cache() {
                    self.downstream_response_conditional_filter(
                        serve_from_cache,
                        session,
                        &mut header,
                        ctx,
                    );
                    if !session.ignore_downstream_range {
                        let range_type = self.inner.range_header_filter(session, &mut header, ctx);
                        range_body_filter.set(range_type);
                    }
                }
                self.inner
                    .response_filter(session, &mut header, ctx)
                    .await?;
                /* Downgrade the version so that write_response_header won't panic */
                header.set_version(Version::HTTP_11);
                // these status codes / method cannot have body, so no need to add chunked encoding
                let no_body = session.req_header().method == "HEAD"
                    || matches!(header.status.as_u16(), 204 | 304);
                /* Add chunked header to tell downstream to use chunked encoding
                 * during the absent of content-length in h2 */
                if !no_body
                    && !header.status.is_informational()
                    && header.headers.get(http::header::CONTENT_LENGTH).is_none()
                {
                    header.insert_header(http::header::TRANSFER_ENCODING, "chunked")?;
                }
                Ok(HttpTask::Header(header, eos))
            }
            HttpTask::Body(data, eos) => {
                if track_max_cache_size {
                    session
                        .cache
                        .track_body_bytes_for_max_file_size(data.as_ref().map_or(0, |d| d.len()));
                }
                // apply range filtering (if a range response is being served)
                let mut data = range_body_filter.filter_body(data);
                if let Some(duration) = self
                    .inner
                    .response_body_filter(session, &mut data, eos, ctx)?
                {
                    trace!("delaying downstream response for {duration:?}");
                    time::sleep(duration).await;
                }
                Ok(HttpTask::Body(data, eos))
            }
            HttpTask::Trailer(mut trailers) => {
                let trailer_buffer = match trailers.as_mut() {
                    Some(trailers) => {
                        debug!("Parsing response trailers..");
                        match self
                            .inner
                            .response_trailer_filter(session, trailers, ctx)
                            .await
                        {
                            Ok(buf) => buf,
                            Err(e) => {
                                error!(
                                    "Encountered error while filtering upstream trailers {:?}",
                                    e
                                );
                                None
                            }
                        }
                    }
                    _ => None,
                };
                // if we have a trailer buffer write it to the downstream response body
                if let Some(buffer) = trailer_buffer {
                    // write_body will not write additional bytes after reaching the content-length
                    // for gRPC H2 -> H1 this is not a problem but may be a problem for non gRPC code
                    // https://http2.github.io/http2-spec/#malformed
                    Ok(HttpTask::Body(Some(buffer), true))
                } else {
                    Ok(HttpTask::Trailer(trailers))
                }
            }
            HttpTask::Done => Ok(task),
            HttpTask::Failed(_) => Ok(task), // Do nothing just pass the error down
        };
        // On end, check if the response (based on file size) can be considered cacheable again
        if let Ok(task) = res.as_ref() {
            if track_max_cache_size
                && task.is_end()
                && !matches!(task, HttpTask::Failed(_))
                && !session.cache.exceeded_max_file_size()
            {
                session.cache.response_became_cacheable();
            }
        }
        res
    }
async fn send_body_to2(
&self,
session: &mut Session,
mut data: Option<Bytes>,
end_of_body: bool,
client_body: &mut h2::SendStream<bytes::Bytes>,
ctx: &mut SV::CTX,
write_timeout: Option<Duration>,
) -> Result<bool>
where
SV: ProxyHttp + Send + Sync,
SV::CTX: Send + Sync,
{
session
.downstream_modules_ctx
.request_body_filter(&mut data, end_of_body)
.await?;
self.inner
.request_body_filter(session, &mut data, end_of_body, ctx)
.await?;
/* it is normal to get 0 bytes because of multi-chunk parsing or request_body_filter.
* Although there is no harm writing empty byte to h2, unlike h1, we ignore it
* for consistency */
if !end_of_body && data.as_ref().is_some_and(|d| d.is_empty()) {
return Ok(false);
}
if let Some(data) = data {
debug!("Write {} bytes body to h2 upstream", data.len());
write_body(client_body, data, end_of_body, write_timeout)
.await
.map_err(|e| e.into_up())?;
} else {
debug!("Read downstream body done");
/* send a standalone END_STREAM flag */
write_body(client_body, Bytes::new(), true, write_timeout)
.await
.map_err(|e| e.into_up())?;
}
Ok(end_of_body)
}
}
/* Read response header, body and trailer from h2 upstream and send them to tx */
pub(crate) async fn pipe_up_to_down_response(
client: &mut Http2Session,
tx: mpsc::Sender<HttpTask>,
) -> Result<()> {
client
.read_response_header()
.await
.map_err(|e| e.into_up())?; // should we send the error as an HttpTask?
let resp_header = Box::new(client.response_header().expect("just read").clone());
match client.check_response_end_or_error() {
Ok(eos) => {
// XXX: the h2 crate won't check for content-length underflow
// if a header frame with END_STREAM is sent without data frames
// As stated by RFC, "204 or 304 responses contain no content,
// as does the response to a HEAD request"
// https://datatracker.ietf.org/doc/html/rfc9113#section-8.1.1
let req_header = client.request_header().expect("must have sent req");
if eos
&& req_header.method != Method::HEAD
&& resp_header.status != StatusCode::NO_CONTENT
&& resp_header.status != StatusCode::NOT_MODIFIED
// RFC technically allows for leading zeroes
// https://datatracker.ietf.org/doc/html/rfc9110#name-content-length
&& resp_header
.headers
.get(CONTENT_LENGTH)
.is_some_and(|cl| cl.as_bytes().iter().any(|b| *b != b'0'))
{
let _ = tx
.send(HttpTask::Failed(
Error::explain(H2Error, "non-zero content-length on EOS headers frame")
.into_up(),
))
.await;
return Ok(());
}
tx.send(HttpTask::Header(resp_header, eos))
.await
.or_err(InternalError, "sending h2 headers to pipe")?;
}
Err(e) => {
// If upstream errored, then push error to downstream and then quit
// Don't care if send fails (which means downstream already gone)
// we were still able to retrieve the headers, so try sending
let _ = tx.send(HttpTask::Header(resp_header, false)).await;
let _ = tx.send(HttpTask::Failed(e.into_up())).await;
return Ok(());
}
}
while let Some(chunk) = client
.read_response_body()
.await
.map_err(|e| e.into_up())
.transpose()
{
let data = match chunk {
Ok(d) => d,
Err(e) => {
// Push the error to downstream and then quit
let _ = tx.send(HttpTask::Failed(e.into_up())).await;
// Downstream should consume all remaining data and handle the error
return Ok(());
}
};
match client.check_response_end_or_error() {
Ok(eos) => {
let empty = data.is_empty();
if empty && !eos {
/* it is normal to get 0 bytes because of multi-chunk
* don't write 0 bytes to downstream since it will be
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | true |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-proxy/src/subrequest/mod.rs | pingora-proxy/src/subrequest/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use pingora_cache::lock::{CacheKeyLockImpl, LockStatus, WritePermit};
use pingora_cache::CacheKey;
use pingora_core::protocols::http::subrequest::server::{
HttpSession as SessionSubrequest, SubrequestHandle,
};
use std::any::Any;
struct LockCtx {
write_permit: WritePermit,
cache_lock: &'static CacheKeyLockImpl,
key: CacheKey,
}
/// Optional user-defined subrequest context.
pub type UserCtx = Box<dyn Any + Sync + Send>;
#[derive(Debug, Copy, Clone, Default, PartialEq, Eq)]
pub enum BodyMode {
/// No body to be sent for subrequest.
#[default]
NoBody,
/// Waiting on body if needed.
ExpectBody,
}
#[derive(Default)]
pub struct CtxBuilder {
lock: Option<LockCtx>,
body_mode: BodyMode,
user_ctx: Option<UserCtx>,
}
impl CtxBuilder {
pub fn new() -> Self {
Self {
lock: None,
body_mode: BodyMode::NoBody,
user_ctx: None,
}
}
pub fn cache_write_lock(
mut self,
cache_lock: &'static CacheKeyLockImpl,
key: CacheKey,
write_permit: WritePermit,
) -> Self {
self.lock = Some(LockCtx {
cache_lock,
key,
write_permit,
});
self
}
pub fn user_ctx(mut self, user_ctx: UserCtx) -> Self {
self.user_ctx = Some(user_ctx);
self
}
pub fn body_mode(mut self, body_mode: BodyMode) -> Self {
self.body_mode = body_mode;
self
}
pub fn build(self) -> Ctx {
Ctx {
lock: self.lock,
body_mode: self.body_mode,
user_ctx: self.user_ctx,
}
}
}
/// Context struct to share state across the parent and sub-request.
pub struct Ctx {
body_mode: BodyMode,
lock: Option<LockCtx>,
// User-defined custom context.
user_ctx: Option<UserCtx>,
}
impl Ctx {
/// Create a [`CtxBuilder`] in order to make a new subrequest `Ctx`.
pub fn builder() -> CtxBuilder {
CtxBuilder::new()
}
/// Get a reference to the extensions inside this subrequest.
pub fn user_ctx(&self) -> Option<&UserCtx> {
self.user_ctx.as_ref()
}
/// Get a mutable reference to the extensions inside this subrequest.
pub fn user_ctx_mut(&mut self) -> Option<&mut UserCtx> {
self.user_ctx.as_mut()
}
/// Release the write lock from the subrequest (to clean up a write permit
/// that will not be used in the cache key lock).
pub fn release_write_lock(&mut self) {
if let Some(lock) = self.lock.take() {
// If we are releasing the write lock in the subrequest,
// it means that the cache did not take it for whatever reason.
// TransientError will cause the election of a new writer
lock.cache_lock
.release(&lock.key, lock.write_permit, LockStatus::TransientError);
}
}
/// Take the write lock from the subrequest, for use in a cache key lock.
pub fn take_write_lock(&mut self) -> Option<WritePermit> {
// also clear out lock ctx
self.lock.take().map(|lock| lock.write_permit)
}
/// Get the `BodyMode` when this subrequest was created.
pub fn body_mode(&self) -> BodyMode {
self.body_mode
}
}
use crate::HttpSession;
pub(crate) fn create_session(parsed_session: &HttpSession) -> (HttpSession, SubrequestHandle) {
let (session, handle) = SessionSubrequest::new_from_session(parsed_session);
(HttpSession::new_subrequest(session), handle)
}
#[tokio::test]
async fn test_dummy_request() {
use tokio_test::io::Builder;
let input = b"GET / HTTP/1.1\r\n\r\n";
let mock_io = Builder::new().read(&input[..]).build();
let mut req = HttpSession::new_http1(Box::new(mock_io));
req.read_request().await.unwrap();
assert_eq!(input.as_slice(), req.to_h1_raw());
let (mut subreq, _handle) = create_session(&req);
subreq.read_request().await.unwrap();
assert_eq!(input.as_slice(), subreq.to_h1_raw());
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-proxy/tests/test_basic.rs | pingora-proxy/tests/test_basic.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod utils;
use bytes::Bytes;
use h2::client;
use http::Request;
use hyper::{body::HttpBody, header::HeaderValue, Body, Client};
#[cfg(unix)]
use hyperlocal::{UnixClientExt, Uri};
use reqwest::{header, StatusCode};
use tokio::net::TcpStream;
use utils::server_utils::init;
fn is_specified_port(port: u16) -> bool {
(1..65535).contains(&port)
}
#[tokio::test]
async fn test_origin_alive() {
init();
let res = reqwest::get("http://127.0.0.1:8000/").await.unwrap();
assert_eq!(res.status(), StatusCode::OK);
let headers = res.headers();
assert_eq!(headers[header::CONTENT_LENGTH], "13");
let body = res.text().await.unwrap();
assert_eq!(body, "Hello World!\n");
}
#[tokio::test]
async fn test_simple_proxy() {
init();
let res = reqwest::get("http://127.0.0.1:6147").await.unwrap();
assert_eq!(res.status(), StatusCode::OK);
let headers = res.headers();
assert_eq!(headers[header::CONTENT_LENGTH], "13");
assert_eq!(headers["x-server-addr"], "127.0.0.1:6147");
let sockaddr = headers["x-client-addr"]
.to_str()
.unwrap()
.parse::<std::net::SocketAddr>()
.unwrap();
assert_eq!(sockaddr.ip().to_string(), "127.0.0.1");
assert!(is_specified_port(sockaddr.port()));
assert_eq!(headers["x-upstream-server-addr"], "127.0.0.1:8000");
let sockaddr = headers["x-upstream-client-addr"]
.to_str()
.unwrap()
.parse::<std::net::SocketAddr>()
.unwrap();
assert_eq!(sockaddr.ip().to_string(), "127.0.0.2");
assert!(is_specified_port(sockaddr.port()));
let body = res.text().await.unwrap();
assert_eq!(body, "Hello World!\n");
}
#[tokio::test]
#[cfg(feature = "any_tls")]
async fn test_h2_to_h1() {
init();
let client = reqwest::Client::builder()
.danger_accept_invalid_certs(true)
.build()
.unwrap();
let res = client
.get("https://127.0.0.1:6150")
.header("sni", "openrusty.org")
.send()
.await
.unwrap();
assert_eq!(res.status(), reqwest::StatusCode::OK);
assert_eq!(res.version(), reqwest::Version::HTTP_2);
let headers = res.headers();
assert_eq!(headers[header::CONTENT_LENGTH], "13");
assert_eq!(headers["x-server-addr"], "127.0.0.1:6150");
let sockaddr = headers["x-client-addr"]
.to_str()
.unwrap()
.parse::<std::net::SocketAddr>()
.unwrap();
assert_eq!(sockaddr.ip().to_string(), "127.0.0.1");
assert!(is_specified_port(sockaddr.port()));
assert_eq!(headers["x-upstream-server-addr"], "127.0.0.1:8443");
let sockaddr = headers["x-upstream-client-addr"]
.to_str()
.unwrap()
.parse::<std::net::SocketAddr>()
.unwrap();
assert_eq!(sockaddr.ip().to_string(), "127.0.0.2");
assert!(is_specified_port(sockaddr.port()));
let body = res.text().await.unwrap();
assert_eq!(body, "Hello World!\n");
}
#[tokio::test]
#[cfg(feature = "any_tls")]
async fn test_h2_to_h2() {
init();
let client = reqwest::Client::builder()
.danger_accept_invalid_certs(true)
.build()
.unwrap();
let res = client
.get("https://127.0.0.1:6150")
.header("sni", "openrusty.org")
.header("x-h2", "true")
.send()
.await
.unwrap();
assert_eq!(res.status(), reqwest::StatusCode::OK);
assert_eq!(res.version(), reqwest::Version::HTTP_2);
let headers = res.headers();
assert_eq!(headers[header::CONTENT_LENGTH], "13");
assert_eq!(headers["x-server-addr"], "127.0.0.1:6150");
let sockaddr = headers["x-client-addr"]
.to_str()
.unwrap()
.parse::<std::net::SocketAddr>()
.unwrap();
assert_eq!(sockaddr.ip().to_string(), "127.0.0.1");
assert!(is_specified_port(sockaddr.port()));
assert_eq!(headers["x-upstream-server-addr"], "127.0.0.1:8443");
let sockaddr = headers["x-upstream-client-addr"]
.to_str()
.unwrap()
.parse::<std::net::SocketAddr>()
.unwrap();
assert_eq!(sockaddr.ip().to_string(), "127.0.0.2");
assert!(is_specified_port(sockaddr.port()));
let body = res.text().await.unwrap();
assert_eq!(body, "Hello World!\n");
}
#[tokio::test]
async fn test_h2c_to_h2c() {
init();
let client = hyper::client::Client::builder()
.http2_only(true)
.build_http();
let mut req = hyper::Request::builder()
.uri("http://127.0.0.1:6146")
.body(Body::empty())
.unwrap();
req.headers_mut()
.insert("x-h2", HeaderValue::from_bytes(b"true").unwrap());
let res = client.request(req).await.unwrap();
assert_eq!(res.status(), reqwest::StatusCode::OK);
assert_eq!(res.version(), reqwest::Version::HTTP_2);
let body = res.into_body().data().await.unwrap().unwrap();
assert_eq!(body.as_ref(), b"Hello World!\n");
}
#[tokio::test]
async fn test_h1_on_h2c_port() {
init();
let client = hyper::client::Client::builder()
.http2_only(false)
.build_http();
let mut req = hyper::Request::builder()
.uri("http://127.0.0.1:6146")
.body(Body::empty())
.unwrap();
req.headers_mut()
.insert("x-h2", HeaderValue::from_bytes(b"true").unwrap());
let res = client.request(req).await.unwrap();
assert_eq!(res.status(), reqwest::StatusCode::OK);
assert_eq!(res.version(), reqwest::Version::HTTP_11);
let body = res.into_body().data().await.unwrap().unwrap();
assert_eq!(body.as_ref(), b"Hello World!\n");
}
#[tokio::test]
#[cfg(feature = "openssl_derived")]
async fn test_h2_to_h2_host_override() {
init();
let client = reqwest::Client::builder()
.danger_accept_invalid_certs(true)
.build()
.unwrap();
let res = client
.get("https://127.0.0.1:6150")
.header("x-h2", "true")
.header("host-override", "test.com")
.send()
.await
.unwrap();
assert_eq!(res.status(), reqwest::StatusCode::OK);
assert_eq!(res.version(), reqwest::Version::HTTP_2);
let headers = res.headers();
assert_eq!(headers[header::CONTENT_LENGTH], "13");
let body = res.text().await.unwrap();
assert_eq!(body, "Hello World!\n");
}
#[tokio::test]
#[cfg(feature = "any_tls")]
async fn test_h2_to_h2_upload() {
init();
let client = reqwest::Client::builder()
.danger_accept_invalid_certs(true)
.build()
.unwrap();
let payload = "test upload";
let res = client
.get("https://127.0.0.1:6150/echo")
.header("sni", "openrusty.org")
.header("x-h2", "true")
.body(payload)
.send()
.await
.unwrap();
assert_eq!(res.status(), reqwest::StatusCode::OK);
assert_eq!(res.version(), reqwest::Version::HTTP_2);
let body = res.text().await.unwrap();
assert_eq!(body, payload);
}
#[tokio::test]
#[cfg(feature = "any_tls")]
async fn test_h2_to_h1_upload() {
init();
let client = reqwest::Client::builder()
.danger_accept_invalid_certs(true)
.build()
.unwrap();
let payload = "test upload";
let res = client
.get("https://127.0.0.1:6150/echo")
.header("sni", "openrusty.org")
.body(payload)
.send()
.await
.unwrap();
assert_eq!(res.status(), reqwest::StatusCode::OK);
assert_eq!(res.version(), reqwest::Version::HTTP_2);
let body = res.text().await.unwrap();
assert_eq!(body, payload);
}
#[tokio::test]
#[cfg(feature = "any_tls")]
async fn test_h2_head() {
init();
let client = reqwest::Client::builder()
.danger_accept_invalid_certs(true)
.build()
.unwrap();
let res = client
.head("https://127.0.0.1:6150/set_content_length")
.header("sni", "openrusty.org")
.header("x-h2", "true")
.header("x-set-content-length", "11")
.send()
.await
.unwrap();
assert_eq!(res.status(), reqwest::StatusCode::OK);
assert_eq!(res.version(), reqwest::Version::HTTP_2);
let body = res.text().await.unwrap();
// should not be any body, despite content-length
assert_eq!(body, "");
}
#[cfg(unix)]
#[tokio::test]
async fn test_simple_proxy_uds() {
init();
let url = Uri::new("/tmp/pingora_proxy.sock", "/").into();
let client = Client::unix();
let res = client.get(url).await.unwrap();
assert_eq!(res.status(), reqwest::StatusCode::OK);
let (resp, body) = res.into_parts();
let headers = &resp.headers;
assert_eq!(headers[header::CONTENT_LENGTH], "13");
assert_eq!(headers["x-server-addr"], "/tmp/pingora_proxy.sock");
assert_eq!(headers["x-client-addr"], "unset"); // unnamed UDS
assert_eq!(headers["x-upstream-server-addr"], "127.0.0.1:8000");
let sockaddr = headers["x-upstream-client-addr"]
.to_str()
.unwrap()
.parse::<std::net::SocketAddr>()
.unwrap();
assert_eq!(sockaddr.ip().to_string(), "127.0.0.2");
assert!(is_specified_port(sockaddr.port()));
let body = hyper::body::to_bytes(body).await.unwrap();
assert_eq!(body.as_ref(), b"Hello World!\n");
}
#[cfg(unix)]
#[tokio::test]
async fn test_simple_proxy_uds_peer() {
init();
let client = reqwest::Client::new();
let res = client
.get("http://127.0.0.1:6147")
.header("x-uds-peer", "1") // force upstream peer to be UDS
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
let headers = &res.headers();
assert_eq!(headers[header::CONTENT_LENGTH], "13");
assert_eq!(headers["x-server-addr"], "127.0.0.1:6147");
let sockaddr = headers["x-client-addr"]
.to_str()
.unwrap()
.parse::<std::net::SocketAddr>()
.unwrap();
assert_eq!(sockaddr.ip().to_string(), "127.0.0.1");
assert!(is_specified_port(sockaddr.port()));
assert_eq!(headers["x-upstream-client-addr"], "unset"); // unnamed UDS
assert_eq!(
headers["x-upstream-server-addr"],
"/tmp/pingora_nginx_test.sock"
);
let body = res.text().await.unwrap();
assert_eq!(body, "Hello World!\n");
}
async fn test_dropped_conn_get() {
init();
let client = reqwest::Client::new();
let port = "8001"; // special port to avoid unexpected connection reuse from other tests
for _ in 1..3 {
// load conns into pool
let res = client
.get("http://127.0.0.1:6147")
.header("x-port", port)
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
}
let res = client
.get("http://127.0.0.1:6147/bad_lb")
.header("x-port", port)
.send()
.await
.unwrap();
// retry gives 200
assert_eq!(res.status(), StatusCode::OK);
let body = res.text().await.unwrap();
assert_eq!(body, "dog!\n");
}
async fn test_dropped_conn_post_empty_body() {
init();
let client = reqwest::Client::new();
let port = "8001"; // special port to avoid unexpected connection reuse from other tests
for _ in 1..3 {
// load conn into pool
let res = client
.get("http://127.0.0.1:6147")
.header("x-port", port)
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
}
let res = client
.post("http://127.0.0.1:6147/bad_lb")
.header("x-port", port)
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
let body = res.text().await.unwrap();
assert_eq!(body, "dog!\n");
}
async fn test_dropped_conn_post_body() {
init();
let client = reqwest::Client::new();
let port = "8001"; // special port to avoid unexpected connection reuse from other tests
for _ in 1..3 {
// load conn into pool
let res = client
.get("http://127.0.0.1:6147")
.header("x-port", port)
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
}
let res = client
.post("http://127.0.0.1:6147/bad_lb")
.header("x-port", port)
.body("cat!")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
let body = res.text().await.unwrap();
assert_eq!(body, "cat!\n");
}
async fn test_dropped_conn_post_body_over() {
init();
let client = reqwest::Client::new();
let port = "8001"; // special port to avoid unexpected connection reuse from other tests
let large_body = String::from_utf8(vec![b'e'; 1024 * 64 + 1]).unwrap();
for _ in 1..3 {
// load conn into pool
let res = client
.get("http://127.0.0.1:6147")
.header("x-port", port)
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
}
let res = client
.post("http://127.0.0.1:6147/bad_lb")
.header("x-port", port)
.body(large_body)
.send()
.await
.unwrap();
// 502, body larger than buffer limit
assert_eq!(res.status(), StatusCode::from_u16(502).unwrap());
}
#[tokio::test]
async fn test_dropped_conn() {
// These tests can race with each other
// So force run them sequentially
test_dropped_conn_get().await;
test_dropped_conn_post_empty_body().await;
test_dropped_conn_post_body().await;
test_dropped_conn_post_body_over().await;
}
// currently not supported with Rustls implementation
#[cfg(feature = "openssl_derived")]
#[tokio::test]
async fn test_tls_no_verify() {
init();
let client = reqwest::Client::new();
let res = client
.get("http://127.0.0.1:6149/tls_verify")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
}
#[cfg(feature = "any_tls")]
#[tokio::test]
async fn test_tls_verify_sni_not_host() {
init();
let client = reqwest::Client::new();
let res = client
.get("http://127.0.0.1:6149/tls_verify")
.header("sni", "openrusty.org")
.header("verify", "1")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
}
// currently not supported with Rustls implementation
#[cfg(feature = "openssl_derived")]
#[tokio::test]
async fn test_tls_none_verify_host() {
init();
let client = reqwest::Client::new();
let res = client
.get("http://127.0.0.1:6149/tls_verify")
.header("verify", "1")
.header("verify_host", "1")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
}
#[cfg(feature = "any_tls")]
#[tokio::test]
async fn test_tls_verify_sni_host() {
init();
let client = reqwest::Client::new();
let res = client
.get("http://127.0.0.1:6149/tls_verify")
.header("sni", "openrusty.org")
.header("verify", "1")
.header("verify_host", "1")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
}
#[cfg(feature = "any_tls")]
#[tokio::test]
async fn test_tls_underscore_sub_sni_verify_host() {
init();
let client = reqwest::Client::new();
let res = client
.get("http://127.0.0.1:6149/tls_verify")
.header("sni", "d_g.openrusty.org")
.header("verify", "1")
.header("verify_host", "1")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
}
#[cfg(feature = "any_tls")]
#[tokio::test]
async fn test_tls_underscore_non_sub_sni_verify_host() {
init();
let client = reqwest::Client::new();
let res = client
.get("http://127.0.0.1:6149/tls_verify")
.header("sni", "open_rusty.org")
.header("verify", "1")
.header("verify_host", "1")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::BAD_GATEWAY);
let headers = res.headers();
assert_eq!(headers[header::CONNECTION], "close");
}
#[cfg(feature = "openssl_derived")]
#[tokio::test]
async fn test_tls_alt_verify_host() {
init();
let client = reqwest::Client::new();
let res = client
.get("http://127.0.0.1:6149/tls_verify")
.header("sni", "open_rusty.org")
.header("alt", "openrusty.org")
.header("verify", "1")
.header("verify_host", "1")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
}
#[cfg(feature = "openssl_derived")]
#[tokio::test]
async fn test_tls_underscore_sub_alt_verify_host() {
init();
let client = reqwest::Client::new();
let res = client
.get("http://127.0.0.1:6149/tls_verify")
.header("sni", "open_rusty.org")
.header("alt", "d_g.openrusty.org")
.header("verify", "1")
.header("verify_host", "1")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
}
#[cfg(feature = "any_tls")]
#[tokio::test]
async fn test_tls_underscore_non_sub_alt_verify_host() {
init();
let client = reqwest::Client::new();
let res = client
.get("http://127.0.0.1:6149/tls_verify")
.header("sni", "open_rusty.org")
.header("alt", "open_rusty.org")
.header("verify", "1")
.header("verify_host", "1")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::BAD_GATEWAY);
}
#[tokio::test]
async fn test_upstream_compression() {
init();
// disable reqwest gzip support to check compression headers and body
// otherwise reqwest will decompress and strip the headers
let client = reqwest::ClientBuilder::new().gzip(false).build().unwrap();
let res = client
.get("http://127.0.0.1:6147/no_compression")
.header("accept-encoding", "gzip")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(res.headers().get("Content-Encoding").unwrap(), "gzip");
let body = res.bytes().await.unwrap();
assert!(body.len() < 32);
// Next let reqwest decompress to validate the data
let client = reqwest::ClientBuilder::new().gzip(true).build().unwrap();
let res = client
.get("http://127.0.0.1:6147/no_compression")
.header("accept-encoding", "gzip")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
let body = res.bytes().await.unwrap();
assert_eq!(body.as_ref(), &[b'B'; 32]);
}
#[tokio::test]
async fn test_downstream_compression() {
init();
// disable reqwest gzip support to check compression headers and body
// otherwise reqwest will decompress and strip the headers
let client = reqwest::ClientBuilder::new().gzip(false).build().unwrap();
let res = client
.get("http://127.0.0.1:6147/no_compression")
// tell the test proxy to use downstream compression module instead of upstream
.header("x-downstream-compression", "1")
.header("accept-encoding", "gzip")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(res.headers().get("Content-Encoding").unwrap(), "gzip");
let body = res.bytes().await.unwrap();
assert!(body.len() < 32);
// Next let reqwest decompress to validate the data
let client = reqwest::ClientBuilder::new().gzip(true).build().unwrap();
let res = client
.get("http://127.0.0.1:6147/no_compression")
.header("accept-encoding", "gzip")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
let body = res.bytes().await.unwrap();
assert_eq!(body.as_ref(), &[b'B'; 32]);
}
#[tokio::test]
async fn test_connect_close() {
init();
// default keep-alive
let client = reqwest::ClientBuilder::new().build().unwrap();
let res = client.get("http://127.0.0.1:6147").send().await.unwrap();
assert_eq!(res.status(), StatusCode::OK);
let headers = res.headers();
assert_eq!(headers[header::CONTENT_LENGTH], "13");
assert_eq!(headers[header::CONNECTION], "keep-alive");
let body = res.text().await.unwrap();
assert_eq!(body, "Hello World!\n");
// close
let client = reqwest::ClientBuilder::new().build().unwrap();
let res = client
.get("http://127.0.0.1:6147")
.header("connection", "close")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
let headers = res.headers();
assert_eq!(headers[header::CONTENT_LENGTH], "13");
assert_eq!(headers[header::CONNECTION], "close");
let body = res.text().await.unwrap();
assert_eq!(body, "Hello World!\n");
}
#[tokio::test]
#[cfg(feature = "any_tls")]
async fn test_mtls_no_client_cert() {
init();
let client = reqwest::Client::new();
let res = client
.get("http://127.0.0.1:6149/tls_verify")
.header("x-port", "8444")
.header("sni", "openrusty.org")
.header("verify", "1")
.header("verify_host", "1")
.send()
.await
.unwrap();
// 400: because no cert
assert_eq!(res.status(), StatusCode::BAD_REQUEST);
}
#[cfg(feature = "any_tls")]
#[tokio::test]
async fn test_mtls_no_intermediate_cert() {
init();
let client = reqwest::Client::new();
let res = client
.get("http://127.0.0.1:6149/tls_verify")
.header("x-port", "8444")
.header("sni", "openrusty.org")
.header("verify", "1")
.header("verify_host", "1")
.header("client_cert", "1")
.send()
.await
.unwrap();
// 400: because no intermediate cert
assert_eq!(res.status(), StatusCode::BAD_REQUEST);
}
#[tokio::test]
#[cfg(feature = "any_tls")]
async fn test_mtls() {
init();
let client = reqwest::Client::new();
let res = client
.get("http://127.0.0.1:6149/")
.header("x-port", "8444")
.header("sni", "openrusty.org")
.header("verify", "1")
.header("verify_host", "1")
.header("client_cert", "1")
.header("client_intermediate", "1")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
}
#[cfg(feature = "any_tls")]
async fn assert_reuse(req: reqwest::RequestBuilder) {
req.try_clone().unwrap().send().await.unwrap();
let res = req.send().await.unwrap();
let headers = res.headers();
assert!(headers.get("x-conn-reuse").is_some());
}
#[cfg(feature = "any_tls")]
#[tokio::test]
async fn test_mtls_diff_cert_no_reuse() {
init();
let client = reqwest::Client::new();
let req = client
.get("http://127.0.0.1:6149/")
.header("x-port", "8444")
.header("sni", "openrusty.org")
.header("verify", "1")
.header("verify_host", "1")
.header("client_cert", "1")
.header("client_intermediate", "1");
// pre check re-use
assert_reuse(req).await;
// different cert no re-use
let res = client
.get("http://127.0.0.1:6149/")
.header("x-port", "8444")
.header("sni", "openrusty.org")
.header("verify", "1")
.header("verify_host", "1")
.header("client_cert", "2")
.header("client_intermediate", "1")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
let headers = res.headers();
assert!(headers.get("x-conn-reuse").is_none());
}
#[cfg(feature = "any_tls")]
#[tokio::test]
async fn test_tls_diff_verify_no_reuse() {
init();
let client = reqwest::Client::new();
let req = client
.get("http://127.0.0.1:6149/")
.header("sni", "dog.openrusty.org")
.header("verify", "1");
// pre check re-use
assert_reuse(req).await;
// disable 'verify' no re-use
let res = client
.get("http://127.0.0.1:6149/")
.header("sni", "dog.openrusty.org")
.header("verify", "0")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
let headers = res.headers();
assert!(headers.get("x-conn-reuse").is_none());
}
#[cfg(feature = "any_tls")]
#[tokio::test]
async fn test_tls_diff_verify_host_no_reuse() {
init();
let client = reqwest::Client::new();
let req = client
.get("http://127.0.0.1:6149/")
.header("sni", "cat.openrusty.org")
.header("verify", "1")
.header("verify_host", "1");
// pre check re-use
assert_reuse(req).await;
// disable 'verify_host' no re-use
let res = client
.get("http://127.0.0.1:6149/")
.header("sni", "cat.openrusty.org")
.header("verify", "1")
.header("verify_host", "0")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
let headers = res.headers();
assert!(headers.get("x-conn-reuse").is_none());
}
#[cfg(feature = "any_tls")]
#[tokio::test]
async fn test_tls_diff_alt_cnt_no_reuse() {
init();
let client = reqwest::Client::new();
let req = client
.get("http://127.0.0.1:6149/")
.header("sni", "openrusty.org")
.header("alt", "cat.com")
.header("verify", "1")
.header("verify_host", "1");
// pre check re-use
assert_reuse(req).await;
// use alt-cn no reuse
let res = client
.get("http://127.0.0.1:6149/")
.header("sni", "openrusty.org")
.header("alt", "dog.com")
.header("verify", "1")
.header("verify_host", "1")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
let headers = res.headers();
assert!(headers.get("x-conn-reuse").is_none());
}
#[cfg(feature = "s2n")]
#[tokio::test]
async fn test_tls_psk() {
use crate::utils::server_utils::TEST_PSK_IDENTITY;
init();
let client = reqwest::Client::new();
let res = client
.get("http://127.0.0.1:6149/")
.header("sni", "openrusty.org")
.header("psk_identity", TEST_PSK_IDENTITY)
.header("x-port", "6151")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
}
#[cfg(feature = "s2n")]
#[tokio::test]
async fn test_tls_psk_invalid() {
init();
let client = reqwest::Client::new();
let res = client
.get("http://127.0.0.1:6149/")
.header("sni", "openrusty.org")
.header("psk_identity", "BAD_IDENTITY")
.header("x-port", "6151")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::BAD_GATEWAY);
}
#[tokio::test]
async fn test_error_before_headers_sent() {
init();
let url = "http://127.0.0.1:6146/sleep/test_error_before_headers_sent.txt";
let tcp = TcpStream::connect("127.0.0.1:6146").await.unwrap();
let (mut client, h2) = client::handshake(tcp).await.unwrap();
tokio::spawn(async move {
h2.await.unwrap();
});
let request = Request::builder()
.uri(url)
.header("x-set-sleep", "0")
.header("x-abort", "true")
.body(())
.unwrap();
let (response, mut _stream) = client.send_request(request, true).unwrap();
let response = response.await.unwrap();
let mut body = response.into_body();
while let Some(chunk) = body.data().await {
assert_eq!(chunk.unwrap(), Bytes::new());
}
}
#[tokio::test]
async fn test_error_after_headers_sent_rst_received() {
init();
let url = "http://127.0.0.1:6146/connection_die/test_error_after_headers_sent_rst_received.txt";
let tcp = TcpStream::connect("127.0.0.1:6146").await.unwrap();
let (mut client, h2) = client::handshake(tcp).await.unwrap();
tokio::spawn(async move {
h2.await.unwrap();
});
let request = Request::builder().uri(url).body(()).unwrap();
let (response, mut _stream) = client.send_request(request, true).unwrap();
let response = response.await.unwrap();
let mut body = response.into_body();
let chunk = body.data().await.unwrap();
assert_eq!(chunk.unwrap(), Bytes::from_static(b"AAAAA"));
let err = body.data().await.unwrap().err().unwrap();
assert_eq!(err.reason().unwrap(), h2::Reason::CANCEL);
}
#[tokio::test]
async fn test_103() {
init();
let res = reqwest::get("http://127.0.0.1:6147/103").await.unwrap();
assert_eq!(res.status(), StatusCode::OK);
let headers = res.headers();
assert_eq!(headers[header::CONTENT_LENGTH], "6");
let body = res.text().await.unwrap();
assert_eq!(body, "123456");
}
#[tokio::test]
async fn test_103_die() {
init();
let res = reqwest::get("http://127.0.0.1:6147/103-die").await.unwrap();
assert_eq!(res.status(), StatusCode::BAD_GATEWAY);
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-proxy/tests/test_upstream.rs | pingora-proxy/tests/test_upstream.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod utils;
use utils::server_utils::init;
use utils::websocket::WS_ECHO;
use futures::{SinkExt, StreamExt};
use reqwest::header::{HeaderName, HeaderValue};
use reqwest::{StatusCode, Version};
use std::time::Duration;
use tokio_tungstenite::tungstenite::{client::IntoClientRequest, Message};
#[tokio::test]
async fn test_ip_binding() {
init();
let res = reqwest::get("http://127.0.0.1:6147/client_ip")
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
let headers = res.headers();
assert_eq!(headers["x-client-ip"], "127.0.0.2");
}
#[tokio::test]
async fn test_duplex() {
init();
// NOTE: this doesn't really verify that we are in full duplex mode as reqwest
// won't allow us control when req body is sent
let client = reqwest::Client::new();
let res = client
.post("http://127.0.0.1:6147/duplex/")
.body("b".repeat(1024 * 1024)) // 1 MB upload
.timeout(Duration::from_secs(5))
.send()
.await
.unwrap();
let headers = res.headers();
assert_eq!(headers["Connection"], "keep-alive");
assert_eq!(res.status(), StatusCode::OK);
let body = res.text().await.unwrap();
assert_eq!(body.len(), 64 * 5);
}
#[tokio::test]
async fn test_connection_die() {
init();
let res = reqwest::get("http://127.0.0.1:6147/connection_die")
.await
.unwrap();
assert_eq!(res.status(), StatusCode::OK);
let body = res.text().await;
// reqwest doesn't allow us to inspect the partial body
assert!(body.is_err());
}
// The upstream kills the connection during a large upload; the downstream
// should still see the 200 that arrived first. Exercised over h1 then h2.
#[tokio::test]
async fn test_upload_connection_die() {
    init();
    for use_h2 in [false, true] {
        let mut builder = reqwest::Client::new()
            .post("http://127.0.0.1:6147/upload_connection_die/")
            .body("b".repeat(15 * 1024 * 1024)) // 15 MB upload
            .timeout(Duration::from_secs(5));
        if use_h2 {
            builder = builder.header("x-h2", "true");
        }
        let res = builder.send().await.unwrap();
        // should get 200 status before connection dies
        assert_eq!(res.status(), StatusCode::OK);
        let _ = res.text().await;
    }
}
// A full 15 MB upload completes and the expected response body is returned.
#[tokio::test]
async fn test_upload() {
    init();
    let payload = "b".repeat(15 * 1024 * 1024); // 15 MB upload
    let res = reqwest::Client::new()
        .post("http://127.0.0.1:6147/upload/")
        .body(payload)
        .timeout(Duration::from_secs(5))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.text().await.unwrap().len(), 64 * 5);
}
// When the upstream response completes before the downstream finished
// uploading its body, the proxy must mark the connection `Connection: close`
// (it cannot be reused since the request body was never fully consumed).
#[tokio::test]
async fn test_close_on_response_before_downstream_finish() {
    init();
    let client = reqwest::Client::new();
    let res = client
        .post("http://127.0.0.1:6147/test2")
        // test-only header that triggers the early-response path in the proxy
        .header("x-close-on-response-before-downstream-finish", "1")
        .body("b".repeat(15 * 1024 * 1024)) // 15 MB upload
        .timeout(Duration::from_secs(5))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    assert_eq!(headers["Connection"], "close");
    let body = res.text().await.unwrap();
    assert_eq!(body.len(), 11);
}
// WebSocket proxying: verify connection shutdown propagates through the proxy
// in all three scenarios — server graceful close, server abrupt close, and
// client-initiated close. The echo server (utils::websocket) interprets
// "graceful" as "send a Close frame" and "close" as "drop the TCP stream".
#[tokio::test]
async fn test_ws_server_ends_conn() {
    init();
    // make sure the lazily-started echo server is running
    let _ = *WS_ECHO;
    // server gracefully closes connection
    let mut req = "ws://127.0.0.1:6147".into_client_request().unwrap();
    req.headers_mut()
        .insert("x-port", HeaderValue::from_static("9283"));
    let (mut ws_stream, _) = tokio_tungstenite::connect_async(req).await.unwrap();
    // gracefully close connection
    ws_stream.send("test".into()).await.unwrap();
    ws_stream.next().await.unwrap().unwrap();
    ws_stream.send("graceful".into()).await.unwrap();
    let msg = ws_stream.next().await.unwrap().unwrap();
    // assert graceful close
    assert!(matches!(msg, Message::Close(None)));
    // test may hang here if downstream doesn't close when upstream does
    assert!(ws_stream.next().await.is_none());
    // server abruptly closes connection
    let mut req = "ws://127.0.0.1:6147".into_client_request().unwrap();
    req.headers_mut()
        .insert("x-port", HeaderValue::from_static("9283"));
    let (mut ws_stream, _) = tokio_tungstenite::connect_async(req).await.unwrap();
    // abrupt close connection (no WS Close frame from the server)
    ws_stream.send("close".into()).await.unwrap();
    // test will hang here if downstream doesn't close when upstream does
    assert!(ws_stream.next().await.unwrap().is_err());
    // client gracefully closes connection
    let mut req = "ws://127.0.0.1:6147".into_client_request().unwrap();
    req.headers_mut()
        .insert("x-port", HeaderValue::from_static("9283"));
    let (mut ws_stream, _) = tokio_tungstenite::connect_async(req).await.unwrap();
    ws_stream.send("test".into()).await.unwrap();
    // sender initiates close
    ws_stream.close(None).await.unwrap();
    let msg = ws_stream.next().await.unwrap().unwrap();
    // assert echo
    assert_eq!("test", msg.into_text().unwrap());
    let msg = ws_stream.next().await.unwrap().unwrap();
    // assert graceful close
    assert!(matches!(msg, Message::Close(None)));
    assert!(ws_stream.next().await.is_none());
}
// A slow-reading client should trip the proxy's write timeout and see the
// body stream error out. hyper is used (instead of reqwest) so body chunks
// can be polled manually after stalling.
#[tokio::test]
async fn test_download_timeout() {
    init();
    use hyper::body::HttpBody;
    use tokio::time::sleep;
    let client = hyper::Client::new();
    let uri: hyper::Uri = "http://127.0.0.1:6147/download/".parse().unwrap();
    let req = hyper::Request::builder()
        .uri(uri)
        // proxy write timeout (value presumably seconds — matches the 2s stall below)
        .header("x-write-timeout", "1")
        .body(hyper::Body::empty())
        .unwrap();
    let mut res = client.request(req).await.unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let mut err = false;
    // stall longer than the write timeout before starting to read the body
    sleep(Duration::from_secs(2)).await;
    while let Some(chunk) = res.body_mut().data().await {
        if chunk.is_err() {
            err = true;
        }
    }
    assert!(err);
}
// Same stall as test_download_timeout, but with a minimum-rate setting that
// overrides the write timeout — so the download must complete without error.
#[tokio::test]
async fn test_download_timeout_min_rate() {
    init();
    use hyper::body::HttpBody;
    use tokio::time::sleep;
    let client = hyper::Client::new();
    let uri: hyper::Uri = "http://127.0.0.1:6147/download/".parse().unwrap();
    let req = hyper::Request::builder()
        .uri(uri)
        .header("x-write-timeout", "1")
        // min rate takes precedence over the write timeout
        .header("x-min-rate", "10000")
        .body(hyper::Body::empty())
        .unwrap();
    let mut res = client.request(req).await.unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let mut err = false;
    // stall past the (overridden) write timeout
    sleep(Duration::from_secs(2)).await;
    while let Some(chunk) = res.body_mut().data().await {
        if chunk.is_err() {
            err = true;
        }
    }
    // no error as write timeout is overridden by min rate
    assert!(!err);
}
mod test_cache {
use super::*;
use std::str::FromStr;
use tokio::time::sleep;
// Basic cache lifecycle: miss -> hit (same cached object) -> expired once the
// 1-second TTL elapses. `x-epoch` changes whenever the origin regenerates the
// response, so equal epochs prove the same cached object was served.
#[tokio::test]
async fn test_basic_caching() {
    init();
    let url = "http://127.0.0.1:6148/unique/test_basic_caching/now";
    // first fetch misses and populates the cache
    let res = reqwest::get(url).await.unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    let cache_miss_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert_eq!(headers["x-cache-status"], "miss");
    assert_eq!(res.text().await.unwrap(), "hello world");
    // second fetch is a hit on the same object (same epoch)
    let res = reqwest::get(url).await.unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    let cache_hit_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert_eq!(headers["x-cache-status"], "hit");
    assert_eq!(res.text().await.unwrap(), "hello world");
    assert_eq!(cache_miss_epoch, cache_hit_epoch);
    sleep(Duration::from_millis(1100)).await; // ttl is 1
    // after TTL the entry is stale; origin is fetched again (newer epoch)
    let res = reqwest::get(url).await.unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    let cache_expired_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert_eq!(headers["x-cache-status"], "expired");
    assert_eq!(res.text().await.unwrap(), "hello world");
    assert!(cache_expired_epoch > cache_hit_epoch);
}
// PURGE removes a cached asset: first PURGE returns 200, a second PURGE 404,
// and the next GET is a miss again.
#[tokio::test]
async fn test_purge() {
    init();
    let url = "http://127.0.0.1:6148/unique/test_purge/test2";
    // Issue a PURGE request via the custom HTTP method.
    async fn send_purge(url: &str) -> reqwest::Response {
        reqwest::Client::builder()
            .build()
            .unwrap()
            .request(reqwest::Method::from_bytes(b"PURGE").unwrap(), url)
            .send()
            .await
            .unwrap()
    }
    // populate the cache: first request misses
    let res = reqwest::get(url).await.unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.headers()["x-cache-status"], "miss");
    assert_eq!(res.text().await.unwrap(), "hello world");
    // second request is served from cache
    let res = reqwest::get(url).await.unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.headers()["x-cache-status"], "hit");
    assert_eq!(res.text().await.unwrap(), "hello world");
    // purge succeeds on the cached asset
    let res = send_purge(url).await;
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.text().await.unwrap(), "");
    // purging again: the asset is already gone
    let res = send_purge(url).await;
    assert_eq!(res.status(), StatusCode::NOT_FOUND);
    assert_eq!(res.text().await.unwrap(), "");
    // after the purge, the next fetch misses again
    let res = reqwest::get(url).await.unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.headers()["x-cache-status"], "miss");
    assert_eq!(res.text().await.unwrap(), "hello world");
}
// Conditional and range headers must be stripped when the request is converted
// into a cache miss sent upstream.
#[tokio::test]
async fn test_cache_miss_convert() {
    init();
    // if-* header is stripped
    let res = reqwest::Client::new()
        .get("http://127.0.0.1:6148/unique/test_cache_miss_convert/no_if_headers")
        .header("if-modified-since", "Wed, 19 Jan 2022 18:39:12 GMT")
        .send()
        .await
        .unwrap();
    // 200 because last-modified not returned from upstream
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.headers()["x-cache-status"], "miss");
    assert_eq!(res.text().await.unwrap(), "no if headers detected\n");
    // range header is stripped as well
    let res = reqwest::Client::new()
        .get("http://127.0.0.1:6148/unique/test_cache_miss_convert2/no_if_headers")
        .header("Range", "bytes=0-1")
        .send()
        .await
        .unwrap();
    // we have not implemented downstream range yet, it should be 206 once we have it
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.headers()["x-cache-status"], "miss");
    assert_eq!(res.text().await.unwrap(), "no if headers detected\n");
}
// HTTP/1.0 origin responses should still be cacheable, while the downstream
// side is served as HTTP/1.1 with a chunked body. Full lifecycle is checked:
// miss -> hit (same epoch) -> expired after the 1s TTL.
#[tokio::test]
async fn test_cache_http10() {
    // allow caching http1.0 from origin, but proxy as h1.1 downstream
    init();
    let url = "http://127.0.0.1:6148/unique/test_cache_http10/now";
    let res = reqwest::Client::new()
        .get(url)
        .header("x-upstream-fake-http10", "1") // fake http1.0 in upstream response filter
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    // downstream sees 1.1 even though the origin spoke 1.0
    assert_eq!(res.version(), Version::HTTP_11);
    let headers = res.headers();
    let cache_miss_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    // the body is re-framed as chunked for the 1.1 downstream
    assert_eq!(headers["transfer-encoding"], "chunked");
    assert_eq!(headers["x-cache-status"], "miss");
    assert_eq!(res.text().await.unwrap(), "hello world");
    let res = reqwest::Client::new()
        .get(url)
        .header("x-upstream-fake-http10", "1")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.version(), Version::HTTP_11);
    let headers = res.headers();
    let cache_hit_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert_eq!(headers["transfer-encoding"], "chunked");
    assert_eq!(headers["x-cache-status"], "hit");
    assert_eq!(res.text().await.unwrap(), "hello world");
    // hit serves the same cached object
    assert_eq!(cache_miss_epoch, cache_hit_epoch);
    sleep(Duration::from_millis(1100)).await; // ttl is 1
    let res = reqwest::Client::new()
        .get(url)
        .header("x-upstream-fake-http10", "1")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.version(), Version::HTTP_11);
    let headers = res.headers();
    let cache_expired_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert_eq!(headers["transfer-encoding"], "chunked");
    assert_eq!(headers["x-cache-status"], "expired");
    assert_eq!(res.text().await.unwrap(), "hello world");
    // a fresh origin fetch produced a newer object
    assert!(cache_expired_epoch > cache_hit_epoch);
}
// Downstream (proxy-to-client) compression must be applied both when the
// response comes from upstream (miss) and when it is served from cache (hit).
#[tokio::test]
async fn test_cache_downstream_compression() {
    init();
    // disable reqwest gzip support to check compression headers and body
    // otherwise reqwest will decompress and strip the headers
    let client = reqwest::ClientBuilder::new().gzip(false).build().unwrap();
    let res = client
        .get("http://127.0.0.1:6148/unique/test_cache_downstream_compression/no_compression")
        .header("x-downstream-compression", "1")
        .header("accept-encoding", "gzip")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    assert_eq!(headers["Content-Encoding"], "gzip");
    assert_eq!(headers["x-cache-status"], "miss");
    let body = res.bytes().await.unwrap();
    // compressed body is smaller than the uncompressed payload
    assert!(body.len() < 32);
    // should also apply on hit
    let client = reqwest::ClientBuilder::new().gzip(false).build().unwrap();
    let res = client
        .get("http://127.0.0.1:6148/unique/test_cache_downstream_compression/no_compression")
        .header("x-downstream-compression", "1")
        .header("accept-encoding", "gzip")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    assert_eq!(headers["Content-Encoding"], "gzip");
    assert_eq!(headers["x-cache-status"], "hit");
    let body = res.bytes().await.unwrap();
    assert!(body.len() < 32);
}
// The proxy asks upstream for gzip but serves a decompressed body to the
// client (no Content-Encoding downstream). This must hold on miss, hit, and
// revalidated responses.
#[tokio::test]
async fn test_cache_downstream_decompression() {
    init();
    // disable reqwest gzip support to check compression headers and body
    // otherwise reqwest will decompress and strip the headers
    let client = reqwest::ClientBuilder::new().gzip(false).build().unwrap();
    let res = client
        .get("http://127.0.0.1:6148/unique/test_cache_downstream_decompression/gzip/index.html")
        .header("x-downstream-decompression", "1")
        .header("x-upstream-accept-encoding", "gzip")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    // upstream should have received gzip, should decompress for downstream
    assert_eq!(headers["received-accept-encoding"], "gzip");
    assert!(headers.get("Content-Encoding").is_none());
    assert_eq!(headers["x-cache-status"], "miss");
    let body = res.bytes().await.unwrap();
    assert_eq!(body, "Hello World!\n");
    // should also apply on hit
    let client = reqwest::ClientBuilder::new().gzip(false).build().unwrap();
    let res = client
        .get("http://127.0.0.1:6148/unique/test_cache_downstream_decompression/gzip/index.html")
        .header("x-downstream-decompression", "1")
        .header("x-upstream-accept-encoding", "gzip")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    assert!(headers.get("Content-Encoding").is_none());
    assert_eq!(headers["x-cache-status"], "hit");
    let body = res.bytes().await.unwrap();
    assert_eq!(body, "Hello World!\n");
    sleep(Duration::from_millis(1100)).await; // ttl is 1
    // should also apply on revalidated
    let client = reqwest::ClientBuilder::new().gzip(false).build().unwrap();
    let res = client
        .get("http://127.0.0.1:6148/unique/test_cache_downstream_decompression/gzip/index.html")
        .header("x-downstream-decompression", "1")
        .header("x-upstream-accept-encoding", "gzip")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    assert!(headers.get("Content-Encoding").is_none());
    assert_eq!(headers["x-cache-status"], "revalidated");
    let body = res.bytes().await.unwrap();
    assert_eq!(body, "Hello World!\n");
}
// If the origin connection dies mid-body, the truncated object must not be
// cached: a second request over the same asset should be a miss again (and
// fail mid-body the same way), never a hit on the partial object.
#[tokio::test]
async fn test_network_error_mid_response() {
    init();
    let url = "http://127.0.0.1:6148/sleep/test_network_error_mid_response.txt";
    let res = reqwest::Client::new()
        .get(url)
        .header("x-set-sleep", "0") // no need to sleep
        .header("x-set-body-sleep", "0.1") // pause the body a bit before abort
        .header("x-abort-body", "true") // this will tell origin to kill the conn right away
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), 200);
    // sleep just a little to make sure the req above gets the cache lock
    sleep(Duration::from_millis(50)).await;
    let headers = res.headers();
    assert_eq!(headers["x-cache-status"], "miss");
    // the connection dies
    assert!(res.text().await.is_err());
    let res = reqwest::Client::new()
        .get(url)
        .header("x-set-sleep", "0") // no need to sleep
        .header("x-set-body-sleep", "0.1") // pause the body a bit before abort
        .header("x-abort-body", "true") // this will tell origin to kill the conn right away
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), 200);
    // sleep just a little to make sure the req above gets the cache lock
    sleep(Duration::from_millis(50)).await;
    let headers = res.headers();
    // still a miss: the aborted first response was not admitted to cache
    assert_eq!(headers["x-cache-status"], "miss");
    // the connection dies
    assert!(res.text().await.is_err());
}
// Upstream revalidation: after the TTL expires the proxy revalidates with the
// origin; a 304 keeps serving the previously cached object (same epoch).
#[tokio::test]
async fn test_cache_upstream_revalidation() {
    init();
    let url = "http://127.0.0.1:6148/unique/test_upstream_revalidation/revalidate_now";
    // miss: full 200 fetched from origin
    let res = reqwest::get(url).await.unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    let cache_miss_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert_eq!(headers["x-cache-status"], "miss");
    assert_eq!(headers["x-upstream-status"], "200");
    assert_eq!(res.text().await.unwrap(), "hello world");
    // hit: no upstream contact at all
    let res = reqwest::get(url).await.unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    let cache_hit_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert_eq!(headers["x-cache-status"], "hit");
    assert!(headers.get("x-upstream-status").is_none());
    assert_eq!(res.text().await.unwrap(), "hello world");
    assert_eq!(cache_miss_epoch, cache_hit_epoch);
    sleep(Duration::from_millis(1100)).await; // ttl is 1
    // stale: proxy revalidates, origin answers 304
    let res = reqwest::get(url).await.unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    let cache_expired_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert_eq!(headers["x-cache-status"], "revalidated");
    assert_eq!(headers["x-upstream-status"], "304");
    assert_eq!(res.text().await.unwrap(), "hello world");
    // still the old object
    assert_eq!(cache_expired_epoch, cache_hit_epoch);
}
// During a 304 revalidation the headers from the 304 response are applied to
// the stored response — including multiple values of the same header name —
// without duplicating the originally cached value.
#[tokio::test]
async fn test_cache_upstream_revalidation_appends_headers() {
    init();
    let url = "http://127.0.0.1:6148/unique/test_cache_upstream_revalidation_appends_headers/cache_control";
    // miss: cache the response with a single cache-control value
    let res = reqwest::Client::new()
        .get(url)
        .header("set-cache-control", "public, max-age=1")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    assert_eq!(headers["x-cache-status"], "miss");
    assert_eq!(headers["x-upstream-status"], "200");
    assert_eq!(headers["cache-control"], "public, max-age=1");
    assert_eq!(headers.get_all("cache-control").into_iter().count(), 1);
    assert_eq!(res.text().await.unwrap(), "hello world");
    // hit: still exactly one cache-control value
    let res = reqwest::Client::new()
        .get(url)
        .header("set-cache-control", "public, max-age=1")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    assert_eq!(headers["x-cache-status"], "hit");
    assert!(headers.get("x-upstream-status").is_none());
    assert_eq!(headers.get_all("cache-control").into_iter().count(), 1);
    assert_eq!(res.text().await.unwrap(), "hello world");
    sleep(Duration::from_millis(1100)).await; // ttl is 1
    // revalidation: origin 304 now carries two cache-control values
    let res = reqwest::Client::new()
        .get(url)
        .header("set-cache-control", "public, max-age=1")
        .header("set-cache-control", "stale-while-revalidate=86400")
        .header("set-revalidated", "1")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    assert_eq!(headers["x-cache-status"], "revalidated");
    assert_eq!(headers["x-upstream-status"], "304");
    // both values from the 304 are present, exactly once each
    let mut cc = headers.get_all("cache-control").into_iter();
    assert_eq!(cc.next().unwrap(), "public, max-age=1");
    assert_eq!(cc.next().unwrap(), "stale-while-revalidate=86400");
    assert!(cc.next().is_none());
    assert_eq!(res.text().await.unwrap(), "hello world");
}
// `x-force-miss` must bypass a fresh cache entry and fetch from upstream.
#[tokio::test]
async fn test_force_miss() {
    init();
    // fixed typo "froce" -> "force" in the unique cache-key path; the path only
    // needs to be unique across tests, so behavior is unchanged
    let url = "http://127.0.0.1:6148/unique/test_force_miss/revalidate_now";
    // warm the cache (miss, origin 200)
    let res = reqwest::get(url).await.unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    let cache_miss_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert_eq!(headers["x-cache-status"], "miss");
    assert_eq!(headers["x-upstream-status"], "200");
    assert_eq!(res.text().await.unwrap(), "hello world");
    // confirm a normal hit on the same object (no upstream contact)
    let res = reqwest::get(url).await.unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    let cache_hit_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert_eq!(headers["x-cache-status"], "hit");
    assert!(headers.get("x-upstream-status").is_none());
    assert_eq!(res.text().await.unwrap(), "hello world");
    assert_eq!(cache_miss_epoch, cache_hit_epoch);
    // forced miss goes to upstream even though the entry is still fresh
    let res = reqwest::Client::new()
        .get(url)
        .header("x-force-miss", "1")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    assert_eq!(headers["x-cache-status"], "miss");
    assert_eq!(headers["x-upstream-status"], "200");
    assert_eq!(res.text().await.unwrap(), "hello world");
}
// `x-force-miss` also applies to a stale entry: instead of revalidating, the
// proxy fetches a brand-new object (different epoch).
#[tokio::test]
async fn test_force_miss_stale() {
    init();
    // fixed typo "froce" -> "force" in the unique cache-key path; the path only
    // needs to be unique across tests, so behavior is unchanged
    let url = "http://127.0.0.1:6148/unique/test_force_miss_stale/revalidate_now";
    // warm the cache (miss, origin 200)
    let res = reqwest::get(url).await.unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    let cache_miss_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert_eq!(headers["x-cache-status"], "miss");
    assert_eq!(headers["x-upstream-status"], "200");
    assert_eq!(res.text().await.unwrap(), "hello world");
    // confirm a normal hit on the same object
    let res = reqwest::get(url).await.unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    let cache_hit_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert_eq!(headers["x-cache-status"], "hit");
    assert!(headers.get("x-upstream-status").is_none());
    assert_eq!(res.text().await.unwrap(), "hello world");
    assert_eq!(cache_miss_epoch, cache_hit_epoch);
    sleep(Duration::from_millis(1100)).await; // ttl is 1
    // stale, but can be forced miss
    let res = reqwest::Client::new()
        .get(url)
        .header("x-force-miss", "1")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    assert_eq!(headers["x-cache-status"], "miss");
    assert_eq!(headers["x-upstream-status"], "200");
    // a new object was fetched, not the stale one revalidated
    let cache_miss_epoch2 = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert!(cache_miss_epoch != cache_miss_epoch2);
    assert_eq!(res.text().await.unwrap(), "hello world");
}
// `x-force-fresh` serves a stale cache entry as if it were still fresh: same
// epoch as the original object, no upstream contact.
#[tokio::test]
async fn test_force_fresh() {
    init();
    let url = "http://127.0.0.1:6148/unique/test_force_fresh/revalidate_now";
    // Warm the cache and remember the object's epoch.
    let first = reqwest::get(url).await.unwrap();
    assert_eq!(first.status(), StatusCode::OK);
    let miss_epoch = first.headers()["x-epoch"]
        .to_str()
        .unwrap()
        .parse::<f64>()
        .unwrap();
    assert_eq!(first.headers()["x-cache-status"], "miss");
    assert_eq!(first.headers()["x-upstream-status"], "200");
    assert_eq!(first.text().await.unwrap(), "hello world");
    // A normal second request hits the same object.
    let second = reqwest::get(url).await.unwrap();
    assert_eq!(second.status(), StatusCode::OK);
    let hit_epoch = second.headers()["x-epoch"]
        .to_str()
        .unwrap()
        .parse::<f64>()
        .unwrap();
    assert_eq!(second.headers()["x-cache-status"], "hit");
    assert!(second.headers().get("x-upstream-status").is_none());
    assert_eq!(second.text().await.unwrap(), "hello world");
    assert_eq!(miss_epoch, hit_epoch);
    sleep(Duration::from_millis(1100)).await; // ttl is 1
    // stale, but can be forced fresh
    let forced = reqwest::Client::new()
        .get(url)
        .header("x-force-fresh", "1")
        .send()
        .await
        .unwrap();
    assert_eq!(forced.status(), StatusCode::OK);
    assert_eq!(forced.headers()["x-cache-status"], "hit");
    assert!(!forced.headers().contains_key("x-upstream-status"));
    let forced_epoch = forced.headers()["x-epoch"]
        .to_str()
        .unwrap()
        .parse::<f64>()
        .unwrap();
    assert_eq!(miss_epoch, forced_epoch);
    assert_eq!(forced.text().await.unwrap(), "hello world");
}
// Downstream conditional requests (If-None-Match) must be answered correctly
// regardless of the cache phase: 304 on etag match for miss/hit/revalidated
// responses, full 200 when the condition does not match.
#[tokio::test]
async fn test_cache_downstream_revalidation_etag() {
    init();
    let url = "http://127.0.0.1:6148/unique/test_downstream_revalidation_etag/revalidate_now";
    let client = reqwest::Client::new();
    // MISS + 304
    let res = client
        .get(url)
        .header("If-None-Match", "\"abcd\", \"foobar\"") // "abcd" is the fixed etag of this
        // endpoint
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::NOT_MODIFIED);
    let headers = res.headers();
    let cache_miss_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert_eq!(headers["x-cache-status"], "miss");
    assert_eq!(res.text().await.unwrap(), ""); // 304 no body
    // HIT + 304
    let res = client
        .get(url)
        .header("If-None-Match", "\"abcd\", \"foobar\"")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::NOT_MODIFIED);
    let headers = res.headers();
    let cache_hit_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert_eq!(headers["x-cache-status"], "hit");
    assert_eq!(res.text().await.unwrap(), ""); // 304 no body
    assert_eq!(cache_miss_epoch, cache_hit_epoch);
    // HIT + 200 (condition passed)
    let res = client
        .get(url)
        .header("If-None-Match", "\"foobar\"")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let headers = res.headers();
    let cache_hit_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert_eq!(headers["x-cache-status"], "hit");
    assert_eq!(res.text().await.unwrap(), "hello world");
    assert_eq!(cache_miss_epoch, cache_hit_epoch);
    sleep(Duration::from_millis(1100)).await; // ttl is 1
    // revalidated + 304
    let res = client
        .get(url)
        .header("If-None-Match", "\"abcd\", \"foobar\"")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::NOT_MODIFIED);
    let headers = res.headers();
    let cache_expired_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
    assert_eq!(headers["x-cache-status"], "revalidated");
    assert_eq!(res.text().await.unwrap(), ""); // 304 no body
    // still the old object
    assert_eq!(cache_expired_epoch, cache_hit_epoch);
}
#[tokio::test]
async fn test_cache_downstream_revalidation_last_modified() {
init();
let url = "http://127.0.0.1:6148/unique/test_downstream_revalidation_last_modified/revalidate_now";
let client = reqwest::Client::new();
// MISS + 304
let res = client
.get(url)
.header("If-Modified-Since", "Tue, 03 May 2022 01:04:39 GMT") // fixed last-modified of
// the endpoint
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::NOT_MODIFIED);
let headers = res.headers();
let cache_miss_epoch = headers["x-epoch"].to_str().unwrap().parse::<f64>().unwrap();
assert_eq!(headers["x-cache-status"], "miss");
assert_eq!(res.text().await.unwrap(), ""); // 304 no body
// HIT + 304
let res = client
.get(url)
.header("If-Modified-Since", "Tue, 03 May 2022 01:11:39 GMT")
.send()
.await
.unwrap();
assert_eq!(res.status(), StatusCode::NOT_MODIFIED);
let headers = res.headers();
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | true |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-proxy/tests/utils/websocket.rs | pingora-proxy/tests/utils/websocket.rs | use std::{io::Error, thread, time::Duration};
use futures_util::{SinkExt, StreamExt};
use log::debug;
use once_cell::sync::Lazy;
use tokio::{
net::{TcpListener, TcpStream},
runtime::Builder,
};
pub static WS_ECHO: Lazy<bool> = Lazy::new(init);
/// Start the WebSocket echo server on a dedicated OS thread with its own
/// single-threaded tokio runtime, then return `true` (the value memoized by
/// `WS_ECHO` so this runs at most once).
fn init() -> bool {
    thread::spawn(move || {
        let runtime = Builder::new_current_thread()
            .thread_name("websocket echo")
            .enable_all()
            .build()
            .unwrap();
        runtime.block_on(async move {
            server("127.0.0.1:9283").await.unwrap();
        })
    });
    // fixed delay to let the listener bind before tests connect
    // NOTE(review): this is a startup race; a connect-retry loop would be more
    // robust — confirm 200ms suffices on slow CI machines
    thread::sleep(Duration::from_millis(200));
    true
}
/// Listen on `addr` and spawn an echo session per accepted TCP connection;
/// returns once `accept` fails.
async fn server(addr: &str) -> Result<(), Error> {
    let listener = TcpListener::bind(&addr).await.unwrap();
    loop {
        match listener.accept().await {
            Ok((stream, _peer)) => {
                tokio::spawn(handle_connection(stream));
            }
            Err(_) => break,
        }
    }
    Ok(())
}
/// Serve one WebSocket session: echo text frames back, with two magic payloads
/// controlling shutdown — "close" drops the TCP stream with no Close frame
/// (abrupt), "graceful" sends a proper WS Close frame first.
async fn handle_connection(stream: TcpStream) {
    let mut ws_stream = tokio_tungstenite::accept_async(stream).await.unwrap();
    while let Some(msg) = ws_stream.next().await {
        let msg = msg.unwrap();
        let echo = msg.clone();
        // non-text frames (binary/ping/pong/close) are not echoed
        if msg.is_text() {
            let data = msg.into_text().unwrap();
            if data.contains("close") {
                // abruptly close the stream without WS close;
                debug!("abrupt close");
                return;
            } else if data.contains("graceful") {
                debug!("graceful close");
                ws_stream.close(None).await.unwrap();
                // close() only sends frame
                return;
            } else {
                ws_stream.send(echo).await.unwrap();
            }
        }
    }
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-proxy/tests/utils/server_utils.rs | pingora-proxy/tests/utils/server_utils.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(feature = "any_tls")]
use super::cert;
use async_trait::async_trait;
use clap::Parser;
use http::header::{ACCEPT_ENCODING, CONTENT_LENGTH, TRANSFER_ENCODING, VARY};
use http::HeaderValue;
use log::error;
use once_cell::sync::Lazy;
use pingora_cache::cache_control::CacheControl;
use pingora_cache::hashtable::ConcurrentHashTable;
use pingora_cache::key::HashBinary;
use pingora_cache::lock::CacheKeyLockImpl;
use pingora_cache::{
eviction::simple_lru::Manager, filters::resp_cacheable, lock::CacheLock, predictor::Predictor,
set_compression_dict_path, CacheMeta, CacheMetaDefaults, CachePhase, MemCache, NoCacheReason,
RespCacheable,
};
use pingora_cache::{
CacheOptionOverrides, ForcedFreshness, HitHandler, PurgeType, VarianceBuilder,
};
use pingora_core::apps::{HttpServerApp, HttpServerOptions};
use pingora_core::modules::http::compression::ResponseCompression;
use pingora_core::protocols::{
http::error_resp::gen_error_response, l4::socket::SocketAddr, Digest,
};
use pingora_core::server::configuration::Opt;
use pingora_core::services::Service;
use pingora_core::upstreams::peer::HttpPeer;
use pingora_core::utils::tls::CertKey;
use pingora_error::{Error, ErrorSource, ErrorType::*, Result};
use pingora_http::{RequestHeader, ResponseHeader};
use pingora_proxy::{FailToProxy, ProxyHttp, Session};
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use std::thread;
use std::time::Duration;
/// Example `ProxyHttp` service that proxies to TLS upstreams (impl is gated
/// behind the `any_tls` feature below).
pub struct ExampleProxyHttps {}
// Pre-shared-key credentials used by the s2n PSK path in `upstream_peer`.
pub const TEST_PSK_IDENTITY: &str = "test-psk-identity";
pub const TEST_PSK_SECRET: &str = "i2Wx8jrYVi5Vt7HSL/fsk003+PnmfcFuwWMsUyQvcZ4=";
/// Per-request context shared by the example proxies.
#[allow(clippy::upper_case_acronyms)]
#[derive(Default)]
pub struct CTX {
    // whether the upstream connection was reused from the pool
    conn_reused: bool,
    // local (our side) address of the upstream connection
    upstream_client_addr: Option<SocketAddr>,
    // remote (origin side) address of the upstream connection
    upstream_server_addr: Option<SocketAddr>,
}
// Common logic for both ProxyHttp(s) types
// Record connection-reuse status and the upstream socket addresses into the
// per-request CTX (exposed later as headers by response_filter_common).
fn connected_to_upstream_common(
    reused: bool,
    digest: Option<&Digest>,
    ctx: &mut CTX,
) -> Result<()> {
    ctx.conn_reused = reused;
    let sock = digest
        .expect("upstream connector digest should be set for HTTP sessions")
        .socket_digest
        .as_ref()
        .expect("socket digest should be set for HTTP sessions");
    ctx.upstream_client_addr = sock.local_addr().cloned();
    ctx.upstream_server_addr = sock.peer_addr().cloned();
    Ok(())
}
/// Shared response filter: annotate the response with connection-reuse and
/// address headers so tests can observe how the request was routed.
fn response_filter_common(
    session: &mut Session,
    response: &mut ResponseHeader,
    ctx: &mut CTX,
) -> Result<()> {
    // flag upstream connection reuse (recorded in connected_to_upstream_common)
    if ctx.conn_reused {
        response.insert_header("x-conn-reuse", "1")?;
    }
    // downstream addresses come straight from the session
    let client_addr = session.client_addr();
    let server_addr = session.server_addr();
    response.insert_header(
        "x-client-addr",
        client_addr.map_or_else(|| "unset".into(), |a| a.to_string()),
    )?;
    response.insert_header(
        "x-server-addr",
        server_addr.map_or_else(|| "unset".into(), |a| a.to_string()),
    )?;
    // upstream addresses were captured in CTX when the connection was made
    response.insert_header(
        "x-upstream-client-addr",
        ctx.upstream_client_addr
            .as_ref()
            .map_or_else(|| "unset".into(), |a| a.to_string()),
    )?;
    response.insert_header(
        "x-upstream-server-addr",
        ctx.upstream_server_addr
            .as_ref()
            .map_or_else(|| "unset".into(), |a| a.to_string()),
    )?;
    Ok(())
}
#[async_trait]
#[cfg(feature = "any_tls")]
impl ProxyHttp for ExampleProxyHttps {
    type CTX = CTX;
    fn new_ctx(&self) -> Self::CTX {
        CTX::default()
    }

    /// Build the upstream TLS peer entirely from request headers: `x-port`
    /// (default 8443), `sni`, `alt` (alternative CN), `verify`/`verify_host`
    /// flags, client cert selection (`client_cert`, `client_intermediate`),
    /// an optional PSK (s2n only), and h2 opt-in via `x-h2`.
    async fn upstream_peer(
        &self,
        session: &mut Session,
        _ctx: &mut Self::CTX,
    ) -> Result<Box<HttpPeer>> {
        let session = session.as_downstream();
        let req = session.req_header();
        let port = req
            .headers
            .get("x-port")
            .map_or("8443", |v| v.to_str().unwrap());
        let sni = req.headers.get("sni").map_or("", |v| v.to_str().unwrap());
        let alt = req
            .headers
            .get("alt")
            .map(|v| v.to_str().unwrap().to_string());
        let client_cert = session.get_header_bytes("client_cert");
        let mut peer = Box::new(HttpPeer::new(
            format!("127.0.0.1:{port}"),
            true,
            sni.to_string(),
        ));
        peer.options.alternative_cn = alt;
        let verify = session.get_header_bytes("verify") == b"1";
        peer.options.verify_cert = verify;
        let verify_host = session.get_header_bytes("verify_host") == b"1";
        peer.options.verify_hostname = verify_host;
        // "1" selects the first leaf cert/key pair, "2" the second
        if matches!(client_cert, b"1" | b"2") {
            let (mut certs, key) = if client_cert == b"1" {
                (vec![cert::LEAF_CERT.clone()], cert::LEAF_KEY.clone())
            } else {
                (vec![cert::LEAF2_CERT.clone()], cert::LEAF2_KEY.clone())
            };
            // optionally present a chain by appending the intermediate
            if session.get_header_bytes("client_intermediate") == b"1" {
                certs.push(cert::INTERMEDIATE_CERT.clone());
            }
            #[cfg(feature = "s2n")]
            {
                // s2n takes a single combined PEM blob rather than a cert list
                let combined_pem = certs.into_iter().flatten().collect();
                peer.client_cert_key = Some(Arc::new(CertKey::new(combined_pem, key)));
            }
            #[cfg(not(feature = "s2n"))]
            {
                peer.client_cert_key = Some(Arc::new(CertKey::new(certs, key)));
            }
        }
        // PSK authentication using the shared test secret (s2n only)
        #[cfg(feature = "s2n")]
        if let Some(psk_identity) = req.headers.get("psk_identity") {
            use pingora_core::{
                protocols::tls::{Psk, PskConfig},
                tls::PskHmac,
            };
            let psk = Psk::new(
                psk_identity.to_str().unwrap().to_string(),
                TEST_PSK_SECRET.as_bytes().to_vec(),
                PskHmac::SHA256,
            );
            peer.options.psk = Some(Arc::new(PskConfig::new(vec![psk])));
        }
        if session.get_header_bytes("x-h2") == b"true" {
            // default is 1, 1
            peer.options.set_http_version(2, 2);
        }
        Ok(peer)
    }

    /// Delegate to the shared address/reuse header annotation.
    async fn response_filter(
        &self,
        session: &mut Session,
        upstream_response: &mut ResponseHeader,
        ctx: &mut Self::CTX,
    ) -> Result<()>
    where
        Self::CTX: Send + Sync,
    {
        response_filter_common(session, upstream_response, ctx)
    }

    /// Optionally override the Host header sent upstream via `host-override`.
    async fn upstream_request_filter(
        &self,
        session: &mut Session,
        req: &mut RequestHeader,
        _ctx: &mut Self::CTX,
    ) -> Result<()> {
        let host = session.get_header_bytes("host-override");
        if host != b"" {
            req.insert_header("host", host)?;
        }
        Ok(())
    }

    /// Capture reuse status and socket addresses for later exposure as headers.
    async fn connected_to_upstream(
        &self,
        _http_session: &mut Session,
        reused: bool,
        _peer: &HttpPeer,
        #[cfg(unix)] _fd: std::os::unix::io::RawFd,
        #[cfg(windows)] _sock: std::os::windows::io::RawSocket,
        digest: Option<&Digest>,
        ctx: &mut CTX,
    ) -> Result<()> {
        connected_to_upstream_common(reused, digest, ctx)
    }
}
/// Plain-HTTP test proxy; behavior is driven entirely by request headers.
pub struct ExampleProxyHttp {}
#[async_trait]
impl ProxyHttp for ExampleProxyHttp {
    type CTX = CTX;
    fn new_ctx(&self) -> Self::CTX {
        CTX::default()
    }
    // Runs before request_filter: picks downstream vs upstream compression
    // based on the "x-downstream-compression" test header.
    async fn early_request_filter(
        &self,
        session: &mut Session,
        _ctx: &mut Self::CTX,
    ) -> Result<()> {
        let req = session.req_header();
        let downstream_compression = req.headers.get("x-downstream-compression").is_some();
        if downstream_compression {
            session
                .downstream_modules_ctx
                .get_mut::<ResponseCompression>()
                .unwrap()
                .adjust_level(6);
        } else {
            // enable upstream compression for all requests by default
            session.upstream_compression.adjust_level(6);
        }
        Ok(())
    }
    // Applies per-request knobs from test headers:
    //   x-write-timeout: downstream write timeout in seconds
    //   x-min-rate:      minimum downstream send rate
    //   x-close-on-response-before-downstream-finish: close early flag
    //   x-downstream-compression: switch compression side (see early filter)
    async fn request_filter(&self, session: &mut Session, _ctx: &mut Self::CTX) -> Result<bool> {
        let req = session.req_header();
        let write_timeout = req
            .headers
            .get("x-write-timeout")
            .and_then(|v| v.to_str().ok().and_then(|v| v.parse().ok()));
        let min_rate = req
            .headers
            .get("x-min-rate")
            .and_then(|v| v.to_str().ok().and_then(|v| v.parse().ok()));
        let close_on_response_before_downstream_finish = req
            .headers
            .get("x-close-on-response-before-downstream-finish")
            .is_some();
        let downstream_compression = req.headers.get("x-downstream-compression").is_some();
        if !downstream_compression {
            // enable upstream compression for all requests by default
            session.upstream_compression.adjust_level(6);
            // also disable downstream compression in order to test the upstream one
            session
                .downstream_modules_ctx
                .get_mut::<ResponseCompression>()
                .unwrap()
                .adjust_level(0);
        }
        session.set_min_send_rate(min_rate);
        session.set_write_timeout(write_timeout.map(Duration::from_secs));
        session.set_close_on_response_before_downstream_finish(
            close_on_response_before_downstream_finish,
        );
        // false: do not short-circuit; continue proxying to the upstream.
        Ok(false)
    }
    async fn response_filter(
        &self,
        session: &mut Session,
        upstream_response: &mut ResponseHeader,
        ctx: &mut Self::CTX,
    ) -> Result<()> {
        response_filter_common(session, upstream_response, ctx)
    }
    // Plain (non-TLS) upstream on 127.0.0.1. "x-uds-peer" (unix only) routes
    // through the nginx unix socket instead; "x-port" overrides the default
    // port 8000; "x-h2" forces HTTP/2.
    async fn upstream_peer(
        &self,
        session: &mut Session,
        _ctx: &mut Self::CTX,
    ) -> Result<Box<HttpPeer>> {
        let req = session.req_header();
        #[cfg(unix)]
        if req.headers.contains_key("x-uds-peer") {
            return Ok(Box::new(HttpPeer::new_uds(
                "/tmp/pingora_nginx_test.sock",
                false,
                "".to_string(),
            )?));
        }
        let port = req
            .headers
            .get("x-port")
            .map_or("8000", |v| v.to_str().unwrap());
        let mut peer = Box::new(HttpPeer::new(
            format!("127.0.0.1:{port}"),
            false,
            "".to_string(),
        ));
        if session.get_header_bytes("x-h2") == b"true" {
            // default is 1, 1
            peer.options.set_http_version(2, 2);
        }
        Ok(peer)
    }
    async fn connected_to_upstream(
        &self,
        _http_session: &mut Session,
        reused: bool,
        _peer: &HttpPeer,
        #[cfg(unix)] _fd: std::os::unix::io::RawFd,
        #[cfg(windows)] _sock: std::os::windows::io::RawSocket,
        digest: Option<&Digest>,
        ctx: &mut CTX,
    ) -> Result<()> {
        connected_to_upstream_common(reused, digest, ctx)
    }
}
// Process-wide cache infrastructure shared by ExampleProxyCache, so cached
// entries, eviction weights and the lock table persist across requests.
static CACHE_BACKEND: Lazy<MemCache> = Lazy::new(MemCache::new);
// Default meta when the origin gives no explicit policy: fresh for 1 second.
const CACHE_DEFAULT: CacheMetaDefaults =
    CacheMetaDefaults::new(|_| Some(Duration::from_secs(1)), 1, 1);
static CACHE_PREDICTOR: Lazy<Predictor<32>> = Lazy::new(|| Predictor::new(5, None));
static EVICTION_MANAGER: Lazy<Manager> = Lazy::new(|| Manager::new(8192)); // 8192 bytes
// Cache lock with a 2-second age timeout, used when a request sets "x-lock".
static CACHE_LOCK: Lazy<Box<CacheKeyLockImpl>> =
    Lazy::new(|| CacheLock::new_boxed(std::time::Duration::from_secs(2)));
// Example of how one might restrict which fields can be varied on.
static CACHE_VARY_ALLOWED_HEADERS: Lazy<Option<HashSet<&str>>> =
    Lazy::new(|| Some(vec!["accept", "accept-encoding"].into_iter().collect()));
// #[allow(clippy::upper_case_acronyms)]
/// Per-request context for ExampleProxyCache.
pub struct CacheCTX {
    // Status code seen from the upstream; set in upstream_response_filter and
    // echoed back to the client as "x-upstream-status".
    upstream_status: Option<u16>,
}
/// Test proxy exercising the pingora-cache integration.
pub struct ExampleProxyCache {}
#[async_trait]
impl ProxyHttp for ExampleProxyCache {
    type CTX = CacheCTX;
    fn new_ctx(&self) -> Self::CTX {
        CacheCTX {
            upstream_status: None,
        }
    }
    // Opt into downstream (re)compression or decompression when the request
    // carries the corresponding test headers.
    async fn early_request_filter(
        &self,
        session: &mut Session,
        _ctx: &mut Self::CTX,
    ) -> Result<()> {
        if session
            .req_header()
            .headers
            .get("x-downstream-compression")
            .is_some()
        {
            session
                .downstream_modules_ctx
                .get_mut::<ResponseCompression>()
                .unwrap()
                .adjust_level(6);
        }
        if session
            .req_header()
            .headers
            .get("x-downstream-decompression")
            .is_some()
        {
            session
                .downstream_modules_ctx
                .get_mut::<ResponseCompression>()
                .unwrap()
                .adjust_decompression(true);
        }
        Ok(())
    }
    // Plain upstream; "x-port" overrides the default 8000, "x-h2" forces h2.
    async fn upstream_peer(
        &self,
        session: &mut Session,
        _ctx: &mut Self::CTX,
    ) -> Result<Box<HttpPeer>> {
        let req = session.req_header();
        let port = req
            .headers
            .get("x-port")
            .map_or("8000", |v| v.to_str().unwrap());
        let mut peer = Box::new(HttpPeer::new(
            format!("127.0.0.1:{}", port),
            false,
            "".to_string(),
        ));
        if session.get_header_bytes("x-h2") == b"true" {
            // default is 1, 1
            peer.options.set_http_version(2, 2);
        }
        Ok(peer)
    }
    // Enable caching for this request unless "x-bypass-cache" is set.
    // "x-eviction" and "x-lock" opt into the shared eviction manager / cache
    // lock; "x-cache-max-file-size-bytes" caps the cacheable body size.
    fn request_cache_filter(&self, session: &mut Session, _ctx: &mut Self::CTX) -> Result<()> {
        // TODO: only allow GET & HEAD
        if session.get_header_bytes("x-bypass-cache") != b"" {
            return Ok(());
        }
        // turn on eviction only for some requests to avoid interference across tests
        let eviction = session.req_header().headers.get("x-eviction").map(|_| {
            &*EVICTION_MANAGER as &'static (dyn pingora_cache::eviction::EvictionManager + Sync)
        });
        let lock = session
            .req_header()
            .headers
            .get("x-lock")
            .map(|_| CACHE_LOCK.as_ref());
        let mut overrides = CacheOptionOverrides::default();
        overrides.wait_timeout = Some(Duration::from_secs(2));
        session.cache.enable(
            &*CACHE_BACKEND,
            eviction,
            Some(&*CACHE_PREDICTOR),
            lock,
            Some(overrides),
        );
        if let Some(max_file_size_hdr) = session
            .req_header()
            .headers
            .get("x-cache-max-file-size-bytes")
        {
            let bytes = max_file_size_hdr
                .to_str()
                .unwrap()
                .parse::<usize>()
                .unwrap();
            session.cache.set_max_file_size_bytes(bytes);
        }
        Ok(())
    }
    // Let tests override freshness decisions on a cache hit via
    // "x-force-miss", "x-force-fresh" and "x-force-expire" headers.
    async fn cache_hit_filter(
        &self,
        session: &mut Session,
        _meta: &CacheMeta,
        _hit_handler: &mut HitHandler,
        is_fresh: bool,
        _ctx: &mut Self::CTX,
    ) -> Result<Option<ForcedFreshness>> {
        // allow test header to control force expiry/miss
        if session.get_header_bytes("x-force-miss") != b"" {
            return Ok(Some(ForcedFreshness::ForceMiss));
        }
        if !is_fresh {
            if session.get_header_bytes("x-force-fresh") != b"" {
                return Ok(Some(ForcedFreshness::ForceFresh));
            }
            // already expired
            return Ok(None);
        }
        if session.get_header_bytes("x-force-expire") != b"" {
            return Ok(Some(ForcedFreshness::ForceExpired));
        }
        Ok(None)
    }
    // Build the variance key from the origin's Vary response header, keeping
    // only header names present in CACHE_VARY_ALLOWED_HEADERS (if restricted).
    fn cache_vary_filter(
        &self,
        meta: &CacheMeta,
        _ctx: &mut Self::CTX,
        req: &RequestHeader,
    ) -> Option<HashBinary> {
        let mut key = VarianceBuilder::new();
        // Vary per header from origin. Target headers are de-duplicated by key logic.
        let vary_headers_lowercased: Vec<String> = meta
            .headers()
            .get_all(VARY)
            .iter()
            // Filter out any unparseable vary headers.
            .flat_map(|vary_header| vary_header.to_str().ok())
            .flat_map(|vary_header| vary_header.split(','))
            .map(|s| s.trim().to_lowercase())
            .filter(|header_name| {
                // Filter only for allowed headers, if restricted.
                CACHE_VARY_ALLOWED_HEADERS
                    .as_ref()
                    .map(|al| al.contains(header_name.as_str()))
                    .unwrap_or(true)
            })
            .collect();
        vary_headers_lowercased.iter().for_each(|header_name| {
            // Add this header and value to be considered in the variance key.
            // Missing request headers participate as an empty value.
            key.add_value(
                header_name,
                req.headers
                    .get(header_name)
                    .map(|v| v.as_bytes())
                    .unwrap_or(&[]),
            );
        });
        key.finalize()
    }
    // Forward a test-controlled Accept-Encoding to the upstream.
    async fn upstream_request_filter(
        &self,
        session: &mut Session,
        upstream_request: &mut RequestHeader,
        _ctx: &mut Self::CTX,
    ) -> Result<()> {
        if let Some(up_accept_encoding) = session
            .req_header()
            .headers
            .get("x-upstream-accept-encoding")
        {
            upstream_request.insert_header(&ACCEPT_ENCODING, up_accept_encoding)?;
        }
        Ok(())
    }
    // Standard cacheability decision from the response's Cache-Control,
    // falling back to CACHE_DEFAULT (1s TTL).
    fn response_cache_filter(
        &self,
        _session: &Session,
        resp: &ResponseHeader,
        _ctx: &mut Self::CTX,
    ) -> Result<RespCacheable> {
        let cc = CacheControl::from_resp_headers(resp);
        Ok(resp_cacheable(
            cc.as_ref(),
            resp.clone(),
            false,
            &CACHE_DEFAULT,
        ))
    }
    // Record the upstream status for later echoing; optionally rewrite the
    // response to look like it came from an HTTP/1.0 origin.
    async fn upstream_response_filter(
        &self,
        session: &mut Session,
        upstream_response: &mut ResponseHeader,
        ctx: &mut Self::CTX,
    ) -> Result<()> {
        ctx.upstream_status = Some(upstream_response.status.into());
        if session
            .req_header()
            .headers
            .contains_key("x-upstream-fake-http10")
        {
            // TODO to simulate an actual http1.0 origin
            upstream_response.set_version(http::Version::HTTP_10);
            upstream_response.remove_header(&CONTENT_LENGTH);
            upstream_response.remove_header(&TRANSFER_ENCODING);
        }
        Ok(())
    }
    // Surface cache state to the test client via x-cache-status,
    // x-cache-lock-time-ms and x-upstream-status response headers.
    async fn response_filter(
        &self,
        session: &mut Session,
        upstream_response: &mut ResponseHeader,
        ctx: &mut Self::CTX,
    ) -> Result<()>
    where
        Self::CTX: Send + Sync,
    {
        if session.cache.enabled() {
            match session.cache.phase() {
                CachePhase::Hit => upstream_response.insert_header("x-cache-status", "hit")?,
                CachePhase::Miss => upstream_response.insert_header("x-cache-status", "miss")?,
                CachePhase::Stale => upstream_response.insert_header("x-cache-status", "stale")?,
                CachePhase::StaleUpdating => {
                    upstream_response.insert_header("x-cache-status", "stale-updating")?
                }
                CachePhase::Expired => {
                    upstream_response.insert_header("x-cache-status", "expired")?
                }
                CachePhase::Revalidated | CachePhase::RevalidatedNoCache(_) => {
                    upstream_response.insert_header("x-cache-status", "revalidated")?
                }
                _ => upstream_response.insert_header("x-cache-status", "invalid")?,
            }
        } else {
            match session.cache.phase() {
                CachePhase::Disabled(NoCacheReason::Deferred) => {
                    upstream_response.insert_header("x-cache-status", "deferred")?;
                }
                _ => upstream_response.insert_header("x-cache-status", "no-cache")?,
            }
        }
        if let Some(d) = session.cache.lock_duration() {
            upstream_response.insert_header("x-cache-lock-time-ms", format!("{}", d.as_millis()))?
        }
        if let Some(up_stat) = ctx.upstream_status {
            upstream_response.insert_header("x-upstream-status", up_stat.to_string())?;
        }
        Ok(())
    }
    // Map proxy errors to a status code (0 means the downstream connection is
    // already dead, so no response is written) and attach cache-lock timing.
    async fn fail_to_proxy(
        &self,
        session: &mut Session,
        e: &Error,
        _ctx: &mut Self::CTX,
    ) -> FailToProxy
    where
        Self::CTX: Send + Sync,
    {
        // default OSS fail_to_proxy with added headers
        let code = match e.etype() {
            HTTPStatus(code) => *code,
            _ => {
                match e.esource() {
                    ErrorSource::Upstream => 502,
                    ErrorSource::Downstream => {
                        match e.etype() {
                            WriteError | ReadError | ConnectionClosed => {
                                /* conn already dead */
                                0
                            }
                            _ => 400,
                        }
                    }
                    ErrorSource::Internal | ErrorSource::Unset => 500,
                }
            }
        };
        if code > 0 {
            let mut resp = gen_error_response(code);
            // any relevant metadata headers to add
            if let Some(d) = session.cache.lock_duration() {
                resp.insert_header("x-cache-lock-time-ms", format!("{}", d.as_millis()))
                    .unwrap();
            }
            session
                .write_response_header(Box::new(resp), true)
                .await
                .unwrap_or_else(|e| {
                    error!("failed to send error response to downstream: {e}");
                });
        }
        FailToProxy {
            error_code: code,
            // default to no reuse, which is safest
            can_reuse_downstream: false,
        }
    }
    fn should_serve_stale(
        &self,
        _session: &mut Session,
        _ctx: &mut Self::CTX,
        error: Option<&Error>, // None when it is called during stale while revalidate
    ) -> bool {
        // enable serve stale while updating
        error.is_none_or(|e| e.esource() == &ErrorSource::Upstream)
    }
    // Treat the custom PURGE method as a cache purge request.
    fn is_purge(&self, session: &Session, _ctx: &Self::CTX) -> bool {
        session.req_header().method == "PURGE"
    }
}
/// Build and run every test proxy service; blocks forever on the server's
/// event loop (callers run it on a dedicated thread).
///
/// Listeners: 6146 h2c, 6147 plain HTTP (+ a unix socket on unix), 6148 cache
/// proxy, and — with any TLS backend — 6149/6150 for the HTTPS proxy and 6153
/// for the cache proxy over TLS.
fn test_main() {
    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
    let args: Vec<String> = vec![
        "pingora-proxy".into(),
        "-c".into(),
        "tests/pingora_conf.yaml".into(),
    ];
    let mut server = pingora_core::server::Server::new(Some(Opt::parse_from_args(args))).unwrap();
    server.bootstrap();

    // Plain HTTP proxy over TCP (and a unix domain socket where available).
    let mut http_service =
        pingora_proxy::http_proxy_service(&server.configuration, ExampleProxyHttp {});
    http_service.add_tcp("0.0.0.0:6147");
    #[cfg(unix)]
    http_service.add_uds("/tmp/pingora_proxy.sock", None);

    // Same proxy logic, but accepting cleartext HTTP/2 (h2c) downstream.
    let mut h2c_service =
        pingora_proxy::http_proxy_service(&server.configuration, ExampleProxyHttp {});
    let app = h2c_service.app_logic_mut().unwrap();
    let mut server_options = HttpServerOptions::default();
    server_options.h2c = true;
    app.server_options = Some(server_options);
    h2c_service.add_tcp("0.0.0.0:6146");

    // HTTPS proxy, only when a TLS backend is compiled in.
    let mut https_service_opt: Option<Box<dyn Service>> = None;
    #[cfg(feature = "any_tls")]
    {
        let mut https_service =
            pingora_proxy::http_proxy_service(&server.configuration, ExampleProxyHttps {});
        https_service.add_tcp("0.0.0.0:6149");
        let cert_path = format!("{}/tests/keys/server.crt", env!("CARGO_MANIFEST_DIR"));
        let key_path = format!("{}/tests/keys/key.pem", env!("CARGO_MANIFEST_DIR"));
        let mut tls_settings =
            pingora_core::listeners::tls::TlsSettings::intermediate(&cert_path, &key_path).unwrap();
        tls_settings.enable_h2();
        https_service.add_tls_with_settings("0.0.0.0:6150", None, tls_settings);
        https_service_opt = Some(Box::new(https_service));
    }

    // Cache-exercising proxy, plus a TLS listener when available.
    let mut cache_service =
        pingora_proxy::http_proxy_service(&server.configuration, ExampleProxyCache {});
    cache_service.add_tcp("0.0.0.0:6148");
    #[cfg(feature = "any_tls")]
    {
        let cert_path = format!("{}/tests/keys/server.crt", env!("CARGO_MANIFEST_DIR"));
        let key_path = format!("{}/tests/keys/key.pem", env!("CARGO_MANIFEST_DIR"));
        let mut tls_settings =
            pingora_core::listeners::tls::TlsSettings::intermediate(&cert_path, &key_path).unwrap();
        tls_settings.enable_h2();
        cache_service.add_tls_with_settings("0.0.0.0:6153", None, tls_settings);
    }

    let mut services: Vec<Box<dyn Service>> = vec![
        Box::new(h2c_service),
        Box::new(http_service),
        Box::new(cache_service),
    ];
    if let Some(https_service) = https_service_opt {
        services.push(https_service);
    }
    set_compression_dict_path("tests/headers.dict");
    server.add_services(services);
    server.run_forever();
}
/// Handle to the background thread running the shared test proxy server.
pub struct Server {
    // Kept so callers could join the thread; test_main drives the server's
    // run_forever loop, so in practice this is never joined.
    pub handle: thread::JoinHandle<()>,
}
impl Server {
pub fn start() -> Self {
let server_handle = thread::spawn(|| {
test_main();
});
Server {
handle: server_handle,
}
}
}
/// Handle to the background thread running the PSK-only TLS test server
/// (s2n backend only).
#[cfg(feature = "s2n")]
pub struct PskTlsServer {
    pub handle: thread::JoinHandle<()>,
}
#[cfg(feature = "s2n")]
impl PskTlsServer {
    /// Spawn the PSK TLS test server on a background thread with its own
    /// tokio runtime and return a handle to it.
    pub fn start() -> Self {
        let server_handle = thread::spawn(|| {
            let rt = tokio::runtime::Runtime::new().unwrap();
            rt.block_on(Self::run_server());
        });
        PskTlsServer {
            handle: server_handle,
        }
    }
    /// Accept TLS connections on 127.0.0.1:6151 authenticated solely by an
    /// external PSK (x509 verification disabled) and answer each connection
    /// with one canned HTTP/1.1 response.
    async fn run_server() {
        use pingora_core::{protocols::tls::S2NConnectionBuilder, tls::TlsAcceptor};
        use pingora_core::{
            protocols::tls::{Psk, PskConfig},
            tls::{Config, PskHmac, DEFAULT_TLS13},
        };
        use tokio::io::AsyncWriteExt;
        use tokio::net::TcpListener;
        let psk = Psk::new(
            TEST_PSK_IDENTITY.to_string(),
            TEST_PSK_SECRET.as_bytes().to_vec(),
            PskHmac::SHA256,
        );
        let psk_config = Arc::new(PskConfig::new(vec![psk]));
        let addr: std::net::SocketAddr = "127.0.0.1:6151".parse().unwrap();
        let listener = TcpListener::bind(addr).await.unwrap();
        let mut config_builder = Config::builder();
        // Clients are authenticated via the PSK only, so certificate
        // verification is intentionally disabled for this test server.
        unsafe {
            config_builder.disable_x509_verification();
        }
        config_builder.set_security_policy(&DEFAULT_TLS13).unwrap();
        let config = config_builder.build().unwrap();
        let connection_builder = S2NConnectionBuilder {
            config: config.clone(),
            psk_config: Some(psk_config.clone()),
            security_policy: None,
        };
        let acceptor = TlsAcceptor::new(connection_builder);
        loop {
            let (tcp_stream, _) = listener.accept().await.unwrap();
            let mut stream = acceptor.clone().accept(tcp_stream).await.unwrap();
            let response = b"HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello";
            // write() may perform a partial write; write_all guarantees the
            // entire response is sent.
            stream.write_all(response).await.unwrap();
            // Best-effort close: the client may have already hung up, so the
            // shutdown result is explicitly ignored rather than unwrapped.
            let _ = stream.shutdown().await;
        }
    }
}
// FIXME: this still allows multiple servers to spawn across integration tests
// Lazily-started singletons: the first dereference spawns the server thread.
pub static TEST_SERVER: Lazy<Server> = Lazy::new(Server::start);
#[cfg(feature = "s2n")]
pub static TEST_PSK_TLS_SERVER: Lazy<PskTlsServer> = Lazy::new(PskTlsServer::start);
use super::mock_origin::MOCK_ORIGIN;
pub fn init() {
let _ = *TEST_SERVER;
let _ = *MOCK_ORIGIN;
#[cfg(feature = "s2n")]
let _ = *TEST_PSK_TLS_SERVER;
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-proxy/tests/utils/mock_origin.rs | pingora-proxy/tests/utils/mock_origin.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use once_cell::sync::Lazy;
use std::path::Path;
use std::process;
use std::{thread, time};
// Lazily (re)starts the openresty mock origin; the first dereference blocks
// until init() has run. The bool value itself carries no information.
pub static MOCK_ORIGIN: Lazy<bool> = Lazy::new(init);
/// (Re)start the openresty mock origin used by the integration tests.
///
/// Copies the TLS-backend-specific server certificate into place, kills any
/// origin left over from a previous run, launches openresty in the background,
/// then sleeps to give it time to come up. Always returns `true`; the value is
/// only consumed through the `MOCK_ORIGIN` Lazy.
fn init() -> bool {
    // Each TLS backend ships its own server certificate; pick the one that
    // matches the compiled feature.
    #[cfg(feature = "rustls")]
    let src_cert_path = format!(
        "{}/tests/utils/conf/keys/server_rustls.crt",
        env!("CARGO_MANIFEST_DIR")
    );
    #[cfg(feature = "openssl_derived")]
    let src_cert_path = format!(
        "{}/tests/utils/conf/keys/server_boringssl_openssl.crt",
        env!("CARGO_MANIFEST_DIR")
    );
    #[cfg(feature = "s2n")]
    let src_cert_path = format!(
        "{}/tests/utils/conf/keys/server_s2n.crt",
        env!("CARGO_MANIFEST_DIR")
    );
    #[cfg(feature = "any_tls")]
    {
        // Best-effort copies: the Results were previously ignored implicitly
        // (fs::copy is #[must_use]); ignore them explicitly to make the
        // intent clear. Shadowing replaces the earlier `mut` + reassignment.
        let dst_cert_path = format!("{}/tests/keys/server.crt", env!("CARGO_MANIFEST_DIR"));
        let _ = std::fs::copy(Path::new(&src_cert_path), Path::new(&dst_cert_path));
        let dst_cert_path = format!(
            "{}/tests/utils/conf/keys/server.crt",
            env!("CARGO_MANIFEST_DIR")
        );
        let _ = std::fs::copy(Path::new(&src_cert_path), Path::new(&dst_cert_path));
    }
    // TODO: figure out a way to kill openresty when exiting
    // Kill any origin from a previous run. wait() keeps the teardown
    // synchronous; its Result is explicitly ignored since a missing pid file
    // (nothing running) is the common, non-fatal case.
    let _ = process::Command::new("pkill")
        .args(["-F", "/tmp/pingora_mock_origin.pid"])
        .spawn()
        .unwrap()
        .wait();
    // Launch openresty in the background; its handle is intentionally dropped.
    let _origin = thread::spawn(|| {
        process::Command::new("openresty")
            .args(["-p", &format!("{}/origin", super::conf_dir())])
            .output()
            .unwrap();
    });
    // wait until the server is up
    thread::sleep(time::Duration::from_secs(2));
    true
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-proxy/tests/utils/mod.rs | pingora-proxy/tests/utils/mod.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused)]
#[cfg(feature = "any_tls")]
pub mod cert;
pub mod mock_origin;
pub mod server_utils;
pub mod websocket;
use once_cell::sync::Lazy;
use tokio::runtime::{Builder, Runtime};
// for tests with a static connection pool, if we use tokio::test the reactor
// will no longer be associated with the backing pool fds since it's dropped per test
// Hence a single process-wide multi-thread runtime shared by every test.
pub static GLOBAL_RUNTIME: Lazy<Runtime> =
    Lazy::new(|| Builder::new_multi_thread().enable_all().build().unwrap());
/// Absolute path of this crate's test configuration directory.
pub fn conf_dir() -> String {
    let mut dir = String::from(env!("CARGO_MANIFEST_DIR"));
    dir.push_str("/tests/utils/conf");
    dir
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
cloudflare/pingora | https://github.com/cloudflare/pingora/blob/5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094/pingora-proxy/tests/utils/cert.rs | pingora-proxy/tests/utils/cert.rs | // Copyright 2025 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use once_cell::sync::Lazy;
#[cfg(feature = "s2n")]
use pingora_core::tls::load_pem_file;
#[cfg(feature = "rustls")]
use pingora_core::tls::{load_pem_file_ca, load_pem_file_private_key};
#[cfg(feature = "openssl_derived")]
use pingora_core::tls::{
pkey::{PKey, Private},
x509::X509,
};
use std::fs;
// Per-TLS-backend type aliases so the cert/key statics below can share one
// declaration: openssl-derived backends use parsed X509/PKey objects, while
// rustls and s2n work on raw byte buffers.
#[cfg(feature = "openssl_derived")]
mod key_types {
    use super::*;
    pub type PrivateKeyType = PKey<Private>;
    pub type CertType = X509;
}
#[cfg(feature = "rustls")]
mod key_types {
    use super::*;
    pub type PrivateKeyType = Vec<u8>;
    pub type CertType = Vec<u8>;
}
#[cfg(feature = "s2n")]
mod key_types {
    use super::*;
    pub type PrivateKeyType = Vec<u8>;
    pub type CertType = Vec<u8>;
}
use key_types::*;
// Test certificates and keys, lazily loaded from the conf dir on first use.
pub static INTERMEDIATE_CERT: Lazy<CertType> = Lazy::new(|| load_cert("keys/intermediate.crt"));
pub static LEAF_CERT: Lazy<CertType> = Lazy::new(|| load_cert("keys/leaf.crt"));
pub static LEAF2_CERT: Lazy<CertType> = Lazy::new(|| load_cert("keys/leaf2.crt"));
pub static LEAF_KEY: Lazy<PrivateKeyType> = Lazy::new(|| load_key("keys/leaf.key"));
pub static LEAF2_KEY: Lazy<PrivateKeyType> = Lazy::new(|| load_key("keys/leaf2.key"));
// Elliptic-curve test material (file names indicate P-521 and P-384 curves).
pub static CURVE_521_TEST_KEY: Lazy<PrivateKeyType> =
    Lazy::new(|| load_key("keys/curve_test.521.key.pem"));
pub static CURVE_521_TEST_CERT: Lazy<CertType> = Lazy::new(|| load_cert("keys/curve_test.521.crt"));
pub static CURVE_384_TEST_KEY: Lazy<PrivateKeyType> =
    Lazy::new(|| load_key("keys/curve_test.384.key.pem"));
pub static CURVE_384_TEST_CERT: Lazy<CertType> = Lazy::new(|| load_cert("keys/curve_test.384.crt"));
/// Read a PEM certificate from the test conf dir and parse it into an X509.
#[cfg(feature = "openssl_derived")]
fn load_cert(path: &str) -> X509 {
    let full_path = format!("{}/{path}", super::conf_dir());
    let pem = fs::read(&full_path).unwrap();
    X509::from_pem(&pem).unwrap()
}
/// Read a PEM private key from the test conf dir and parse it into a PKey.
#[cfg(feature = "openssl_derived")]
fn load_key(path: &str) -> PKey<Private> {
    let full_path = format!("{}/{path}", super::conf_dir());
    let pem = fs::read(&full_path).unwrap();
    PKey::private_key_from_pem(&pem).unwrap()
}
/// Load a certificate from the test conf dir via the rustls helper.
#[cfg(feature = "rustls")]
fn load_cert(path: &str) -> Vec<u8> {
    load_pem_file_ca(&format!("{}/{path}", super::conf_dir())).unwrap()
}
/// Load a private key from the test conf dir via the rustls helper.
#[cfg(feature = "rustls")]
fn load_key(path: &str) -> Vec<u8> {
    load_pem_file_private_key(&format!("{}/{path}", super::conf_dir())).unwrap()
}
/// Load a certificate PEM file from the test conf dir (s2n backend).
#[cfg(feature = "s2n")]
fn load_cert(path: &str) -> Vec<u8> {
    load_pem_file(&format!("{}/{path}", super::conf_dir())).unwrap()
}
/// Load a private-key PEM file from the test conf dir (s2n backend).
#[cfg(feature = "s2n")]
fn load_key(path: &str) -> Vec<u8> {
    load_pem_file(&format!("{}/{path}", super::conf_dir())).unwrap()
}
| rust | Apache-2.0 | 5c4bd0bc546b2d9caaff4a438a4cf3d69e5a5094 | 2026-01-04T15:36:50.761692Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.