repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/typeid.rs | tokio/src/util/typeid.rs | use std::{
any::TypeId,
marker::PhantomData,
mem::{self, ManuallyDrop},
};
// SAFETY: this function does not compare lifetimes. Values returned as `Ok`
// may have their lifetimes extended.
pub(super) unsafe fn try_transmute<Src, Target: 'static>(x: Src) -> Result<Target, Src> {
if nonstatic_typeid::<Src>() == TypeId::of::<Target>() {
let x = ManuallyDrop::new(x);
// SAFETY: we have checked that the types are the same.
Ok(unsafe { mem::transmute_copy::<Src, Target>(&x) })
} else {
Err(x)
}
}
// https://github.com/dtolnay/typeid/blob/b06a3c08a0eaccc7df6091ade1ae4e3fb53609d5/src/lib.rs#L197-L222
#[inline(always)]
fn nonstatic_typeid<T>() -> TypeId
where
T: ?Sized,
{
trait NonStaticAny {
fn get_type_id(&self) -> TypeId
where
Self: 'static;
}
impl<T: ?Sized> NonStaticAny for PhantomData<T> {
#[inline(always)]
fn get_type_id(&self) -> TypeId
where
Self: 'static,
{
TypeId::of::<T>()
}
}
let phantom_data = PhantomData::<T>;
NonStaticAny::get_type_id(unsafe {
mem::transmute::<&dyn NonStaticAny, &(dyn NonStaticAny + 'static)>(&phantom_data)
})
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/ptr_expose.rs | tokio/src/util/ptr_expose.rs | //! Utility for helping miri understand our exposed pointers.
//!
//! During normal execution, this module is equivalent to pointer casts. However, when running
//! under miri, pointer casts are replaced with lookups in a hash map. This makes Tokio compatible
//! with strict provenance when running under miri (which comes with a performance cost).
use std::marker::PhantomData;
#[cfg(miri)]
use {crate::loom::sync::Mutex, std::collections::BTreeMap};
pub(crate) struct PtrExposeDomain<T> {
#[cfg(miri)]
map: Mutex<BTreeMap<usize, *const T>>,
_phantom: PhantomData<T>,
}
// SAFETY: Actually using the pointers is unsafe, so it's sound to transfer them across threads.
unsafe impl<T> Sync for PtrExposeDomain<T> {}
impl<T> PtrExposeDomain<T> {
pub(crate) const fn new() -> Self {
Self {
#[cfg(miri)]
map: Mutex::const_new(BTreeMap::new()),
_phantom: PhantomData,
}
}
#[inline]
pub(crate) fn expose_provenance(&self, ptr: *const T) -> usize {
#[cfg(miri)]
{
let addr: usize = ptr.addr();
self.map.lock().insert(addr, ptr);
addr
}
#[cfg(not(miri))]
{
ptr as usize
}
}
#[inline]
#[allow(clippy::wrong_self_convention)] // mirrors std name
pub(crate) fn from_exposed_addr(&self, addr: usize) -> *const T {
#[cfg(miri)]
{
let maybe_ptr = self.map.lock().get(&addr).copied();
// SAFETY: Intentionally trigger a miri failure if the provenance we want is not
// exposed.
unsafe { maybe_ptr.unwrap_unchecked() }
}
#[cfg(not(miri))]
{
addr as *const T
}
}
#[inline]
pub(crate) fn unexpose_provenance(&self, _ptr: *const T) {
#[cfg(miri)]
{
let addr: usize = _ptr.addr();
let maybe_ptr = self.map.lock().remove(&addr);
// SAFETY: Intentionally trigger a miri failure if the provenance we want is not
// exposed.
unsafe { maybe_ptr.unwrap_unchecked() };
}
}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/trace.rs | tokio/src/util/trace.rs | cfg_rt! {
use std::marker::PhantomData;
#[derive(Copy, Clone)]
pub(crate) struct SpawnMeta<'a> {
/// The name of the task
#[cfg(all(tokio_unstable, feature = "tracing"))]
pub(crate) name: Option<&'a str>,
/// The original size of the future or function being spawned
#[cfg(all(tokio_unstable, feature = "tracing"))]
pub(crate) original_size: usize,
/// The source code location where the task was spawned.
///
/// This is wrapped in a type that may be empty when `tokio_unstable` is
/// not enabled.
pub(crate) spawned_at: crate::runtime::task::SpawnLocation,
_pd: PhantomData<&'a ()>,
}
impl<'a> SpawnMeta<'a> {
/// Create new spawn meta with a name and original size (before possible auto-boxing)
#[cfg(all(tokio_unstable, feature = "tracing"))]
#[track_caller]
pub(crate) fn new(name: Option<&'a str>, original_size: usize) -> Self {
Self {
name,
original_size,
spawned_at: crate::runtime::task::SpawnLocation::capture(),
_pd: PhantomData,
}
}
/// Create a new unnamed spawn meta with the original size (before possible auto-boxing)
#[track_caller]
pub(crate) fn new_unnamed(original_size: usize) -> Self {
#[cfg(not(all(tokio_unstable, feature = "tracing")))]
let _original_size = original_size;
Self {
#[cfg(all(tokio_unstable, feature = "tracing"))]
name: None,
#[cfg(all(tokio_unstable, feature = "tracing"))]
original_size,
spawned_at: crate::runtime::task::SpawnLocation::capture(),
_pd: PhantomData,
}
}
}
cfg_trace! {
use core::{
pin::Pin,
task::{Context, Poll},
};
use pin_project_lite::pin_project;
use std::mem;
use std::future::Future;
use tracing::instrument::Instrument;
pub(crate) use tracing::instrument::Instrumented;
#[inline]
pub(crate) fn task<F>(task: F, kind: &'static str, meta: SpawnMeta<'_>, id: u64) -> Instrumented<F> {
fn get_span(kind: &'static str, spawn_meta: SpawnMeta<'_>, id: u64, task_size: usize) -> tracing::Span {
let original_size = if spawn_meta.original_size != task_size {
Some(spawn_meta.original_size)
} else {
None
};
tracing::trace_span!(
target: "tokio::task",
parent: None,
"runtime.spawn",
%kind,
task.name = %spawn_meta.name.unwrap_or_default(),
task.id = id,
original_size.bytes = original_size,
size.bytes = task_size,
loc.file = spawn_meta.spawned_at.0.file(),
loc.line = spawn_meta.spawned_at.0.line(),
loc.col = spawn_meta.spawned_at.0.column(),
)
}
use tracing::instrument::Instrument;
let span = get_span(kind, meta, id, mem::size_of::<F>());
task.instrument(span)
}
#[inline]
pub(crate) fn blocking_task<Fn, Fut>(task: Fut, spawn_meta: SpawnMeta<'_>, id: u64) -> Instrumented<Fut> {
let fn_size = mem::size_of::<Fn>();
let original_size = if spawn_meta.original_size != fn_size {
Some(spawn_meta.original_size)
} else {
None
};
let span = tracing::trace_span!(
target: "tokio::task::blocking",
"runtime.spawn",
kind = %"blocking",
task.name = %spawn_meta.name.unwrap_or_default(),
task.id = id,
"fn" = %std::any::type_name::<Fn>(),
original_size.bytes = original_size,
size.bytes = fn_size,
loc.file = spawn_meta.spawned_at.0.file(),
loc.line = spawn_meta.spawned_at.0.line(),
loc.col = spawn_meta.spawned_at.0.column(),
);
task.instrument(span)
}
pub(crate) fn async_op<P,F>(inner: P, resource_span: tracing::Span, source: &str, poll_op_name: &'static str, inherits_child_attrs: bool) -> InstrumentedAsyncOp<F>
where P: FnOnce() -> F {
resource_span.in_scope(|| {
let async_op_span = tracing::trace_span!("runtime.resource.async_op", source = source, inherits_child_attrs = inherits_child_attrs);
let enter = async_op_span.enter();
let async_op_poll_span = tracing::trace_span!("runtime.resource.async_op.poll");
let inner = inner();
drop(enter);
let tracing_ctx = AsyncOpTracingCtx {
async_op_span,
async_op_poll_span,
resource_span: resource_span.clone(),
};
InstrumentedAsyncOp {
inner,
tracing_ctx,
poll_op_name,
}
})
}
#[derive(Debug, Clone)]
pub(crate) struct AsyncOpTracingCtx {
pub(crate) async_op_span: tracing::Span,
pub(crate) async_op_poll_span: tracing::Span,
pub(crate) resource_span: tracing::Span,
}
pin_project! {
#[derive(Debug, Clone)]
pub(crate) struct InstrumentedAsyncOp<F> {
#[pin]
pub(crate) inner: F,
pub(crate) tracing_ctx: AsyncOpTracingCtx,
pub(crate) poll_op_name: &'static str
}
}
impl<F: Future> Future for InstrumentedAsyncOp<F> {
type Output = F::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
let poll_op_name = &*this.poll_op_name;
let _res_enter = this.tracing_ctx.resource_span.enter();
let _async_op_enter = this.tracing_ctx.async_op_span.enter();
let _async_op_poll_enter = this.tracing_ctx.async_op_poll_span.enter();
trace_poll_op!(poll_op_name, this.inner.poll(cx))
}
}
}
cfg_not_trace! {
#[inline]
pub(crate) fn task<F>(task: F, _kind: &'static str, _meta: SpawnMeta<'_>, _id: u64) -> F {
// nop
task
}
#[inline]
pub(crate) fn blocking_task<Fn, Fut>(task: Fut, _spawn_meta: SpawnMeta<'_>, _id: u64) -> Fut {
let _ = PhantomData::<&Fn>;
// nop
task
}
}
}
cfg_time! {
#[track_caller]
pub(crate) fn caller_location() -> Option<&'static std::panic::Location<'static>> {
#[cfg(all(tokio_unstable, feature = "tracing"))]
return Some(std::panic::Location::caller());
#[cfg(not(all(tokio_unstable, feature = "tracing")))]
None
}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/as_ref.rs | tokio/src/util/as_ref.rs | use super::typeid;
#[derive(Debug)]
pub(crate) enum OwnedBuf {
Vec(Vec<u8>),
#[cfg(feature = "io-util")]
Bytes(bytes::Bytes),
}
impl AsRef<[u8]> for OwnedBuf {
fn as_ref(&self) -> &[u8] {
match self {
Self::Vec(vec) => vec,
#[cfg(feature = "io-util")]
Self::Bytes(bytes) => bytes,
}
}
}
pub(crate) fn upgrade<B: AsRef<[u8]>>(buf: B) -> OwnedBuf {
let buf = match unsafe { typeid::try_transmute::<B, Vec<u8>>(buf) } {
Ok(vec) => return OwnedBuf::Vec(vec),
Err(original_buf) => original_buf,
};
let buf = match unsafe { typeid::try_transmute::<B, String>(buf) } {
Ok(string) => return OwnedBuf::Vec(string.into_bytes()),
Err(original_buf) => original_buf,
};
#[cfg(feature = "io-util")]
let buf = match unsafe { typeid::try_transmute::<B, bytes::Bytes>(buf) } {
Ok(bytes) => return OwnedBuf::Bytes(bytes),
Err(original_buf) => original_buf,
};
OwnedBuf::Vec(buf.as_ref().to_owned())
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/markers.rs | tokio/src/util/markers.rs | /// Marker for types that are `Sync` but not `Send`
#[allow(dead_code)]
pub(crate) struct SyncNotSend(#[allow(dead_code)] *mut ());
unsafe impl Sync for SyncNotSend {}
cfg_rt! {
pub(crate) struct NotSendOrSync(#[allow(dead_code)] *mut ());
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/error.rs | tokio/src/util/error.rs | // Some combinations of features may not use these constants.
#![cfg_attr(not(feature = "full"), allow(dead_code))]
/// Error string explaining that the Tokio context hasn't been instantiated.
pub(crate) const CONTEXT_MISSING_ERROR: &str =
"there is no reactor running, must be called from the context of a Tokio 1.x runtime";
/// Error string explaining that the Tokio context is shutting down and cannot drive timers.
pub(crate) const RUNTIME_SHUTTING_DOWN_ERROR: &str =
"A Tokio 1.x context was found, but it is being shutdown.";
/// Error string explaining that the Tokio context is not available because the
/// thread-local storing it has been destroyed. This usually only happens during
/// destructors of other thread-locals.
pub(crate) const THREAD_LOCAL_DESTROYED_ERROR: &str =
"The Tokio context thread-local variable has been destroyed.";
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/rand.rs | tokio/src/util/rand.rs | cfg_rt! {
mod rt;
pub(crate) use rt::RngSeedGenerator;
cfg_unstable! {
mod rt_unstable;
}
}
/// A seed for random number generation.
///
/// In order to make certain functions within a runtime deterministic, a seed
/// can be specified at the time of creation.
#[allow(unreachable_pub)]
#[derive(Clone, Debug)]
pub struct RngSeed {
s: u32,
r: u32,
}
/// Fast random number generate.
///
/// Implement `xorshift64+`: 2 32-bit `xorshift` sequences added together.
/// Shift triplet `[17,7,16]` was calculated as indicated in Marsaglia's
/// `Xorshift` paper: <https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf>
/// This generator passes the SmallCrush suite, part of TestU01 framework:
/// <http://simul.iro.umontreal.ca/testu01/tu01.html>
#[derive(Clone, Copy, Debug)]
pub(crate) struct FastRand {
one: u32,
two: u32,
}
impl RngSeed {
/// Creates a random seed using loom internally.
pub(crate) fn new() -> Self {
Self::from_u64(crate::loom::rand::seed())
}
fn from_u64(seed: u64) -> Self {
let one = (seed >> 32) as u32;
let mut two = seed as u32;
if two == 0 {
// This value cannot be zero
two = 1;
}
Self::from_pair(one, two)
}
fn from_pair(s: u32, r: u32) -> Self {
Self { s, r }
}
}
impl FastRand {
/// Initialize a new fast random number generator using the default source of entropy.
pub(crate) fn new() -> FastRand {
FastRand::from_seed(RngSeed::new())
}
/// Initializes a new, thread-local, fast random number generator.
pub(crate) fn from_seed(seed: RngSeed) -> FastRand {
FastRand {
one: seed.s,
two: seed.r,
}
}
#[cfg(any(
feature = "macros",
feature = "rt-multi-thread",
all(feature = "sync", feature = "rt")
))]
pub(crate) fn fastrand_n(&mut self, n: u32) -> u32 {
// This is similar to fastrand() % n, but faster.
// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
let mul = (self.fastrand() as u64).wrapping_mul(n as u64);
(mul >> 32) as u32
}
fn fastrand(&mut self) -> u32 {
let mut s1 = self.one;
let s0 = self.two;
s1 ^= s1 << 17;
s1 = s1 ^ s0 ^ s1 >> 7 ^ s0 >> 16;
self.one = s0;
self.two = s1;
s0.wrapping_add(s1)
}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/mod.rs | tokio/src/util/mod.rs | cfg_io_driver! {
pub(crate) mod bit;
}
#[cfg(feature = "fs")]
pub(crate) mod as_ref;
#[cfg(feature = "rt")]
pub(crate) mod atomic_cell;
#[cfg(feature = "net")]
mod blocking_check;
#[cfg(feature = "net")]
#[allow(unused_imports)]
pub(crate) use blocking_check::check_socket_for_blocking;
pub(crate) mod metric_atomics;
#[cfg(any(
// io driver uses `WakeList` directly
feature = "net",
feature = "process",
// `sync` enables `Notify` and `batch_semaphore`, which require `WakeList`.
feature = "sync",
// `fs` uses `batch_semaphore`, which requires `WakeList`.
feature = "fs",
// rt and signal use `Notify`, which requires `WakeList`.
feature = "rt",
feature = "signal",
// time driver uses `WakeList` in `Handle::process_at_time`.
feature = "time",
))]
mod wake_list;
#[cfg(any(
feature = "net",
feature = "process",
feature = "sync",
feature = "fs",
feature = "rt",
feature = "signal",
feature = "time",
))]
pub(crate) use wake_list::WakeList;
#[cfg(any(
feature = "fs",
feature = "net",
feature = "process",
feature = "rt",
feature = "sync",
feature = "signal",
feature = "time",
fuzzing,
))]
pub(crate) mod linked_list;
cfg_rt! {
pub(crate) mod sharded_list;
}
#[cfg(any(feature = "rt", feature = "macros"))]
pub(crate) mod rand;
cfg_rt! {
mod idle_notified_set;
pub(crate) use idle_notified_set::IdleNotifiedSet;
pub(crate) use self::rand::RngSeedGenerator;
mod wake;
pub(crate) use wake::WakerRef;
pub(crate) use wake::{waker_ref, Wake};
mod sync_wrapper;
pub(crate) use sync_wrapper::SyncWrapper;
mod rc_cell;
pub(crate) use rc_cell::RcCell;
}
cfg_rt_multi_thread! {
mod try_lock;
pub(crate) use try_lock::TryLock;
}
pub(crate) mod trace;
#[cfg(feature = "fs")]
pub(crate) mod typeid;
pub(crate) mod error;
#[cfg(feature = "io-util")]
pub(crate) mod memchr;
pub(crate) mod markers;
pub(crate) mod cacheline;
cfg_io_driver_impl! {
pub(crate) mod ptr_expose;
}
use std::{ops::DerefMut, pin::Pin};
/// Copy of [`std::pin::Pin::as_deref_mut`].
// TODO: Remove this once we bump the MSRV to 1.84.
pub(crate) fn pin_as_deref_mut<P: DerefMut>(ptr: Pin<&mut Pin<P>>) -> Pin<&mut P::Target> {
unsafe { ptr.get_unchecked_mut() }.as_mut()
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/memchr.rs | tokio/src/util/memchr.rs | //! Search for a byte in a byte array using libc.
//!
//! When nothing pulls in libc, then just use a trivial implementation. Note
//! that we only depend on libc on unix.
#[cfg(not(all(unix, feature = "libc")))]
pub(crate) fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
haystack.iter().position(|val| needle == *val)
}
#[cfg(all(unix, feature = "libc"))]
pub(crate) fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
let start = haystack.as_ptr();
// SAFETY: `start` is valid for `haystack.len()` bytes.
let ptr = unsafe { libc::memchr(start.cast(), needle as _, haystack.len()) };
if ptr.is_null() {
None
} else {
Some(ptr as usize - start as usize)
}
}
#[cfg(test)]
mod tests {
use super::memchr;
#[test]
fn memchr_test() {
let haystack = b"123abc456\0\xffabc\n";
assert_eq!(memchr(b'1', haystack), Some(0));
assert_eq!(memchr(b'2', haystack), Some(1));
assert_eq!(memchr(b'3', haystack), Some(2));
assert_eq!(memchr(b'4', haystack), Some(6));
assert_eq!(memchr(b'5', haystack), Some(7));
assert_eq!(memchr(b'6', haystack), Some(8));
assert_eq!(memchr(b'7', haystack), None);
assert_eq!(memchr(b'a', haystack), Some(3));
assert_eq!(memchr(b'b', haystack), Some(4));
assert_eq!(memchr(b'c', haystack), Some(5));
assert_eq!(memchr(b'd', haystack), None);
assert_eq!(memchr(b'A', haystack), None);
assert_eq!(memchr(0, haystack), Some(9));
assert_eq!(memchr(0xff, haystack), Some(10));
assert_eq!(memchr(0xfe, haystack), None);
assert_eq!(memchr(1, haystack), None);
assert_eq!(memchr(b'\n', haystack), Some(14));
assert_eq!(memchr(b'\r', haystack), None);
}
#[test]
fn memchr_all() {
let mut arr = Vec::new();
for b in 0..=255 {
arr.push(b);
}
for b in 0..=255 {
assert_eq!(memchr(b, &arr), Some(b as usize));
}
arr.reverse();
for b in 0..=255 {
assert_eq!(memchr(b, &arr), Some(255 - b as usize));
}
}
#[test]
fn memchr_empty() {
for b in 0..=255 {
assert_eq!(memchr(b, b""), None);
}
}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/cacheline.rs | tokio/src/util/cacheline.rs | #![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))]
use std::ops::{Deref, DerefMut};
/// Pads and aligns a value to the length of a cache line.
#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache
// lines at a time, so we have to align to 128 bytes rather than 64.
//
// Sources:
// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
//
// ARM's big.LITTLE architecture has asymmetric cores and "big" cores have 128-byte cache line size.
//
// Sources:
// - https://www.mono-project.com/news/2016/09/12/arm64-icache/
//
// powerpc64 has 128-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9
#[cfg_attr(
any(
target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "powerpc64",
),
repr(align(128))
)]
// arm, mips and mips64 have 32-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9
#[cfg_attr(
any(target_arch = "arm", target_arch = "mips", target_arch = "mips64",),
repr(align(32))
)]
// s390x has 256-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7
#[cfg_attr(target_arch = "s390x", repr(align(256)))]
// x86, riscv and wasm have 64-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7
// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/riscv/include/asm/cache.h#L10
//
// All others are assumed to have 64-byte cache line size.
#[cfg_attr(
not(any(
target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "powerpc64",
target_arch = "arm",
target_arch = "mips",
target_arch = "mips64",
target_arch = "s390x",
)),
repr(align(64))
)]
pub(crate) struct CachePadded<T> {
value: T,
}
impl<T> CachePadded<T> {
/// Pads and aligns a value to the length of a cache line.
pub(crate) fn new(value: T) -> CachePadded<T> {
CachePadded::<T> { value }
}
}
impl<T> Deref for CachePadded<T> {
type Target = T;
fn deref(&self) -> &T {
&self.value
}
}
impl<T> DerefMut for CachePadded<T> {
fn deref_mut(&mut self) -> &mut T {
&mut self.value
}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/linked_list.rs | tokio/src/util/linked_list.rs | #![cfg_attr(not(feature = "full"), allow(dead_code))]
// It doesn't make sense to enforce `unsafe_op_in_unsafe_fn` for this module because
//
// * The intrusive linked list naturally relies on unsafe operations.
// * Excessive `unsafe {}` blocks hurt readability significantly.
// TODO: replace with `#[expect(unsafe_op_in_unsafe_fn)]` after bumpping
// the MSRV to 1.81.0.
#![allow(unsafe_op_in_unsafe_fn)]
//! An intrusive double linked list of data.
//!
//! The data structure supports tracking pinned nodes. Most of the data
//! structure's APIs are `unsafe` as they require the caller to ensure the
//! specified node is actually contained by the list.
use core::cell::UnsafeCell;
use core::fmt;
use core::marker::{PhantomData, PhantomPinned};
use core::mem::ManuallyDrop;
use core::ptr::{self, NonNull};
/// An intrusive linked list.
///
/// Currently, the list is not emptied on drop. It is the caller's
/// responsibility to ensure the list is empty before dropping it.
pub(crate) struct LinkedList<L, T> {
/// Linked list head
head: Option<NonNull<T>>,
/// Linked list tail
tail: Option<NonNull<T>>,
/// Node type marker.
_marker: PhantomData<*const L>,
}
unsafe impl<L: Link> Send for LinkedList<L, L::Target> where L::Target: Send {}
unsafe impl<L: Link> Sync for LinkedList<L, L::Target> where L::Target: Sync {}
/// Defines how a type is tracked within a linked list.
///
/// In order to support storing a single type within multiple lists, accessing
/// the list pointers is decoupled from the entry type.
///
/// # Safety
///
/// Implementations must guarantee that `Target` types are pinned in memory. In
/// other words, when a node is inserted, the value will not be moved as long as
/// it is stored in the list.
pub(crate) unsafe trait Link {
/// Handle to the list entry.
///
/// This is usually a pointer-ish type.
type Handle;
/// Node type.
type Target;
/// Convert the handle to a raw pointer without consuming the handle.
#[allow(clippy::wrong_self_convention)]
fn as_raw(handle: &Self::Handle) -> NonNull<Self::Target>;
/// Convert the raw pointer to a handle
unsafe fn from_raw(ptr: NonNull<Self::Target>) -> Self::Handle;
/// Return the pointers for a node
///
/// # Safety
///
/// The resulting pointer should have the same tag in the stacked-borrows
/// stack as the argument. In particular, the method may not create an
/// intermediate reference in the process of creating the resulting raw
/// pointer.
///
/// The `target` pointer must be valid.
unsafe fn pointers(target: NonNull<Self::Target>) -> NonNull<Pointers<Self::Target>>;
}
/// Previous / next pointers.
pub(crate) struct Pointers<T> {
inner: UnsafeCell<PointersInner<T>>,
}
/// We do not want the compiler to put the `noalias` attribute on mutable
/// references to this type, so the type has been made `!Unpin` with a
/// `PhantomPinned` field.
///
/// Additionally, we never access the `prev` or `next` fields directly, as any
/// such access would implicitly involve the creation of a reference to the
/// field, which we want to avoid since the fields are not `!Unpin`, and would
/// hence be given the `noalias` attribute if we were to do such an access. As
/// an alternative to accessing the fields directly, the `Pointers` type
/// provides getters and setters for the two fields, and those are implemented
/// using `ptr`-specific methods which avoids the creation of intermediate
/// references.
///
/// See this link for more information:
/// <https://github.com/rust-lang/rust/pull/82834>
struct PointersInner<T> {
/// The previous node in the list. null if there is no previous node.
prev: Option<NonNull<T>>,
/// The next node in the list. null if there is no previous node.
next: Option<NonNull<T>>,
/// This type is !Unpin due to the heuristic from:
/// <https://github.com/rust-lang/rust/pull/82834>
_pin: PhantomPinned,
}
unsafe impl<T: Send> Send for Pointers<T> {}
unsafe impl<T: Sync> Sync for Pointers<T> {}
// ===== impl LinkedList =====
impl<L, T> LinkedList<L, T> {
/// Creates an empty linked list.
pub(crate) const fn new() -> LinkedList<L, T> {
LinkedList {
head: None,
tail: None,
_marker: PhantomData,
}
}
}
impl<L: Link> LinkedList<L, L::Target> {
/// Adds an element first in the list.
pub(crate) fn push_front(&mut self, val: L::Handle) {
// The value should not be dropped, it is being inserted into the list
let val = ManuallyDrop::new(val);
let ptr = L::as_raw(&val);
assert_ne!(self.head, Some(ptr));
unsafe {
L::pointers(ptr).as_mut().set_next(self.head);
L::pointers(ptr).as_mut().set_prev(None);
if let Some(head) = self.head {
L::pointers(head).as_mut().set_prev(Some(ptr));
}
self.head = Some(ptr);
if self.tail.is_none() {
self.tail = Some(ptr);
}
}
}
/// Removes the first element from a list and returns it, or None if it is
/// empty.
pub(crate) fn pop_front(&mut self) -> Option<L::Handle> {
unsafe {
let head = self.head?;
self.head = L::pointers(head).as_ref().get_next();
if let Some(new_head) = L::pointers(head).as_ref().get_next() {
L::pointers(new_head).as_mut().set_prev(None);
} else {
self.tail = None;
}
L::pointers(head).as_mut().set_prev(None);
L::pointers(head).as_mut().set_next(None);
Some(L::from_raw(head))
}
}
/// Removes the last element from a list and returns it, or None if it is
/// empty.
pub(crate) fn pop_back(&mut self) -> Option<L::Handle> {
unsafe {
let last = self.tail?;
self.tail = L::pointers(last).as_ref().get_prev();
if let Some(prev) = L::pointers(last).as_ref().get_prev() {
L::pointers(prev).as_mut().set_next(None);
} else {
self.head = None;
}
L::pointers(last).as_mut().set_prev(None);
L::pointers(last).as_mut().set_next(None);
Some(L::from_raw(last))
}
}
/// Returns whether the linked list does not contain any node
pub(crate) fn is_empty(&self) -> bool {
if self.head.is_some() {
return false;
}
assert!(self.tail.is_none());
true
}
/// Removes the specified node from the list
///
/// # Safety
///
/// The caller **must** ensure that exactly one of the following is true:
/// - `node` is currently contained by `self`,
/// - `node` is not contained by any list,
/// - `node` is currently contained by some other `GuardedLinkedList` **and**
/// the caller has an exclusive access to that list. This condition is
/// used by the linked list in `sync::Notify`.
pub(crate) unsafe fn remove(&mut self, node: NonNull<L::Target>) -> Option<L::Handle> {
if let Some(prev) = L::pointers(node).as_ref().get_prev() {
debug_assert_eq!(L::pointers(prev).as_ref().get_next(), Some(node));
L::pointers(prev)
.as_mut()
.set_next(L::pointers(node).as_ref().get_next());
} else {
if self.head != Some(node) {
return None;
}
self.head = L::pointers(node).as_ref().get_next();
}
if let Some(next) = L::pointers(node).as_ref().get_next() {
debug_assert_eq!(L::pointers(next).as_ref().get_prev(), Some(node));
L::pointers(next)
.as_mut()
.set_prev(L::pointers(node).as_ref().get_prev());
} else {
// This might be the last item in the list
if self.tail != Some(node) {
return None;
}
self.tail = L::pointers(node).as_ref().get_prev();
}
L::pointers(node).as_mut().set_next(None);
L::pointers(node).as_mut().set_prev(None);
Some(L::from_raw(node))
}
}
impl<L: Link> fmt::Debug for LinkedList<L, L::Target> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("LinkedList")
.field("head", &self.head)
.field("tail", &self.tail)
.finish()
}
}
#[cfg(any(
feature = "fs",
feature = "rt",
all(unix, feature = "process"),
feature = "signal",
feature = "sync",
))]
impl<L: Link> LinkedList<L, L::Target> {
pub(crate) fn last(&self) -> Option<&L::Target> {
let tail = self.tail.as_ref()?;
unsafe { Some(&*tail.as_ptr()) }
}
}
impl<L: Link> Default for LinkedList<L, L::Target> {
fn default() -> Self {
Self::new()
}
}
// ===== impl DrainFilter =====
cfg_io_driver_impl! {
pub(crate) struct DrainFilter<'a, T: Link, F> {
list: &'a mut LinkedList<T, T::Target>,
filter: F,
curr: Option<NonNull<T::Target>>,
}
impl<T: Link> LinkedList<T, T::Target> {
pub(crate) fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, T, F>
where
F: FnMut(&T::Target) -> bool,
{
let curr = self.head;
DrainFilter {
curr,
filter,
list: self,
}
}
}
impl<'a, T, F> Iterator for DrainFilter<'a, T, F>
where
T: Link,
F: FnMut(&T::Target) -> bool,
{
type Item = T::Handle;
fn next(&mut self) -> Option<Self::Item> {
while let Some(curr) = self.curr {
// safety: the pointer references data contained by the list
self.curr = unsafe { T::pointers(curr).as_ref() }.get_next();
// safety: the value is still owned by the linked list.
if (self.filter)(unsafe { &mut *curr.as_ptr() }) {
return unsafe { self.list.remove(curr) };
}
}
None
}
}
}
cfg_taskdump! {
impl<T: Link> LinkedList<T, T::Target> {
pub(crate) fn for_each<F>(&mut self, mut f: F)
where
F: FnMut(&T::Handle),
{
let mut next = self.head;
while let Some(curr) = next {
unsafe {
let handle = ManuallyDrop::new(T::from_raw(curr));
f(&handle);
next = T::pointers(curr).as_ref().get_next();
}
}
}
}
}
// ===== impl GuardedLinkedList =====
feature! {
    #![any(
        feature = "process",
        feature = "sync",
        feature = "rt",
        feature = "signal",
    )]

    /// An intrusive linked list, but instead of keeping pointers to the head
    /// and tail nodes, it uses a special guard node linked with those nodes.
    /// It means that the list is circular and every pointer of a node from
    /// the list is not `None`, including pointers from the guard node.
    ///
    /// If a list is empty, then both pointers of the guard node are pointing
    /// at the guard node itself.
    pub(crate) struct GuardedLinkedList<L, T> {
        /// Pointer to the guard node.
        guard: NonNull<T>,

        /// Node type marker.
        _marker: PhantomData<*const L>,
    }

    impl<L: Link> LinkedList<L, L::Target> {
        /// Turns a linked list into the guarded version by linking the guard node
        /// with the head and tail nodes. Like with other nodes, you should guarantee
        /// that the guard node is pinned in memory.
        pub(crate) fn into_guarded(self, guard_handle: L::Handle) -> GuardedLinkedList<L, L::Target> {
            // `guard_handle` is a NonNull pointer, we don't have to care about dropping it.
            let guard = L::as_raw(&guard_handle);

            unsafe {
                if let Some(head) = self.head {
                    // Splice the guard in between the tail and the head,
                    // closing the chain into a circle.
                    debug_assert!(L::pointers(head).as_ref().get_prev().is_none());
                    L::pointers(head).as_mut().set_prev(Some(guard));
                    L::pointers(guard).as_mut().set_next(Some(head));

                    // The list is not empty, so the tail cannot be `None`.
                    let tail = self.tail.unwrap();
                    debug_assert!(L::pointers(tail).as_ref().get_next().is_none());
                    L::pointers(tail).as_mut().set_next(Some(guard));
                    L::pointers(guard).as_mut().set_prev(Some(tail));
                } else {
                    // The list is empty: the guard points at itself.
                    L::pointers(guard).as_mut().set_prev(Some(guard));
                    L::pointers(guard).as_mut().set_next(Some(guard));
                }
            }

            GuardedLinkedList { guard, _marker: PhantomData }
        }
    }

    impl<L: Link> GuardedLinkedList<L, L::Target> {
        /// Returns the last node of the list, or `None` if the list is empty.
        fn tail(&self) -> Option<NonNull<L::Target>> {
            let tail_ptr = unsafe {
                L::pointers(self.guard).as_ref().get_prev().unwrap()
            };

            // Compare the tail pointer with the address of the guard node itself.
            // If the guard points at itself, then there are no other nodes and
            // the list is considered empty.
            if tail_ptr != self.guard {
                Some(tail_ptr)
            } else {
                None
            }
        }

        /// Removes the last element from a list and returns it, or None if it is
        /// empty.
        pub(crate) fn pop_back(&mut self) -> Option<L::Handle> {
            unsafe {
                let last = self.tail()?;
                let before_last = L::pointers(last).as_ref().get_prev().unwrap();

                // Unlink `last` by making the guard and its predecessor point
                // at each other.
                L::pointers(self.guard).as_mut().set_prev(Some(before_last));
                L::pointers(before_last).as_mut().set_next(Some(self.guard));

                // Clear the removed node's pointers before handing it back.
                L::pointers(last).as_mut().set_prev(None);
                L::pointers(last).as_mut().set_next(None);

                Some(L::from_raw(last))
            }
        }
    }
}
// ===== impl Pointers =====
// NOTE(review): the getters/setters below use `addr_of!`/`addr_of_mut!` to
// project directly to the field through the `UnsafeCell`, without first
// materializing a reference to the whole `PointersInner` struct.
impl<T> Pointers<T> {
    /// Create a new set of empty pointers
    pub(crate) fn new() -> Pointers<T> {
        Pointers {
            inner: UnsafeCell::new(PointersInner {
                prev: None,
                next: None,
                _pin: PhantomPinned,
            }),
        }
    }

    /// Returns the `prev` pointer of this node, if any.
    pub(crate) fn get_prev(&self) -> Option<NonNull<T>> {
        // SAFETY: Field is accessed immutably through a reference.
        unsafe { ptr::addr_of!((*self.inner.get()).prev).read() }
    }

    /// Returns the `next` pointer of this node, if any.
    pub(crate) fn get_next(&self) -> Option<NonNull<T>> {
        // SAFETY: Field is accessed immutably through a reference.
        unsafe { ptr::addr_of!((*self.inner.get()).next).read() }
    }

    fn set_prev(&mut self, value: Option<NonNull<T>>) {
        // SAFETY: Field is accessed mutably through a mutable reference.
        unsafe {
            ptr::addr_of_mut!((*self.inner.get()).prev).write(value);
        }
    }

    fn set_next(&mut self, value: Option<NonNull<T>>) {
        // SAFETY: Field is accessed mutably through a mutable reference.
        unsafe {
            ptr::addr_of_mut!((*self.inner.get()).next).write(value);
        }
    }
}
impl<T> fmt::Debug for Pointers<T> {
    /// Formats the node's neighbor pointers for diagnostics.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Pointers")
            .field("prev", &self.get_prev())
            .field("next", &self.get_next())
            .finish()
    }
}
#[cfg(any(test, fuzzing))]
#[cfg(not(loom))]
pub(crate) mod tests {
    use super::*;

    use std::pin::Pin;

    // Test node type: the `Pointers` field must come first (repr(C)) so the
    // `pointers` impl below can simply cast the node pointer.
    #[derive(Debug)]
    #[repr(C)]
    struct Entry {
        pointers: Pointers<Entry>,
        val: i32,
    }

    unsafe impl<'a> Link for &'a Entry {
        type Handle = Pin<&'a Entry>;
        type Target = Entry;

        fn as_raw(handle: &Pin<&'_ Entry>) -> NonNull<Entry> {
            NonNull::from(handle.get_ref())
        }

        unsafe fn from_raw(ptr: NonNull<Entry>) -> Pin<&'a Entry> {
            Pin::new_unchecked(&*ptr.as_ptr())
        }

        unsafe fn pointers(target: NonNull<Entry>) -> NonNull<Pointers<Entry>> {
            target.cast()
        }
    }

    // Creates a pinned, heap-allocated entry with the given value.
    fn entry(val: i32) -> Pin<Box<Entry>> {
        Box::pin(Entry {
            pointers: Pointers::new(),
            val,
        })
    }

    // Returns the raw node pointer for an entry.
    fn ptr(r: &Pin<Box<Entry>>) -> NonNull<Entry> {
        r.as_ref().get_ref().into()
    }

    // Drains the list from the back, returning the values in pop order.
    fn collect_list(list: &mut LinkedList<&'_ Entry, <&'_ Entry as Link>::Target>) -> Vec<i32> {
        let mut ret = vec![];

        while let Some(entry) = list.pop_back() {
            ret.push(entry.val);
        }

        ret
    }

    fn push_all<'a>(
        list: &mut LinkedList<&'a Entry, <&'_ Entry as Link>::Target>,
        entries: &[Pin<&'a Entry>],
    ) {
        for entry in entries.iter() {
            list.push_front(*entry);
        }
    }

    // Asserts that an entry's pointers were nulled out after removal.
    #[cfg(test)]
    macro_rules! assert_clean {
        ($e:ident) => {{
            assert!($e.pointers.get_next().is_none());
            assert!($e.pointers.get_prev().is_none());
        }};
    }

    #[cfg(test)]
    macro_rules! assert_ptr_eq {
        ($a:expr, $b:expr) => {{
            // Deal with mapping a Pin<&mut T> -> Option<NonNull<T>>
            assert_eq!(Some($a.as_ref().get_ref().into()), $b)
        }};
    }

    #[test]
    fn const_new() {
        const _: LinkedList<&Entry, <&Entry as Link>::Target> = LinkedList::new();
    }

    #[test]
    fn push_and_drain() {
        let a = entry(5);
        let b = entry(7);
        let c = entry(31);

        let mut list = LinkedList::new();
        assert!(list.is_empty());

        list.push_front(a.as_ref());
        assert!(!list.is_empty());
        list.push_front(b.as_ref());
        list.push_front(c.as_ref());

        let items: Vec<i32> = collect_list(&mut list);
        assert_eq!([5, 7, 31].to_vec(), items);

        assert!(list.is_empty());
    }

    #[test]
    fn push_pop_push_pop() {
        let a = entry(5);
        let b = entry(7);

        let mut list = LinkedList::<&Entry, <&Entry as Link>::Target>::new();

        list.push_front(a.as_ref());

        let entry = list.pop_back().unwrap();
        assert_eq!(5, entry.val);
        assert!(list.is_empty());

        list.push_front(b.as_ref());

        let entry = list.pop_back().unwrap();
        assert_eq!(7, entry.val);

        assert!(list.is_empty());
        assert!(list.pop_back().is_none());
    }

    #[test]
    fn remove_by_address() {
        let a = entry(5);
        let b = entry(7);
        let c = entry(31);

        unsafe {
            // Remove first
            let mut list = LinkedList::new();

            push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]);
            assert!(list.remove(ptr(&a)).is_some());
            assert_clean!(a);
            // `a` should be no longer there and can't be removed twice
            assert!(list.remove(ptr(&a)).is_none());
            assert!(!list.is_empty());

            assert!(list.remove(ptr(&b)).is_some());
            assert_clean!(b);
            // `b` should be no longer there and can't be removed twice
            assert!(list.remove(ptr(&b)).is_none());
            assert!(!list.is_empty());

            assert!(list.remove(ptr(&c)).is_some());
            assert_clean!(c);
            // `c` should be no longer there and can't be removed twice
            assert!(list.remove(ptr(&c)).is_none());
            assert!(list.is_empty());
        }

        unsafe {
            // Remove middle
            let mut list = LinkedList::new();

            push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]);

            assert!(list.remove(ptr(&a)).is_some());
            assert_clean!(a);

            assert_ptr_eq!(b, list.head);
            assert_ptr_eq!(c, b.pointers.get_next());
            assert_ptr_eq!(b, c.pointers.get_prev());

            let items = collect_list(&mut list);
            assert_eq!([31, 7].to_vec(), items);
        }

        unsafe {
            // Remove middle
            let mut list = LinkedList::new();

            push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]);

            assert!(list.remove(ptr(&b)).is_some());
            assert_clean!(b);

            assert_ptr_eq!(c, a.pointers.get_next());
            assert_ptr_eq!(a, c.pointers.get_prev());

            let items = collect_list(&mut list);
            assert_eq!([31, 5].to_vec(), items);
        }

        unsafe {
            // Remove last
            let mut list = LinkedList::new();

            push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]);

            assert!(list.remove(ptr(&c)).is_some());
            assert_clean!(c);

            assert!(b.pointers.get_next().is_none());
            assert_ptr_eq!(b, list.tail);

            let items = collect_list(&mut list);
            assert_eq!([7, 5].to_vec(), items);
        }

        unsafe {
            // Remove first of two
            let mut list = LinkedList::new();

            push_all(&mut list, &[b.as_ref(), a.as_ref()]);

            assert!(list.remove(ptr(&a)).is_some());

            assert_clean!(a);

            // a should be no longer there and can't be removed twice
            assert!(list.remove(ptr(&a)).is_none());

            assert_ptr_eq!(b, list.head);
            assert_ptr_eq!(b, list.tail);

            assert!(b.pointers.get_next().is_none());
            assert!(b.pointers.get_prev().is_none());

            let items = collect_list(&mut list);
            assert_eq!([7].to_vec(), items);
        }

        unsafe {
            // Remove last of two
            let mut list = LinkedList::new();

            push_all(&mut list, &[b.as_ref(), a.as_ref()]);

            assert!(list.remove(ptr(&b)).is_some());

            assert_clean!(b);

            assert_ptr_eq!(a, list.head);
            assert_ptr_eq!(a, list.tail);

            assert!(a.pointers.get_next().is_none());
            assert!(a.pointers.get_prev().is_none());

            let items = collect_list(&mut list);
            assert_eq!([5].to_vec(), items);
        }

        unsafe {
            // Remove last item
            let mut list = LinkedList::new();

            push_all(&mut list, &[a.as_ref()]);

            assert!(list.remove(ptr(&a)).is_some());
            assert_clean!(a);

            assert!(list.head.is_none());
            assert!(list.tail.is_none());
            let items = collect_list(&mut list);
            assert!(items.is_empty());
        }

        unsafe {
            // Remove missing
            let mut list = LinkedList::<&Entry, <&Entry as Link>::Target>::new();

            list.push_front(b.as_ref());
            list.push_front(a.as_ref());

            assert!(list.remove(ptr(&c)).is_none());
        }
    }

    /// This is a fuzz test. You run it by entering `cargo fuzz run fuzz_linked_list` in CLI in `/tokio/` module.
    #[cfg(fuzzing)]
    pub fn fuzz_linked_list(ops: &[u8]) {
        enum Op {
            Push,
            Pop,
            Remove(usize),
        }
        use std::collections::VecDeque;

        // Decode each input byte into an operation; bytes that decode to
        // `Remove` carry an index derived from the same byte.
        let ops = ops
            .iter()
            .map(|i| match i % 3u8 {
                0 => Op::Push,
                1 => Op::Pop,
                2 => Op::Remove((i / 3u8) as usize),
                _ => unreachable!(),
            })
            .collect::<Vec<_>>();

        // `reference` is a model implementation (VecDeque) checked against
        // the intrusive list after every operation.
        let mut ll = LinkedList::<&Entry, <&Entry as Link>::Target>::new();
        let mut reference = VecDeque::new();

        let entries: Vec<_> = (0..ops.len()).map(|i| entry(i as i32)).collect();

        for (i, op) in ops.iter().enumerate() {
            match op {
                Op::Push => {
                    reference.push_front(i as i32);
                    assert_eq!(entries[i].val, i as i32);

                    ll.push_front(entries[i].as_ref());
                }
                Op::Pop => {
                    if reference.is_empty() {
                        assert!(ll.is_empty());
                        continue;
                    }

                    let v = reference.pop_back();
                    assert_eq!(v, ll.pop_back().map(|v| v.val));
                }
                Op::Remove(n) => {
                    if reference.is_empty() {
                        assert!(ll.is_empty());
                        continue;
                    }

                    let idx = n % reference.len();
                    let expect = reference.remove(idx).unwrap();

                    unsafe {
                        let entry = ll.remove(ptr(&entries[expect as usize])).unwrap();
                        assert_eq!(expect, entry.val);
                    }
                }
            }
        }
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/sync_wrapper.rs | tokio/src/util/sync_wrapper.rs | //! This module contains a type that can make `Send + !Sync` types `Sync` by
//! disallowing all immutable access to the value.
//!
//! A similar primitive is provided in the `sync_wrapper` crate.
use std::any::Any;
/// Makes a `Send + !Sync` value usable as `Sync` by disallowing all shared
/// (immutable) access to the wrapped value; only owning access is exposed.
pub(crate) struct SyncWrapper<T> {
    value: T,
}

// safety: The SyncWrapper being send allows you to send the inner value across
// thread boundaries.
unsafe impl<T: Send> Send for SyncWrapper<T> {}

// safety: An immutable reference to a SyncWrapper is useless, so moving such an
// immutable reference across threads is safe.
unsafe impl<T> Sync for SyncWrapper<T> {}
impl<T> SyncWrapper<T> {
pub(crate) fn new(value: T) -> Self {
Self { value }
}
pub(crate) fn into_inner(self) -> T {
self.value
}
}
impl SyncWrapper<Box<dyn Any + Send>> {
    /// Attempt to downcast using `Any::downcast_ref()` to a type that is known to be `Sync`.
    ///
    /// Returns `None` if the boxed value is not of type `T`.
    pub(crate) fn downcast_ref_sync<T: Any + Sync>(&self) -> Option<&T> {
        // SAFETY: if the downcast fails, the inner value is not touched,
        // so no thread-safety violation can occur.
        self.value.downcast_ref()
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/rc_cell.rs | tokio/src/util/rc_cell.rs | use crate::loom::cell::UnsafeCell;
use std::rc::Rc;
/// This is exactly like `Cell<Option<Rc<T>>>`, except that it provides a `get`
/// method even though `Rc` is not `Copy`.
pub(crate) struct RcCell<T> {
    // Interior-mutable slot; all access funnels through `with_inner`.
    inner: UnsafeCell<Option<Rc<T>>>,
}
impl<T> RcCell<T> {
    /// Creates an empty cell.
    #[cfg(not(all(loom, test)))]
    pub(crate) const fn new() -> Self {
        Self {
            inner: UnsafeCell::new(None),
        }
    }

    // The UnsafeCell in loom does not have a const `new` fn.
    #[cfg(all(loom, test))]
    pub(crate) fn new() -> Self {
        Self {
            inner: UnsafeCell::new(None),
        }
    }

    /// Safety: This method may not be called recursively.
    #[inline]
    unsafe fn with_inner<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&mut Option<Rc<T>>) -> R,
    {
        // safety: This type is not Sync, so concurrent calls of this method
        // cannot happen. Furthermore, the caller guarantees that the method is
        // not called recursively. Finally, this is the only place that can
        // create mutable references to the inner Rc. This ensures that any
        // mutable references created here are exclusive.
        self.inner.with_mut(|ptr| f(unsafe { &mut *ptr }))
    }

    /// Returns a clone of the stored `Rc`, or `None` if the cell is empty.
    pub(crate) fn get(&self) -> Option<Rc<T>> {
        // safety: The `Rc::clone` method will not call any unknown user-code,
        // so it will not result in a recursive call to `with_inner`.
        unsafe { self.with_inner(|rc| rc.clone()) }
    }

    /// Stores `val` in the cell and returns the previous value.
    pub(crate) fn replace(&self, val: Option<Rc<T>>) -> Option<Rc<T>> {
        // safety: No destructors or other unknown user-code will run inside the
        // `with_inner` call, so no recursive call to `with_inner` can happen.
        unsafe { self.with_inner(|rc| std::mem::replace(rc, val)) }
    }

    /// Stores `val` in the cell; the old value is dropped only after the cell
    /// has been updated (so its destructor cannot observe a half-updated cell).
    pub(crate) fn set(&self, val: Option<Rc<T>>) {
        let old = self.replace(val);
        drop(old);
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/metric_atomics.rs | tokio/src/util/metric_atomics.rs | use std::sync::atomic::{AtomicUsize, Ordering};
cfg_64bit_metrics! {
use std::sync::atomic::AtomicU64;
}
/// `AtomicU64` that is a no-op on platforms without 64-bit atomics
///
/// When used on platforms without 64-bit atomics, writes to this are no-ops.
/// The `load` method is only defined when 64-bit atomics are available.
#[derive(Debug, Default)]
pub(crate) struct MetricAtomicU64 {
    // The backing atomic only exists when the target supports 64-bit atomics;
    // otherwise the struct is zero-sized.
    #[cfg(target_has_atomic = "64")]
    value: AtomicU64,
}
// some of these are currently only used behind cfg_unstable
#[allow(dead_code)]
impl MetricAtomicU64 {
    // Load is only defined when supported
    cfg_64bit_metrics! {
        /// Reads the current value with the given ordering.
        pub(crate) fn load(&self, ordering: Ordering) -> u64 {
            self.value.load(ordering)
        }
    }

    cfg_64bit_metrics! {
        /// Overwrites the current value with the given ordering.
        pub(crate) fn store(&self, val: u64, ordering: Ordering) {
            self.value.store(val, ordering)
        }

        /// Creates a counter initialized to `value`.
        pub(crate) fn new(value: u64) -> Self {
            Self { value: AtomicU64::new(value) }
        }

        /// Adds `value` to the counter with the given ordering.
        pub(crate) fn add(&self, value: u64, ordering: Ordering) {
            self.value.fetch_add(value, ordering);
        }
    }

    cfg_no_64bit_metrics! {
        // Without 64-bit atomics, writes are silently discarded; `load` is
        // intentionally not provided.
        pub(crate) fn store(&self, _val: u64, _ordering: Ordering) { }
        // on platforms without 64-bit atomics, fetch-add returns unit
        pub(crate) fn add(&self, _value: u64, _ordering: Ordering) { }
        pub(crate) fn new(_value: u64) -> Self { Self { } }
    }
}
#[cfg_attr(not(all(tokio_unstable, feature = "rt")), allow(dead_code))]
/// `AtomicUsize` for use in metrics.
///
/// This exposes simplified APIs for use in metrics & uses `std::sync` instead of Loom to avoid polluting loom logs with metric information.
#[derive(Debug, Default)]
pub(crate) struct MetricAtomicUsize {
    value: AtomicUsize,
}
#[cfg_attr(not(all(tokio_unstable, feature = "rt")), allow(dead_code))]
impl MetricAtomicUsize {
    /// Creates a counter initialized to `value`.
    pub(crate) fn new(value: usize) -> Self {
        let value = AtomicUsize::new(value);
        Self { value }
    }

    /// Reads the current value with the given ordering.
    pub(crate) fn load(&self, ordering: Ordering) -> usize {
        self.value.load(ordering)
    }

    /// Overwrites the current value with the given ordering.
    pub(crate) fn store(&self, val: usize, ordering: Ordering) {
        self.value.store(val, ordering)
    }

    /// Adds one (relaxed), returning the previous value.
    pub(crate) fn increment(&self) -> usize {
        self.value.fetch_add(1, Ordering::Relaxed)
    }

    /// Subtracts one (relaxed), returning the previous value.
    pub(crate) fn decrement(&self) -> usize {
        self.value.fetch_sub(1, Ordering::Relaxed)
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/atomic_cell.rs | tokio/src/util/atomic_cell.rs | use crate::loom::sync::atomic::AtomicPtr;
use std::ptr;
use std::sync::atomic::Ordering::AcqRel;
/// A cell holding an optional boxed value that can be swapped atomically.
pub(crate) struct AtomicCell<T> {
    // A null pointer encodes `None`; a non-null pointer owns a `Box<T>`
    // (see `to_raw`/`from_raw` below).
    data: AtomicPtr<T>,
}

// safety: the cell owns the boxed value, so moving or sharing the cell is
// equivalent to moving the value across threads; hence `T: Send` is required.
unsafe impl<T: Send> Send for AtomicCell<T> {}
unsafe impl<T: Send> Sync for AtomicCell<T> {}
impl<T> AtomicCell<T> {
    /// Creates a cell holding `data` (or empty for `None`).
    pub(crate) fn new(data: Option<Box<T>>) -> AtomicCell<T> {
        let data = AtomicPtr::new(to_raw(data));
        AtomicCell { data }
    }

    /// Atomically replaces the stored value with `val`, returning the
    /// previous value.
    pub(crate) fn swap(&self, val: Option<Box<T>>) -> Option<Box<T>> {
        from_raw(self.data.swap(to_raw(val), AcqRel))
    }

    /// Stores `val`, dropping whatever was previously in the cell.
    pub(crate) fn set(&self, val: Box<T>) {
        drop(self.swap(Some(val)));
    }

    /// Removes and returns the stored value, leaving the cell empty.
    pub(crate) fn take(&self) -> Option<Box<T>> {
        self.swap(None)
    }
}
/// Converts an optional boxed value into a raw pointer, using the null
/// pointer to represent `None`. Ownership is transferred to the pointer.
fn to_raw<T>(data: Option<Box<T>>) -> *mut T {
    match data {
        Some(boxed) => Box::into_raw(boxed),
        None => ptr::null_mut(),
    }
}
/// Reclaims ownership of a pointer produced by `to_raw`, mapping the null
/// pointer back to `None`.
fn from_raw<T>(val: *mut T) -> Option<Box<T>> {
    // SAFETY: every non-null pointer handled here was created by
    // `Box::into_raw` in `to_raw`, so reconstructing the box is sound.
    (!val.is_null()).then(|| unsafe { Box::from_raw(val) })
}
impl<T> Drop for AtomicCell<T> {
    /// Reclaims and drops any value still held by the cell so it is not
    /// leaked.
    fn drop(&mut self) {
        drop(self.take());
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/idle_notified_set.rs | tokio/src/util/idle_notified_set.rs | //! This module defines an `IdleNotifiedSet`, which is a collection of elements.
//! Each element is intended to correspond to a task, and the collection will
//! keep track of which tasks have had their waker notified, and which have not.
//!
//! Each entry in the set holds some user-specified value. The value's type is
//! specified using the `T` parameter. It will usually be a `JoinHandle` or
//! similar.
use std::marker::PhantomPinned;
use std::mem::ManuallyDrop;
use std::ptr::NonNull;
use std::task::{Context, Waker};
use crate::loom::cell::UnsafeCell;
use crate::loom::sync::{Arc, Mutex};
use crate::util::linked_list::{self, Link};
use crate::util::{waker_ref, Wake};
type LinkedList<T> =
linked_list::LinkedList<ListEntry<T>, <ListEntry<T> as linked_list::Link>::Target>;
/// This is the main handle to the collection.
pub(crate) struct IdleNotifiedSet<T> {
    // Shared state (the two linked lists plus the stored waker); each entry's
    // waker also holds a strong reference to this.
    lists: Arc<Lists<T>>,
    // Total number of entries across both lists, kept locally so the fast
    // paths can check emptiness without taking the mutex.
    length: usize,
}
/// A handle to an entry that is guaranteed to be stored in the idle or notified
/// list of its `IdleNotifiedSet`. This value borrows the `IdleNotifiedSet`
/// mutably to prevent the entry from being moved to the `Neither` list, which
/// only the `IdleNotifiedSet` may do.
///
/// The main consequence of being stored in one of the lists is that the `value`
/// field has not yet been consumed.
///
/// Note: This entry can be moved from the idle to the notified list while this
/// object exists by waking its waker.
pub(crate) struct EntryInOneOfTheLists<'a, T> {
    // The entry this handle refers to.
    entry: Arc<ListEntry<T>>,
    // Exclusive borrow of the owning set for the lifetime of this handle.
    set: &'a mut IdleNotifiedSet<T>,
}
// The shared state, always accessed under a mutex.
type Lists<T> = Mutex<ListsInner<T>>;

/// The linked lists hold strong references to the `ListEntry` items, and the
/// `ListEntry` items also hold a strong reference back to the Lists object, but
/// the destructor of the `IdleNotifiedSet` will clear the two lists, so once
/// that object is destroyed, no ref-cycles will remain.
struct ListsInner<T> {
    notified: LinkedList<T>,
    idle: LinkedList<T>,
    /// Whenever an element in the `notified` list is woken, this waker will be
    /// notified and consumed, if it exists.
    waker: Option<Waker>,
}
/// Which of the two lists in the shared Lists object is this entry stored in?
///
/// If the value is `Idle`, then an entry's waker may move it to the notified
/// list. Otherwise, only the `IdleNotifiedSet` may move it.
///
/// If the value is `Neither`, then it is still possible that the entry is in
/// some third external list (this happens in `drain`).
#[derive(Copy, Clone, Eq, PartialEq)]
enum List {
    Notified,
    Idle,
    Neither,
}
/// An entry in the list.
///
/// # Safety
///
/// The `my_list` field must only be accessed while holding the mutex in
/// `parent`. It is an invariant that the value of `my_list` corresponds to
/// which linked list in the `parent` holds this entry. Once this field takes
/// the value `Neither`, then it may never be modified again.
///
/// If the value of `my_list` is `Notified` or `Idle`, then the `pointers` field
/// must only be accessed while holding the mutex. If the value of `my_list` is
/// `Neither`, then the `pointers` field may be accessed by the
/// `IdleNotifiedSet` (this happens inside `drain`).
///
/// The `value` field is owned by the `IdleNotifiedSet` and may only be accessed
/// by the `IdleNotifiedSet`. The operation that sets the value of `my_list` to
/// `Neither` assumes ownership of the `value`, and it must either drop it or
/// move it out from this entry to prevent it from getting leaked. (Since the
/// two linked lists are emptied in the destructor of `IdleNotifiedSet`, the
/// value should not be leaked.)
struct ListEntry<T> {
    /// The linked list pointers of the list this entry is in.
    pointers: linked_list::Pointers<ListEntry<T>>,
    /// Pointer to the shared `Lists` struct.
    parent: Arc<Lists<T>>,
    /// The value stored in this entry.
    value: UnsafeCell<ManuallyDrop<T>>,
    /// Used to remember which list this entry is in.
    my_list: UnsafeCell<List>,
    /// Required by the `linked_list::Pointers` field.
    _pin: PhantomPinned,
}
// Generates `ListEntry::addr_of_pointers`, used by the `linked_list::Link`
// impl below to obtain a pointer to the `pointers` field of a node.
generate_addr_of_methods! {
    impl<T> ListEntry<T> {
        unsafe fn addr_of_pointers(self: NonNull<Self>) -> NonNull<linked_list::Pointers<ListEntry<T>>> {
            &self.pointers
        }
    }
}
// With mutable access to the `IdleNotifiedSet`, you can get mutable access to
// the values.
unsafe impl<T: Send> Send for IdleNotifiedSet<T> {}
// With the current API we strictly speaking don't even need `T: Sync`, but we
// require it anyway to support adding &self APIs that access the values in the
// future.
unsafe impl<T: Sync> Sync for IdleNotifiedSet<T> {}

// These impls control when it is safe to create a Waker. Since the waker does
// not allow access to the value in any way (including its destructor), it is
// not necessary for `T` to be Send or Sync.
unsafe impl<T> Send for ListEntry<T> {}
unsafe impl<T> Sync for ListEntry<T> {}
impl<T> IdleNotifiedSet<T> {
    /// Create a new `IdleNotifiedSet`.
    pub(crate) fn new() -> Self {
        let lists = Mutex::new(ListsInner {
            notified: LinkedList::new(),
            idle: LinkedList::new(),
            waker: None,
        });

        IdleNotifiedSet {
            lists: Arc::new(lists),
            length: 0,
        }
    }

    /// Returns the total number of entries (idle plus notified).
    pub(crate) fn len(&self) -> usize {
        self.length
    }

    /// Returns `true` if the set contains no entries.
    pub(crate) fn is_empty(&self) -> bool {
        self.length == 0
    }

    /// Insert the given value into the `idle` list.
    pub(crate) fn insert_idle(&mut self, value: T) -> EntryInOneOfTheLists<'_, T> {
        self.length += 1;

        let entry = Arc::new(ListEntry {
            parent: self.lists.clone(),
            value: UnsafeCell::new(ManuallyDrop::new(value)),
            my_list: UnsafeCell::new(List::Idle),
            pointers: linked_list::Pointers::new(),
            _pin: PhantomPinned,
        });

        {
            let mut lock = self.lists.lock();
            lock.idle.push_front(entry.clone());
        }

        // Safety: We just put the entry in the idle list, so it is in one of the lists.
        EntryInOneOfTheLists { entry, set: self }
    }

    /// Pop an entry from the notified list to poll it. The entry is moved to
    /// the idle list atomically.
    pub(crate) fn pop_notified(&mut self, waker: &Waker) -> Option<EntryInOneOfTheLists<'_, T>> {
        // We don't decrement the length because this call moves the entry to
        // the idle list rather than removing it.
        if self.length == 0 {
            // Fast path.
            return None;
        }

        let mut lock = self.lists.lock();

        // Store the caller's waker so a later wakeup of any entry can notify
        // it; skip the clone when the stored waker already wakes the same
        // task.
        let should_update_waker = match lock.waker.as_mut() {
            Some(cur_waker) => !waker.will_wake(cur_waker),
            None => true,
        };
        if should_update_waker {
            lock.waker = Some(waker.clone());
        }

        // Pop the entry, returning None if empty.
        let entry = lock.notified.pop_back()?;

        lock.idle.push_front(entry.clone());

        // Safety: We are holding the lock.
        entry.my_list.with_mut(|ptr| unsafe {
            *ptr = List::Idle;
        });

        drop(lock);

        // Safety: We just put the entry in the idle list, so it is in one of the lists.
        Some(EntryInOneOfTheLists { entry, set: self })
    }

    /// Tries to pop an entry from the notified list to poll it. The entry is moved to
    /// the idle list atomically.
    ///
    /// Unlike `pop_notified`, this does not register a waker.
    pub(crate) fn try_pop_notified(&mut self) -> Option<EntryInOneOfTheLists<'_, T>> {
        // We don't decrement the length because this call moves the entry to
        // the idle list rather than removing it.
        if self.length == 0 {
            // Fast path.
            return None;
        }

        let mut lock = self.lists.lock();

        // Pop the entry, returning None if empty.
        let entry = lock.notified.pop_back()?;

        lock.idle.push_front(entry.clone());

        // Safety: We are holding the lock.
        entry.my_list.with_mut(|ptr| unsafe {
            *ptr = List::Idle;
        });

        drop(lock);

        // Safety: We just put the entry in the idle list, so it is in one of the lists.
        Some(EntryInOneOfTheLists { entry, set: self })
    }

    /// Call a function on every element in this list.
    pub(crate) fn for_each<F: FnMut(&mut T)>(&mut self, mut func: F) {
        // Collects a raw pointer to the value of every entry in `list`,
        // walking from the back towards the front.
        fn get_ptrs<T>(list: &mut LinkedList<T>, ptrs: &mut Vec<*mut T>) {
            let mut node = list.last();

            while let Some(entry) = node {
                ptrs.push(entry.value.with_mut(|ptr| {
                    let ptr: *mut ManuallyDrop<T> = ptr;
                    let ptr: *mut T = ptr.cast();
                    ptr
                }));

                let prev = entry.pointers.get_prev();
                node = prev.map(|prev| unsafe { &*prev.as_ptr() });
            }
        }

        // Atomically get a raw pointer to the value of every entry.
        //
        // Since this only locks the mutex once, it is not possible for a value
        // to get moved from the idle list to the notified list during the
        // operation, which would otherwise result in some value being listed
        // twice.
        let mut ptrs = Vec::with_capacity(self.len());
        {
            let mut lock = self.lists.lock();

            get_ptrs(&mut lock.idle, &mut ptrs);
            get_ptrs(&mut lock.notified, &mut ptrs);
        }
        // Every entry must have been visited exactly once.
        debug_assert_eq!(ptrs.len(), ptrs.capacity());

        for ptr in ptrs {
            // Safety: When we grabbed the pointers, the entries were in one of
            // the two lists. This means that their value was valid at the time,
            // and it must still be valid because we are the IdleNotifiedSet,
            // and only we can remove an entry from the two lists. (It's
            // possible that an entry is moved from one list to the other during
            // this loop, but that is ok.)
            func(unsafe { &mut *ptr });
        }
    }

    /// Remove all entries in both lists, applying some function to each element.
    ///
    /// The closure is called on all elements even if it panics. Having it panic
    /// twice is a double-panic, and will abort the application.
    pub(crate) fn drain<F: FnMut(T)>(&mut self, func: F) {
        if self.length == 0 {
            // Fast path.
            return;
        }
        self.length = 0;

        // The LinkedList is not cleared on panic, so we use a bomb to clear it.
        //
        // This value has the invariant that any entry in its `all_entries` list
        // has `my_list` set to `Neither` and that the value has not yet been
        // dropped.
        struct AllEntries<T, F: FnMut(T)> {
            all_entries: LinkedList<T>,
            func: F,
        }

        impl<T, F: FnMut(T)> AllEntries<T, F> {
            // Pops one entry, runs `func` on its value, and reports whether an
            // entry was processed.
            fn pop_next(&mut self) -> bool {
                if let Some(entry) = self.all_entries.pop_back() {
                    // Safety: We just took this value from the list, so we can
                    // destroy the value in the entry.
                    entry
                        .value
                        .with_mut(|ptr| unsafe { (self.func)(ManuallyDrop::take(&mut *ptr)) });
                    true
                } else {
                    false
                }
            }
        }

        impl<T, F: FnMut(T)> Drop for AllEntries<T, F> {
            fn drop(&mut self) {
                while self.pop_next() {}
            }
        }

        let mut all_entries = AllEntries {
            all_entries: LinkedList::new(),
            func,
        };

        // Atomically move all entries to the new linked list in the AllEntries
        // object.
        {
            let mut lock = self.lists.lock();
            unsafe {
                // Safety: We are holding the lock and `all_entries` is a new
                // LinkedList.
                move_to_new_list(&mut lock.idle, &mut all_entries.all_entries);
                move_to_new_list(&mut lock.notified, &mut all_entries.all_entries);
            }
        }

        // Keep destroying entries in the list until it is empty.
        //
        // If the closure panics, then the destructor of the `AllEntries` bomb
        // ensures that we keep running the destructor on the remaining values.
        // A second panic will abort the program.
        while all_entries.pop_next() {}
    }
}
/// Moves every entry from `from` into `to`, marking each entry as being in
/// neither of the shared lists.
///
/// # Safety
///
/// The mutex for the entries must be held, and the target list must be such
/// that setting `my_list` to `Neither` is ok.
unsafe fn move_to_new_list<T>(from: &mut LinkedList<T>, to: &mut LinkedList<T>) {
    while let Some(entry) = from.pop_back() {
        entry.my_list.with_mut(|ptr| {
            // Safety: pointer is accessed while holding the mutex.
            unsafe {
                *ptr = List::Neither;
            }
        });
        to.push_front(entry);
    }
}
impl<'a, T> EntryInOneOfTheLists<'a, T> {
    /// Remove this entry from the list it is in, returning the value associated
    /// with the entry.
    ///
    /// This consumes the value, since it is no longer guaranteed to be in a
    /// list.
    pub(crate) fn remove(self) -> T {
        self.set.length -= 1;

        {
            let mut lock = self.set.lists.lock();

            // Safety: We are holding the lock so there is no race, and we will
            // remove the entry afterwards to uphold invariants.
            let old_my_list = self.entry.my_list.with_mut(|ptr| unsafe {
                let old_my_list = *ptr;
                *ptr = List::Neither;
                old_my_list
            });

            let list = match old_my_list {
                List::Idle => &mut lock.idle,
                List::Notified => &mut lock.notified,
                // An entry in one of the lists is in one of the lists.
                List::Neither => unreachable!(),
            };

            unsafe {
                // Safety: We just checked that the entry is in this particular
                // list.
                list.remove(ListEntry::as_raw(&self.entry)).unwrap();
            }
        }

        // By setting `my_list` to `Neither`, we have taken ownership of the
        // value. We return it to the caller.
        //
        // Safety: We have a mutable reference to the `IdleNotifiedSet` that
        // owns this entry, so we can use its permission to access the value.
        self.entry
            .value
            .with_mut(|ptr| unsafe { ManuallyDrop::take(&mut *ptr) })
    }

    /// Access the value in this entry together with a context for its waker.
    pub(crate) fn with_value_and_context<F, U>(&mut self, func: F) -> U
    where
        F: FnOnce(&mut T, &mut Context<'_>) -> U,
        T: 'static,
    {
        // Build a waker backed by this entry's `Wake` impl, so waking it
        // moves the entry from the idle list to the notified list.
        let waker = waker_ref(&self.entry);

        let mut context = Context::from_waker(&waker);

        // Safety: We have a mutable reference to the `IdleNotifiedSet` that
        // owns this entry, so we can use its permission to access the value.
        self.entry
            .value
            .with_mut(|ptr| unsafe { func(&mut *ptr, &mut context) })
    }
}
impl<T> Drop for IdleNotifiedSet<T> {
    fn drop(&mut self) {
        // Clear both lists.
        self.drain(drop);

        // In debug builds, verify that draining left both shared lists empty.
        // Skipped while panicking to avoid asserting during unwinding.
        #[cfg(debug_assertions)]
        if !std::thread::panicking() {
            let lock = self.lists.lock();
            assert!(lock.idle.is_empty());
            assert!(lock.notified.is_empty());
        }
    }
}
impl<T: 'static> Wake for ListEntry<T> {
    // Moves this entry from the idle list to the notified list (if it is
    // idle) and fires the stored waker, if any.
    fn wake_by_ref(me: &Arc<Self>) {
        let mut lock = me.parent.lock();

        // Safety: We are holding the lock and we will update the lists to
        // maintain invariants.
        let old_my_list = me.my_list.with_mut(|ptr| unsafe {
            let old_my_list = *ptr;
            if old_my_list == List::Idle {
                *ptr = List::Notified;
            }
            old_my_list
        });

        if old_my_list == List::Idle {
            // We move ourself to the notified list.
            let me = unsafe {
                // Safety: We just checked that we are in this particular list.
                lock.idle.remove(ListEntry::as_raw(me)).unwrap()
            };
            lock.notified.push_front(me);

            if let Some(waker) = lock.waker.take() {
                // The mutex is released before the waker runs, presumably so
                // that arbitrary waker code does not execute under the lock.
                drop(lock);
                waker.wake();
            }
        }
    }

    fn wake(me: Arc<Self>) {
        Self::wake_by_ref(&me);
    }
}
/// # Safety
///
/// `ListEntry` is forced to be !Unpin.
unsafe impl<T> linked_list::Link for ListEntry<T> {
    type Handle = Arc<ListEntry<T>>;
    type Target = ListEntry<T>;

    // Converts a handle to its raw node pointer without consuming the Arc.
    fn as_raw(handle: &Self::Handle) -> NonNull<ListEntry<T>> {
        let ptr: *const ListEntry<T> = Arc::as_ptr(handle);
        // Safety: We can't get a null pointer from `Arc::as_ptr`.
        unsafe { NonNull::new_unchecked(ptr as *mut ListEntry<T>) }
    }

    unsafe fn from_raw(ptr: NonNull<ListEntry<T>>) -> Arc<ListEntry<T>> {
        unsafe { Arc::from_raw(ptr.as_ptr()) }
    }

    unsafe fn pointers(
        target: NonNull<ListEntry<T>>,
    ) -> NonNull<linked_list::Pointers<ListEntry<T>>> {
        unsafe { ListEntry::addr_of_pointers(target) }
    }
}
#[cfg(all(test, not(loom)))]
mod tests {
    use crate::runtime::Builder;
    use crate::task::JoinSet;

    // A test that runs under miri.
    //
    // https://github.com/tokio-rs/tokio/pull/5693
    #[test]
    fn join_set_test() {
        let rt = Builder::new_current_thread().build().unwrap();
        let mut set = JoinSet::new();
        set.spawn_on(futures::future::ready(()), rt.handle());
        rt.block_on(set.join_next()).unwrap().unwrap();
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/try_lock.rs | tokio/src/util/try_lock.rs | use crate::loom::sync::atomic::AtomicBool;
use std::cell::UnsafeCell;
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::Ordering::SeqCst;
/// A minimal non-blocking lock: `try_lock` either acquires immediately or
/// returns `None`; there is no waiting or spinning.
pub(crate) struct TryLock<T> {
    /// `true` while a `LockGuard` is outstanding.
    locked: AtomicBool,
    /// The protected value; only accessed through a `LockGuard`.
    data: UnsafeCell<T>,
}
/// RAII guard giving access to the locked data; releases the lock on drop.
pub(crate) struct LockGuard<'a, T> {
    lock: &'a TryLock<T>,
    /// `Rc` marker makes the guard `!Send`, so the lock is always released on
    /// the thread that acquired it.
    _p: PhantomData<std::rc::Rc<()>>,
}
// Safety: the lock hands out at most one guard at a time, so sharing or moving
// a `TryLock` across threads only requires the data itself to be `Send`
// (same reasoning as `Mutex<T>`).
unsafe impl<T: Send> Send for TryLock<T> {}
unsafe impl<T: Send> Sync for TryLock<T> {}
// Safety: a shared `&LockGuard` only exposes `&T`, so sharing the guard across
// threads requires `T: Sync`.
unsafe impl<T: Sync> Sync for LockGuard<'_, T> {}
/// Shared constructor body for the `cfg(loom)` / `cfg(not(loom))` variants of
/// `TryLock::new` below — presumably needed because loom's `AtomicBool::new`
/// is not `const`; confirm against the loom API.
macro_rules! new {
    ($data:ident) => {
        TryLock {
            locked: AtomicBool::new(false),
            data: UnsafeCell::new($data),
        }
    };
}
impl<T> TryLock<T> {
    /// Creates a new unlocked `TryLock` wrapping `data`.
    ///
    /// `const` outside of loom so it can initialize statics.
    #[cfg(not(loom))]
    pub(crate) const fn new(data: T) -> TryLock<T> {
        new!(data)
    }

    /// Creates a new unlocked `TryLock` wrapping `data`.
    #[cfg(loom)]
    pub(crate) fn new(data: T) -> TryLock<T> {
        new!(data)
    }

    /// Attempts to acquire the lock without blocking.
    ///
    /// Returns `None` when the lock is already held.
    pub(crate) fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        // Flip `locked` from false to true; losing the race means someone
        // else currently holds the lock.
        let acquired = self
            .locked
            .compare_exchange(false, true, SeqCst, SeqCst)
            .is_ok();
        acquired.then(|| LockGuard {
            lock: self,
            _p: PhantomData,
        })
    }
}
impl<T> Deref for LockGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // Safety: the guard's existence proves the lock is held, so no other
        // guard can be aliasing the data.
        unsafe { &*self.lock.data.get() }
    }
}
impl<T> DerefMut for LockGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        // Safety: exclusive borrow of the (unique) guard gives exclusive
        // access to the data.
        unsafe { &mut *self.lock.data.get() }
    }
}
impl<T> Drop for LockGuard<'_, T> {
    fn drop(&mut self) {
        // Release the lock so a subsequent `try_lock` can succeed.
        self.lock.locked.store(false, SeqCst);
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/rand/rt_unstable.rs | tokio/src/util/rand/rt_unstable.rs | use super::RngSeed;
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
impl RngSeed {
    /// Generates a seed from the provided byte slice.
    ///
    /// # Example
    ///
    /// ```
    /// # use tokio::runtime::RngSeed;
    /// let seed = RngSeed::from_bytes(b"make me a seed");
    /// ```
    pub fn from_bytes(bytes: &[u8]) -> Self {
        // Hash the bytes down to a single u64 and build the seed from that.
        let mut hasher = DefaultHasher::new();
        Hasher::write(&mut hasher, bytes);
        Self::from_u64(hasher.finish())
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/util/rand/rt.rs | tokio/src/util/rand/rt.rs | use super::{FastRand, RngSeed};
use std::sync::Mutex;
/// A deterministic generator for seeds (and other generators).
///
/// Given the same initial seed, the generator will output the same sequence of seeds.
///
/// Since the seed generator will be kept in a runtime handle, we need to wrap `FastRand`
/// in a Mutex to make it thread safe. Different to the `FastRand` that we keep in a
/// thread local store, the expectation is that seed generation will not need to happen
/// very frequently, so the cost of the mutex should be minimal.
#[derive(Debug)]
pub(crate) struct RngSeedGenerator {
    /// Internal state for the seed generator. We keep it in a Mutex so that we can safely
    /// use it across multiple threads. Seed generation is expected to be rare
    /// (see type docs), so contention on this lock should be negligible.
    state: Mutex<FastRand>,
}
impl RngSeedGenerator {
    /// Returns a new generator from the provided seed.
    pub(crate) fn new(seed: RngSeed) -> Self {
        let rng = FastRand::from_seed(seed);
        Self {
            state: Mutex::new(rng),
        }
    }

    /// Returns the next seed in the sequence.
    ///
    /// Deterministic: the same initial seed always yields the same sequence.
    pub(crate) fn next_seed(&self) -> RngSeed {
        let mut rng = self
            .state
            .lock()
            .expect("RNG seed generator is internally corrupt");

        // Draw two values from the internal RNG to form the next seed pair.
        let first = rng.fastrand();
        let second = rng.fastrand();

        RngSeed::from_pair(first, second)
    }

    /// Directly creates a generator using the next seed.
    pub(crate) fn next_generator(&self) -> Self {
        Self::new(self.next_seed())
    }
}
impl FastRand {
    /// Replaces the state of the random number generator with the provided
    /// seed, returning the seed that represents the previous state of the
    /// random number generator.
    ///
    /// The random number generator will become equivalent to one created with
    /// the same seed.
    pub(crate) fn replace_seed(&mut self, seed: RngSeed) -> RngSeed {
        // Swap both state words in place, capturing the previous values.
        let prev_one = std::mem::replace(&mut self.one, seed.s);
        let prev_two = std::mem::replace(&mut self.two, seed.r);
        RngSeed::from_pair(prev_one, prev_two)
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/process/kill.rs | tokio/src/process/kill.rs | use std::io;
/// An interface for killing a running process.
pub(crate) trait Kill {
    /// Forcefully kills the process.
    fn kill(&mut self) -> io::Result<()>;
}

/// Blanket forwarding impl: a mutable reference to a killer is itself a killer.
impl<T: Kill> Kill for &mut T {
    fn kill(&mut self) -> io::Result<()> {
        T::kill(&mut **self)
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/process/windows.rs | tokio/src/process/windows.rs | //! Windows asynchronous process handling.
//!
//! Like with Unix we don't actually have a way of registering a process with an
//! IOCP object. As a result we similarly need another mechanism for getting a
//! signal when a process has exited. For now this is implemented with the
//! `RegisterWaitForSingleObject` function in the kernel32.dll.
//!
//! This strategy is the same that libuv takes and essentially just queues up a
//! wait for the process in a kernel32-specific thread pool. Once the object is
//! notified (e.g. the process exits) then we have a callback that basically
//! just completes a `Oneshot`.
//!
//! The `poll_exit` implementation will attempt to wait for the process in a
//! nonblocking fashion, but failing that it'll fire off a
//! `RegisterWaitForSingleObject` and then wait on the other end of the oneshot
//! from then on out.
use crate::io::{blocking::Blocking, AsyncRead, AsyncWrite, ReadBuf};
use crate::process::kill::Kill;
use crate::process::SpawnedChild;
use crate::sync::oneshot;
use std::fmt;
use std::fs::File as StdFile;
use std::future::Future;
use std::io;
use std::os::windows::prelude::{AsRawHandle, IntoRawHandle, OwnedHandle, RawHandle};
use std::pin::Pin;
use std::process::Stdio;
use std::process::{Child as StdChild, ExitStatus};
use std::ptr::null_mut;
use std::sync::Arc;
use std::task::{Context, Poll};
use windows_sys::{
Win32::Foundation::{DuplicateHandle, DUPLICATE_SAME_ACCESS, HANDLE, INVALID_HANDLE_VALUE},
Win32::System::Threading::{
GetCurrentProcess, RegisterWaitForSingleObject, UnregisterWaitEx, INFINITE,
WT_EXECUTEINWAITTHREAD, WT_EXECUTEONLYONCE,
},
};
#[must_use = "futures do nothing unless polled"]
pub(crate) struct Child {
    /// The underlying std child-process handle.
    child: StdChild,
    /// Set once a kernel wait has been registered for process exit; `None`
    /// until the first poll finds the process still running.
    waiting: Option<Waiting>,
}
impl fmt::Debug for Child {
    // Manual impl: `Waiting` holds raw pointers, so it is elided from output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("Child");
        dbg.field("pid", &self.id());
        dbg.field("child", &self.child);
        dbg.field("waiting", &"..");
        dbg.finish()
    }
}
/// State for an in-flight `RegisterWaitForSingleObject` registration.
struct Waiting {
    /// Completed by `callback` once the process handle is signaled.
    rx: oneshot::Receiver<()>,
    /// The registered wait handle; unregistered in `Drop`.
    wait_object: HANDLE,
    /// Leaked `Box` holding the oneshot sender passed to the callback;
    /// reclaimed in `Drop`.
    tx: *mut Option<oneshot::Sender<()>>,
}
// Safety: the raw `tx` pointer is only dereferenced by the kernel callback and
// by `Drop` after the wait has been unregistered — NOTE(review): this relies
// on `UnregisterWaitEx(.., INVALID_HANDLE_VALUE)` blocking until in-flight
// callbacks complete; confirm against the Win32 docs.
unsafe impl Sync for Waiting {}
unsafe impl Send for Waiting {}
pub(crate) fn build_child(mut child: StdChild) -> io::Result<SpawnedChild> {
let stdin = child.stdin.take().map(stdio).transpose()?;
let stdout = child.stdout.take().map(stdio).transpose()?;
let stderr = child.stderr.take().map(stdio).transpose()?;
Ok(SpawnedChild {
child: Child {
child,
waiting: None,
},
stdin,
stdout,
stderr,
})
}
impl Child {
    /// Returns the OS-assigned process identifier.
    pub(crate) fn id(&self) -> u32 {
        self.child.id()
    }
    /// Non-blocking check for process exit; `Ok(None)` while still running.
    pub(crate) fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
        self.child.try_wait()
    }
}
impl Kill for Child {
    // Forcefully terminates the process via the std handle.
    fn kill(&mut self) -> io::Result<()> {
        self.child.kill()
    }
}
impl Future for Child {
    type Output = io::Result<ExitStatus>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let inner = Pin::get_mut(self);
        loop {
            // If a wait has already been registered, park on the oneshot
            // until the kernel callback signals process exit.
            if let Some(ref mut w) = inner.waiting {
                match Pin::new(&mut w.rx).poll(cx) {
                    Poll::Ready(Ok(())) => {}
                    Poll::Ready(Err(_)) => panic!("should not be canceled"),
                    Poll::Pending => return Poll::Pending,
                }
                // The callback only fires once the handle is signaled, so the
                // exit status must be available now.
                let status = inner.try_wait()?.expect("not ready yet");
                return Poll::Ready(Ok(status));
            }
            // Fast path: the process may already have exited.
            if let Some(e) = inner.try_wait()? {
                return Poll::Ready(Ok(e));
            }
            // Slow path: queue a one-shot wait on the process handle in the
            // system thread pool; `callback` receives `ptr` when it fires.
            let (tx, rx) = oneshot::channel();
            let ptr = Box::into_raw(Box::new(Some(tx)));
            let mut wait_object = null_mut();
            let rc = unsafe {
                RegisterWaitForSingleObject(
                    &mut wait_object,
                    inner.child.as_raw_handle() as _,
                    Some(callback),
                    ptr as *mut _,
                    INFINITE,
                    WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE,
                )
            };
            if rc == 0 {
                let err = io::Error::last_os_error();
                // Registration failed so the callback will never run;
                // reclaim the boxed sender to avoid leaking it.
                drop(unsafe { Box::from_raw(ptr) });
                return Poll::Ready(Err(err));
            }
            // Loop once more: the process may have exited between `try_wait`
            // and registration; otherwise we return Pending via the oneshot.
            inner.waiting = Some(Waiting {
                rx,
                wait_object,
                tx: ptr,
            });
        }
    }
}
impl AsRawHandle for Child {
    // Exposes the raw process handle of the underlying std child.
    fn as_raw_handle(&self) -> RawHandle {
        self.child.as_raw_handle()
    }
}
impl Drop for Waiting {
    fn drop(&mut self) {
        unsafe {
            // Passing INVALID_HANDLE_VALUE makes `UnregisterWaitEx` block
            // until any in-flight callback has completed, so it is safe to
            // free `tx` afterwards.
            let rc = UnregisterWaitEx(self.wait_object, INVALID_HANDLE_VALUE);
            if rc == 0 {
                panic!("failed to unregister: {}", io::Error::last_os_error());
            }
            // The callback has either run or never will; reclaim the sender.
            drop(Box::from_raw(self.tx));
        }
    }
}
// Invoked by the system thread pool when the waited-on handle is signaled.
// `ptr` is the `Box<Option<oneshot::Sender<()>>>` leaked during registration;
// the box itself is freed later by `Waiting::drop`, not here.
unsafe extern "system" fn callback(ptr: *mut std::ffi::c_void, _timer_fired: bool) {
    let complete = unsafe { &mut *(ptr as *mut Option<oneshot::Sender<()>>) };
    // Receiver may already be gone (e.g. the future was dropped); ignore errors.
    let _ = complete.take().unwrap().send(());
}
/// `Read`/`Write` adapter over a shared `File`, delegating to the `&File`
/// impls so the same handle can back both async I/O and raw-handle access.
#[derive(Debug)]
struct ArcFile(Arc<StdFile>);
impl io::Read for ArcFile {
    fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> {
        // Delegate to the `Read` impl on `&File`, which does not require
        // exclusive access to the underlying file.
        io::Read::read(&mut &*self.0, bytes)
    }
}
impl io::Write for ArcFile {
    fn write(&mut self, bytes: &[u8]) -> io::Result<usize> {
        // Delegate to the `Write` impl on `&File`.
        io::Write::write(&mut &*self.0, bytes)
    }
    fn flush(&mut self) -> io::Result<()> {
        io::Write::flush(&mut &*self.0)
    }
}
/// Asynchronous handle to one of a child's standard streams.
#[derive(Debug)]
pub(crate) struct ChildStdio {
    // Used for accessing the raw handle, even if the io version is busy
    raw: Arc<StdFile>,
    // For doing I/O operations asynchronously (via the blocking pool)
    io: Blocking<ArcFile>,
}
impl ChildStdio {
    /// Consumes the stream, returning sole ownership of the OS handle
    /// (duplicating the handle if the inner `Arc` is still shared).
    pub(super) fn into_owned_handle(self) -> io::Result<OwnedHandle> {
        convert_to_file(self).map(OwnedHandle::from)
    }
}
impl AsRawHandle for ChildStdio {
    // Accessible even while `io` has the file checked out on the blocking pool.
    fn as_raw_handle(&self) -> RawHandle {
        self.raw.as_raw_handle()
    }
}
impl AsyncRead for ChildStdio {
    // Delegates to the blocking-pool adapter wrapping the file.
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        Pin::new(&mut self.io).poll_read(cx, buf)
    }
}
impl AsyncWrite for ChildStdio {
    // All three operations delegate to the blocking-pool adapter.
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Pin::new(&mut self.io).poll_write(cx, buf)
    }
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Pin::new(&mut self.io).poll_flush(cx)
    }
    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Pin::new(&mut self.io).poll_shutdown(cx)
    }
}
/// Wraps a raw child pipe handle into an async `ChildStdio`.
pub(super) fn stdio<T>(io: T) -> io::Result<ChildStdio>
where
    T: IntoRawHandle,
{
    use std::os::windows::prelude::FromRawHandle;
    // Take ownership of the raw handle as a `File`; the `Arc` keeps the raw
    // handle reachable while the blocking pool owns the I/O half.
    let raw = Arc::new(unsafe { StdFile::from_raw_handle(io.into_raw_handle()) });
    let io = ArcFile(raw.clone());
    // SAFETY: the `Read` implementation of `io` does not
    // read from the buffer it is borrowing and correctly
    // reports the length of the data written into the buffer.
    let io = unsafe { Blocking::new(io) };
    Ok(ChildStdio { raw, io })
}
/// Recovers sole ownership of the underlying `File` from a `ChildStdio`,
/// duplicating the OS handle when the `Arc` is still shared.
fn convert_to_file(child_stdio: ChildStdio) -> io::Result<StdFile> {
    let ChildStdio { raw, io } = child_stdio;
    // Dropping the async half may leave `raw` as the sole owner.
    drop(io);
    match Arc::try_unwrap(raw) {
        Ok(file) => Ok(file),
        Err(shared) => duplicate_handle(&*shared),
    }
}
/// Converts an async `ChildStdio` back into a synchronous `Stdio` handle.
pub(crate) fn convert_to_stdio(child_stdio: ChildStdio) -> io::Result<Stdio> {
    let file = convert_to_file(child_stdio)?;
    Ok(Stdio::from(file))
}
/// Duplicates the handle within the current process, returning a new `File`
/// that owns the duplicate.
fn duplicate_handle<T: AsRawHandle>(io: &T) -> io::Result<StdFile> {
    use std::os::windows::prelude::FromRawHandle;
    unsafe {
        let mut dup_handle = INVALID_HANDLE_VALUE;
        let cur_proc = GetCurrentProcess();
        // Duplicate into our own process, keeping the same access rights.
        let status = DuplicateHandle(
            cur_proc,
            io.as_raw_handle() as _,
            cur_proc,
            &mut dup_handle,
            0,
            0,
            DUPLICATE_SAME_ACCESS,
        );
        if status == 0 {
            return Err(io::Error::last_os_error());
        }
        // Safety: on success `dup_handle` is a valid handle we now own.
        Ok(StdFile::from_raw_handle(dup_handle as _))
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/process/mod.rs | tokio/src/process/mod.rs | //! An implementation of asynchronous process management for Tokio.
//!
//! This module provides a [`Command`] struct that imitates the interface of the
//! [`std::process::Command`] type in the standard library, but provides asynchronous versions of
//! functions that create processes. These functions (`spawn`, `status`, `output` and their
//! variants) return "future aware" types that interoperate with Tokio. The asynchronous process
//! support is provided through signal handling on Unix and system APIs on Windows.
//!
//! [`std::process::Command`]: std::process::Command
//!
//! # Examples
//!
//! Here's an example program which will spawn `echo hello world` and then wait
//! for it to complete.
//!
//! ```no_run
//! use tokio::process::Command;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // The usage is similar as with the standard library's `Command` type
//! let mut child = Command::new("echo")
//! .arg("hello")
//! .arg("world")
//! .spawn()
//! .expect("failed to spawn");
//!
//! // Await until the command completes
//! let status = child.wait().await?;
//! println!("the command exited with: {}", status);
//! Ok(())
//! }
//! ```
//!
//! Next, let's take a look at an example where we not only spawn `echo hello
//! world` but we also capture its output.
//!
//! ```no_run
//! use tokio::process::Command;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Like above, but use `output` which returns a future instead of
//! // immediately returning the `Child`.
//! let output = Command::new("echo").arg("hello").arg("world")
//! .output();
//!
//! let output = output.await?;
//!
//! assert!(output.status.success());
//! assert_eq!(output.stdout, b"hello world\n");
//! Ok(())
//! }
//! ```
//!
//! We can also read input line by line.
//!
//! ```no_run
//! use tokio::io::{BufReader, AsyncBufReadExt};
//! use tokio::process::Command;
//!
//! use std::process::Stdio;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let mut cmd = Command::new("cat");
//!
//! // Specify that we want the command's standard output piped back to us.
//! // By default, standard input/output/error will be inherited from the
//! // current process (for example, this means that standard input will
//! // come from the keyboard and standard output/error will go directly to
//! // the terminal if this process is invoked from the command line).
//! cmd.stdout(Stdio::piped());
//!
//! let mut child = cmd.spawn()
//! .expect("failed to spawn command");
//!
//! let stdout = child.stdout.take()
//! .expect("child did not have a handle to stdout");
//!
//! let mut reader = BufReader::new(stdout).lines();
//!
//! // Ensure the child process is spawned in the runtime so it can
//! // make progress on its own while we await for any output.
//! tokio::spawn(async move {
//! let status = child.wait().await
//! .expect("child process encountered an error");
//!
//! println!("child status was: {}", status);
//! });
//!
//! while let Some(line) = reader.next_line().await? {
//! println!("Line: {}", line);
//! }
//!
//! Ok(())
//! }
//! ```
//!
//! Here is another example using `sort` writing into the child process
//! standard input, capturing the output of the sorted text.
//!
//! ```no_run
//! use tokio::io::AsyncWriteExt;
//! use tokio::process::Command;
//!
//! use std::process::Stdio;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let mut cmd = Command::new("sort");
//!
//! // Specifying that we want to pipe both the output and the input.
//! // Similarly to capturing the output, by configuring the pipe
//! // to stdin it can now be used as an asynchronous writer.
//! cmd.stdout(Stdio::piped());
//! cmd.stdin(Stdio::piped());
//!
//! let mut child = cmd.spawn().expect("failed to spawn command");
//!
//! // These are the animals we want to sort
//! let animals: &[&str] = &["dog", "bird", "frog", "cat", "fish"];
//!
//! let mut stdin = child
//! .stdin
//! .take()
//! .expect("child did not have a handle to stdin");
//!
//! // Write our animals to the child process
//! // Note that the behavior of `sort` is to buffer _all input_ before writing any output.
//! // In the general sense, it is recommended to write to the child in a separate task as
//! // awaiting its exit (or output) to avoid deadlocks (for example, the child tries to write
//! // some output but gets stuck waiting on the parent to read from it, meanwhile the parent
//! // is stuck waiting to write its input completely before reading the output).
//! stdin
//! .write(animals.join("\n").as_bytes())
//! .await
//! .expect("could not write to stdin");
//!
//! // We drop the handle here which signals EOF to the child process.
//! // This tells the child process that there is no more data on the pipe.
//! drop(stdin);
//!
//! let op = child.wait_with_output().await?;
//!
//! // Results should come back in sorted order
//! assert_eq!(op.stdout, "bird\ncat\ndog\nfish\nfrog\n".as_bytes());
//!
//! Ok(())
//! }
//! ```
//!
//! With some coordination, we can also pipe the output of one command into
//! another.
//!
//! ```no_run
//! use tokio::join;
//! use tokio::process::Command;
//! use std::process::Stdio;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let mut echo = Command::new("echo")
//! .arg("hello world!")
//! .stdout(Stdio::piped())
//! .spawn()
//! .expect("failed to spawn echo");
//!
//! let tr_stdin: Stdio = echo
//! .stdout
//! .take()
//! .unwrap()
//! .try_into()
//! .expect("failed to convert to Stdio");
//!
//! let tr = Command::new("tr")
//! .arg("a-z")
//! .arg("A-Z")
//! .stdin(tr_stdin)
//! .stdout(Stdio::piped())
//! .spawn()
//! .expect("failed to spawn tr");
//!
//! let (echo_result, tr_output) = join!(echo.wait(), tr.wait_with_output());
//!
//! assert!(echo_result.unwrap().success());
//!
//! let tr_output = tr_output.expect("failed to await tr");
//! assert!(tr_output.status.success());
//!
//! assert_eq!(tr_output.stdout, b"HELLO WORLD!\n");
//!
//! Ok(())
//! }
//! ```
//!
//! # Caveats
//!
//! ## Dropping/Cancellation
//!
//! Similar to the behavior to the standard library, and unlike the futures
//! paradigm of dropping-implies-cancellation, a spawned process will, by
//! default, continue to execute even after the `Child` handle has been dropped.
//!
//! The [`Command::kill_on_drop`] method can be used to modify this behavior
//! and kill the child process if the `Child` wrapper is dropped before it
//! has exited.
//!
//! ## Unix Processes
//!
//! On Unix platforms processes must be "reaped" by their parent process after
//! they have exited in order to release all OS resources. A child process which
//! has exited, but has not yet been reaped by its parent is considered a "zombie"
//! process. Such processes continue to count against limits imposed by the system,
//! and having too many zombie processes present can prevent additional processes
//! from being spawned.
//!
//! The tokio runtime will, on a best-effort basis, attempt to reap and clean up
//! any process which it has spawned. No additional guarantees are made with regard to
//! how quickly or how often this procedure will take place.
//!
//! It is recommended to avoid dropping a [`Child`] process handle before it has been
//! fully `await`ed if stricter cleanup guarantees are required.
//!
//! [`Command`]: crate::process::Command
//! [`Command::kill_on_drop`]: crate::process::Command::kill_on_drop
//! [`Child`]: crate::process::Child
#[path = "unix/mod.rs"]
#[cfg(unix)]
mod imp;
#[cfg(unix)]
pub(crate) mod unix {
pub(crate) use super::imp::*;
}
#[path = "windows.rs"]
#[cfg(windows)]
mod imp;
mod kill;
use crate::io::{AsyncRead, AsyncWrite, ReadBuf};
use crate::process::kill::Kill;
use std::ffi::OsStr;
use std::future::Future;
use std::io;
use std::path::Path;
use std::pin::Pin;
use std::process::{Child as StdChild, Command as StdCommand, ExitStatus, Output, Stdio};
use std::task::{ready, Context, Poll};
#[cfg(unix)]
use std::os::unix::process::CommandExt;
#[cfg(windows)]
use std::os::windows::process::CommandExt;
cfg_windows! {
use crate::os::windows::io::{AsRawHandle, RawHandle};
}
/// This structure mimics the API of [`std::process::Command`] found in the standard library, but
/// replaces functions that create a process with an asynchronous variant. The main provided
/// asynchronous functions are [spawn](Command::spawn), [status](Command::status), and
/// [output](Command::output).
///
/// `Command` uses asynchronous versions of some `std` types (for example [`Child`]).
///
/// [`std::process::Command`]: std::process::Command
/// [`Child`]: struct@Child
#[derive(Debug)]
pub struct Command {
    /// The wrapped standard-library command builder; most setters forward here.
    std: StdCommand,
    /// Tokio-specific option: kill the spawned child when its handle is dropped.
    kill_on_drop: bool,
}
/// Result of spawning: the platform child handle plus whichever standard
/// streams were configured as pipes, already wrapped for async I/O.
pub(crate) struct SpawnedChild {
    child: imp::Child,
    stdin: Option<imp::ChildStdio>,
    stdout: Option<imp::ChildStdio>,
    stderr: Option<imp::ChildStdio>,
}
impl Command {
/// Constructs a new `Command` for launching the program at
/// path `program`, with the following default configuration:
///
/// * No arguments to the program
/// * Inherit the current process's environment
/// * Inherit the current process's working directory
/// * Inherit stdin/stdout/stderr for `spawn` or `status`, but create pipes for `output`
///
/// Builder methods are provided to change these defaults and
/// otherwise configure the process.
///
/// If `program` is not an absolute path, the `PATH` will be searched in
/// an OS-defined way.
///
/// The search path to be used may be controlled by setting the
/// `PATH` environment variable on the Command,
/// but this has some implementation limitations on Windows
/// (see issue [rust-lang/rust#37519]).
///
/// # Examples
///
/// Basic usage:
///
/// ```no_run
/// use tokio::process::Command;
/// let mut command = Command::new("sh");
/// # let _ = command.output(); // assert borrow checker
/// ```
///
/// [rust-lang/rust#37519]: https://github.com/rust-lang/rust/issues/37519
pub fn new<S: AsRef<OsStr>>(program: S) -> Command {
Self::from(StdCommand::new(program))
}
/// Cheaply convert to a `&std::process::Command` for places where the type from the standard
/// library is expected.
    pub fn as_std(&self) -> &StdCommand {
        // Zero-cost borrow of the inner std builder.
        &self.std
    }
/// Cheaply convert to a `&mut std::process::Command` for places where the type from the
/// standard library is expected.
    pub fn as_std_mut(&mut self) -> &mut StdCommand {
        // Zero-cost mutable borrow of the inner std builder.
        &mut self.std
    }
/// Cheaply convert into a `std::process::Command`.
///
/// Note that Tokio specific options will be lost. Currently, this only applies to [`kill_on_drop`].
///
/// [`kill_on_drop`]: Command::kill_on_drop
    pub fn into_std(self) -> StdCommand {
        // Discards Tokio-only state (`kill_on_drop`), as documented above.
        self.std
    }
/// Adds an argument to pass to the program.
///
/// Only one argument can be passed per use. So instead of:
///
/// ```no_run
/// let mut command = tokio::process::Command::new("sh");
/// command.arg("-C /path/to/repo");
///
/// # let _ = command.output(); // assert borrow checker
/// ```
///
/// usage would be:
///
/// ```no_run
/// let mut command = tokio::process::Command::new("sh");
/// command.arg("-C");
/// command.arg("/path/to/repo");
///
/// # let _ = command.output(); // assert borrow checker
/// ```
///
/// To pass multiple arguments see [`args`].
///
/// [`args`]: method@Self::args
///
/// # Examples
///
/// Basic usage:
///
/// ```no_run
/// # async fn test() { // allow using await
/// use tokio::process::Command;
///
/// let output = Command::new("ls")
/// .arg("-l")
/// .arg("-a")
/// .output().await.unwrap();
/// # }
///
/// ```
    pub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
        // Forward to the wrapped std command; return `self` for chaining.
        self.std.arg(arg);
        self
    }
/// Adds multiple arguments to pass to the program.
///
/// To pass a single argument see [`arg`].
///
/// [`arg`]: method@Self::arg
///
/// # Examples
///
/// Basic usage:
///
/// ```no_run
/// # async fn test() { // allow using await
/// use tokio::process::Command;
///
/// let output = Command::new("ls")
/// .args(&["-l", "-a"])
/// .output().await.unwrap();
/// # }
/// ```
    pub fn args<I, S>(&mut self, args: I) -> &mut Command
    where
        I: IntoIterator<Item = S>,
        S: AsRef<OsStr>,
    {
        // Forward to the wrapped std command; return `self` for chaining.
        self.std.args(args);
        self
    }
    cfg_windows! {
        /// Append literal text to the command line without any quoting or escaping.
        ///
        /// This is useful for passing arguments to `cmd.exe /c`, which doesn't follow
        /// `CommandLineToArgvW` escaping rules.
        pub fn raw_arg<S: AsRef<OsStr>>(&mut self, text_to_append_as_is: S) -> &mut Command {
            // Forwarded verbatim to std's Windows-only `raw_arg`.
            self.std.raw_arg(text_to_append_as_is);
            self
        }
    }
/// Inserts or updates an environment variable mapping.
///
/// Note that environment variable names are case-insensitive (but case-preserving) on Windows,
/// and case-sensitive on all other platforms.
///
/// # Examples
///
/// Basic usage:
///
/// ```no_run
/// # async fn test() { // allow using await
/// use tokio::process::Command;
///
/// let output = Command::new("ls")
/// .env("PATH", "/bin")
/// .output().await.unwrap();
/// # }
/// ```
    pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Command
    where
        K: AsRef<OsStr>,
        V: AsRef<OsStr>,
    {
        // Forward to the wrapped std command; return `self` for chaining.
        self.std.env(key, val);
        self
    }
/// Adds or updates multiple environment variable mappings.
///
/// # Examples
///
/// Basic usage:
///
/// ```no_run
/// # async fn test() { // allow using await
/// use tokio::process::Command;
/// use std::process::{Stdio};
/// use std::env;
/// use std::collections::HashMap;
///
/// let filtered_env : HashMap<String, String> =
/// env::vars().filter(|&(ref k, _)|
/// k == "TERM" || k == "TZ" || k == "LANG" || k == "PATH"
/// ).collect();
///
/// let output = Command::new("printenv")
/// .stdin(Stdio::null())
/// .stdout(Stdio::inherit())
/// .env_clear()
/// .envs(&filtered_env)
/// .output().await.unwrap();
/// # }
/// ```
    pub fn envs<I, K, V>(&mut self, vars: I) -> &mut Command
    where
        I: IntoIterator<Item = (K, V)>,
        K: AsRef<OsStr>,
        V: AsRef<OsStr>,
    {
        // Forward to the wrapped std command; return `self` for chaining.
        self.std.envs(vars);
        self
    }
/// Removes an environment variable mapping.
///
/// # Examples
///
/// Basic usage:
///
/// ```no_run
/// # async fn test() { // allow using await
/// use tokio::process::Command;
///
/// let output = Command::new("ls")
/// .env_remove("PATH")
/// .output().await.unwrap();
/// # }
/// ```
    pub fn env_remove<K: AsRef<OsStr>>(&mut self, key: K) -> &mut Command {
        // Forward to the wrapped std command; return `self` for chaining.
        self.std.env_remove(key);
        self
    }
/// Clears the entire environment map for the child process.
///
/// # Examples
///
/// Basic usage:
///
/// ```no_run
/// # async fn test() { // allow using await
/// use tokio::process::Command;
///
/// let output = Command::new("ls")
/// .env_clear()
/// .output().await.unwrap();
/// # }
/// ```
    pub fn env_clear(&mut self) -> &mut Command {
        // Forward to the wrapped std command; return `self` for chaining.
        self.std.env_clear();
        self
    }
/// Sets the working directory for the child process.
///
/// # Platform-specific behavior
///
/// If the program path is relative (e.g., `"./script.sh"`), it's ambiguous
/// whether it should be interpreted relative to the parent's working
/// directory or relative to `current_dir`. The behavior in this case is
/// platform specific and unstable, and it's recommended to use
/// [`canonicalize`] to get an absolute program path instead.
///
/// [`canonicalize`]: crate::fs::canonicalize()
///
/// # Examples
///
/// Basic usage:
///
/// ```no_run
/// # async fn test() { // allow using await
/// use tokio::process::Command;
///
/// let output = Command::new("ls")
/// .current_dir("/bin")
/// .output().await.unwrap();
/// # }
/// ```
    pub fn current_dir<P: AsRef<Path>>(&mut self, dir: P) -> &mut Command {
        // Forward to the wrapped std command; return `self` for chaining.
        self.std.current_dir(dir);
        self
    }
/// Sets configuration for the child process's standard input (stdin) handle.
///
/// Defaults to [`inherit`].
///
/// [`inherit`]: std::process::Stdio::inherit
///
/// # Examples
///
/// Basic usage:
///
/// ```no_run
/// # async fn test() { // allow using await
/// use std::process::{Stdio};
/// use tokio::process::Command;
///
/// let output = Command::new("ls")
/// .stdin(Stdio::null())
/// .output().await.unwrap();
/// # }
/// ```
    pub fn stdin<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
        // Forward to the wrapped std command; return `self` for chaining.
        self.std.stdin(cfg);
        self
    }
/// Sets configuration for the child process's standard output (stdout) handle.
///
/// Defaults to [`inherit`] when used with `spawn` or `status`, and
/// defaults to [`piped`] when used with `output`.
///
/// [`inherit`]: std::process::Stdio::inherit
/// [`piped`]: std::process::Stdio::piped
///
/// # Examples
///
/// Basic usage:
///
/// ```no_run
/// # async fn test() { // allow using await
/// use tokio::process::Command;
/// use std::process::Stdio;
///
/// let output = Command::new("ls")
/// .stdout(Stdio::null())
/// .output().await.unwrap();
/// # }
/// ```
    pub fn stdout<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
        // Forward to the wrapped std command; return `self` for chaining.
        self.std.stdout(cfg);
        self
    }
/// Sets configuration for the child process's standard error (stderr) handle.
///
/// Defaults to [`inherit`] when used with `spawn` or `status`, and
/// defaults to [`piped`] when used with `output`.
///
/// [`inherit`]: std::process::Stdio::inherit
/// [`piped`]: std::process::Stdio::piped
///
/// # Examples
///
/// Basic usage:
///
/// ```no_run
/// # async fn test() { // allow using await
/// use tokio::process::Command;
/// use std::process::{Stdio};
///
/// let output = Command::new("ls")
/// .stderr(Stdio::null())
/// .output().await.unwrap();
/// # }
/// ```
    pub fn stderr<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
        // Forward to the wrapped std command; return `self` for chaining.
        self.std.stderr(cfg);
        self
    }
/// Controls whether a `kill` operation should be invoked on a spawned child
/// process when its corresponding `Child` handle is dropped.
///
/// By default, this value is assumed to be `false`, meaning the next spawned
/// process will not be killed on drop, similar to the behavior of the standard
/// library.
///
/// # Caveats
///
/// On Unix platforms processes must be "reaped" by their parent process after
/// they have exited in order to release all OS resources. A child process which
/// has exited, but has not yet been reaped by its parent is considered a "zombie"
/// process. Such processes continue to count against limits imposed by the system,
/// and having too many zombie processes present can prevent additional processes
/// from being spawned.
///
/// Although issuing a `kill` signal to the child process is a synchronous
/// operation, the resulting zombie process cannot be `.await`ed inside of the
/// destructor to avoid blocking other tasks. The tokio runtime will, on a
/// best-effort basis, attempt to reap and clean up such processes in the
/// background, but no additional guarantees are made with regard to
/// how quickly or how often this procedure will take place.
///
/// If stronger guarantees are required, it is recommended to avoid dropping
/// a [`Child`] handle where possible, and instead utilize `child.wait().await`
/// or `child.kill().await` where possible.
    pub fn kill_on_drop(&mut self, kill_on_drop: bool) -> &mut Command {
        // Tokio-specific flag stored alongside the std command (not part of
        // `std::process::Command`).
        self.kill_on_drop = kill_on_drop;
        self
    }
    cfg_windows! {
        /// Sets the [process creation flags][1] to be passed to `CreateProcess`.
        ///
        /// These will always be ORed with `CREATE_UNICODE_ENVIRONMENT`.
        ///
        /// [1]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863(v=vs.85).aspx
        pub fn creation_flags(&mut self, flags: u32) -> &mut Command {
            // Forward to std's Windows-only `creation_flags`.
            self.std.creation_flags(flags);
            self
        }
    }
/// Sets the child process's user ID. This translates to a
/// `setuid` call in the child process. Failure in the `setuid`
/// call will cause the spawn to fail.
    #[cfg(unix)]
    #[cfg_attr(docsrs, doc(cfg(unix)))]
    pub fn uid(&mut self, id: u32) -> &mut Command {
        // On QNX Neutrino (`nto`) the std `CommandExt::uid` takes a signed
        // id, hence the cast.
        #[cfg(target_os = "nto")]
        let id = id as i32;
        self.std.uid(id);
        self
    }
/// Similar to `uid` but sets the group ID of the child process. This has
/// the same semantics as the `uid` field.
    #[cfg(unix)]
    #[cfg_attr(docsrs, doc(cfg(unix)))]
    pub fn gid(&mut self, id: u32) -> &mut Command {
        // On QNX Neutrino (`nto`) the std `CommandExt::gid` takes a signed
        // id, hence the cast (mirrors `uid` above).
        #[cfg(target_os = "nto")]
        let id = id as i32;
        self.std.gid(id);
        self
    }
    /// Sets executable argument.
    ///
    /// Set the first process argument, `argv[0]`, to something other than the
    /// default executable path.
    #[cfg(unix)]
    #[cfg_attr(docsrs, doc(cfg(unix)))]
    pub fn arg0<S>(&mut self, arg: S) -> &mut Command
    where
        S: AsRef<OsStr>,
    {
        // Thin wrapper over `std::os::unix::process::CommandExt::arg0`.
        self.std.arg0(arg);
        self
    }
    /// Schedules a closure to be run just before the `exec` function is
    /// invoked.
    ///
    /// The closure is allowed to return an I/O error whose OS error code will
    /// be communicated back to the parent and returned as an error from when
    /// the spawn was requested.
    ///
    /// Multiple closures can be registered and they will be called in order of
    /// their registration. If a closure returns `Err` then no further closures
    /// will be called and the spawn operation will immediately return with a
    /// failure.
    ///
    /// # Safety
    ///
    /// This closure will be run in the context of the child process after a
    /// `fork`. This primarily means that any modifications made to memory on
    /// behalf of this closure will **not** be visible to the parent process.
    /// This is often a very constrained environment where normal operations
    /// like `malloc` or acquiring a mutex are not guaranteed to work (due to
    /// other threads perhaps still running when the `fork` was run).
    ///
    /// This also means that all resources such as file descriptors and
    /// memory-mapped regions got duplicated. It is your responsibility to make
    /// sure that the closure does not violate library invariants by making
    /// invalid use of these duplicates.
    ///
    /// When this closure is run, aspects such as the stdio file descriptors and
    /// working directory have successfully been changed, so output to these
    /// locations may not appear where intended.
    #[cfg(unix)]
    #[cfg_attr(docsrs, doc(cfg(unix)))]
    pub unsafe fn pre_exec<F>(&mut self, f: F) -> &mut Command
    where
        F: FnMut() -> io::Result<()> + Send + Sync + 'static,
    {
        // SAFETY: the caller upholds the contract described in the
        // `# Safety` section above; we merely forward the closure to `std`.
        unsafe { self.std.pre_exec(f) };
        self
    }
    /// Sets the process group ID (PGID) of the child process. Equivalent to a
    /// `setpgid` call in the child process, but may be more efficient.
    ///
    /// Process groups determine which processes receive signals.
    ///
    /// # Examples
    ///
    /// Pressing Ctrl-C in a terminal will send `SIGINT` to all processes
    /// in the current foreground process group. By spawning the `sleep`
    /// subprocess in a new process group, it will not receive `SIGINT`
    /// from the terminal.
    ///
    /// The parent process could install a [signal handler] and manage the
    /// process on its own terms.
    ///
    /// A process group ID of 0 will use the process ID as the PGID.
    ///
    /// ```no_run
    /// # async fn test() { // allow using await
    /// use tokio::process::Command;
    ///
    /// let output = Command::new("sleep")
    ///     .arg("10")
    ///     .process_group(0)
    ///     .output()
    ///     .await
    ///     .unwrap();
    /// # }
    /// ```
    ///
    /// [signal handler]: crate::signal
    #[cfg(unix)]
    #[cfg_attr(docsrs, doc(cfg(unix)))]
    pub fn process_group(&mut self, pgroup: i32) -> &mut Command {
        // Thin wrapper over `std::os::unix::process::CommandExt::process_group`.
        self.std.process_group(pgroup);
        self
    }
    /// Executes the command as a child process, returning a handle to it.
    ///
    /// By default, stdin, stdout and stderr are inherited from the parent.
    ///
    /// This method will spawn the child process synchronously and return a
    /// handle to a future-aware child process. The `Child` returned implements
    /// `Future` itself to acquire the `ExitStatus` of the child, and otherwise
    /// the `Child` has methods to acquire handles to the stdin, stdout, and
    /// stderr streams.
    ///
    /// All I/O this child does will be associated with the current default
    /// event loop.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```no_run
    /// use tokio::process::Command;
    ///
    /// async fn run_ls() -> std::process::ExitStatus {
    ///     Command::new("ls")
    ///         .spawn()
    ///         .expect("ls command failed to start")
    ///         .wait()
    ///         .await
    ///         .expect("ls command failed to run")
    /// }
    /// ```
    ///
    /// # Caveats
    ///
    /// ## Dropping/Cancellation
    ///
    /// Similar to the behavior to the standard library, and unlike the futures
    /// paradigm of dropping-implies-cancellation, a spawned process will, by
    /// default, continue to execute even after the `Child` handle has been dropped.
    ///
    /// The [`Command::kill_on_drop`] method can be used to modify this behavior
    /// and kill the child process if the `Child` wrapper is dropped before it
    /// has exited.
    ///
    /// ## Unix Processes
    ///
    /// On Unix platforms processes must be "reaped" by their parent process after
    /// they have exited in order to release all OS resources. A child process which
    /// has exited, but has not yet been reaped by its parent is considered a "zombie"
    /// process. Such processes continue to count against limits imposed by the system,
    /// and having too many zombie processes present can prevent additional processes
    /// from being spawned.
    ///
    /// The tokio runtime will, on a best-effort basis, attempt to reap and clean up
    /// any process which it has spawned. No additional guarantees are made with regard to
    /// how quickly or how often this procedure will take place.
    ///
    /// It is recommended to avoid dropping a [`Child`] process handle before it has been
    /// fully `await`ed if stricter cleanup guarantees are required.
    ///
    /// [`Command`]: crate::process::Command
    /// [`Command::kill_on_drop`]: crate::process::Command::kill_on_drop
    /// [`Child`]: crate::process::Child
    ///
    /// # Errors
    ///
    /// On Unix platforms this method will fail with `std::io::ErrorKind::WouldBlock`
    /// if the system process limit is reached (which includes other applications
    /// running on the system).
    #[inline]
    pub fn spawn(&mut self) -> io::Result<Child> {
        // On two lines to circumvent a mutable borrow check failure:
        // `spawn` borrows `self.std` mutably while `build_child` borrows `self`.
        let child = self.std.spawn()?;
        self.build_child(child)
    }
/// Executes the command as a child process with a custom spawning function,
/// returning a handle to it.
///
/// This is identical to [`Self::spawn`] in every aspect except the spawn:
/// here, it is customizable through the `with` parameter instead of
/// defaulting to the usual spawn. In fact, [`Self::spawn`] is just
/// [`Self::spawn_with`] with [`StdCommand::spawn`].
///
/// This is useful mostly under Windows for now, since the platform exposes
/// special APIs to configure child processes when spawning them with various
/// attributes that customize the exact behavior of the spawn operation.
///
/// # Examples
///
/// Basic usage:
///
/// ```no_run
/// # async fn test() { // allow using await
/// use std::process::Stdio;
///
/// let output = tokio::process::Command::new("ls")
/// .stdin(Stdio::null())
/// .stdout(Stdio::piped())
/// .stderr(Stdio::piped())
/// .spawn_with(std::process::Command::spawn)
/// .unwrap()
/// .wait_with_output()
/// .await
/// .unwrap();
/// # }
/// ```
///
/// Actually customizing the spawn under Windows:
///
/// ```ignore
/// #![feature(windows_process_extensions_raw_attribute)]
/// # #[cfg(windows)] // Windows-only nightly APIs are used here.
/// # async fn test() { // Allow using await.
/// use std::os::windows::process::{CommandExt, ProcThreadAttributeList};
/// use std::process::Stdio;
/// use tokio::process::Command;
///
/// let parent = Command::new("cmd").spawn().unwrap();
/// let parent_process_handle = parent.raw_handle();
///
/// const PROC_THREAD_ATTRIBUTE_PARENT_PROCESS: usize = 0x00020000;
/// let attribute_list = ProcThreadAttributeList::build()
/// .attribute(PROC_THREAD_ATTRIBUTE_PARENT_PROCESS, &parent_process_handle)
/// .finish()
/// .unwrap();
///
/// let _output = Command::new("ls")
/// .stdin(Stdio::null())
/// .stdout(Stdio::piped())
/// .stderr(Stdio::piped())
/// .spawn_with(|cmd| cmd.spawn_with_attributes(&attribute_list))
/// .unwrap()
/// .wait_with_output()
/// .await
/// .unwrap();
/// # }
/// ```
#[cfg(tokio_unstable)]
#[cfg_attr(docsrs, doc(cfg(tokio_unstable)))]
#[inline]
pub fn spawn_with(
&mut self,
with: impl FnOnce(&mut StdCommand) -> io::Result<StdChild>,
) -> io::Result<Child> {
// On two lines to circumvent a mutable borrow check failure.
let child = with(&mut self.std)?;
self.build_child(child)
}
    /// Small indirection for the spawn implementations.
    ///
    /// This is introduced for [`Self::spawn`] and [`Self::spawn_with`] to use:
    /// [`Self::spawn`] cannot depend directly on [`Self::spawn_with`] since
    /// it is behind `tokio_unstable`. It also serves as a way to reduce
    /// monomorphization bloat by taking in an already-spawned child process
    /// instead of a command and custom spawn function.
    fn build_child(&self, child: StdChild) -> io::Result<Child> {
        // Platform layer wires the child up for async use (pipe registration,
        // exit-detection setup); may fail if the driver is unavailable.
        let spawned_child = imp::build_child(child)?;
        Ok(Child {
            child: FusedChild::Child(ChildDropGuard {
                inner: spawned_child.child,
                // Carry the builder's kill-on-drop preference onto the handle.
                kill_on_drop: self.kill_on_drop,
            }),
            stdin: spawned_child.stdin.map(|inner| ChildStdin { inner }),
            stdout: spawned_child.stdout.map(|inner| ChildStdout { inner }),
            stderr: spawned_child.stderr.map(|inner| ChildStderr { inner }),
        })
    }
/// Executes the command as a child process, waiting for it to finish and
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | true |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/process/unix/reap.rs | tokio/src/process/unix/reap.rs | use crate::process::imp::orphan::{OrphanQueue, Wait};
use crate::process::kill::Kill;
use crate::signal::unix::InternalStream;
use std::future::Future;
use std::io;
use std::ops::Deref;
use std::pin::Pin;
use std::process::ExitStatus;
use std::task::Context;
use std::task::Poll;
/// Orchestrates between registering interest for receiving signals when a
/// child process has exited, and attempting to poll for process completion.
#[derive(Debug)]
pub(crate) struct Reaper<W, Q, S>
where
    W: Wait,
    Q: OrphanQueue<W>,
{
    // Always `Some` while the reaper is alive; taken in `Drop` when a
    // still-running child must be handed off to the orphan queue.
    inner: Option<W>,
    // Destination for a child that is still running when the reaper drops.
    orphan_queue: Q,
    // Notification stream (SIGCHLD-style) used to decide when to re-poll.
    signal: S,
}
impl<W, Q, S> Deref for Reaper<W, Q, S>
where
    W: Wait,
    Q: OrphanQueue<W>,
{
    type Target = W;

    // Panics if `inner` has been taken; that only happens during `Drop`.
    fn deref(&self) -> &Self::Target {
        self.inner()
    }
}
impl<W, Q, S> Reaper<W, Q, S>
where
    W: Wait,
    Q: OrphanQueue<W>,
{
    pub(crate) fn new(inner: W, orphan_queue: Q, signal: S) -> Self {
        Self {
            inner: Some(inner),
            orphan_queue,
            signal,
        }
    }

    // Shared accessor; `inner` is only `None` mid-`Drop`, so the expect
    // cannot fire for live reapers.
    fn inner(&self) -> &W {
        self.inner.as_ref().expect("inner has gone away")
    }

    pub(crate) fn inner_mut(&mut self) -> &mut W {
        self.inner.as_mut().expect("inner has gone away")
    }
}
impl<W, Q, S> Future for Reaper<W, Q, S>
where
    W: Wait + Unpin,
    Q: OrphanQueue<W> + Unpin,
    S: InternalStream + Unpin,
{
    type Output = io::Result<ExitStatus>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Keep draining signal notifications until either the child exits or
        // we observe `Pending` from the signal stream (interest registered).
        loop {
            // If the child hasn't exited yet, then it's our responsibility to
            // ensure the current task gets notified when it might be able to
            // make progress. We can use the delivery of a SIGCHLD signal as a
            // sign that we can potentially make progress.
            //
            // However, we will register for a notification on the next signal
            // BEFORE we poll the child. Otherwise it is possible that the child
            // can exit and the signal can arrive after we last polled the child,
            // but before we've registered for a notification on the next signal
            // (this can cause a deadlock if there are no more spawned children
            // which can generate a different signal for us). A side effect of
            // pre-registering for signal notifications is that when the child
            // exits, we will have already registered for an additional
            // notification we don't need to consume. If another signal arrives,
            // this future's task will be notified/woken up again. Since the
            // futures model allows for spurious wake ups this extra wakeup
            // should not cause significant issues with parent futures.
            let registered_interest = self.signal.poll_recv(cx).is_pending();

            if let Some(status) = self.inner_mut().try_wait()? {
                return Poll::Ready(Ok(status));
            }

            // If our attempt to poll for the next signal was not ready, then
            // we've arranged for our task to get notified and we can bail out.
            if registered_interest {
                return Poll::Pending;
            } else {
                // Otherwise, if the signal stream delivered a signal to us, we
                // won't get notified at the next signal, so we'll loop and try
                // again.
                continue;
            }
        }
    }
}
impl<W, Q, S> Kill for Reaper<W, Q, S>
where
    W: Kill + Wait,
    Q: OrphanQueue<W>,
{
    // Forward the kill request to the wrapped child.
    fn kill(&mut self) -> io::Result<()> {
        self.inner_mut().kill()
    }
}
impl<W, Q, S> Drop for Reaper<W, Q, S>
where
    W: Wait,
    Q: OrphanQueue<W>,
{
    fn drop(&mut self) {
        // If the child has already exited we can reap it right here and
        // avoid creating an orphan entirely.
        if let Ok(Some(_)) = self.inner_mut().try_wait() {
            return;
        }

        // Otherwise hand the still-running (or errored) child to the orphan
        // queue so it can be reaped later on a best-effort basis.
        let orphan = self.inner.take().unwrap();
        self.orphan_queue.push_orphan(orphan);
    }
}
#[cfg(all(test, not(loom)))]
mod test {
    use super::*;
    use crate::process::unix::orphan::test::MockQueue;
    use futures::future::FutureExt;
    use std::os::unix::process::ExitStatusExt;
    use std::process::ExitStatus;
    use std::task::Context;
    use std::task::Poll;

    // Fake child: reports `status` once `try_wait` has been called
    // `num_wait_until_status` times, and counts kills/waits for assertions.
    #[derive(Debug)]
    struct MockWait {
        total_kills: usize,
        total_waits: usize,
        num_wait_until_status: usize,
        status: ExitStatus,
    }

    impl MockWait {
        fn new(status: ExitStatus, num_wait_until_status: usize) -> Self {
            Self {
                total_kills: 0,
                total_waits: 0,
                num_wait_until_status,
                status,
            }
        }
    }

    impl Wait for MockWait {
        fn id(&self) -> u32 {
            0
        }

        fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
            let ret = if self.num_wait_until_status == self.total_waits {
                Some(self.status)
            } else {
                None
            };

            self.total_waits += 1;
            Ok(ret)
        }
    }

    impl Kill for MockWait {
        fn kill(&mut self) -> io::Result<()> {
            self.total_kills += 1;
            Ok(())
        }
    }

    // Scripted signal stream: `Some(())` means a signal was delivered,
    // `None` means `Pending` (i.e. interest was registered).
    struct MockStream {
        total_polls: usize,
        values: Vec<Option<()>>,
    }

    impl MockStream {
        fn new(values: Vec<Option<()>>) -> Self {
            Self {
                total_polls: 0,
                values,
            }
        }
    }

    impl InternalStream for MockStream {
        fn poll_recv(&mut self, _cx: &mut Context<'_>) -> Poll<Option<()>> {
            self.total_polls += 1;
            match self.values.remove(0) {
                Some(()) => Poll::Ready(Some(())),
                None => Poll::Pending,
            }
        }
    }

    #[test]
    fn reaper() {
        let exit = ExitStatus::from_raw(0);
        let mock = MockWait::new(exit, 3);
        let mut grim = Reaper::new(
            mock,
            MockQueue::new(),
            MockStream::new(vec![None, Some(()), None, None, None]),
        );

        let waker = futures::task::noop_waker();
        let mut context = Context::from_waker(&waker);

        // Not yet exited, interest registered
        assert!(grim.poll_unpin(&mut context).is_pending());
        assert_eq!(1, grim.signal.total_polls);
        assert_eq!(1, grim.total_waits);
        assert!(grim.orphan_queue.all_enqueued.borrow().is_empty());

        // Not yet exited, couldn't register interest the first time
        // but managed to register interest the second time around
        assert!(grim.poll_unpin(&mut context).is_pending());
        assert_eq!(3, grim.signal.total_polls);
        assert_eq!(3, grim.total_waits);
        assert!(grim.orphan_queue.all_enqueued.borrow().is_empty());

        // Exited
        if let Poll::Ready(r) = grim.poll_unpin(&mut context) {
            assert!(r.is_ok());
            let exit_code = r.unwrap();
            assert_eq!(exit_code, exit);
        } else {
            unreachable!();
        }
        assert_eq!(4, grim.signal.total_polls);
        assert_eq!(4, grim.total_waits);
        assert!(grim.orphan_queue.all_enqueued.borrow().is_empty());
    }

    #[test]
    fn kill() {
        let exit = ExitStatus::from_raw(0);
        let mut grim = Reaper::new(
            MockWait::new(exit, 0),
            MockQueue::new(),
            MockStream::new(vec![None]),
        );

        grim.kill().unwrap();
        assert_eq!(1, grim.total_kills);
        assert!(grim.orphan_queue.all_enqueued.borrow().is_empty());
    }

    #[test]
    fn drop_reaps_if_possible() {
        let exit = ExitStatus::from_raw(0);
        let mut mock = MockWait::new(exit, 0);

        {
            let queue = MockQueue::new();

            let grim = Reaper::new(&mut mock, &queue, MockStream::new(vec![]));

            drop(grim);

            assert!(queue.all_enqueued.borrow().is_empty());
        }

        assert_eq!(1, mock.total_waits);
        assert_eq!(0, mock.total_kills);
    }

    #[test]
    fn drop_enqueues_orphan_if_wait_fails() {
        let exit = ExitStatus::from_raw(0);
        let mut mock = MockWait::new(exit, 2);

        {
            let queue = MockQueue::<&mut MockWait>::new();
            let grim = Reaper::new(&mut mock, &queue, MockStream::new(vec![]));
            drop(grim);

            assert_eq!(1, queue.all_enqueued.borrow().len());
        }

        assert_eq!(1, mock.total_waits);
        assert_eq!(0, mock.total_kills);
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/process/unix/orphan.rs | tokio/src/process/unix/orphan.rs | use crate::loom::sync::{Mutex, MutexGuard};
use crate::runtime::signal::Handle as SignalHandle;
use crate::signal::unix::{signal_with_handle, SignalKind};
use crate::sync::watch;
use std::io;
use std::process::ExitStatus;
/// An interface for waiting on a process to exit.
pub(crate) trait Wait {
    /// Get the identifier for this process, for diagnostics.
    #[allow(dead_code)]
    fn id(&self) -> u32;
    /// Try waiting for a process to exit in a non-blocking manner.
    fn try_wait(&mut self) -> io::Result<Option<ExitStatus>>;
}
impl<T: Wait> Wait for &mut T {
fn id(&self) -> u32 {
(**self).id()
}
fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
(**self).try_wait()
}
}
/// An interface for queueing up an orphaned process so that it can be reaped.
pub(crate) trait OrphanQueue<T> {
    /// Adds an orphan to the queue.
    fn push_orphan(&self, orphan: T);
}
// A shared reference to a queue is itself a queue.
impl<T, O: OrphanQueue<T>> OrphanQueue<T> for &O {
    fn push_orphan(&self, orphan: T) {
        O::push_orphan(*self, orphan);
    }
}
/// An implementation of `OrphanQueue`.
#[derive(Debug)]
pub(crate) struct OrphanQueueImpl<T> {
    // Lazily-initialized SIGCHLD watch receiver. Stays `None` until a reap
    // attempt finds a non-empty queue and signal registration succeeds.
    sigchild: Mutex<Option<watch::Receiver<()>>>,
    // Children that were dropped while still running, awaiting reaping.
    queue: Mutex<Vec<T>>,
}
impl<T> OrphanQueueImpl<T> {
    cfg_not_has_const_mutex_new! {
        pub(crate) fn new() -> Self {
            Self {
                sigchild: Mutex::new(None),
                queue: Mutex::new(Vec::new()),
            }
        }
    }

    cfg_has_const_mutex_new! {
        pub(crate) const fn new() -> Self {
            Self {
                sigchild: Mutex::const_new(None),
                queue: Mutex::const_new(Vec::new()),
            }
        }
    }

    // Test-only helper: number of queued orphans.
    #[cfg(test)]
    fn len(&self) -> usize {
        self.queue.lock().len()
    }

    pub(crate) fn push_orphan(&self, orphan: T)
    where
        T: Wait,
    {
        self.queue.lock().push(orphan);
    }

    /// Attempts to reap every process in the queue, ignoring any errors and
    /// enqueueing any orphans which have not yet exited.
    pub(crate) fn reap_orphans(&self, handle: &SignalHandle)
    where
        T: Wait,
    {
        // If someone else is holding the lock, they will be responsible for draining
        // the queue as necessary, so we can safely bail if that happens
        if let Some(mut sigchild_guard) = self.sigchild.try_lock() {
            match &mut *sigchild_guard {
                Some(sigchild) => {
                    // Only drain when a SIGCHLD has actually arrived since the
                    // last check; otherwise no child can have exited.
                    if sigchild.try_has_changed().and_then(Result::ok).is_some() {
                        drain_orphan_queue(self.queue.lock());
                    }
                }
                None => {
                    let queue = self.queue.lock();

                    // Be lazy and only initialize the SIGCHLD listener if there
                    // are any orphaned processes in the queue.
                    if !queue.is_empty() {
                        // An errors shouldn't really happen here, but if it does it
                        // means that the signal driver isn't running, in
                        // which case there isn't anything we can
                        // register/initialize here, so we can try again later
                        if let Ok(sigchild) = signal_with_handle(SignalKind::child(), handle) {
                            *sigchild_guard = Some(sigchild);
                            // Eagerly drain once on registration.
                            drain_orphan_queue(queue);
                        }
                    }
                }
            }
        }
    }
}
/// Makes one reap attempt per queued orphan, keeping only those that are
/// still running. The lock guard is consumed and released on return.
fn drain_orphan_queue<T>(mut queue: MutexGuard<'_, Vec<T>>)
where
    T: Wait,
{
    // Walk backwards so `swap_remove` never disturbs indices yet to visit.
    let mut idx = queue.len();
    while idx > 0 {
        idx -= 1;
        // `Ok(None)` means the child is still running: leave it queued.
        //
        // The stdlib handles interruption errors (EINTR) when polling a child
        // process. All other errors represent invalid inputs or pids that have
        // already been reaped, so we can drop the orphan in case an error is
        // raised.
        if !matches!(queue[idx].try_wait(), Ok(None)) {
            queue.swap_remove(idx);
        }
    }
}
#[cfg(all(test, not(loom)))]
pub(crate) mod test {
    use super::*;
    use crate::runtime::io::Driver as IoDriver;
    use crate::runtime::signal::{Driver as SignalDriver, Handle as SignalHandle};
    use crate::sync::watch;
    use std::cell::{Cell, RefCell};
    use std::io;
    use std::os::unix::process::ExitStatusExt;
    use std::process::ExitStatus;
    use std::rc::Rc;

    // Records every orphan pushed, for assertions in tests.
    pub(crate) struct MockQueue<W> {
        pub(crate) all_enqueued: RefCell<Vec<W>>,
    }

    impl<W> MockQueue<W> {
        pub(crate) fn new() -> Self {
            Self {
                all_enqueued: RefCell::new(Vec::new()),
            }
        }
    }

    impl<W> OrphanQueue<W> for MockQueue<W> {
        fn push_orphan(&self, orphan: W) {
            self.all_enqueued.borrow_mut().push(orphan);
        }
    }

    // Fake child: resolves (with a status, or an error if `return_err`) once
    // `try_wait` has been called `num_wait_until_status` times.
    struct MockWait {
        total_waits: Rc<Cell<usize>>,
        num_wait_until_status: usize,
        return_err: bool,
    }

    impl MockWait {
        fn new(num_wait_until_status: usize) -> Self {
            Self {
                total_waits: Rc::new(Cell::new(0)),
                num_wait_until_status,
                return_err: false,
            }
        }

        fn with_err() -> Self {
            Self {
                total_waits: Rc::new(Cell::new(0)),
                num_wait_until_status: 0,
                return_err: true,
            }
        }
    }

    impl Wait for MockWait {
        fn id(&self) -> u32 {
            42
        }

        fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
            let waits = self.total_waits.get();
            let ret = if self.num_wait_until_status == waits {
                // FIX: these two arms were swapped — `return_err == true` must
                // yield the mock error, matching the intent of `with_err()`.
                if self.return_err {
                    Err(io::Error::new(io::ErrorKind::Other, "mock err"))
                } else {
                    Ok(Some(ExitStatus::from_raw(0)))
                }
            } else {
                Ok(None)
            };
            self.total_waits.set(waits + 1);
            ret
        }
    }

    #[test]
    fn drain_attempts_a_single_reap_of_all_queued_orphans() {
        let first_orphan = MockWait::new(0);
        let second_orphan = MockWait::new(1);
        let third_orphan = MockWait::new(2);
        let fourth_orphan = MockWait::with_err();

        let first_waits = first_orphan.total_waits.clone();
        let second_waits = second_orphan.total_waits.clone();
        let third_waits = third_orphan.total_waits.clone();
        let fourth_waits = fourth_orphan.total_waits.clone();

        let orphanage = OrphanQueueImpl::new();
        orphanage.push_orphan(first_orphan);
        orphanage.push_orphan(third_orphan);
        orphanage.push_orphan(second_orphan);
        orphanage.push_orphan(fourth_orphan);

        assert_eq!(orphanage.len(), 4);

        drain_orphan_queue(orphanage.queue.lock());
        assert_eq!(orphanage.len(), 2);
        assert_eq!(first_waits.get(), 1);
        assert_eq!(second_waits.get(), 1);
        assert_eq!(third_waits.get(), 1);
        assert_eq!(fourth_waits.get(), 1);

        drain_orphan_queue(orphanage.queue.lock());
        assert_eq!(orphanage.len(), 1);
        assert_eq!(first_waits.get(), 1);
        assert_eq!(second_waits.get(), 2);
        assert_eq!(third_waits.get(), 2);
        assert_eq!(fourth_waits.get(), 1);

        drain_orphan_queue(orphanage.queue.lock());
        assert_eq!(orphanage.len(), 0);
        assert_eq!(first_waits.get(), 1);
        assert_eq!(second_waits.get(), 2);
        assert_eq!(third_waits.get(), 3);
        assert_eq!(fourth_waits.get(), 1);

        // Safe to reap when empty
        drain_orphan_queue(orphanage.queue.lock());
    }

    #[test]
    fn no_reap_if_no_signal_received() {
        let (tx, rx) = watch::channel(());

        let handle = SignalHandle::default();

        let orphanage = OrphanQueueImpl::new();
        *orphanage.sigchild.lock() = Some(rx);

        let orphan = MockWait::new(2);
        let waits = orphan.total_waits.clone();
        orphanage.push_orphan(orphan);

        orphanage.reap_orphans(&handle);
        assert_eq!(waits.get(), 0);

        orphanage.reap_orphans(&handle);
        assert_eq!(waits.get(), 0);

        tx.send(()).unwrap();
        orphanage.reap_orphans(&handle);
        assert_eq!(waits.get(), 1);
    }

    #[test]
    fn no_reap_if_signal_lock_held() {
        let handle = SignalHandle::default();

        let orphanage = OrphanQueueImpl::new();
        let signal_guard = orphanage.sigchild.lock();

        let orphan = MockWait::new(2);
        let waits = orphan.total_waits.clone();
        orphanage.push_orphan(orphan);

        orphanage.reap_orphans(&handle);
        assert_eq!(waits.get(), 0);

        drop(signal_guard);
    }

    #[cfg_attr(miri, ignore)] // No `sigaction` on Miri
    #[test]
    fn does_not_register_signal_if_queue_empty() {
        let (io_driver, io_handle) = IoDriver::new(1024).unwrap();
        let signal_driver = SignalDriver::new(io_driver, &io_handle).unwrap();
        let handle = signal_driver.handle();

        let orphanage = OrphanQueueImpl::new();
        assert!(orphanage.sigchild.lock().is_none()); // Sanity

        // No register when queue empty
        orphanage.reap_orphans(&handle);
        assert!(orphanage.sigchild.lock().is_none());

        let orphan = MockWait::new(2);
        let waits = orphan.total_waits.clone();
        orphanage.push_orphan(orphan);

        orphanage.reap_orphans(&handle);
        assert!(orphanage.sigchild.lock().is_some());
        assert_eq!(waits.get(), 1); // Eager reap when registering listener
    }

    #[test]
    fn does_nothing_if_signal_could_not_be_registered() {
        let handle = SignalHandle::default();

        let orphanage = OrphanQueueImpl::new();
        assert!(orphanage.sigchild.lock().is_none());

        let orphan = MockWait::new(2);
        let waits = orphan.total_waits.clone();
        orphanage.push_orphan(orphan);

        // Signal handler has "gone away", nothing to register or reap
        orphanage.reap_orphans(&handle);
        assert!(orphanage.sigchild.lock().is_none());
        assert_eq!(waits.get(), 0);
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/process/unix/mod.rs | tokio/src/process/unix/mod.rs | //! Unix handling of child processes.
//!
//! Right now the only "fancy" thing about this is how we implement the
//! `Future` implementation on `Child` to get the exit status. Unix offers
//! no way to register a child with epoll, and the only real way to get a
//! notification when a process exits is the SIGCHLD signal.
//!
//! Signal handling in general is *super* hairy and complicated, and it's even
//! more complicated here with the fact that signals are coalesced, so we may
//! not get a SIGCHLD-per-child.
//!
//! Our best approximation here is to check *all spawned processes* for all
//! SIGCHLD signals received. To do that we create a `Signal`, implemented in
//! the `tokio-net` crate, which is a stream over signals being received.
//!
//! Later when we poll the process's exit status we simply check to see if a
//! SIGCHLD has happened since we last checked, and while that returns "yes" we
//! keep trying.
//!
//! Note that this means that this isn't really scalable, but then again
//! processes in general aren't scalable (e.g. millions) so it shouldn't be that
//! bad in theory...
pub(crate) mod orphan;
use orphan::{OrphanQueue, OrphanQueueImpl, Wait};
mod reap;
use reap::Reaper;
#[cfg(all(target_os = "linux", feature = "rt"))]
mod pidfd_reaper;
use crate::io::{AsyncRead, AsyncWrite, PollEvented, ReadBuf};
use crate::process::kill::Kill;
use crate::process::SpawnedChild;
use crate::runtime::signal::Handle as SignalHandle;
use crate::signal::unix::{signal, Signal, SignalKind};
use mio::event::Source;
use mio::unix::SourceFd;
use std::fmt;
use std::fs::File;
use std::future::Future;
use std::io;
use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
use std::pin::Pin;
use std::process::{Child as StdChild, ExitStatus, Stdio};
use std::task::Context;
use std::task::Poll;
impl Wait for StdChild {
    fn id(&self) -> u32 {
        // Not recursive: inherent `StdChild::id` takes precedence over this
        // trait method during method resolution.
        self.id()
    }

    fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
        // Likewise resolves to the inherent `StdChild::try_wait`.
        self.try_wait()
    }
}
impl Kill for StdChild {
    fn kill(&mut self) -> io::Result<()> {
        // Resolves to the inherent `StdChild::kill`, not this trait method.
        self.kill()
    }
}
cfg_not_has_const_mutex_new! {
    // Fallback: `Mutex::new` is not const on this configuration, so the
    // global queue is initialized lazily at first use.
    fn get_orphan_queue() -> &'static OrphanQueueImpl<StdChild> {
        use std::sync::OnceLock;

        static ORPHAN_QUEUE: OnceLock<OrphanQueueImpl<StdChild>> = OnceLock::new();

        ORPHAN_QUEUE.get_or_init(OrphanQueueImpl::new)
    }
}
cfg_has_const_mutex_new! {
    // `Mutex::const_new` is available: the global queue can live in a plain
    // static with no lazy-init indirection.
    fn get_orphan_queue() -> &'static OrphanQueueImpl<StdChild> {
        static ORPHAN_QUEUE: OrphanQueueImpl<StdChild> = OrphanQueueImpl::new();

        &ORPHAN_QUEUE
    }
}
// Zero-sized handle to the process-global orphan queue.
pub(crate) struct GlobalOrphanQueue;
impl fmt::Debug for GlobalOrphanQueue {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render the underlying global queue's state.
        get_orphan_queue().fmt(fmt)
    }
}
impl GlobalOrphanQueue {
    // Best-effort reap of all globally queued orphans.
    pub(crate) fn reap_orphans(handle: &SignalHandle) {
        get_orphan_queue().reap_orphans(handle);
    }
}
impl OrphanQueue<StdChild> for GlobalOrphanQueue {
    fn push_orphan(&self, orphan: StdChild) {
        // Forward to the process-global queue.
        get_orphan_queue().push_orphan(orphan);
    }
}
#[must_use = "futures do nothing unless polled"]
pub(crate) enum Child {
    /// Exit detection driven by the SIGCHLD signal stream.
    SignalReaper(Reaper<StdChild, GlobalOrphanQueue, Signal>),
    /// Exit detection driven by a pollable pidfd (Linux with rt only).
    #[cfg(all(target_os = "linux", feature = "rt"))]
    PidfdReaper(pidfd_reaper::PidfdReaper<StdChild, GlobalOrphanQueue>),
}
impl fmt::Debug for Child {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Only the pid is shown; the reaper internals are not useful here.
        fmt.debug_struct("Child").field("pid", &self.id()).finish()
    }
}
/// Wraps a freshly-spawned `std` child for async use: registers its stdio
/// pipes with the reactor and sets up exit detection (pidfd when available,
/// otherwise SIGCHLD).
pub(crate) fn build_child(mut child: StdChild) -> io::Result<SpawnedChild> {
    // Move each stdio pipe into a nonblocking, reactor-registered wrapper.
    let stdin = child.stdin.take().map(stdio).transpose()?;
    let stdout = child.stdout.take().map(stdio).transpose()?;
    let stderr = child.stderr.take().map(stdio).transpose()?;

    // Prefer a pidfd-based reaper when the kernel supports it; fall back to
    // the SIGCHLD-based reaper otherwise (the child is handed back to us).
    #[cfg(all(target_os = "linux", feature = "rt"))]
    match pidfd_reaper::PidfdReaper::new(child, GlobalOrphanQueue) {
        Ok(pidfd_reaper) => {
            return Ok(SpawnedChild {
                child: Child::PidfdReaper(pidfd_reaper),
                stdin,
                stdout,
                stderr,
            })
        }
        Err((Some(err), _child)) => return Err(err),
        Err((None, child_returned)) => child = child_returned,
    }

    let signal = signal(SignalKind::child())?;

    Ok(SpawnedChild {
        child: Child::SignalReaper(Reaper::new(child, GlobalOrphanQueue, signal)),
        stdin,
        stdout,
        stderr,
    })
}
impl Child {
    pub(crate) fn id(&self) -> u32 {
        match self {
            Self::SignalReaper(signal_reaper) => signal_reaper.id(),
            #[cfg(all(target_os = "linux", feature = "rt"))]
            Self::PidfdReaper(pidfd_reaper) => pidfd_reaper.id(),
        }
    }

    // Access the wrapped `std::process::Child` regardless of reaper flavor.
    fn std_child(&mut self) -> &mut StdChild {
        match self {
            Self::SignalReaper(signal_reaper) => signal_reaper.inner_mut(),
            #[cfg(all(target_os = "linux", feature = "rt"))]
            Self::PidfdReaper(pidfd_reaper) => pidfd_reaper.inner_mut(),
        }
    }

    pub(crate) fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
        self.std_child().try_wait()
    }
}
impl Kill for Child {
    fn kill(&mut self) -> io::Result<()> {
        // Kill through the underlying std child, whichever reaper wraps it.
        self.std_child().kill()
    }
}
impl Future for Child {
    type Output = io::Result<ExitStatus>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Both variants are `Unpin`, so delegating through `Pin::new` is fine.
        match Pin::into_inner(self) {
            Self::SignalReaper(signal_reaper) => Pin::new(signal_reaper).poll(cx),
            #[cfg(all(target_os = "linux", feature = "rt"))]
            Self::PidfdReaper(pidfd_reaper) => Pin::new(pidfd_reaper).poll(cx),
        }
    }
}
// One end of a child's stdio pipe, owned as a raw fd.
#[derive(Debug)]
pub(crate) struct Pipe {
    // Actually a pipe is not a File. However, we are reusing `File` to get
    // close on drop. This is a similar trick as `mio`.
    fd: File,
}
impl<T: IntoRawFd> From<T> for Pipe {
    fn from(fd: T) -> Self {
        // SAFETY: `into_raw_fd` transfers ownership of a valid open fd to us,
        // so wrapping it in a `File` (which closes on drop) is sound.
        let fd = unsafe { File::from_raw_fd(fd.into_raw_fd()) };
        Self { fd }
    }
}
// Reads through a shared reference, mirroring `impl Read for &File`.
impl io::Read for &Pipe {
    fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> {
        (&self.fd).read(bytes)
    }
}
// Writes through a shared reference, mirroring `impl Write for &File`.
impl io::Write for &Pipe {
    fn write(&mut self, bytes: &[u8]) -> io::Result<usize> {
        (&self.fd).write(bytes)
    }

    fn flush(&mut self) -> io::Result<()> {
        (&self.fd).flush()
    }

    fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
        (&self.fd).write_vectored(bufs)
    }
}
impl AsRawFd for Pipe {
    fn as_raw_fd(&self) -> RawFd {
        // Exposes the fd without giving up ownership.
        self.fd.as_raw_fd()
    }
}
impl AsFd for Pipe {
    fn as_fd(&self) -> BorrowedFd<'_> {
        // SAFETY: `self.fd` remains open for at least the returned borrow's
        // lifetime, since `self` owns it.
        unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) }
    }
}
// Unregisters the stdio handle from the reactor and returns the raw pipe as
// a blocking `File`, suitable for handing to another process.
fn convert_to_blocking_file(io: ChildStdio) -> io::Result<File> {
    let mut fd = io.inner.into_inner()?.fd;

    // Ensure that the fd to be inherited is set to *blocking* mode, as this
    // is the default that virtually all programs expect to have. Those
    // programs that know how to work with nonblocking stdio will know how to
    // change it to nonblocking mode.
    set_nonblocking(&mut fd, false)?;

    Ok(fd)
}
// Turns an async stdio handle back into a blocking `Stdio` for inheritance.
pub(crate) fn convert_to_stdio(io: ChildStdio) -> io::Result<Stdio> {
    let file = convert_to_blocking_file(io)?;
    Ok(Stdio::from(file))
}
// mio event-source plumbing: a pipe is registered via its raw fd.
impl Source for Pipe {
    fn register(
        &mut self,
        registry: &mio::Registry,
        token: mio::Token,
        interest: mio::Interest,
    ) -> io::Result<()> {
        SourceFd(&self.as_raw_fd()).register(registry, token, interest)
    }

    fn reregister(
        &mut self,
        registry: &mio::Registry,
        token: mio::Token,
        interest: mio::Interest,
    ) -> io::Result<()> {
        SourceFd(&self.as_raw_fd()).reregister(registry, token, interest)
    }

    fn deregister(&mut self, registry: &mio::Registry) -> io::Result<()> {
        SourceFd(&self.as_raw_fd()).deregister(registry)
    }
}
// A child's stdio pipe registered with the reactor for async I/O.
pub(crate) struct ChildStdio {
    inner: PollEvented<Pipe>,
}
impl ChildStdio {
    // Deregisters from the reactor and yields the fd in blocking mode.
    pub(super) fn into_owned_fd(self) -> io::Result<OwnedFd> {
        convert_to_blocking_file(self).map(OwnedFd::from)
    }
}
impl fmt::Debug for ChildStdio {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Defer to the PollEvented wrapper's representation.
        self.inner.fmt(fmt)
    }
}
impl AsRawFd for ChildStdio {
    fn as_raw_fd(&self) -> RawFd {
        // Exposes the underlying pipe fd without transferring ownership.
        self.inner.as_raw_fd()
    }
}
impl AsFd for ChildStdio {
    fn as_fd(&self) -> BorrowedFd<'_> {
        // SAFETY: the fd is owned by `self.inner` and stays open for the
        // lifetime of the borrow.
        unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) }
    }
}
impl AsyncWrite for ChildStdio {
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        self.inner.poll_write(cx, buf)
    }

    // No-op: there is no userspace buffer between us and the pipe fd.
    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(Ok(()))
    }

    // No-op: closing the write end happens when the handle is dropped.
    fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(Ok(()))
    }

    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[io::IoSlice<'_>],
    ) -> Poll<Result<usize, io::Error>> {
        self.inner.poll_write_vectored(cx, bufs)
    }

    // Vectored writes go straight through to the fd (see `Write for &Pipe`).
    fn is_write_vectored(&self) -> bool {
        true
    }
}
impl AsyncRead for ChildStdio {
    /// Delegates the read to `PollEvented`, which waits for readiness and
    /// performs the nonblocking read.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // Safety: pipes support reading into uninitialized memory
        unsafe { self.inner.poll_read(cx, buf) }
    }
}
/// Sets or clears `O_NONBLOCK` on `fd` via `fcntl(2)`, preserving all other
/// file status flags.
fn set_nonblocking<T: AsRawFd>(fd: &mut T, nonblocking: bool) -> io::Result<()> {
    let raw = fd.as_raw_fd();
    // SAFETY: `raw` is a valid descriptor for the lifetime of `fd`, and
    // F_GETFL/F_SETFL are safe fcntl operations on any open descriptor.
    unsafe {
        let flags = libc::fcntl(raw, libc::F_GETFL);
        if flags == -1 {
            return Err(io::Error::last_os_error());
        }
        let desired = if nonblocking {
            flags | libc::O_NONBLOCK
        } else {
            flags & !libc::O_NONBLOCK
        };
        if libc::fcntl(raw, libc::F_SETFL, desired) == -1 {
            return Err(io::Error::last_os_error());
        }
    }
    Ok(())
}
/// Wraps a raw stdio handle taken from a child process in an async
/// `ChildStdio`, registering it with the runtime's reactor.
pub(super) fn stdio<T>(io: T) -> io::Result<ChildStdio>
where
    T: IntoRawFd,
{
    // The descriptor must be nonblocking before the event loop drives it.
    let mut pipe = Pipe::from(io);
    set_nonblocking(&mut pipe, true)?;
    let inner = PollEvented::new(pipe)?;
    Ok(ChildStdio { inner })
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/process/unix/pidfd_reaper.rs | tokio/src/process/unix/pidfd_reaper.rs | use crate::{
io::{interest::Interest, PollEvented},
process::{
imp::{orphan::Wait, OrphanQueue},
kill::Kill,
},
util::error::RUNTIME_SHUTTING_DOWN_ERROR,
};
use libc::{syscall, SYS_pidfd_open, ENOSYS, PIDFD_NONBLOCK};
use mio::{event::Source, unix::SourceFd};
use std::{
fs::File,
future::Future,
io,
marker::Unpin,
ops::Deref,
os::unix::io::{AsRawFd, FromRawFd, RawFd},
pin::Pin,
process::ExitStatus,
sync::atomic::{AtomicBool, Ordering::Relaxed},
task::{Context, Poll},
};
/// Owned pidfd (see `pidfd_open(2)`): a descriptor that becomes readable
/// when the referenced process exits.
#[derive(Debug)]
struct Pidfd {
    // `File` is used purely as an owned-fd wrapper so the descriptor is
    // closed on drop; no file I/O is performed through it.
    fd: File,
}
impl Pidfd {
    /// Attempts to obtain a nonblocking pidfd for `pid`, returning `None`
    /// when the kernel lacks `pidfd_open` support or the syscall fails.
    fn open(pid: u32) -> Option<Pidfd> {
        // Caches an `ENOSYS` result so the syscall is only attempted once on
        // kernels without pidfd support.
        // Store false (0) to reduce executable size
        static NO_PIDFD_SUPPORT: AtomicBool = AtomicBool::new(false);
        if NO_PIDFD_SUPPORT.load(Relaxed) {
            return None;
        }
        // Safety: the following call invokes the pidfd_open syscall, which
        // takes two parameters: pidfd_open(pid: pid_t, flags: c_uint).
        let fd = unsafe { syscall(SYS_pidfd_open, pid, PIDFD_NONBLOCK) };
        if fd == -1 {
            let errno = io::Error::last_os_error().raw_os_error().unwrap();
            if errno == ENOSYS {
                // Kernel predates pidfd_open; remember and never retry.
                NO_PIDFD_SUPPORT.store(true, Relaxed)
            }
            None
        } else {
            // Safety: pidfd_open returns -1 on error or a valid fd with ownership.
            Some(Pidfd {
                fd: unsafe { File::from_raw_fd(fd as i32) },
            })
        }
    }
}
impl AsRawFd for Pidfd {
    /// Returns the raw pidfd.
    fn as_raw_fd(&self) -> RawFd {
        self.fd.as_raw_fd()
    }
}
// Forward mio registration through a `SourceFd` wrapping the raw pidfd so
// the reactor can wait for it to become readable (process exit).
impl Source for Pidfd {
    fn register(
        &mut self,
        registry: &mio::Registry,
        token: mio::Token,
        interest: mio::Interest,
    ) -> io::Result<()> {
        let raw = self.as_raw_fd();
        SourceFd(&raw).register(registry, token, interest)
    }
    fn reregister(
        &mut self,
        registry: &mio::Registry,
        token: mio::Token,
        interest: mio::Interest,
    ) -> io::Result<()> {
        let raw = self.as_raw_fd();
        SourceFd(&raw).reregister(registry, token, interest)
    }
    fn deregister(&mut self, registry: &mio::Registry) -> io::Result<()> {
        let raw = self.as_raw_fd();
        SourceFd(&raw).deregister(registry)
    }
}
/// Future that resolves with the child's exit status once its pidfd reports
/// readability (i.e. the process has exited).
#[derive(Debug)]
struct PidfdReaperInner<W>
where
    W: Unpin,
{
    // The waitable child handle used to actually reap the process.
    inner: W,
    // Reactor-registered pidfd; becomes readable when the child exits.
    pidfd: PollEvented<Pidfd>,
}
/// Returns `true` iff the `Display` output of `d` equals `s`, without
/// allocating an intermediate `String`.
fn display_eq(d: impl std::fmt::Display, s: &str) -> bool {
    use std::fmt::Write;

    // A `fmt::Write` sink that consumes `d`'s output chunk by chunk,
    // matching each chunk against the front of the expected string.
    struct Matcher<'s> {
        // Portion of the expected string not yet matched.
        pending: &'s str,
        // Set once any chunk fails to match; later chunks are then ignored.
        mismatch: bool,
    }
    impl Write for Matcher<'_> {
        fn write_str(&mut self, chunk: &str) -> std::fmt::Result {
            if self.mismatch {
                return Ok(());
            }
            match self.pending.strip_prefix(chunk) {
                Some(rest) => self.pending = rest,
                None => self.mismatch = true,
            }
            // Never report a formatting error; the comparison result is
            // carried in `mismatch` instead.
            Ok(())
        }
    }

    let mut matcher = Matcher {
        pending: s,
        mismatch: false,
    };
    let _ = write!(matcher, "{d}");
    // Equal iff every chunk matched and the whole expected string was used.
    !matcher.mismatch && matcher.pending.is_empty()
}
/// Returns `true` iff `err` is the runtime's "shutting down" error: kind
/// `Other`, no source, and a message equal to `RUNTIME_SHUTTING_DOWN_ERROR`.
fn is_rt_shutdown_err(err: &io::Error) -> bool {
    let inner = match err.get_ref() {
        Some(inner) => inner,
        None => return false,
    };
    err.kind() == io::ErrorKind::Other
        && inner.source().is_none()
        && display_eq(inner, RUNTIME_SHUTTING_DOWN_ERROR)
}
impl<W> Future for PidfdReaperInner<W>
where
    W: Wait + Unpin,
{
    type Output = io::Result<ExitStatus>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = Pin::into_inner(self);
        match this.pidfd.registration().poll_read_ready(cx) {
            Poll::Ready(Ok(evt)) => {
                // The pidfd is readable, so try to reap without blocking.
                if let Some(exit_code) = this.inner.try_wait()? {
                    return Poll::Ready(Ok(exit_code));
                }
                // Readable but not yet reapable: clear readiness and fall
                // through to re-register below.
                this.pidfd.registration().clear_readiness(evt);
            }
            // NOTE(review): the runtime-shutdown error is deliberately
            // swallowed here so that we fall through to re-register and
            // self-wake rather than surfacing it — presumably to keep
            // polling the child to completion during shutdown; confirm.
            Poll::Ready(Err(err)) if is_rt_shutdown_err(&err) => {}
            Poll::Ready(Err(err)) => return Poll::Ready(Err(err)),
            Poll::Pending => return Poll::Pending,
        };
        // Re-arm readability interest and schedule another poll.
        this.pidfd.reregister(Interest::READABLE)?;
        cx.waker().wake_by_ref();
        Poll::Pending
    }
}
/// Reaps a child process via its pidfd; if dropped before the child has
/// exited, the child is handed to the orphan queue instead.
#[derive(Debug)]
pub(crate) struct PidfdReaper<W, Q>
where
    W: Wait + Unpin,
    Q: OrphanQueue<W> + Unpin,
{
    // `Some` until `Drop` takes it; the `Option` lets `drop` move the child
    // out of `self`.
    inner: Option<PidfdReaperInner<W>>,
    orphan_queue: Q,
}
impl<W, Q> Deref for PidfdReaper<W, Q>
where
    W: Wait + Unpin,
    Q: OrphanQueue<W> + Unpin,
{
    type Target = W;
    /// Dereferences to the wrapped child handle.
    fn deref(&self) -> &Self::Target {
        let reaper = self.inner.as_ref().expect("inner has gone away");
        &reaper.inner
    }
}
impl<W, Q> PidfdReaper<W, Q>
where
    W: Wait + Unpin,
    Q: OrphanQueue<W> + Unpin,
{
    /// Creates a pidfd-based reaper for `inner`, handing `inner` back on
    /// failure: `(None, inner)` when pidfd support is unavailable, or
    /// `(Some(err), inner)` when reactor registration failed.
    pub(crate) fn new(inner: W, orphan_queue: Q) -> Result<Self, (Option<io::Error>, W)> {
        let pidfd = match Pidfd::open(inner.id()) {
            Some(pidfd) => pidfd,
            None => return Err((None, inner)),
        };
        match PollEvented::new_with_interest(pidfd, Interest::READABLE) {
            Ok(pidfd) => Ok(Self {
                inner: Some(PidfdReaperInner { pidfd, inner }),
                orphan_queue,
            }),
            Err(io_error) => Err((Some(io_error), inner)),
        }
    }
    /// Mutable access to the wrapped child handle.
    pub(crate) fn inner_mut(&mut self) -> &mut W {
        &mut self.inner.as_mut().expect("inner has gone away").inner
    }
}
impl<W, Q> Future for PidfdReaper<W, Q>
where
    W: Wait + Unpin,
    Q: OrphanQueue<W> + Unpin,
{
    type Output = io::Result<ExitStatus>;
    /// Delegates to the inner reaper future; re-pinning is sound because
    /// the inner future is `Unpin`.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = Pin::into_inner(self);
        let reaper = this.inner.as_mut().expect("inner has gone away");
        Pin::new(reaper).poll(cx)
    }
}
impl<W, Q> Kill for PidfdReaper<W, Q>
where
    W: Wait + Unpin + Kill,
    Q: OrphanQueue<W> + Unpin,
{
    /// Forwards the kill signal to the wrapped child handle.
    fn kill(&mut self) -> io::Result<()> {
        self.inner_mut().kill()
    }
}
impl<W, Q> Drop for PidfdReaper<W, Q>
where
    W: Wait + Unpin,
    Q: OrphanQueue<W> + Unpin,
{
    fn drop(&mut self) {
        let mut orphan = self.inner.take().expect("inner has gone away").inner;
        // If the child has already exited, reaping it here is enough;
        // otherwise hand it to the orphan queue so it is eventually waited
        // on and never left as a zombie.
        match orphan.try_wait() {
            Ok(Some(_)) => {}
            _ => self.orphan_queue.push_orphan(orphan),
        }
    }
}
// Integration tests for `PidfdReaper`. Disabled under loom and miri, which
// cannot spawn real processes.
#[cfg(all(test, not(loom), not(miri)))]
mod test {
    use super::*;
    use crate::{
        process::unix::orphan::test::MockQueue,
        runtime::{Builder as RuntimeBuilder, Runtime},
    };
    use std::process::{Command, Output};
    // Single-threaded runtime with the I/O driver enabled, as required to
    // register the pidfd with the reactor.
    fn create_runtime() -> Runtime {
        RuntimeBuilder::new_current_thread()
            .enable_io()
            .build()
            .unwrap()
    }
    fn run_test(fut: impl Future<Output = ()>) {
        create_runtime().block_on(fut)
    }
    // Parses `uname -r` to decide whether the running kernel supports the
    // pidfd machinery these tests need (Linux >= 5.10).
    fn is_pidfd_available() -> bool {
        let Output { stdout, status, .. } = Command::new("uname").arg("-r").output().unwrap();
        assert!(status.success());
        let stdout = String::from_utf8_lossy(&stdout);
        // Keep only the numeric `major.minor[.patch]` part before any
        // distro suffix such as "-generic".
        let mut kernel_version_iter = match stdout.split_once('-') {
            Some((version, _)) => version,
            _ => &stdout,
        }
        .split('.');
        let major: u32 = kernel_version_iter.next().unwrap().parse().unwrap();
        let minor: u32 = kernel_version_iter.next().unwrap().trim().parse().unwrap();
        major >= 6 || (major == 5 && minor >= 10)
    }
    // Awaiting the reaper yields the child's (successful) exit status and
    // leaves the orphan queue empty.
    #[test]
    fn test_pidfd_reaper_poll() {
        if !is_pidfd_available() {
            eprintln!("pidfd is not available on this linux kernel, skip this test");
            return;
        }
        let queue = MockQueue::new();
        run_test(async {
            let child = Command::new("true").spawn().unwrap();
            let pidfd_reaper = PidfdReaper::new(child, &queue).unwrap();
            let exit_status = pidfd_reaper.await.unwrap();
            assert!(exit_status.success());
        });
        assert!(queue.all_enqueued.borrow().is_empty());
    }
    // Killing through the reaper still lets the exit status be awaited, and
    // the child is reaped rather than orphaned.
    #[test]
    fn test_pidfd_reaper_kill() {
        if !is_pidfd_available() {
            eprintln!("pidfd is not available on this linux kernel, skip this test");
            return;
        }
        let queue = MockQueue::new();
        run_test(async {
            let child = Command::new("sleep").arg("1800").spawn().unwrap();
            let mut pidfd_reaper = PidfdReaper::new(child, &queue).unwrap();
            pidfd_reaper.kill().unwrap();
            let exit_status = pidfd_reaper.await.unwrap();
            assert!(!exit_status.success());
        });
        assert!(queue.all_enqueued.borrow().is_empty());
    }
    // Dropping a reaper whose child is still running must push the child
    // onto the orphan queue exactly once.
    #[test]
    fn test_pidfd_reaper_drop() {
        if !is_pidfd_available() {
            eprintln!("pidfd is not available on this linux kernel, skip this test");
            return;
        }
        let queue = MockQueue::new();
        let mut child = Command::new("sleep").arg("1800").spawn().unwrap();
        run_test(async {
            let _pidfd_reaper = PidfdReaper::new(&mut child, &queue).unwrap();
        });
        assert_eq!(queue.all_enqueued.borrow().len(), 1);
        // Clean up the orphaned child manually so the test leaves no zombie.
        child.kill().unwrap();
        child.wait().unwrap();
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/macros/loom.rs | tokio/src/macros/loom.rs | macro_rules! if_loom {
    // Expands the wrapped tokens only when compiling under `--cfg loom`
    // (the loom concurrency model checker); otherwise the inner
    // `#[cfg(loom)]` compiles the block out.
    ($($t:tt)*) => {{
        #[cfg(loom)]
        {
            $($t)*
        }
    }}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/macros/support.rs | tokio/src/macros/support.rs | cfg_macros! {
    // Re-exports and helpers used by the expansions of tokio's macros
    // (`join!`, `try_join!`, `select!`). Public but `#[doc(hidden)]`: not a
    // stable API surface.
    pub use crate::future::maybe_done::maybe_done;
    pub use std::future::poll_fn;
    pub use crate::macros::join::{BiasedRotator, Rotator, RotatorSelect, SelectNormal, SelectBiased};
    // Pseudo-random number in `0..n`; used by `select!` to pick a fair
    // starting branch.
    #[doc(hidden)]
    pub fn thread_rng_n(n: u32) -> u32 {
        crate::runtime::context::thread_rng_n(n)
    }
    cfg_coop! {
        // With cooperative scheduling compiled in, report whether the
        // current task still has budget; macro expansions yield when it is
        // exhausted.
        #[doc(hidden)]
        #[inline]
        pub fn poll_budget_available(cx: &mut Context<'_>) -> Poll<()> {
            crate::task::coop::poll_budget_available(cx)
        }
    }
    cfg_not_coop! {
        // Without coop, budget is always considered available.
        #[doc(hidden)]
        #[inline]
        pub fn poll_budget_available(_: &mut Context<'_>) -> Poll<()> {
            Poll::Ready(())
        }
    }
}
// Core future/task types re-exported unconditionally for macro expansions.
pub use std::future::{Future, IntoFuture};
pub use std::pin::Pin;
pub use std::task::{Context, Poll};
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/macros/select.rs | tokio/src/macros/select.rs | macro_rules! doc {
($select:item) => {
/// Waits on multiple concurrent branches, returning when the **first** branch
/// completes, cancelling the remaining branches.
///
/// The `select!` macro must be used inside of async functions, closures, and
/// blocks.
///
/// The `select!` macro accepts one or more branches with the following pattern:
///
/// ```text
/// <pattern> = <async expression> (, if <precondition>)? => <handler>,
/// ```
///
/// Additionally, the `select!` macro may include a single, optional `else`
/// branch, which evaluates if none of the other branches match their patterns:
///
/// ```text
/// else => <expression>
/// ```
///
/// The macro aggregates all `<async expression>` expressions and runs them
/// concurrently on the **current** task. Once the **first** expression
/// completes with a value that matches its `<pattern>`, the `select!` macro
/// returns the result of evaluating the completed branch's `<handler>`
/// expression.
///
/// Additionally, each branch may include an optional `if` precondition. If the
/// precondition returns `false`, then the branch is disabled. The provided
/// `<async expression>` is still evaluated but the resulting future is never
/// polled. This capability is useful when using `select!` within a loop.
///
/// The complete lifecycle of a `select!` expression is as follows:
///
/// 1. Evaluate all provided `<precondition>` expressions. If the precondition
/// returns `false`, disable the branch for the remainder of the current call
/// to `select!`. Re-entering `select!` due to a loop clears the "disabled"
/// state.
/// 2. Aggregate the `<async expression>`s from each branch, including the
/// disabled ones. If the branch is disabled, `<async expression>` is still
/// evaluated, but the resulting future is not polled.
/// 3. If **all** branches are disabled: go to step 6.
/// 4. Concurrently await on the results for all remaining `<async expression>`s.
/// 5. Once an `<async expression>` returns a value, attempt to apply the value to the
/// provided `<pattern>`. If the pattern matches, evaluate the `<handler>` and return.
/// If the pattern **does not** match, disable the current branch for the remainder of
/// the current call to `select!`. Continue from step 3.
/// 6. Evaluate the `else` expression. If no else expression is provided, panic.
///
/// # Runtime characteristics
///
/// By running all async expressions on the current task, the expressions are
/// able to run **concurrently** but not in **parallel**. This means all
/// expressions are run on the same thread and if one branch blocks the thread,
/// all other expressions will be unable to continue. If parallelism is
/// required, spawn each async expression using [`tokio::spawn`] and pass the
/// join handle to `select!`.
///
/// [`tokio::spawn`]: crate::spawn
///
/// # Fairness
///
/// By default, `select!` randomly picks a branch to check first. This provides
/// some level of fairness when calling `select!` in a loop with branches that
/// are always ready.
///
/// This behavior can be overridden by adding `biased;` to the beginning of the
/// macro usage. See the examples for details. This will cause `select` to poll
/// the futures in the order they appear from top to bottom. There are a few
/// reasons you may want this:
///
/// - The random number generation of `tokio::select!` has a non-zero CPU cost
/// - Your futures may interact in a way where known polling order is significant
///
/// But there is an important caveat to this mode. It becomes your responsibility
/// to ensure that the polling order of your futures is fair. If for example you
/// are selecting between a stream and a shutdown future, and the stream has a
/// huge volume of messages and zero or nearly zero time between them, you should
/// place the shutdown future earlier in the `select!` list to ensure that it is
/// always polled, and will not be ignored due to the stream being constantly
/// ready.
///
/// # Panics
///
/// The `select!` macro panics if all branches are disabled **and** there is no
/// provided `else` branch. A branch is disabled when the provided `if`
/// precondition returns `false` **or** when the pattern does not match the
/// result of `<async expression>`.
///
/// # Cancellation safety
///
/// When using `select!` in a loop to receive messages from multiple sources,
/// you should make sure that the receive call is cancellation safe to avoid
/// losing messages. This section goes through various common methods and
/// describes whether they are cancel safe. The lists in this section are not
/// exhaustive.
///
/// The following methods are cancellation safe:
///
/// * [`tokio::sync::mpsc::Receiver::recv`](crate::sync::mpsc::Receiver::recv)
/// * [`tokio::sync::mpsc::UnboundedReceiver::recv`](crate::sync::mpsc::UnboundedReceiver::recv)
/// * [`tokio::sync::broadcast::Receiver::recv`](crate::sync::broadcast::Receiver::recv)
/// * [`tokio::sync::watch::Receiver::changed`](crate::sync::watch::Receiver::changed)
/// * [`tokio::net::TcpListener::accept`](crate::net::TcpListener::accept)
/// * [`tokio::net::UnixListener::accept`](crate::net::UnixListener::accept)
/// * [`tokio::signal::unix::Signal::recv`](crate::signal::unix::Signal::recv)
/// * [`tokio::io::AsyncReadExt::read`](crate::io::AsyncReadExt::read) on any `AsyncRead`
/// * [`tokio::io::AsyncReadExt::read_buf`](crate::io::AsyncReadExt::read_buf) on any `AsyncRead`
/// * [`tokio::io::AsyncWriteExt::write`](crate::io::AsyncWriteExt::write) on any `AsyncWrite`
/// * [`tokio::io::AsyncWriteExt::write_buf`](crate::io::AsyncWriteExt::write_buf) on any `AsyncWrite`
/// * [`tokio_stream::StreamExt::next`](https://docs.rs/tokio-stream/0.1/tokio_stream/trait.StreamExt.html#method.next) on any `Stream`
/// * [`futures::stream::StreamExt::next`](https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.next) on any `Stream`
///
/// The following methods are not cancellation safe and can lead to loss of data:
///
/// * [`tokio::io::AsyncReadExt::read_exact`](crate::io::AsyncReadExt::read_exact)
/// * [`tokio::io::AsyncReadExt::read_to_end`](crate::io::AsyncReadExt::read_to_end)
/// * [`tokio::io::AsyncReadExt::read_to_string`](crate::io::AsyncReadExt::read_to_string)
/// * [`tokio::io::AsyncWriteExt::write_all`](crate::io::AsyncWriteExt::write_all)
///
/// The following methods are not cancellation safe because they use a queue for
/// fairness and cancellation makes you lose your place in the queue:
///
/// * [`tokio::sync::Mutex::lock`](crate::sync::Mutex::lock)
/// * [`tokio::sync::RwLock::read`](crate::sync::RwLock::read)
/// * [`tokio::sync::RwLock::write`](crate::sync::RwLock::write)
/// * [`tokio::sync::Semaphore::acquire`](crate::sync::Semaphore::acquire)
/// * [`tokio::sync::Notify::notified`](crate::sync::Notify::notified)
///
/// To determine whether your own methods are cancellation safe, look for the
/// location of uses of `.await`. This is because when an asynchronous method is
/// cancelled, that always happens at an `.await`. If your function behaves
/// correctly even if it is restarted while waiting at an `.await`, then it is
/// cancellation safe.
///
/// Cancellation safety can be defined in the following way: If you have a
/// future that has not yet completed, then it must be a no-op to drop that
/// future and recreate it. This definition is motivated by the situation where
/// a `select!` is used in a loop. Without this guarantee, you would lose your
/// progress when another branch completes and you restart the `select!` by
/// going around the loop.
///
/// Be aware that cancelling something that is not cancellation safe is not
/// necessarily wrong. For example, if you are cancelling a task because the
/// application is shutting down, then you probably don't care that partially
/// read data is lost.
///
/// # Examples
///
/// Basic select with two branches.
///
/// ```
/// async fn do_stuff_async() {
/// // async work
/// }
///
/// async fn more_async_work() {
/// // more here
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// tokio::select! {
/// _ = do_stuff_async() => {
/// println!("do_stuff_async() completed first")
/// }
/// _ = more_async_work() => {
/// println!("more_async_work() completed first")
/// }
/// };
/// # }
/// ```
///
/// Basic stream selecting.
///
/// ```
/// use tokio_stream::{self as stream, StreamExt};
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let mut stream1 = stream::iter(vec![1, 2, 3]);
/// let mut stream2 = stream::iter(vec![4, 5, 6]);
///
/// let next = tokio::select! {
/// v = stream1.next() => v.unwrap(),
/// v = stream2.next() => v.unwrap(),
/// };
///
/// assert!(next == 1 || next == 4);
/// # }
/// ```
///
/// Collect the contents of two streams. In this example, we rely on pattern
/// matching and the fact that `stream::iter` is "fused", i.e. once the stream
/// is complete, all calls to `next()` return `None`.
///
/// ```
/// use tokio_stream::{self as stream, StreamExt};
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let mut stream1 = stream::iter(vec![1, 2, 3]);
/// let mut stream2 = stream::iter(vec![4, 5, 6]);
///
/// let mut values = vec![];
///
/// loop {
/// tokio::select! {
/// Some(v) = stream1.next() => values.push(v),
/// Some(v) = stream2.next() => values.push(v),
/// else => break,
/// }
/// }
///
/// values.sort();
/// assert_eq!(&[1, 2, 3, 4, 5, 6], &values[..]);
/// # }
/// ```
///
/// Using the same future in multiple `select!` expressions can be done by passing
/// a reference to the future. Doing so requires the future to be [`Unpin`]. A
/// future can be made [`Unpin`] by either using [`Box::pin`] or stack pinning.
///
/// [`Unpin`]: std::marker::Unpin
/// [`Box::pin`]: std::boxed::Box::pin
///
/// Here, a stream is consumed for at most 1 second.
///
/// ```
/// use tokio_stream::{self as stream, StreamExt};
/// use tokio::time::{self, Duration};
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let mut stream = stream::iter(vec![1, 2, 3]);
/// let sleep = time::sleep(Duration::from_secs(1));
/// tokio::pin!(sleep);
///
/// loop {
/// tokio::select! {
/// maybe_v = stream.next() => {
/// if let Some(v) = maybe_v {
/// println!("got = {}", v);
/// } else {
/// break;
/// }
/// }
/// _ = &mut sleep => {
/// println!("timeout");
/// break;
/// }
/// }
/// }
/// # }
/// ```
///
/// Joining two values using `select!`.
///
/// ```
/// use tokio::sync::oneshot;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx1, mut rx1) = oneshot::channel();
/// let (tx2, mut rx2) = oneshot::channel();
///
/// tokio::spawn(async move {
/// tx1.send("first").unwrap();
/// });
///
/// tokio::spawn(async move {
/// tx2.send("second").unwrap();
/// });
///
/// let mut a = None;
/// let mut b = None;
///
/// while a.is_none() || b.is_none() {
/// tokio::select! {
/// v1 = (&mut rx1), if a.is_none() => a = Some(v1.unwrap()),
/// v2 = (&mut rx2), if b.is_none() => b = Some(v2.unwrap()),
/// }
/// }
///
/// let res = (a.unwrap(), b.unwrap());
///
/// assert_eq!(res.0, "first");
/// assert_eq!(res.1, "second");
/// # }
/// ```
///
/// Using the `biased;` mode to control polling order.
///
/// ```
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let mut count = 0u8;
///
/// loop {
/// tokio::select! {
/// // If you run this example without `biased;`, the polling order is
/// // pseudo-random, and the assertions on the value of count will
/// // (probably) fail.
/// biased;
///
/// _ = async {}, if count < 1 => {
/// count += 1;
/// assert_eq!(count, 1);
/// }
/// _ = async {}, if count < 2 => {
/// count += 1;
/// assert_eq!(count, 2);
/// }
/// _ = async {}, if count < 3 => {
/// count += 1;
/// assert_eq!(count, 3);
/// }
/// _ = async {}, if count < 4 => {
/// count += 1;
/// assert_eq!(count, 4);
/// }
///
/// else => {
/// break;
/// }
/// };
/// }
/// # }
/// ```
///
/// ## Avoid racy `if` preconditions
///
/// Given that `if` preconditions are used to disable `select!` branches, some
/// caution must be used to avoid missing values.
///
/// For example, here is **incorrect** usage of `sleep` with `if`. The objective
/// is to repeatedly run an asynchronous task for up to 50 milliseconds.
/// However, there is a potential for the `sleep` completion to be missed.
///
/// ```no_run,should_panic
/// use tokio::time::{self, Duration};
///
/// async fn some_async_work() {
/// // do work
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let sleep = time::sleep(Duration::from_millis(50));
/// tokio::pin!(sleep);
///
/// while !sleep.is_elapsed() {
/// tokio::select! {
/// _ = &mut sleep, if !sleep.is_elapsed() => {
/// println!("operation timed out");
/// }
/// _ = some_async_work() => {
/// println!("operation completed");
/// }
/// }
/// }
///
/// panic!("This example shows how not to do it!");
/// # }
/// ```
///
/// In the above example, `sleep.is_elapsed()` may return `true` even if
/// `sleep.poll()` never returned `Ready`. This opens up a potential race
/// condition where `sleep` expires between the `while !sleep.is_elapsed()`
/// check and the call to `select!` resulting in the `some_async_work()` call to
/// run uninterrupted despite the sleep having elapsed.
///
/// One way to write the above example without the race would be:
///
/// ```
/// use tokio::time::{self, Duration};
///
/// async fn some_async_work() {
/// # time::sleep(Duration::from_millis(10)).await;
/// // do work
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let sleep = time::sleep(Duration::from_millis(50));
/// tokio::pin!(sleep);
///
/// loop {
/// tokio::select! {
/// _ = &mut sleep => {
/// println!("operation timed out");
/// break;
/// }
/// _ = some_async_work() => {
/// println!("operation completed");
/// }
/// }
/// }
/// # }
/// ```
/// # Alternatives from the Ecosystem
///
/// The `select!` macro is a powerful tool for managing multiple asynchronous
/// branches, enabling tasks to run concurrently within the same thread. However,
/// its use can introduce challenges, particularly around cancellation safety, which
/// can lead to subtle and hard-to-debug errors. For many use cases, ecosystem
/// alternatives may be preferable as they mitigate these concerns by offering
/// clearer syntax, more predictable control flow, and reducing the need to manually
/// handle issues like fuse semantics or cancellation safety.
///
/// ## Merging Streams
///
/// For cases where `loop { select! { ... } }` is used to poll multiple tasks,
        /// stream merging offers a concise alternative that inherently handles
        /// processing in a cancellation-safe way, removing the risk of data
        /// loss. Libraries such as [`tokio_stream`],
/// [`futures::stream`] and [`futures_concurrency`] provide tools for merging
/// streams and handling their outputs sequentially.
///
/// [`tokio_stream`]: https://docs.rs/tokio-stream/latest/tokio_stream/
/// [`futures::stream`]: https://docs.rs/futures/latest/futures/stream/
/// [`futures_concurrency`]: https://docs.rs/futures-concurrency/latest/futures_concurrency/
///
/// ### Example with `select!`
///
/// ```
/// struct File;
/// struct Channel;
/// struct Socket;
///
/// impl Socket {
/// async fn read_packet(&mut self) -> Vec<u8> {
/// vec![]
/// }
/// }
///
/// async fn read_send(_file: &mut File, _channel: &mut Channel) {
/// // do work that is not cancel safe
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// // open our IO types
/// let mut file = File;
/// let mut channel = Channel;
/// let mut socket = Socket;
///
/// loop {
/// tokio::select! {
/// _ = read_send(&mut file, &mut channel) => { /* ... */ },
/// _data = socket.read_packet() => { /* ... */ }
/// _ = futures::future::ready(()) => break
/// }
/// }
/// # }
/// ```
///
/// ### Moving to `merge`
///
/// By using merge, you can unify multiple asynchronous tasks into a single stream,
/// eliminating the need to manage tasks manually and reducing the risk of
/// unintended behavior like data loss.
///
/// ```
/// use std::pin::pin;
///
/// use futures::stream::unfold;
/// use tokio_stream::StreamExt;
///
/// struct File;
/// struct Channel;
/// struct Socket;
///
/// impl Socket {
/// async fn read_packet(&mut self) -> Vec<u8> {
/// vec![]
/// }
/// }
///
/// async fn read_send(_file: &mut File, _channel: &mut Channel) {
/// // do work that is not cancel safe
/// }
///
/// enum Message {
/// Stop,
/// Sent,
/// Data(Vec<u8>),
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// // open our IO types
/// let file = File;
/// let channel = Channel;
/// let socket = Socket;
///
/// let a = unfold((file, channel), |(mut file, mut channel)| async {
/// read_send(&mut file, &mut channel).await;
/// Some((Message::Sent, (file, channel)))
/// });
/// let b = unfold(socket, |mut socket| async {
/// let data = socket.read_packet().await;
/// Some((Message::Data(data), socket))
/// });
/// let c = tokio_stream::iter([Message::Stop]);
///
/// let mut s = pin!(a.merge(b).merge(c));
/// while let Some(msg) = s.next().await {
/// match msg {
/// Message::Data(_data) => { /* ... */ }
/// Message::Sent => continue,
/// Message::Stop => break,
/// }
/// }
/// # }
/// ```
///
/// ## Racing Futures
///
/// If you need to wait for the first completion among several asynchronous tasks,
/// ecosystem utilities such as
/// [`futures`](https://docs.rs/futures/latest/futures/),
/// [`futures-lite`](https://docs.rs/futures-lite/latest/futures_lite/) or
/// [`futures-concurrency`](https://docs.rs/futures-concurrency/latest/futures_concurrency/)
/// provide streamlined syntax for racing futures:
///
/// - [`futures_concurrency::future::Race`](https://docs.rs/futures-concurrency/latest/futures_concurrency/future/trait.Race.html)
/// - [`futures::select`](https://docs.rs/futures/latest/futures/macro.select.html)
/// - [`futures::stream::select_all`](https://docs.rs/futures/latest/futures/stream/select_all/index.html) (for streams)
/// - [`futures_lite::future::or`](https://docs.rs/futures-lite/latest/futures_lite/future/fn.or.html)
/// - [`futures_lite::future::race`](https://docs.rs/futures-lite/latest/futures_lite/future/fn.race.html)
///
/// ```
/// use futures_concurrency::future::Race;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let task_a = async { Ok("ok") };
/// let task_b = async { Err("error") };
/// let result = (task_a, task_b).race().await;
///
/// match result {
/// Ok(output) => println!("First task completed with: {output}"),
/// Err(err) => eprintln!("Error occurred: {err}"),
/// }
/// # }
/// ```
#[macro_export]
#[cfg_attr(docsrs, doc(cfg(feature = "macros")))]
$select
};
}
#[cfg(doc)]
// Documentation-only stub: shows rustdoc the `select!` grammar (branches,
// optional `biased;`, optional `else`) without exposing the real
// tt-muncher implementation used for non-doc builds.
doc! {macro_rules! select {
    {
        $(
            biased;
        )?
        $(
            $bind:pat = $fut:expr $(, if $cond:expr)? => $handler:expr,
        )*
        $(
            else => $els:expr $(,)?
        )?
    } => {
        unimplemented!()
    };
}}
#[cfg(not(doc))]
doc! {macro_rules! select {
// Uses a declarative macro to do **most** of the work. While it is possible
// to implement fully with a declarative macro, a procedural macro is used
// to enable improved error messages.
//
// The macro is structured as a tt-muncher. All branches are processed and
// normalized. Once the input is normalized, it is passed to the top-most
// rule. When entering the macro, `@{ }` is inserted at the front. This is
// used to collect the normalized input.
//
// The macro only recurses once per branch. This allows using `select!`
// without requiring the user to increase the recursion limit.
// All input is normalized, now transform.
(@ {
// The index of the future to poll first (in bias mode), or the RNG
// expression to use to pick a future to poll first.
start=$start:expr;
// One `_` for each branch in the `select!` macro. Passing this to
// `count!` converts $skip to an integer.
( $($count:tt)* )
// Normalized select branches. `( $skip )` is a set of `_` characters.
// There is one `_` for each select branch **before** this one. Given
// that all input futures are stored in a tuple, $skip is useful for
// generating a pattern to reference the future for the current branch.
// $skip is also used as an argument to `count!`, returning the index of
// the current select branch.
$( ( $($skip:tt)* ) $bind:pat = $fut:expr, if $c:expr => $handle:expr, )+
// Fallback expression used when all select branches have been disabled.
; $else:expr
}) => {{
// Enter a context where stable "function-like" proc macros can be used.
//
// This module is defined within a scope and should not leak out of this
// macro.
#[doc(hidden)]
mod __tokio_select_util {
// Generate an enum with one variant per select branch
$crate::select_priv_declare_output_enum!( ( $($count)* ) );
}
// `tokio::macros::support` is a public, but doc(hidden) module
// including a re-export of all types needed by this macro.
use $crate::macros::support::Future;
use $crate::macros::support::Pin;
use $crate::macros::support::Poll::{Ready, Pending};
const BRANCHES: u32 = $crate::count!( $($count)* );
let mut disabled: __tokio_select_util::Mask = Default::default();
// First, invoke all the pre-conditions. For any that return true,
// set the appropriate bit in `disabled`.
$(
if !$c {
let mask: __tokio_select_util::Mask = 1 << $crate::count!( $($skip)* );
disabled |= mask;
}
)*
// Create a scope to separate polling from handling the output. This
// adds borrow checker flexibility when using the macro.
let mut output = {
// Store each future directly first (that is, without wrapping the future in a call to
// `IntoFuture::into_future`). This allows the `$fut` expression to make use of
// temporary lifetime extension.
//
// https://doc.rust-lang.org/1.58.1/reference/destructors.html#temporary-lifetime-extension
let futures_init = ($( $fut, )+);
// Safety: Nothing must be moved out of `futures`. This is to
// satisfy the requirement of `Pin::new_unchecked` called below.
//
// We can't use the `pin!` macro for this because `futures` is a
// tuple and the standard library provides no way to pin-project to
// the fields of a tuple.
let mut futures = ($( $crate::macros::support::IntoFuture::into_future(
$crate::count_field!( futures_init.$($skip)* )
),)+);
// This assignment makes sure that the `poll_fn` closure only has a
// reference to the futures, instead of taking ownership of them.
// This mitigates the issue described in
// <https://internals.rust-lang.org/t/surprising-soundness-trouble-around-pollfn/17484>
let mut futures = &mut futures;
$crate::macros::support::poll_fn(|cx| {
// Return `Pending` when the task budget is depleted since budget-aware futures
// are going to yield anyway and other futures will not cooperate.
::std::task::ready!($crate::macros::support::poll_budget_available(cx));
// Track if any branch returns pending. If no branch completes
// **or** returns pending, this implies that all branches are
// disabled.
let mut is_pending = false;
// Choose a starting index to begin polling the futures at. In
// practice, this will either be a pseudo-randomly generated
// number by default, or the constant 0 if `biased;` is
// supplied.
let start = $start;
for i in 0..BRANCHES {
let branch;
#[allow(clippy::modulo_one)]
{
branch = (start + i) % BRANCHES;
}
match branch {
$(
#[allow(unreachable_code)]
$crate::count!( $($skip)* ) => {
// First, if the future has previously been
// disabled, do not poll it again. This is done
// by checking the associated bit in the
// `disabled` bit field.
let mask = 1 << branch;
if disabled & mask == mask {
// The future has been disabled.
continue;
}
// Extract the future for this branch from the
// tuple
let ( $($skip,)* fut, .. ) = &mut *futures;
// Safety: future is stored on the stack above
// and never moved.
let mut fut = unsafe { Pin::new_unchecked(fut) };
// Try polling it
let out = match Future::poll(fut, cx) {
Ready(out) => out,
Pending => {
// Track that at least one future is
// still pending and continue polling.
is_pending = true;
continue;
}
};
// Disable the future from future polling.
disabled |= mask;
// The future returned a value, check if matches
// the specified pattern.
#[allow(unused_variables)]
#[allow(unused_mut)]
match &out {
$crate::select_priv_clean_pattern!($bind) => {}
_ => continue,
}
// The select is complete, return the value
return Ready($crate::select_variant!(__tokio_select_util::Out, ($($skip)*))(out));
}
)*
_ => unreachable!("reaching this means there probably is an off by one bug"),
}
}
if is_pending {
Pending
} else {
// All branches have been disabled.
Ready(__tokio_select_util::Out::Disabled)
}
}).await
};
match output {
$(
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | true |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/macros/try_join.rs | tokio/src/macros/try_join.rs | macro_rules! doc {
    // Attaches the shared documentation and export attributes to whichever
    // `try_join!` definition is passed in, so that the rustdoc-only stub and
    // the real implementation below share a single doc comment.
    ($try_join:item) => {
        /// Waits on multiple concurrent branches, returning when **all** branches
        /// complete with `Ok(_)` or on the first `Err(_)`.
        ///
        /// The `try_join!` macro must be used inside of async functions, closures, and
        /// blocks.
        ///
        /// Similar to [`join!`], the `try_join!` macro takes a list of async
        /// expressions and evaluates them concurrently on the same task. Each async
        /// expression evaluates to a future and the futures from each expression are
        /// multiplexed on the current task. The `try_join!` macro returns when **all**
        /// branches return with `Ok` or when the **first** branch returns with `Err`.
        ///
        /// [`join!`]: macro@join
        ///
        /// # Notes
        ///
        /// The supplied futures are stored inline and do not require allocating a
        /// `Vec`.
        ///
        /// ## Runtime characteristics
        ///
        /// By running all async expressions on the current task, the expressions are
        /// able to run **concurrently** but not in **parallel**. This means all
        /// expressions are run on the same thread and if one branch blocks the thread,
        /// all other expressions will be unable to continue. If parallelism is
        /// required, spawn each async expression using [`tokio::spawn`] and pass the
        /// join handle to `try_join!`.
        ///
        /// [`tokio::spawn`]: crate::spawn
        ///
        /// ## Fairness
        ///
        /// By default, `try_join!`'s generated future rotates which
        /// contained future is polled first whenever it is woken.
        ///
        /// This behavior can be overridden by adding `biased;` to the beginning of the
        /// macro usage. See the examples for details. This will cause `try_join` to poll
        /// the futures in the order they appear from top to bottom.
        ///
        /// You may want this if your futures may interact in a way where known polling order is significant.
        ///
        /// But there is an important caveat to this mode. It becomes your responsibility
        /// to ensure that the polling order of your futures is fair. If for example you
        /// are joining a stream and a shutdown future, and the stream has a
        /// huge volume of messages that takes a long time to finish processing per poll, you should
        /// place the shutdown future earlier in the `try_join!` list to ensure that it is
        /// always polled, and will not be delayed due to the stream future taking a long time to return
        /// `Poll::Pending`.
        ///
        /// # Examples
        ///
        /// Basic `try_join` with two branches.
        ///
        /// ```
        /// async fn do_stuff_async() -> Result<(), &'static str> {
        ///     // async work
        /// # Ok(())
        /// }
        ///
        /// async fn more_async_work() -> Result<(), &'static str> {
        ///     // more here
        /// # Ok(())
        /// }
        ///
        /// # #[tokio::main(flavor = "current_thread")]
        /// # async fn main() {
        /// let res = tokio::try_join!(
        ///     do_stuff_async(),
        ///     more_async_work());
        ///
        /// match res {
        ///      Ok((first, second)) => {
        ///          // do something with the values
        ///      }
        ///      Err(err) => {
        ///         println!("processing failed; error = {}", err);
        ///      }
        /// }
        /// # }
        /// ```
        ///
        /// Using `try_join!` with spawned tasks.
        ///
        /// ```
        /// use tokio::task::JoinHandle;
        ///
        /// async fn do_stuff_async() -> Result<(), &'static str> {
        ///     // async work
        /// # Err("failed")
        /// }
        ///
        /// async fn more_async_work() -> Result<(), &'static str> {
        ///     // more here
        /// # Ok(())
        /// }
        ///
        /// async fn flatten<T>(handle: JoinHandle<Result<T, &'static str>>) -> Result<T, &'static str> {
        ///   match handle.await {
        ///     Ok(Ok(result)) => Ok(result),
        ///     Ok(Err(err)) => Err(err),
        ///     Err(err) => Err("handling failed"),
        ///   }
        /// }
        ///
        /// # #[tokio::main(flavor = "current_thread")]
        /// # async fn main() {
        /// let handle1 = tokio::spawn(do_stuff_async());
        /// let handle2 = tokio::spawn(more_async_work());
        /// match tokio::try_join!(flatten(handle1), flatten(handle2)) {
        ///     Ok(val) => {
        ///         // do something with the values
        ///     }
        ///     Err(err) => {
        ///         println!("Failed with {}.", err);
        ///         # assert_eq!(err, "failed");
        ///     }
        /// }
        /// # }
        /// ```
        /// Using the `biased;` mode to control polling order.
        ///
        /// ```
        /// async fn do_stuff_async() -> Result<(), &'static str> {
        ///     // async work
        /// # Ok(())
        /// }
        ///
        /// async fn more_async_work() -> Result<(), &'static str> {
        ///     // more here
        /// # Ok(())
        /// }
        ///
        /// # #[tokio::main(flavor = "current_thread")]
        /// # async fn main() {
        /// let res = tokio::try_join!(
        ///     biased;
        ///     do_stuff_async(),
        ///     more_async_work()
        /// );
        ///
        /// match res {
        ///      Ok((first, second)) => {
        ///          // do something with the values
        ///      }
        ///      Err(err) => {
        ///         println!("processing failed; error = {}", err);
        ///      }
        /// }
        /// # }
        /// ```
        #[macro_export]
        #[cfg_attr(docsrs, doc(cfg(feature = "macros")))]
        $try_join
    };
}
// When building documentation, expose a simplified signature for `try_join!`
// instead of the token-munching implementation below.
#[cfg(doc)]
doc! {macro_rules! try_join {
    ($(biased;)? $($future:expr),*) => { unimplemented!() }
}}
// The real implementation, compiled for everything except rustdoc.
#[cfg(not(doc))]
doc! {macro_rules! try_join {
    (@ {
        // Type of rotator that controls which inner future to start with
        // when polling our output future.
        rotator_select=$rotator_select:ty;
        // One `_` for each branch in the `try_join!` macro. This is not used once
        // normalization is complete.
        ( $($count:tt)* )
        // The expression `0+1+1+ ... +1` equal to the number of branches.
        ( $($total:tt)* )
        // Normalized try_join! branches
        $( ( $($skip:tt)* ) $e:expr, )*
    }) => {{
        // Safety: nothing must be moved out of `futures`. This is to satisfy
        // the requirement of `Pin::new_unchecked` called below.
        //
        // We can't use the `pin!` macro for this because `futures` is a tuple
        // and the standard library provides no way to pin-project to the fields
        // of a tuple.
        let mut futures = ( $( $crate::macros::support::maybe_done($e), )* );

        // This assignment makes sure that the `poll_fn` closure only has a
        // reference to the futures, instead of taking ownership of them. This
        // mitigates the issue described in
        // <https://internals.rust-lang.org/t/surprising-soundness-trouble-around-pollfn/17484>
        let mut futures = &mut futures;

        // Each time the future created by poll_fn is polled, if not using biased mode,
        // a different future is polled first to ensure every future passed to try_join!
        // can make progress even if one of the futures consumes the whole budget.
        let mut rotator = <$rotator_select as $crate::macros::support::RotatorSelect>::Rotator::<{$($total)*}>::default();

        $crate::macros::support::poll_fn(move |cx| {
            const COUNT: u32 = $($total)*;

            let mut is_pending = false;

            // `to_run` counts down so each wakeup polls every branch exactly once.
            let mut to_run = COUNT;

            // The number of futures that will be skipped in the first loop iteration.
            let mut skip = rotator.num_skip();

            // This loop runs twice and the first `skip` futures
            // are not polled in the first iteration.
            loop {
            $(
                if skip == 0 {
                    if to_run == 0 {
                        // Every future has been polled
                        break;
                    }
                    to_run -= 1;

                    // Extract the future for this branch from the tuple.
                    let ( $($skip,)* fut, .. ) = &mut *futures;

                    // Safety: future is stored on the stack above
                    // and never moved.
                    let mut fut = unsafe { $crate::macros::support::Pin::new_unchecked(fut) };

                    // Try polling
                    if $crate::macros::support::Future::poll(fut.as_mut(), cx).is_pending() {
                        is_pending = true;
                    } else if fut.as_mut().output_mut().expect("expected completed future").is_err() {
                        // Short-circuit: the first `Err` resolves the whole
                        // `try_join!` without waiting for the remaining branches.
                        return $crate::macros::support::Poll::Ready(Err(fut.take_output().expect("expected completed future").err().unwrap()))
                    }
                } else {
                    // Future skipped, one less future to skip in the next iteration
                    skip -= 1;
                }
            )*
            }

            if is_pending {
                $crate::macros::support::Poll::Pending
            } else {
                // All branches completed with `Ok`: unwrap every output into a tuple.
                $crate::macros::support::Poll::Ready(Ok(($({
                    // Extract the future for this branch from the tuple.
                    let ( $($skip,)* fut, .. ) = &mut futures;

                    // Safety: future is stored on the stack above
                    // and never moved.
                    let mut fut = unsafe { $crate::macros::support::Pin::new_unchecked(fut) };

                    fut
                        .take_output()
                        .expect("expected completed future")
                        .ok()
                        .expect("expected Ok(_)")
                },)*)))
            }
        }).await
    }};

    // ===== Normalize =====

    (@ { rotator_select=$rotator_select:ty; ( $($s:tt)* ) ( $($n:tt)* ) $($t:tt)* } $e:expr, $($r:tt)* ) => {
        $crate::try_join!(@{ rotator_select=$rotator_select; ($($s)* _) ($($n)* + 1) $($t)* ($($s)*) $e, } $($r)*)
    };

    // ===== Entry point =====

    ( biased; $($e:expr),+ $(,)?) => {
        $crate::try_join!(@{ rotator_select=$crate::macros::support::SelectBiased; () (0) } $($e,)*)
    };

    ( $($e:expr),+ $(,)?) => {
        $crate::try_join!(@{ rotator_select=$crate::macros::support::SelectNormal; () (0) } $($e,)*)
    };

    // With no branches, `try_join!` immediately resolves to `Ok(())`.
    (biased;) => { async { Ok(()) }.await };

    () => { async { Ok(()) }.await }
}}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/macros/trace.rs | tokio/src/macros/trace.rs | cfg_trace! {
    // Emits a `runtime::resource::poll_op` trace event recording whether the
    // named operation was ready.
    macro_rules! trace_op {
        ($name:expr, $readiness:literal) => {
            tracing::trace!(
                target: "runtime::resource::poll_op",
                op_name = $name,
                is_ready = $readiness
            );
        }
    }

    // Traces the readiness of `$poll` and evaluates to the `Ready` value.
    macro_rules! trace_poll_op {
        ($name:expr, $poll:expr $(,)*) => {
            match $poll {
                std::task::Poll::Ready(t) => {
                    trace_op!($name, true);
                    std::task::Poll::Ready(t)
                }
                std::task::Poll::Pending => {
                    trace_op!($name, false);
                    // NB: this `return` exits the *enclosing function*, not
                    // just the macro expansion.
                    return std::task::Poll::Pending;
                }
            }
        };
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/macros/addr_of.rs | tokio/src/macros/addr_of.rs | //! This module defines a macro that lets you go from a raw pointer to a struct
//! to a raw pointer to a field of the struct.

// Generates associated functions that turn a `NonNull<Self>` into a `NonNull`
// to one of its (possibly nested) fields. `addr_of_mut!` is used so that no
// intermediate reference to the struct is ever created.
macro_rules! generate_addr_of_methods {
    (
    impl<$($gen:ident)*> $struct_name:ty {$(
        $(#[$attrs:meta])*
        $vis:vis unsafe fn $fn_name:ident(self: NonNull<Self>) -> NonNull<$field_type:ty> {
            &self$(.$field_name:tt)+
        }
    )*}
    ) => {
        impl<$($gen)*> $struct_name {$(
            #[doc = "# Safety"]
            #[doc = ""]
            #[doc = "The `me` pointer must be valid."]
            $(#[$attrs])*
            $vis unsafe fn $fn_name(me: ::core::ptr::NonNull<Self>) -> ::core::ptr::NonNull<$field_type> {
                let me = me.as_ptr();
                // safety: the caller guarantees that `me` is valid
                let field = unsafe { ::std::ptr::addr_of_mut!((*me) $(.$field_name)+ ) };
                // safety: the field pointer is never null
                unsafe { ::core::ptr::NonNull::new_unchecked(field) }
            }
        )*}
    };
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/macros/mod.rs | tokio/src/macros/mod.rs | #![cfg_attr(not(feature = "full"), allow(unused_macros))]

// Internal macro modules. `#[macro_use]` makes their `macro_rules!` items
// visible throughout the rest of the crate.
#[macro_use]
mod cfg;

#[macro_use]
mod loom;

#[macro_use]
mod pin;

#[macro_use]
mod thread_local;

#[macro_use]
mod addr_of;

// Tracing helpers are only compiled when tracing support is enabled.
cfg_trace! {
    #[macro_use]
    mod trace;
}

// User-facing combinator macros (`select!`, `join!`, `try_join!`) require the
// `macros` feature.
cfg_macros! {
    #[macro_use]
    mod select;

    #[macro_use]
    mod join;

    #[macro_use]
    mod try_join;
}

// Includes re-exports needed to implement macros
#[doc(hidden)]
pub mod support;
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/macros/join.rs | tokio/src/macros/join.rs | macro_rules! doc {
    // Attaches the shared documentation and export attributes to whichever
    // `join!` definition is passed in, so that the rustdoc-only stub and the
    // real implementation below share a single doc comment.
    ($join:item) => {
        /// Waits on multiple concurrent branches, returning when **all** branches
        /// complete.
        ///
        /// The `join!` macro must be used inside of async functions, closures, and
        /// blocks.
        ///
        /// The `join!` macro takes a list of async expressions and evaluates them
        /// concurrently on the same task. Each async expression evaluates to a future
        /// and the futures from each expression are multiplexed on the current task.
        ///
        /// When working with async expressions returning `Result`, `join!` will wait
        /// for **all** branches to complete regardless of whether any complete with
        /// `Err`. Use [`try_join!`] to return early when `Err` is encountered.
        ///
        /// [`try_join!`]: crate::try_join
        ///
        /// # Notes
        ///
        /// The supplied futures are stored inline and do not require allocating a
        /// `Vec`.
        ///
        /// ## Runtime characteristics
        ///
        /// By running all async expressions on the current task, the expressions are
        /// able to run **concurrently** but not in **parallel**. This means all
        /// expressions are run on the same thread and if one branch blocks the thread,
        /// all other expressions will be unable to continue. If parallelism is
        /// required, spawn each async expression using [`tokio::spawn`] and pass the
        /// join handle to `join!`.
        ///
        /// [`tokio::spawn`]: crate::spawn
        ///
        /// ## Fairness
        ///
        /// By default, `join!`'s generated future rotates which contained
        /// future is polled first whenever it is woken.
        ///
        /// This behavior can be overridden by adding `biased;` to the beginning of the
        /// macro usage. See the examples for details. This will cause `join` to poll
        /// the futures in the order they appear from top to bottom.
        ///
        /// You may want this if your futures may interact in a way where known polling order is significant.
        ///
        /// But there is an important caveat to this mode. It becomes your responsibility
        /// to ensure that the polling order of your futures is fair. If for example you
        /// are joining a stream and a shutdown future, and the stream has a
        /// huge volume of messages that takes a long time to finish processing per poll, you should
        /// place the shutdown future earlier in the `join!` list to ensure that it is
        /// always polled, and will not be delayed due to the stream future taking a long time to return
        /// `Poll::Pending`.
        ///
        /// # Examples
        ///
        /// Basic join with two branches
        ///
        /// ```
        /// async fn do_stuff_async() {
        ///     // async work
        /// }
        ///
        /// async fn more_async_work() {
        ///     // more here
        /// }
        ///
        /// # #[tokio::main(flavor = "current_thread")]
        /// # async fn main() {
        /// let (first, second) = tokio::join!(
        ///     do_stuff_async(),
        ///     more_async_work());
        ///
        /// // do something with the values
        /// # }
        /// ```
        ///
        /// Using the `biased;` mode to control polling order.
        ///
        /// ```
        /// # #[cfg(not(target_family = "wasm"))]
        /// # {
        /// async fn do_stuff_async() {
        ///     // async work
        /// }
        ///
        /// async fn more_async_work() {
        ///     // more here
        /// }
        ///
        /// # #[tokio::main(flavor = "current_thread")]
        /// # async fn main() {
        /// let (first, second) = tokio::join!(
        ///     biased;
        ///     do_stuff_async(),
        ///     more_async_work()
        /// );
        ///
        /// // do something with the values
        /// # }
        /// # }
        /// ```
        #[macro_export]
        #[cfg_attr(docsrs, doc(cfg(feature = "macros")))]
        $join
    };
}
// When building documentation, expose a simplified signature for `join!`
// instead of the token-munching implementation below.
#[cfg(doc)]
doc! {macro_rules! join {
    ($(biased;)? $($future:expr),*) => { unimplemented!() }
}}
// The real implementation, compiled for everything except rustdoc.
#[cfg(not(doc))]
doc! {macro_rules! join {
    (@ {
        // Type of rotator that controls which inner future to start with
        // when polling our output future.
        rotator_select=$rotator_select:ty;
        // One `_` for each branch in the `join!` macro. This is not used once
        // normalization is complete.
        ( $($count:tt)* )
        // The expression `0+1+1+ ... +1` equal to the number of branches.
        ( $($total:tt)* )
        // Normalized join! branches
        $( ( $($skip:tt)* ) $e:expr, )*
    }) => {{
        // Safety: nothing must be moved out of `futures`. This is to satisfy
        // the requirement of `Pin::new_unchecked` called below.
        //
        // We can't use the `pin!` macro for this because `futures` is a tuple
        // and the standard library provides no way to pin-project to the fields
        // of a tuple.
        let mut futures = ( $( $crate::macros::support::maybe_done($e), )* );

        // This assignment makes sure that the `poll_fn` closure only has a
        // reference to the futures, instead of taking ownership of them. This
        // mitigates the issue described in
        // <https://internals.rust-lang.org/t/surprising-soundness-trouble-around-pollfn/17484>
        let mut futures = &mut futures;

        // Each time the future created by poll_fn is polled, if not using biased mode,
        // a different future is polled first to ensure every future passed to join!
        // can make progress even if one of the futures consumes the whole budget.
        let mut rotator = <$rotator_select as $crate::macros::support::RotatorSelect>::Rotator::<{$($total)*}>::default();

        $crate::macros::support::poll_fn(move |cx| {
            const COUNT: u32 = $($total)*;

            let mut is_pending = false;

            // `to_run` counts down so each wakeup polls every branch exactly once.
            let mut to_run = COUNT;

            // The number of futures that will be skipped in the first loop iteration.
            let mut skip = rotator.num_skip();

            // This loop runs twice and the first `skip` futures
            // are not polled in the first iteration.
            loop {
            $(
                if skip == 0 {
                    if to_run == 0 {
                        // Every future has been polled
                        break;
                    }
                    to_run -= 1;

                    // Extract the future for this branch from the tuple.
                    let ( $($skip,)* fut, .. ) = &mut *futures;

                    // Safety: future is stored on the stack above
                    // and never moved.
                    let mut fut = unsafe { $crate::macros::support::Pin::new_unchecked(fut) };

                    // Try polling
                    if $crate::macros::support::Future::poll(fut.as_mut(), cx).is_pending() {
                        is_pending = true;
                    }
                } else {
                    // Future skipped, one less future to skip in the next iteration
                    skip -= 1;
                }
            )*
            }

            if is_pending {
                $crate::macros::support::Poll::Pending
            } else {
                // All branches completed: collect every output into a tuple.
                $crate::macros::support::Poll::Ready(($({
                    // Extract the future for this branch from the tuple.
                    let ( $($skip,)* fut, .. ) = &mut futures;

                    // Safety: future is stored on the stack above
                    // and never moved.
                    let mut fut = unsafe { $crate::macros::support::Pin::new_unchecked(fut) };

                    fut.take_output().expect("expected completed future")
                },)*))
            }
        }).await
    }};

    // ===== Normalize =====

    (@ { rotator_select=$rotator_select:ty; ( $($s:tt)* ) ( $($n:tt)* ) $($t:tt)* } $e:expr, $($r:tt)* ) => {
        $crate::join!(@{ rotator_select=$rotator_select; ($($s)* _) ($($n)* + 1) $($t)* ($($s)*) $e, } $($r)*)
    };

    // ===== Entry point =====

    ( biased; $($e:expr),+ $(,)?) => {
        $crate::join!(@{ rotator_select=$crate::macros::support::SelectBiased; () (0) } $($e,)*)
    };

    ( $($e:expr),+ $(,)?) => {
        $crate::join!(@{ rotator_select=$crate::macros::support::SelectNormal; () (0) } $($e,)*)
    };

    // With no branches, `join!` is an immediately-completed future.
    (biased;) => { async {}.await };

    () => { async {}.await }
}}
/// Helper trait to select which type of `Rotator` to use.
// We need this to allow specifying a const generic without
// colliding with caller const names due to macro hygiene.
pub trait RotatorSelect {
    // The rotator type, parameterized by the number of branches.
    type Rotator<const COUNT: u32>: Default;
}

/// Marker type indicating that the starting branch should
/// rotate each poll.
#[derive(Debug)]
pub struct SelectNormal;

/// Marker type indicating that the starting branch should
/// be the first declared branch each poll.
#[derive(Debug)]
pub struct SelectBiased;

// Default (fair) mode: cycle the starting branch on every poll.
impl RotatorSelect for SelectNormal {
    type Rotator<const COUNT: u32> = Rotator<COUNT>;
}

// `biased;` mode: always start with the first declared branch.
impl RotatorSelect for SelectBiased {
    type Rotator<const COUNT: u32> = BiasedRotator;
}
/// Rotates by one each [`Self::num_skip`] call up to COUNT - 1.
#[derive(Default, Debug)]
pub struct Rotator<const COUNT: u32> {
    next: u32,
}

impl<const COUNT: u32> Rotator<COUNT> {
    /// Returns the current skip count, then advances the internal cursor by
    /// one, wrapping back to zero once it reaches `COUNT`.
    #[inline]
    pub fn num_skip(&mut self) -> u32 {
        let current = self.next;
        let advanced = current + 1;
        // Wrap around so the returned value stays in `0..COUNT`.
        self.next = if advanced == COUNT { 0 } else { advanced };
        current
    }
}
/// [`Self::num_skip`] always returns 0.
// Used by the `biased;` variants of `join!`/`try_join!`: polling always
// starts at the first declared branch, so no futures are ever skipped.
#[derive(Default, Debug)]
pub struct BiasedRotator {}

impl BiasedRotator {
    /// Always returns 0.
    #[inline]
    pub fn num_skip(&mut self) -> u32 {
        0
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/macros/pin.rs | tokio/src/macros/pin.rs | /// Pins a value on the stack.
///
/// Calls to `async fn` return anonymous [`Future`] values that are `!Unpin`.
/// These values must be pinned before they can be polled. Calling `.await` will
/// handle this, but consumes the future. If it is required to call `.await` on
/// a `&mut _` reference, the caller is responsible for pinning the future.
///
/// Pinning may be done by allocating with [`Box::pin`] or by using the stack
/// with the `pin!` macro.
///
/// The following will **fail to compile**:
///
/// ```compile_fail
/// async fn my_async_fn() {
///     // async logic here
/// }
///
/// #[tokio::main]
/// async fn main() {
///     let mut future = my_async_fn();
///     (&mut future).await;
/// }
/// ```
///
/// To make this work requires pinning:
///
/// ```
/// use tokio::pin;
///
/// async fn my_async_fn() {
///     // async logic here
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let future = my_async_fn();
/// pin!(future);
///
/// (&mut future).await;
/// # }
/// ```
///
/// Pinning is useful when using `select!` and stream operators that require `T:
/// Stream + Unpin`.
///
/// [`Future`]: trait@std::future::Future
/// [`Box::pin`]: std::boxed::Box::pin
///
/// # Usage
///
/// The `pin!` macro takes **identifiers** as arguments. It does **not** work
/// with expressions.
///
/// The following does not compile as an expression is passed to `pin!`.
///
/// ```compile_fail
/// async fn my_async_fn() {
///     // async logic here
/// }
///
/// #[tokio::main]
/// async fn main() {
///     let mut future = pin!(my_async_fn());
///     (&mut future).await;
/// }
/// ```
///
/// # Examples
///
/// Using with select:
///
/// ```
/// use tokio::{pin, select};
/// use tokio_stream::{self as stream, StreamExt};
///
/// async fn my_async_fn() {
///     // async logic here
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let mut stream = stream::iter(vec![1, 2, 3, 4]);
///
/// let future = my_async_fn();
/// pin!(future);
///
/// loop {
///     select! {
///         _ = &mut future => {
///             // Stop looping `future` will be polled after completion
///             break;
///         }
///         Some(val) = stream.next() => {
///             println!("got value = {}", val);
///         }
///     }
/// }
/// # }
/// ```
///
/// Because assigning to a variable followed by pinning is common, there is also
/// a variant of the macro that supports doing both in one go.
///
/// ```
/// use tokio::{pin, select};
///
/// async fn my_async_fn() {
///     // async logic here
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// pin! {
///     let future1 = my_async_fn();
///     let future2 = my_async_fn();
/// }
///
/// select! {
///     _ = &mut future1 => {}
///     _ = &mut future2 => {}
/// }
/// # }
/// ```
#[macro_export]
macro_rules! pin {
    ($($x:ident),*) => { $(
        // Move the value to ensure that it is owned
        let mut $x = $x;
        // Shadow the original binding so that it can't be directly accessed
        // ever again.
        #[allow(unused_mut)]
        // SAFETY: the value was moved into a fresh local above and the only
        // remaining way to reach it is through this pinned shadow, so it can
        // never be moved again.
        let mut $x = unsafe {
            $crate::macros::support::Pin::new_unchecked(&mut $x)
        };
    )* };
    // `let x = expr;` form: bind the expression first, then delegate to the
    // identifier arm above to perform the actual pinning.
    ($(
        let $x:ident = $init:expr;
    )*) => {
        $(
            let $x = $init;
            $crate::pin!($x);
        )*
    };
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/macros/thread_local.rs | tokio/src/macros/thread_local.rs | #[cfg(all(loom, test))]
// Under loom, forward to `loom::thread_local!`. Loom's version does not
// support `const { ... }` initializers, so the first arm strips them.
macro_rules! tokio_thread_local {
    ($(#[$attrs:meta])* $vis:vis static $name:ident: $ty:ty = const { $expr:expr } $(;)?) => {
        loom::thread_local! {
            $(#[$attrs])*
            $vis static $name: $ty = $expr;
        }
    };

    ($($tts:tt)+) => { loom::thread_local!{ $($tts)+ } }
}

// Outside of loom tests, forward directly to the standard library macro.
#[cfg(not(all(loom, test)))]
macro_rules! tokio_thread_local {
    ($($tts:tt)+) => {
        ::std::thread_local!{ $($tts)+ }
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/macros/cfg.rs | tokio/src/macros/cfg.rs | #![allow(unused_macros)]

/// Allows specifying arbitrary combinations of features and config flags,
/// which are also propagated to `docsrs` config.
///
/// Each contained item will have the annotations applied
///
/// ## Example usage:
/// ```no-compile
/// feature! {
///     #![any(
///             feature = "process",
///             feature = "sync",
///             feature = "rt",
///             tokio_unstable
///         )]
///     /// docs
///     pub struct MyStruct {};
///     /// docs
///     pub struct AnotherStruct {};
/// }
/// ```
///
macro_rules! feature {
    (
        #![$meta:meta]

        $($item:item)*
    ) => {
        $(
            // Apply the cfg both for compilation and for docs.rs feature labels.
            #[cfg($meta)]
            #[cfg_attr(docsrs, doc(cfg($meta)))]
            $item
        )*
    }
}
/// Enables Windows-specific code.
/// Use this macro instead of `cfg(windows)` to generate docs properly.
macro_rules! cfg_windows {
    ($($item:item)*) => {
        $(
            // `all(doc, docsrs)` keeps the items visible in docs.rs builds on
            // non-Windows hosts.
            #[cfg(any(all(doc, docsrs), windows))]
            #[cfg_attr(docsrs, doc(cfg(windows)))]
            $item
        )*
    }
}

/// Enables Unix-specific code.
/// Use this macro instead of `cfg(unix)` to generate docs properly.
macro_rules! cfg_unix {
    ($($item:item)*) => {
        $(
            #[cfg(any(all(doc, docsrs), unix))]
            #[cfg_attr(docsrs, doc(cfg(unix)))]
            $item
        )*
    }
}

/// Enables unstable Windows-specific code.
/// Use this macro instead of `cfg(windows)` to generate docs properly.
macro_rules! cfg_unstable_windows {
    ($($item:item)*) => {
        $(
            #[cfg(all(any(all(doc, docsrs), windows), tokio_unstable))]
            #[cfg_attr(docsrs, doc(cfg(all(windows, tokio_unstable))))]
            $item
        )*
    }
}
/// Enables `enter::block_on`.
macro_rules! cfg_block_on {
    ($($item:item)*) => {
        $(
            #[cfg(any(
                    feature = "fs",
                    feature = "net",
                    feature = "io-std",
                    feature = "rt",
                ))]
            $item
        )*
    }
}

/// Enables internal `AtomicWaker` impl.
macro_rules! cfg_atomic_waker_impl {
    ($($item:item)*) => {
        $(
            #[cfg(any(
                feature = "net",
                feature = "process",
                feature = "rt",
                feature = "signal",
                feature = "time",
            ))]
            // loom provides its own synchronization primitives for testing.
            #[cfg(not(loom))]
            $item
        )*
    }
}

/// Enables FreeBSD-specific POSIX AIO code (requires the `net` feature).
macro_rules! cfg_aio {
    ($($item:item)*) => {
        $(
            #[cfg(all(any(docsrs, target_os = "freebsd"), feature = "net"))]
            #[cfg_attr(docsrs,
                doc(cfg(all(target_os = "freebsd", feature = "net")))
            )]
            $item
        )*
    }
}
/// Enables code requiring the `fs` feature.
macro_rules! cfg_fs {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "fs")]
            #[cfg_attr(docsrs, doc(cfg(feature = "fs")))]
            $item
        )*
    }
}

/// Enables code used when any feature that performs blocking I/O is active
/// (`io-std`, `fs`, or `process` on Windows).
macro_rules! cfg_io_blocking {
    ($($item:item)*) => {
        $( #[cfg(any(
                feature = "io-std",
                feature = "fs",
                all(windows, feature = "process"),
            ))] $item )*
    }
}
/// Enables code that requires the I/O driver, with docs.rs annotations.
macro_rules! cfg_io_driver {
    ($($item:item)*) => {
        $(
            #[cfg(any(
                feature = "net",
                all(unix, feature = "process"),
                all(unix, feature = "signal"),
                all(
                    tokio_unstable,
                    feature = "io-uring",
                    feature = "rt",
                    feature = "fs",
                    target_os = "linux"
                )
            ))]
            #[cfg_attr(docsrs, doc(cfg(any(
                feature = "net",
                all(unix, feature = "process"),
                all(unix, feature = "signal"),
                all(
                    tokio_unstable,
                    feature = "io-uring",
                    feature = "rt",
                    feature = "fs",
                    target_os = "linux"
                )
            ))))]
            $item
        )*
    }
}

/// Same feature set as [`cfg_io_driver`], but without the docs.rs annotation
/// (for internal implementation items).
macro_rules! cfg_io_driver_impl {
    ( $( $item:item )* ) => {
        $(
            #[cfg(any(
                feature = "net",
                all(unix, feature = "process"),
                all(unix, feature = "signal"),
                all(
                    tokio_unstable,
                    feature = "io-uring",
                    feature = "rt",
                    feature = "fs",
                    target_os = "linux"
                )
            ))]
            $item
        )*
    }
}

/// Enables code compiled only when the I/O driver is *not* available.
macro_rules! cfg_not_io_driver {
    ($($item:item)*) => {
        $(
            #[cfg(not(any(
                feature = "net",
                all(unix, feature = "process"),
                all(unix, feature = "signal"),
                all(
                    tokio_unstable,
                    feature = "io-uring",
                    feature = "rt",
                    feature = "fs",
                    target_os = "linux"
                )
            )))]
            $item
        )*
    }
}

/// Enables code requiring I/O readiness support (the `net` feature).
macro_rules! cfg_io_readiness {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "net")]
            $item
        )*
    }
}
/// Enables code requiring the `io-std` feature.
macro_rules! cfg_io_std {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "io-std")]
            #[cfg_attr(docsrs, doc(cfg(feature = "io-std")))]
            $item
        )*
    }
}

/// Enables code requiring the `io-util` feature.
macro_rules! cfg_io_util {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "io-util")]
            #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
            $item
        )*
    }
}

/// Enables code compiled only when the `io-util` feature is disabled.
macro_rules! cfg_not_io_util {
    ($($item:item)*) => {
        $( #[cfg(not(feature = "io-util"))] $item )*
    }
}

/// Enables code compiled only under loom (concurrency model checking).
macro_rules! cfg_loom {
    ($($item:item)*) => {
        $( #[cfg(loom)] $item )*
    }
}

/// Enables code compiled only outside of loom.
macro_rules! cfg_not_loom {
    ($($item:item)*) => {
        $( #[cfg(not(loom))] $item )*
    }
}
/// Enables code requiring the `macros` feature.
macro_rules! cfg_macros {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "macros")]
            #[cfg_attr(docsrs, doc(cfg(feature = "macros")))]
            $item
        )*
    }
}

/// Enables unstable metrics code (requires the `tokio_unstable` cfg).
macro_rules! cfg_unstable_metrics {
    ($($item:item)*) => {
        $(
            #[cfg(tokio_unstable)]
            #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))]
            $item
        )*
    }
}

/// Some metrics require 64-bit atomics.
macro_rules! cfg_64bit_metrics {
    ($($item:item)*) => {
        $(
            #[cfg(target_has_atomic = "64")]
            #[cfg_attr(docsrs, doc(cfg(target_has_atomic = "64")))]
            $item
        )*
    }
}

/// Enables fallback code for targets without 64-bit atomics.
macro_rules! cfg_no_64bit_metrics {
    ($($item:item)*) => {
        $(
            #[cfg(not(target_has_atomic = "64"))]
            $item
        )*
    }
}

/// Enables code compiled only when unstable metrics are disabled.
macro_rules! cfg_not_unstable_metrics {
    ($($item:item)*) => {
        $(
            #[cfg(not(tokio_unstable))]
            $item
        )*
    }
}

/// Enables code compiled unless `net` + `rt` + `tokio_unstable` are all on.
macro_rules! cfg_not_rt_and_metrics_and_net {
    ($($item:item)*) => {
        $( #[cfg(not(all(feature = "net", feature = "rt", tokio_unstable)))]$item )*
    }
}
/// Emits the items with either the `net` or `process` feature.
macro_rules! cfg_net_or_process {
    ($($item:item)*) => {
        $(
            #[cfg(any(feature = "net", feature = "process"))]
            #[cfg_attr(docsrs, doc(cfg(any(feature = "net", feature = "process"))))]
            $item
        )*
    }
}
/// Emits the items only with the `net` feature.
macro_rules! cfg_net {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "net")]
            #[cfg_attr(docsrs, doc(cfg(feature = "net")))]
            $item
        )*
    }
}
/// Emits the items with the `net` feature or the unstable Linux io-uring
/// backend (which needs `rt` + `fs` as well).
macro_rules! cfg_net_or_uring {
    ($($item:item)*) => {
        $(
            #[cfg(any(
                feature = "net",
                all(
                    tokio_unstable,
                    feature = "io-uring",
                    feature = "rt",
                    feature = "fs",
                    target_os = "linux",
                )
            ))]
            #[cfg_attr(
                docsrs,
                doc(cfg(any(
                    feature = "net",
                    all(
                        tokio_unstable,
                        feature = "io-uring",
                        feature = "rt",
                        feature = "fs",
                        target_os = "linux",
                    )
                )))
            )]
            $item
        )*
    }
}
/// Emits the items only on Unix with the `net` feature.
macro_rules! cfg_net_unix {
    ($($item:item)*) => {
        $(
            #[cfg(all(unix, feature = "net"))]
            #[cfg_attr(docsrs, doc(cfg(all(unix, feature = "net"))))]
            $item
        )*
    }
}
/// Emits the items on Windows with `net` (also under docs.rs doc builds so
/// the Windows API surface appears in rendered documentation).
macro_rules! cfg_net_windows {
    ($($item:item)*) => {
        $(
            #[cfg(all(any(all(doc, docsrs), windows), feature = "net"))]
            #[cfg_attr(docsrs, doc(cfg(all(windows, feature = "net"))))]
            $item
        )*
    }
}
/// Emits the items with the `process` feature, excluding loom builds and WASI
/// (which has no process support).
macro_rules! cfg_process {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "process")]
            #[cfg_attr(docsrs, doc(cfg(feature = "process")))]
            #[cfg(not(loom))]
            #[cfg(not(target_os = "wasi"))]
            $item
        )*
    }
}
/// Like `cfg_process`, further restricted to Unix (where the process driver exists).
macro_rules! cfg_process_driver {
    ($($item:item)*) => {
        #[cfg(unix)]
        #[cfg(not(loom))]
        cfg_process! { $($item)* }
    }
}
/// Inverse of `cfg_process_driver`.
macro_rules! cfg_not_process_driver {
    ($($item:item)*) => {
        $(
            #[cfg(not(all(unix, not(loom), feature = "process")))]
            $item
        )*
    }
}
/// Emits the items with the `signal` feature, excluding loom builds and WASI.
macro_rules! cfg_signal {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "signal")]
            #[cfg_attr(docsrs, doc(cfg(feature = "signal")))]
            #[cfg(not(loom))]
            #[cfg(not(target_os = "wasi"))]
            $item
        )*
    }
}
/// Internal signal plumbing: needed by `signal` or by Unix `process`
/// (child-exit notification uses signals).
macro_rules! cfg_signal_internal {
    ($($item:item)*) => {
        $(
            #[cfg(any(feature = "signal", all(unix, feature = "process")))]
            #[cfg(not(loom))]
            $item
        )*
    }
}
/// `cfg_signal_internal` further restricted to Unix.
macro_rules! cfg_signal_internal_and_unix {
    ($($item:item)*) => {
        #[cfg(unix)]
        cfg_signal_internal! { $($item)* }
    }
}
/// Inverse of the Unix `cfg_signal_internal` gate.
macro_rules! cfg_not_signal_internal {
    ($($item:item)*) => {
        $(
            #[cfg(any(loom, not(unix), not(any(feature = "signal", all(unix, feature = "process")))))]
            $item
        )*
    }
}
/// Emits the items only with the `sync` feature.
macro_rules! cfg_sync {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "sync")]
            #[cfg_attr(docsrs, doc(cfg(feature = "sync")))]
            $item
        )*
    }
}
/// Inverse of `cfg_sync`.
macro_rules! cfg_not_sync {
    ($($item:item)*) => {
        $( #[cfg(not(feature = "sync"))] $item )*
    }
}
/// Emits the items only with the `rt` feature (any runtime flavor).
macro_rules! cfg_rt {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "rt")]
            #[cfg_attr(docsrs, doc(cfg(feature = "rt")))]
            $item
        )*
    }
}
/// Inverse of `cfg_rt`.
macro_rules! cfg_not_rt {
    ($($item:item)*) => {
        $( #[cfg(not(feature = "rt"))] $item )*
    }
}
/// Emits the items only with the `rt-multi-thread` feature.
macro_rules! cfg_rt_multi_thread {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "rt-multi-thread")]
            #[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))]
            $item
        )*
    }
}
/// Inverse of `cfg_rt_multi_thread`.
macro_rules! cfg_not_rt_multi_thread {
    ($($item:item)*) => {
        $( #[cfg(not(feature = "rt-multi-thread"))] $item )*
    }
}
/// Emits the items only when task dumps are supported: unstable `taskdump`
/// feature with `rt`, on Linux aarch64/x86/x86_64 only.
macro_rules! cfg_taskdump {
    ($($item:item)*) => {
        $(
            #[cfg(all(
                tokio_unstable,
                feature = "taskdump",
                feature = "rt",
                target_os = "linux",
                any(
                    target_arch = "aarch64",
                    target_arch = "x86",
                    target_arch = "x86_64"
                )
            ))]
            $item
        )*
    };
}
/// Inverse of `cfg_taskdump`.
macro_rules! cfg_not_taskdump {
    ($($item:item)*) => {
        $(
            #[cfg(not(all(
                tokio_unstable,
                feature = "taskdump",
                feature = "rt",
                target_os = "linux",
                any(
                    target_arch = "aarch64",
                    target_arch = "x86",
                    target_arch = "x86_64"
                )
            )))]
            $item
        )*
    };
}
/// Emits the items only with the `test-util` feature (e.g. paused time).
macro_rules! cfg_test_util {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "test-util")]
            #[cfg_attr(docsrs, doc(cfg(feature = "test-util")))]
            $item
        )*
    }
}
/// Inverse of `cfg_test_util`.
macro_rules! cfg_not_test_util {
    ($($item:item)*) => {
        $( #[cfg(not(feature = "test-util"))] $item )*
    }
}
/// Emits the items only with the `time` feature.
macro_rules! cfg_time {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "time")]
            #[cfg_attr(docsrs, doc(cfg(feature = "time")))]
            $item
        )*
    }
}
/// Inverse of `cfg_time`.
macro_rules! cfg_not_time {
    ($($item:item)*) => {
        $( #[cfg(not(feature = "time"))] $item )*
    }
}
/// Emits the items only with unstable tracing support
/// (`--cfg tokio_unstable` + the `tracing` feature).
macro_rules! cfg_trace {
    ($($item:item)*) => {
        $(
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            #[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))]
            $item
        )*
    };
}
/// Emits the items only with `--cfg tokio_unstable`.
macro_rules! cfg_unstable {
    ($($item:item)*) => {
        $(
            #[cfg(tokio_unstable)]
            #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))]
            $item
        )*
    };
}
/// Inverse of `cfg_trace`.
macro_rules! cfg_not_trace {
    ($($item:item)*) => {
        $(
            #[cfg(any(not(tokio_unstable), not(feature = "tracing")))]
            $item
        )*
    }
}
/// Emits the items when any feature that participates in cooperative
/// scheduling (task budgets) is enabled.
macro_rules! cfg_coop {
    ($($item:item)*) => {
        $(
            #[cfg(any(
                feature = "fs",
                feature = "io-std",
                feature = "net",
                feature = "process",
                feature = "rt",
                feature = "signal",
                feature = "sync",
                feature = "time",
            ))]
            $item
        )*
    }
}
/// Inverse of `cfg_coop`.
macro_rules! cfg_not_coop {
    ($($item:item)*) => {
        $(
            #[cfg(not(any(
                feature = "fs",
                feature = "io-std",
                feature = "net",
                feature = "process",
                feature = "rt",
                feature = "signal",
                feature = "sync",
                feature = "time",
            )))]
            $item
        )*
    }
}
/// Emits the items only on targets with native 64-bit atomics.
macro_rules! cfg_has_atomic_u64 {
    ($($item:item)*) => {
        $(
            #[cfg(target_has_atomic = "64")]
            $item
        )*
    }
}
/// Inverse of `cfg_has_atomic_u64`.
macro_rules! cfg_not_has_atomic_u64 {
    ($($item:item)*) => {
        $(
            #[cfg(not(target_has_atomic = "64"))]
            $item
        )*
    }
}
/// Emits the items when `const Mutex::new` is usable — i.e. everywhere
/// except loom test builds (loom's mutex is not const-constructible).
macro_rules! cfg_has_const_mutex_new {
    ($($item:item)*) => {
        $(
            #[cfg(not(all(loom, test)))]
            $item
        )*
    }
}
/// Inverse of `cfg_has_const_mutex_new` (loom test builds).
macro_rules! cfg_not_has_const_mutex_new {
    ($($item:item)*) => {
        $(
            #[cfg(all(loom, test))]
            $item
        )*
    }
}
/// Emits the items on every target except WASI.
macro_rules! cfg_not_wasi {
    ($($item:item)*) => {
        $(
            #[cfg(not(target_os = "wasi"))]
            $item
        )*
    }
}
/// Emits the items only on wasm targets that are not WASI.
macro_rules! cfg_is_wasm_not_wasi {
    ($($item:item)*) => {
        $(
            #[cfg(all(target_family = "wasm", not(target_os = "wasi")))]
            $item
        )*
    }
}
/// Use this macro to provide two different implementations of the same API — one for stable
/// builds and one for unstable builds.
///
/// Exactly one of the two bodies is compiled, selected by `--cfg tokio_unstable`.
macro_rules! cfg_metrics_variant {
    (stable: {$($stable_code:tt)*}, unstable: {$($unstable_code:tt)*}) => {
        cfg_not_unstable_metrics! {
            $($stable_code)*
        }
        cfg_unstable_metrics! {
            $($unstable_code)*
        }
    }
}
/// Emits the items only with the unstable Linux io-uring backend
/// (`tokio_unstable` + `io-uring` + `rt` + `fs`, Linux only).
macro_rules! cfg_io_uring {
    ($($item:item)*) => {
        $(
            #[cfg(all(
                tokio_unstable,
                feature = "io-uring",
                feature = "rt",
                feature = "fs",
                target_os = "linux",
            ))]
            $item
        )*
    };
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/config.rs | tokio/src/runtime/config.rs | #![cfg_attr(
any(not(all(tokio_unstable, feature = "full")), target_family = "wasm"),
allow(dead_code)
)]
use crate::runtime::{Callback, TaskCallback};
use crate::util::RngSeedGenerator;
/// Internal, already-resolved runtime configuration options.
pub(crate) struct Config {
    /// How many ticks before pulling a task from the global/remote queue?
    pub(crate) global_queue_interval: Option<u32>,
    /// How many ticks before yielding to the driver for timer and I/O events?
    pub(crate) event_interval: u32,
    /// Callback for a worker parking itself
    pub(crate) before_park: Option<Callback>,
    /// Callback for a worker unparking itself
    pub(crate) after_unpark: Option<Callback>,
    /// To run before each task is spawned.
    pub(crate) before_spawn: Option<TaskCallback>,
    /// To run after each task is terminated.
    pub(crate) after_termination: Option<TaskCallback>,
    /// To run before each poll
    #[cfg(tokio_unstable)]
    pub(crate) before_poll: Option<TaskCallback>,
    /// To run after each poll
    #[cfg(tokio_unstable)]
    pub(crate) after_poll: Option<TaskCallback>,
    /// The multi-threaded scheduler includes a per-worker LIFO slot used to
    /// store the last scheduled task. This can improve certain usage patterns,
    /// especially message passing between tasks. However, this LIFO slot is not
    /// currently stealable.
    ///
    /// Eventually, the LIFO slot **will** become stealable, however as a
    /// stop-gap, this unstable option lets users disable the LIFO task.
    pub(crate) disable_lifo_slot: bool,
    /// Random number generator seed to configure runtimes to act in a
    /// deterministic way.
    pub(crate) seed_generator: RngSeedGenerator,
    /// How to build poll time histograms
    pub(crate) metrics_poll_count_histogram: Option<crate::runtime::HistogramBuilder>,
    #[cfg(tokio_unstable)]
    /// How to respond to unhandled task panics.
    pub(crate) unhandled_panic: crate::runtime::UnhandledPanic,
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/park.rs | tokio/src/runtime/park.rs | #![cfg_attr(not(feature = "full"), allow(dead_code))]
use crate::loom::sync::atomic::AtomicUsize;
use crate::loom::sync::{Arc, Condvar, Mutex};
use std::sync::atomic::Ordering::SeqCst;
use std::time::Duration;
/// Blocks the current thread via a mutex/condvar pair stored in `Inner`.
#[derive(Debug)]
pub(crate) struct ParkThread {
    inner: Arc<Inner>,
}
/// Unblocks a thread that was blocked by `ParkThread`.
#[derive(Clone, Debug)]
pub(crate) struct UnparkThread {
    inner: Arc<Inner>,
}
/// State shared between a parker and its unpark handles.
#[derive(Debug)]
struct Inner {
    // One of `EMPTY`, `PARKED`, or `NOTIFIED`; drives the park/unpark protocol.
    state: AtomicUsize,
    mutex: Mutex<()>,
    condvar: Condvar,
}
// Park protocol states: no parker and no pending notification ...
const EMPTY: usize = 0;
// ... a thread is (about to be) blocked on the condvar ...
const PARKED: usize = 1;
// ... an `unpark` has been issued and must be consumed by the next `park`.
const NOTIFIED: usize = 2;
// Per-thread parker used by `CachedParkThread::with_current`.
tokio_thread_local! {
    static CURRENT_PARKER: ParkThread = ParkThread::new();
}
// Bit of a hack, but it is only for loom
#[cfg(loom)]
tokio_thread_local! {
    pub(crate) static CURRENT_THREAD_PARK_COUNT: AtomicUsize = AtomicUsize::new(0);
}
// ==== impl ParkThread ====
impl ParkThread {
    /// Creates a new parker in the `EMPTY` state.
    pub(crate) fn new() -> Self {
        Self {
            inner: Arc::new(Inner {
                state: AtomicUsize::new(EMPTY),
                mutex: Mutex::new(()),
                condvar: Condvar::new(),
            }),
        }
    }
    /// Returns a handle that can wake this parker from another thread.
    pub(crate) fn unpark(&self) -> UnparkThread {
        let inner = self.inner.clone();
        UnparkThread { inner }
    }
    /// Blocks the current thread until `unpark` is called.
    pub(crate) fn park(&mut self) {
        // Under loom, count park calls so tests can observe them.
        #[cfg(loom)]
        CURRENT_THREAD_PARK_COUNT.with(|count| count.fetch_add(1, SeqCst));
        self.inner.park();
    }
    /// Blocks the current thread until `unpark` is called or `duration` elapses.
    pub(crate) fn park_timeout(&mut self, duration: Duration) {
        #[cfg(loom)]
        CURRENT_THREAD_PARK_COUNT.with(|count| count.fetch_add(1, SeqCst));
        self.inner.park_timeout(duration);
    }
    /// Wakes all waiters as part of shutdown.
    pub(crate) fn shutdown(&mut self) {
        self.inner.shutdown();
    }
}
// ==== impl Inner ====
impl Inner {
    /// Blocks until a notification arrives, consuming it. A notification
    /// issued before this call returns immediately without blocking.
    fn park(&self) {
        // If we were previously notified then we consume this notification and
        // return quickly.
        if self
            .state
            .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
            .is_ok()
        {
            return;
        }
        // Otherwise we need to coordinate going to sleep
        let mut m = self.mutex.lock();
        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
            Ok(_) => {}
            Err(NOTIFIED) => {
                // We must read here, even though we know it will be `NOTIFIED`.
                // This is because `unpark` may have been called again since we read
                // `NOTIFIED` in the `compare_exchange` above. We must perform an
                // acquire operation that synchronizes with that `unpark` to observe
                // any writes it made before the call to unpark. To do that we must
                // read from the write it made to `state`.
                let old = self.state.swap(EMPTY, SeqCst);
                debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
                return;
            }
            Err(actual) => panic!("inconsistent park state; actual = {actual}"),
        }
        loop {
            m = self.condvar.wait(m).unwrap();
            if self
                .state
                .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
                .is_ok()
            {
                // got a notification
                return;
            }
            // spurious wakeup, go back to sleep
        }
    }
    /// Parks the current thread for at most `dur`.
    fn park_timeout(&self, dur: Duration) {
        // Like `park` above we have a fast path for an already-notified thread,
        // and afterwards we start coordinating for a sleep. Return quickly.
        if self
            .state
            .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
            .is_ok()
        {
            return;
        }
        // A zero-duration park never blocks.
        if dur == Duration::from_millis(0) {
            return;
        }
        let m = self.mutex.lock();
        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
            Ok(_) => {}
            Err(NOTIFIED) => {
                // We must read again here, see `park`.
                let old = self.state.swap(EMPTY, SeqCst);
                debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
                return;
            }
            Err(actual) => panic!("inconsistent park_timeout state; actual = {actual}"),
        }
        #[cfg(not(all(target_family = "wasm", not(target_feature = "atomics"))))]
        // Wait with a timeout, and if we spuriously wake up or otherwise wake up
        // from a notification, we just want to unconditionally set the state back to
        // empty, either consuming a notification or un-flagging ourselves as
        // parked.
        let (_m, _result) = self.condvar.wait_timeout(m, dur).unwrap();
        #[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
        // Wasm without atomics doesn't have threads, so just sleep.
        {
            let _m = m;
            std::thread::sleep(dur);
        }
        match self.state.swap(EMPTY, SeqCst) {
            NOTIFIED => {} // got a notification, hurray!
            PARKED => {} // no notification, alas
            n => panic!("inconsistent park_timeout state: {n}"),
        }
    }
    /// Wakes the parked thread, if any; otherwise leaves a notification that
    /// the next `park`/`park_timeout` will consume.
    fn unpark(&self) {
        // To ensure the unparked thread will observe any writes we made before
        // this call, we must perform a release operation that `park` can
        // synchronize with. To do that we must write `NOTIFIED` even if `state`
        // is already `NOTIFIED`. That is why this must be a swap rather than a
        // compare-and-swap that returns if it reads `NOTIFIED` on failure.
        match self.state.swap(NOTIFIED, SeqCst) {
            EMPTY => return, // no one was waiting
            NOTIFIED => return, // already unparked
            PARKED => {} // gotta go wake someone up
            _ => panic!("inconsistent state in unpark"),
        }
        // There is a period between when the parked thread sets `state` to
        // `PARKED` (or last checked `state` in the case of a spurious wake
        // up) and when it actually waits on `cvar`. If we were to notify
        // during this period it would be ignored and then when the parked
        // thread went to sleep it would never wake up. Fortunately, it has
        // `lock` locked at this stage so we can acquire `lock` to wait until
        // it is ready to receive the notification.
        //
        // Releasing `lock` before the call to `notify_one` means that when the
        // parked thread wakes it doesn't get woken only to have to wait for us
        // to release `lock`.
        drop(self.mutex.lock());
        self.condvar.notify_one();
    }
    /// Wakes every waiter without setting a notification.
    fn shutdown(&self) {
        self.condvar.notify_all();
    }
}
impl Default for ParkThread {
fn default() -> Self {
Self::new()
}
}
// ===== impl UnparkThread =====
impl UnparkThread {
    /// Wakes the associated `ParkThread` (or stores a pending notification).
    pub(crate) fn unpark(&self) {
        self.inner.unpark();
    }
}
use crate::loom::thread::AccessError;
use std::future::Future;
use std::marker::PhantomData;
use std::rc::Rc;
use std::task::{RawWaker, RawWakerVTable, Waker};
/// Blocks the current thread using a condition variable.
#[derive(Debug)]
pub(crate) struct CachedParkThread {
    // `Rc` marker makes this type `!Send`/`!Sync`, pinning it to the thread
    // whose thread-local `CURRENT_PARKER` it uses.
    _anchor: PhantomData<Rc<()>>,
}
impl CachedParkThread {
    /// Creates a new `ParkThread` handle for the current thread.
    ///
    /// This type cannot be moved to other threads, so it should be created on
    /// the thread that the caller intends to park.
    pub(crate) fn new() -> CachedParkThread {
        CachedParkThread {
            _anchor: PhantomData,
        }
    }
    /// Returns a `Waker` that unparks this thread's parker.
    ///
    /// Errors if the thread-local parker is inaccessible (thread shutting down).
    pub(crate) fn waker(&self) -> Result<Waker, AccessError> {
        self.unpark().map(UnparkThread::into_waker)
    }
    fn unpark(&self) -> Result<UnparkThread, AccessError> {
        self.with_current(ParkThread::unpark)
    }
    /// Parks on the current thread's thread-local parker.
    pub(crate) fn park(&mut self) {
        self.with_current(|park_thread| park_thread.inner.park())
            .unwrap();
    }
    /// Parks with a timeout on the current thread's thread-local parker.
    pub(crate) fn park_timeout(&mut self, duration: Duration) {
        self.with_current(|park_thread| park_thread.inner.park_timeout(duration))
            .unwrap();
    }
    /// Gets a reference to the `ParkThread` handle for this thread.
    fn with_current<F, R>(&self, f: F) -> Result<R, AccessError>
    where
        F: FnOnce(&ParkThread) -> R,
    {
        CURRENT_PARKER.try_with(|inner| f(inner))
    }
    /// Drives `f` to completion on the current thread, parking between polls.
    /// Each poll runs under a coop budget; the waker unparks this thread.
    pub(crate) fn block_on<F: Future>(&mut self, f: F) -> Result<F::Output, AccessError> {
        use std::task::Context;
        use std::task::Poll::Ready;
        let waker = self.waker()?;
        let mut cx = Context::from_waker(&waker);
        pin!(f);
        loop {
            if let Ready(v) = crate::task::coop::budget(|| f.as_mut().poll(&mut cx)) {
                return Ok(v);
            }
            self.park();
        }
    }
}
impl UnparkThread {
    /// Converts this handle into a `Waker` whose `wake` calls `Inner::unpark`.
    pub(crate) fn into_waker(self) -> Waker {
        // SAFETY: the vtable functions (`clone`/`wake`/`wake_by_ref`/
        // `drop_waker` below) manage the `Arc<Inner>` reference count in
        // accordance with the `RawWaker` contract.
        unsafe {
            let raw = unparker_to_raw_waker(self.inner);
            Waker::from_raw(raw)
        }
    }
}
impl Inner {
    /// Converts the `Arc` into a raw pointer, transferring one strong
    /// reference to the caller (must later be reclaimed via `from_raw`).
    #[allow(clippy::wrong_self_convention)]
    fn into_raw(this: Arc<Inner>) -> *const () {
        Arc::into_raw(this) as *const ()
    }
    /// # Safety
    ///
    /// The pointer must have been created by [`Self::into_raw`].
    unsafe fn from_raw(ptr: *const ()) -> Arc<Inner> {
        unsafe { Arc::from_raw(ptr as *const Inner) }
    }
}
// TODO: Is this really an unsafe function?
/// Builds a `RawWaker` that owns one strong reference to `unparker`; the
/// vtable functions below release or duplicate that reference.
unsafe fn unparker_to_raw_waker(unparker: Arc<Inner>) -> RawWaker {
    RawWaker::new(
        Inner::into_raw(unparker),
        &RawWakerVTable::new(clone, wake, wake_by_ref, drop_waker),
    )
}
/// Duplicates the waker: bumps the `Arc` strong count and returns a new
/// `RawWaker` over the same pointer.
///
/// # Safety
///
/// The pointer must have been created by [`Inner::into_raw`].
unsafe fn clone(raw: *const ()) -> RawWaker {
    unsafe {
        Arc::increment_strong_count(raw as *const Inner);
    }
    // Reconstructing the Arc consumes the count we just added, so the
    // original waker's reference remains intact.
    unsafe { unparker_to_raw_waker(Inner::from_raw(raw)) }
}
/// Releases the waker's strong reference without waking.
///
/// # Safety
///
/// The pointer must have been created by [`Inner::into_raw`].
unsafe fn drop_waker(raw: *const ()) {
    drop(unsafe { Inner::from_raw(raw) });
}
/// Wakes by value: unparks and consumes the waker's strong reference
/// (the reconstructed `Arc` is dropped on return).
///
/// # Safety
///
/// The pointer must have been created by [`Inner::into_raw`].
unsafe fn wake(raw: *const ()) {
    let unparker = unsafe { Inner::from_raw(raw) };
    unparker.unpark();
}
/// Wakes by reference: unparks without touching the reference count.
///
/// # Safety
///
/// The pointer must have been created by [`Inner::into_raw`].
unsafe fn wake_by_ref(raw: *const ()) {
    let raw = raw as *const Inner;
    unsafe {
        (*raw).unpark();
    }
}
// Loom-only test hook: how many times the current thread has parked.
#[cfg(loom)]
pub(crate) fn current_thread_park_count() -> usize {
    CURRENT_THREAD_PARK_COUNT.with(|count| count.load(SeqCst))
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/builder.rs | tokio/src/runtime/builder.rs | #![cfg_attr(loom, allow(unused_imports))]
use crate::runtime::handle::Handle;
use crate::runtime::{
blocking, driver, Callback, HistogramBuilder, Runtime, TaskCallback, TimerFlavor,
};
#[cfg(tokio_unstable)]
use crate::runtime::{metrics::HistogramConfiguration, LocalOptions, LocalRuntime, TaskMeta};
use crate::util::rand::{RngSeed, RngSeedGenerator};
use crate::runtime::blocking::BlockingPool;
use crate::runtime::scheduler::CurrentThread;
use std::fmt;
use std::io;
use std::thread::ThreadId;
use std::time::Duration;
/// Builds Tokio Runtime with custom configuration values.
///
/// Methods can be chained in order to set the configuration values. The
/// Runtime is constructed by calling [`build`].
///
/// New instances of `Builder` are obtained via [`Builder::new_multi_thread`]
/// or [`Builder::new_current_thread`].
///
/// See function level documentation for details on the various configuration
/// settings.
///
/// [`build`]: method@Self::build
/// [`Builder::new_multi_thread`]: method@Self::new_multi_thread
/// [`Builder::new_current_thread`]: method@Self::new_current_thread
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime::Builder;
///
/// fn main() {
/// // build runtime
/// let runtime = Builder::new_multi_thread()
/// .worker_threads(4)
/// .thread_name("my-custom-name")
/// .thread_stack_size(3 * 1024 * 1024)
/// .build()
/// .unwrap();
///
/// // use runtime ...
/// }
/// # }
/// ```
pub struct Builder {
    /// Runtime type
    kind: Kind,
    /// Whether or not to enable the I/O driver
    enable_io: bool,
    // NOTE(review): presumably the I/O event capacity handed to the driver
    // (defaults to 1024 in `Builder::new`) — confirm against the driver config.
    nevents: usize,
    /// Whether or not to enable the time driver
    enable_time: bool,
    /// Whether or not the clock should start paused.
    start_paused: bool,
    /// The number of worker threads, used by Runtime.
    ///
    /// Only used when not using the current-thread executor.
    worker_threads: Option<usize>,
    /// Cap on thread usage.
    max_blocking_threads: usize,
    /// Name fn used for threads spawned by the runtime.
    pub(super) thread_name: ThreadNameFn,
    /// Stack size used for threads spawned by the runtime.
    pub(super) thread_stack_size: Option<usize>,
    /// Callback to run after each thread starts.
    pub(super) after_start: Option<Callback>,
    /// To run before each worker thread stops
    pub(super) before_stop: Option<Callback>,
    /// To run before each worker thread is parked.
    pub(super) before_park: Option<Callback>,
    /// To run after each thread is unparked.
    pub(super) after_unpark: Option<Callback>,
    /// To run before each task is spawned.
    pub(super) before_spawn: Option<TaskCallback>,
    /// To run before each poll
    #[cfg(tokio_unstable)]
    pub(super) before_poll: Option<TaskCallback>,
    /// To run after each poll
    #[cfg(tokio_unstable)]
    pub(super) after_poll: Option<TaskCallback>,
    /// To run after each task is terminated.
    pub(super) after_termination: Option<TaskCallback>,
    /// Customizable keep alive timeout for `BlockingPool`
    pub(super) keep_alive: Option<Duration>,
    /// How many ticks before pulling a task from the global/remote queue?
    ///
    /// When `None`, the value is unspecified and behavior details are left to
    /// the scheduler. Each scheduler flavor could choose to either pick its own
    /// default value or use some other strategy to decide when to poll from the
    /// global queue. For example, the multi-threaded scheduler uses a
    /// self-tuning strategy based on mean task poll times.
    pub(super) global_queue_interval: Option<u32>,
    /// How many ticks before yielding to the driver for timer and I/O events?
    pub(super) event_interval: u32,
    /// When true, the multi-threaded scheduler LIFO slot should not be used.
    ///
    /// This option should only be exposed as unstable.
    pub(super) disable_lifo_slot: bool,
    /// Specify a random number generator seed to provide deterministic results
    pub(super) seed_generator: RngSeedGenerator,
    /// When true, enables task poll count histogram instrumentation.
    pub(super) metrics_poll_count_histogram_enable: bool,
    /// Configures the task poll count histogram
    pub(super) metrics_poll_count_histogram: HistogramBuilder,
    /// How to respond to unhandled task panics.
    #[cfg(tokio_unstable)]
    pub(super) unhandled_panic: UnhandledPanic,
    /// Which timer implementation to use; set to `Alternative` by
    /// `enable_alt_timer`.
    timer_flavor: TimerFlavor,
}
// `UnhandledPanic` is only compiled with `--cfg tokio_unstable`.
cfg_unstable! {
    /// How the runtime should respond to unhandled panics.
    ///
    /// Instances of `UnhandledPanic` are passed to `Builder::unhandled_panic`
    /// to configure the runtime behavior when a spawned task panics.
    ///
    /// See [`Builder::unhandled_panic`] for more details.
    #[derive(Debug, Clone)]
    #[non_exhaustive]
    pub enum UnhandledPanic {
        /// The runtime should ignore panics on spawned tasks.
        ///
        /// The panic is forwarded to the task's [`JoinHandle`] and all spawned
        /// tasks continue running normally.
        ///
        /// This is the default behavior.
        ///
        /// # Examples
        ///
        /// ```
        /// # #[cfg(not(target_family = "wasm"))]
        /// # {
        /// use tokio::runtime::{self, UnhandledPanic};
        ///
        /// # pub fn main() {
        /// let rt = runtime::Builder::new_current_thread()
        ///     .unhandled_panic(UnhandledPanic::Ignore)
        ///     .build()
        ///     .unwrap();
        ///
        /// let task1 = rt.spawn(async { panic!("boom"); });
        /// let task2 = rt.spawn(async {
        ///     // This task completes normally
        ///     "done"
        /// });
        ///
        /// rt.block_on(async {
        ///     // The panic on the first task is forwarded to the `JoinHandle`
        ///     assert!(task1.await.is_err());
        ///
        ///     // The second task completes normally
        ///     assert!(task2.await.is_ok());
        /// })
        /// # }
        /// # }
        /// ```
        ///
        /// [`JoinHandle`]: struct@crate::task::JoinHandle
        Ignore,
        /// The runtime should immediately shutdown if a spawned task panics.
        ///
        /// The runtime will immediately shutdown even if the panicked task's
        /// [`JoinHandle`] is still available. All further spawned tasks will be
        /// immediately dropped and call to [`Runtime::block_on`] will panic.
        ///
        /// # Examples
        ///
        /// ```should_panic
        /// use tokio::runtime::{self, UnhandledPanic};
        ///
        /// # pub fn main() {
        /// let rt = runtime::Builder::new_current_thread()
        ///     .unhandled_panic(UnhandledPanic::ShutdownRuntime)
        ///     .build()
        ///     .unwrap();
        ///
        /// rt.spawn(async { panic!("boom"); });
        /// rt.spawn(async {
        ///     // This task never completes.
        /// });
        ///
        /// rt.block_on(async {
        ///     // Do some work
        /// # loop { tokio::task::yield_now().await; }
        /// })
        /// # }
        /// ```
        ///
        /// [`JoinHandle`]: struct@crate::task::JoinHandle
        ShutdownRuntime,
    }
}
/// Shared, type-erased generator for runtime thread names.
pub(crate) type ThreadNameFn = std::sync::Arc<dyn Fn() -> String + Send + Sync + 'static>;
/// Which scheduler flavor the builder will construct.
#[derive(Clone, Copy)]
pub(crate) enum Kind {
    CurrentThread,
    #[cfg(feature = "rt-multi-thread")]
    MultiThread,
}
impl Builder {
/// Returns a new builder with the current thread scheduler selected.
///
/// Configuration methods can be chained on the return value.
///
/// To spawn non-`Send` tasks on the resulting runtime, combine it with a
/// [`LocalSet`], or call [`build_local`] to create a [`LocalRuntime`] (unstable).
///
/// [`LocalSet`]: crate::task::LocalSet
/// [`LocalRuntime`]: crate::runtime::LocalRuntime
/// [`build_local`]: crate::runtime::Builder::build_local
    pub fn new_current_thread() -> Builder {
        // NOTE(review): a much smaller event interval under loom presumably
        // keeps the model-checking state space tractable — confirm.
        #[cfg(loom)]
        const EVENT_INTERVAL: u32 = 4;
        // The number `61` is fairly arbitrary. I believe this value was copied from golang.
        #[cfg(not(loom))]
        const EVENT_INTERVAL: u32 = 61;
        Builder::new(Kind::CurrentThread, EVENT_INTERVAL)
    }
/// Returns a new builder with the multi thread scheduler selected.
///
/// Configuration methods can be chained on the return value.
#[cfg(feature = "rt-multi-thread")]
#[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))]
pub fn new_multi_thread() -> Builder {
// The number `61` is fairly arbitrary. I believe this value was copied from golang.
Builder::new(Kind::MultiThread, 61)
}
/// Returns a new runtime builder initialized with default configuration
/// values.
///
/// Configuration methods can be chained on the return value.
    pub(crate) fn new(kind: Kind, event_interval: u32) -> Builder {
        Builder {
            kind,
            // I/O defaults to "off"
            enable_io: false,
            nevents: 1024,
            // Time defaults to "off"
            enable_time: false,
            // The clock starts not-paused
            start_paused: false,
            // Read from environment variable first in multi-threaded mode.
            // Default to lazy auto-detection (one thread per CPU core)
            worker_threads: None,
            max_blocking_threads: 512,
            // Default thread name
            thread_name: std::sync::Arc::new(|| "tokio-runtime-worker".into()),
            // Do not set a stack size by default
            thread_stack_size: None,
            // No worker thread callbacks
            after_start: None,
            before_stop: None,
            before_park: None,
            after_unpark: None,
            before_spawn: None,
            after_termination: None,
            #[cfg(tokio_unstable)]
            before_poll: None,
            #[cfg(tokio_unstable)]
            after_poll: None,
            keep_alive: None,
            // Defaults for these values depend on the scheduler kind, so we get them
            // as parameters.
            global_queue_interval: None,
            event_interval,
            seed_generator: RngSeedGenerator::new(RngSeed::new()),
            #[cfg(tokio_unstable)]
            unhandled_panic: UnhandledPanic::Ignore,
            // Metrics instrumentation is off until explicitly enabled.
            metrics_poll_count_histogram_enable: false,
            metrics_poll_count_histogram: HistogramBuilder::default(),
            disable_lifo_slot: false,
            // Traditional timer unless `enable_alt_timer` is called.
            timer_flavor: TimerFlavor::Traditional,
        }
    }
/// Enables both I/O and time drivers.
///
/// Doing this is a shorthand for calling `enable_io` and `enable_time`
/// individually. If additional components are added to Tokio in the future,
/// `enable_all` will include these future components.
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime;
///
/// let rt = runtime::Builder::new_multi_thread()
/// .enable_all()
/// .build()
/// .unwrap();
/// # }
/// ```
    pub fn enable_all(&mut self) -> &mut Self {
        // Each call below is cfg-gated to the feature combinations that
        // compile the corresponding `enable_*` method.
        #[cfg(any(
            feature = "net",
            all(unix, feature = "process"),
            all(unix, feature = "signal")
        ))]
        self.enable_io();
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux",
        ))]
        self.enable_io_uring();
        #[cfg(feature = "time")]
        self.enable_time();
        self
    }
/// Enables the alternative timer implementation, which is disabled by default.
///
/// The alternative timer implementation is an unstable feature that may
/// provide better performance on multi-threaded runtimes with a large number
/// of worker threads.
///
/// This option only applies to multi-threaded runtimes. Attempting to use
/// this option with any other runtime type will have no effect.
///
/// [Click here to share your experience with the alternative timer](https://github.com/tokio-rs/tokio/issues/7745)
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime;
///
/// let rt = runtime::Builder::new_multi_thread()
/// .enable_alt_timer()
/// .build()
/// .unwrap();
/// # }
/// ```
#[cfg(all(tokio_unstable, feature = "time", feature = "rt-multi-thread"))]
#[cfg_attr(
docsrs,
doc(cfg(all(tokio_unstable, feature = "time", feature = "rt-multi-thread")))
)]
pub fn enable_alt_timer(&mut self) -> &mut Self {
self.enable_time();
self.timer_flavor = TimerFlavor::Alternative;
self
}
/// Sets the number of worker threads the `Runtime` will use.
///
/// This can be any number above 0 though it is advised to keep this value
/// on the smaller side.
///
/// This will override the value read from environment variable `TOKIO_WORKER_THREADS`.
///
/// # Default
///
/// The default value is the number of cores available to the system.
///
/// When using the `current_thread` runtime this method has no effect.
///
/// # Examples
///
/// ## Multi threaded runtime with 4 threads
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime;
///
/// // This will spawn a work-stealing runtime with 4 worker threads.
/// let rt = runtime::Builder::new_multi_thread()
/// .worker_threads(4)
/// .build()
/// .unwrap();
///
/// rt.spawn(async move {});
/// # }
/// ```
///
/// ## Current thread runtime (will only run on the current thread via `Runtime::block_on`)
///
/// ```
/// use tokio::runtime;
///
/// // Create a runtime that _must_ be driven from a call
/// // to `Runtime::block_on`.
/// let rt = runtime::Builder::new_current_thread()
/// .build()
/// .unwrap();
///
/// // This will run the runtime and future on the current thread
/// rt.block_on(async move {});
/// ```
///
/// # Panics
///
/// This will panic if `val` is not larger than `0`.
#[track_caller]
pub fn worker_threads(&mut self, val: usize) -> &mut Self {
assert!(val > 0, "Worker threads cannot be set to 0");
self.worker_threads = Some(val);
self
}
/// Specifies the limit for additional threads spawned by the Runtime.
///
/// These threads are used for blocking operations like tasks spawned
/// through [`spawn_blocking`], this includes but is not limited to:
/// - [`fs`] operations
/// - dns resolution through [`ToSocketAddrs`]
/// - writing to [`Stdout`] or [`Stderr`]
/// - reading from [`Stdin`]
///
/// Unlike the [`worker_threads`], they are not always active and will exit
/// if left idle for too long. You can change this timeout duration with [`thread_keep_alive`].
///
/// It's recommended to not set this limit too low in order to avoid hanging on operations
/// requiring [`spawn_blocking`].
///
/// The default value is 512.
///
/// # Queue Behavior
///
/// When a blocking task is submitted, it will be inserted into a queue. If available, one of
/// the idle threads will be notified to run the task. Otherwise, if the threshold set by this
/// method has not been reached, a new thread will be spawned. If no idle thread is available
/// and no more threads are allowed to be spawned, the task will remain in the queue until one
/// of the busy threads pick it up. Note that since the queue does not apply any backpressure,
/// it could potentially grow unbounded.
///
/// # Panics
///
/// This will panic if `val` is not larger than `0`.
///
/// # Upgrading from 0.x
///
/// In old versions `max_threads` limited both blocking and worker threads, but the
/// current `max_blocking_threads` does not include async worker threads in the count.
///
/// [`spawn_blocking`]: fn@crate::task::spawn_blocking
/// [`fs`]: mod@crate::fs
/// [`ToSocketAddrs`]: trait@crate::net::ToSocketAddrs
/// [`Stdout`]: struct@crate::io::Stdout
/// [`Stdin`]: struct@crate::io::Stdin
/// [`Stderr`]: struct@crate::io::Stderr
/// [`worker_threads`]: Self::worker_threads
/// [`thread_keep_alive`]: Self::thread_keep_alive
#[track_caller]
#[cfg_attr(docsrs, doc(alias = "max_threads"))]
pub fn max_blocking_threads(&mut self, val: usize) -> &mut Self {
assert!(val > 0, "Max blocking threads cannot be set to 0");
self.max_blocking_threads = val;
self
}
/// Sets name of threads spawned by the `Runtime`'s thread pool.
///
/// The default name is "tokio-runtime-worker".
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// # use tokio::runtime;
///
/// # pub fn main() {
/// let rt = runtime::Builder::new_multi_thread()
/// .thread_name("my-pool")
/// .build();
/// # }
/// # }
/// ```
pub fn thread_name(&mut self, val: impl Into<String>) -> &mut Self {
let val = val.into();
self.thread_name = std::sync::Arc::new(move || val.clone());
self
}
/// Sets a function used to generate the name of threads spawned by the `Runtime`'s thread pool.
///
/// The default name fn is `|| "tokio-runtime-worker".into()`.
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// # use tokio::runtime;
/// # use std::sync::atomic::{AtomicUsize, Ordering};
/// # pub fn main() {
/// let rt = runtime::Builder::new_multi_thread()
/// .thread_name_fn(|| {
/// static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
/// let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst);
/// format!("my-pool-{}", id)
/// })
/// .build();
/// # }
/// # }
/// ```
pub fn thread_name_fn<F>(&mut self, f: F) -> &mut Self
where
F: Fn() -> String + Send + Sync + 'static,
{
self.thread_name = std::sync::Arc::new(f);
self
}
/// Sets the stack size (in bytes) for worker threads.
///
/// The actual stack size may be greater than this value if the platform
/// specifies minimal stack size.
///
/// The default stack size for spawned threads is 2 MiB, though this
/// particular stack size is subject to change in the future.
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// # use tokio::runtime;
///
/// # pub fn main() {
/// let rt = runtime::Builder::new_multi_thread()
/// .thread_stack_size(32 * 1024)
/// .build();
/// # }
/// # }
/// ```
pub fn thread_stack_size(&mut self, val: usize) -> &mut Self {
self.thread_stack_size = Some(val);
self
}
/// Executes function `f` after each thread is started but before it starts
/// doing work.
///
/// This is intended for bookkeeping and monitoring use cases.
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// # use tokio::runtime;
/// # pub fn main() {
/// let runtime = runtime::Builder::new_multi_thread()
/// .on_thread_start(|| {
/// println!("thread started");
/// })
/// .build();
/// # }
/// # }
/// ```
#[cfg(not(loom))]
pub fn on_thread_start<F>(&mut self, f: F) -> &mut Self
where
F: Fn() + Send + Sync + 'static,
{
self.after_start = Some(std::sync::Arc::new(f));
self
}
/// Executes function `f` before each thread stops.
///
/// This is intended for bookkeeping and monitoring use cases.
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
    /// # {
/// # use tokio::runtime;
/// # pub fn main() {
/// let runtime = runtime::Builder::new_multi_thread()
/// .on_thread_stop(|| {
/// println!("thread stopping");
/// })
/// .build();
/// # }
/// # }
/// ```
#[cfg(not(loom))]
pub fn on_thread_stop<F>(&mut self, f: F) -> &mut Self
where
F: Fn() + Send + Sync + 'static,
{
self.before_stop = Some(std::sync::Arc::new(f));
self
}
/// Executes function `f` just before a thread is parked (goes idle).
/// `f` is called within the Tokio context, so functions like [`tokio::spawn`](crate::spawn)
/// can be called, and may result in this thread being unparked immediately.
///
/// This can be used to start work only when the executor is idle, or for bookkeeping
/// and monitoring purposes.
///
/// Note: There can only be one park callback for a runtime; calling this function
/// more than once replaces the last callback defined, rather than adding to it.
///
/// # Examples
///
/// ## Multithreaded executor
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// # use std::sync::Arc;
/// # use std::sync::atomic::{AtomicBool, Ordering};
/// # use tokio::runtime;
/// # use tokio::sync::Barrier;
/// # pub fn main() {
/// let once = AtomicBool::new(true);
/// let barrier = Arc::new(Barrier::new(2));
///
/// let runtime = runtime::Builder::new_multi_thread()
/// .worker_threads(1)
/// .on_thread_park({
/// let barrier = barrier.clone();
/// move || {
/// let barrier = barrier.clone();
/// if once.swap(false, Ordering::Relaxed) {
/// tokio::spawn(async move { barrier.wait().await; });
/// }
/// }
/// })
/// .build()
/// .unwrap();
///
/// runtime.block_on(async {
/// barrier.wait().await;
/// })
/// # }
/// # }
/// ```
/// ## Current thread executor
/// ```
/// # use std::sync::Arc;
/// # use std::sync::atomic::{AtomicBool, Ordering};
/// # use tokio::runtime;
/// # use tokio::sync::Barrier;
/// # pub fn main() {
/// let once = AtomicBool::new(true);
/// let barrier = Arc::new(Barrier::new(2));
///
/// let runtime = runtime::Builder::new_current_thread()
/// .on_thread_park({
/// let barrier = barrier.clone();
/// move || {
/// let barrier = barrier.clone();
/// if once.swap(false, Ordering::Relaxed) {
/// tokio::spawn(async move { barrier.wait().await; });
/// }
/// }
/// })
/// .build()
/// .unwrap();
///
/// runtime.block_on(async {
/// barrier.wait().await;
/// })
/// # }
/// ```
#[cfg(not(loom))]
pub fn on_thread_park<F>(&mut self, f: F) -> &mut Self
where
F: Fn() + Send + Sync + 'static,
{
self.before_park = Some(std::sync::Arc::new(f));
self
}
/// Executes function `f` just after a thread unparks (starts executing tasks).
///
/// This is intended for bookkeeping and monitoring use cases; note that work
/// in this callback will increase latencies when the application has allowed one or
/// more runtime threads to go idle.
///
/// Note: There can only be one unpark callback for a runtime; calling this function
/// more than once replaces the last callback defined, rather than adding to it.
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// # use tokio::runtime;
/// # pub fn main() {
/// let runtime = runtime::Builder::new_multi_thread()
/// .on_thread_unpark(|| {
/// println!("thread unparking");
/// })
/// .build();
///
/// runtime.unwrap().block_on(async {
/// tokio::task::yield_now().await;
/// println!("Hello from Tokio!");
/// })
/// # }
/// # }
/// ```
#[cfg(not(loom))]
pub fn on_thread_unpark<F>(&mut self, f: F) -> &mut Self
where
F: Fn() + Send + Sync + 'static,
{
self.after_unpark = Some(std::sync::Arc::new(f));
self
}
/// Executes function `f` just before a task is spawned.
///
/// `f` is called within the Tokio context, so functions like
/// [`tokio::spawn`](crate::spawn) can be called, and may result in this callback being
/// invoked immediately.
///
/// This can be used for bookkeeping or monitoring purposes.
///
/// Note: There can only be one spawn callback for a runtime; calling this function more
/// than once replaces the last callback defined, rather than adding to it.
///
/// This *does not* support [`LocalSet`](crate::task::LocalSet) at this time.
///
/// **Note**: This is an [unstable API][unstable]. The public API of this type
/// may break in 1.x releases. See [the documentation on unstable
/// features][unstable] for details.
///
/// [unstable]: crate#unstable-features
///
/// # Examples
///
/// ```
/// # use tokio::runtime;
/// # pub fn main() {
/// let runtime = runtime::Builder::new_current_thread()
/// .on_task_spawn(|_| {
/// println!("spawning task");
/// })
/// .build()
/// .unwrap();
///
/// runtime.block_on(async {
/// tokio::task::spawn(std::future::ready(()));
///
/// for _ in 0..64 {
/// tokio::task::yield_now().await;
/// }
/// })
/// # }
/// ```
#[cfg(all(not(loom), tokio_unstable))]
#[cfg_attr(docsrs, doc(cfg(tokio_unstable)))]
pub fn on_task_spawn<F>(&mut self, f: F) -> &mut Self
where
F: Fn(&TaskMeta<'_>) + Send + Sync + 'static,
{
self.before_spawn = Some(std::sync::Arc::new(f));
self
}
/// Executes function `f` just before a task is polled
///
/// `f` is called within the Tokio context, so functions like
/// [`tokio::spawn`](crate::spawn) can be called, and may result in this callback being
/// invoked immediately.
///
/// **Note**: This is an [unstable API][unstable]. The public API of this type
/// may break in 1.x releases. See [the documentation on unstable
/// features][unstable] for details.
///
/// [unstable]: crate#unstable-features
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// # use std::sync::{atomic::AtomicUsize, Arc};
/// # use tokio::task::yield_now;
/// # pub fn main() {
/// let poll_start_counter = Arc::new(AtomicUsize::new(0));
/// let poll_start = poll_start_counter.clone();
/// let rt = tokio::runtime::Builder::new_multi_thread()
/// .enable_all()
/// .on_before_task_poll(move |meta| {
/// println!("task {} is about to be polled", meta.id())
/// })
/// .build()
/// .unwrap();
/// let task = rt.spawn(async {
/// yield_now().await;
/// });
/// let _ = rt.block_on(task);
///
/// # }
/// # }
/// ```
#[cfg(tokio_unstable)]
#[cfg_attr(docsrs, doc(cfg(tokio_unstable)))]
pub fn on_before_task_poll<F>(&mut self, f: F) -> &mut Self
where
F: Fn(&TaskMeta<'_>) + Send + Sync + 'static,
{
self.before_poll = Some(std::sync::Arc::new(f));
self
}
/// Executes function `f` just after a task is polled
///
/// `f` is called within the Tokio context, so functions like
/// [`tokio::spawn`](crate::spawn) can be called, and may result in this callback being
/// invoked immediately.
///
/// **Note**: This is an [unstable API][unstable]. The public API of this type
/// may break in 1.x releases. See [the documentation on unstable
/// features][unstable] for details.
///
/// [unstable]: crate#unstable-features
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// # use std::sync::{atomic::AtomicUsize, Arc};
/// # use tokio::task::yield_now;
/// # pub fn main() {
/// let poll_stop_counter = Arc::new(AtomicUsize::new(0));
/// let poll_stop = poll_stop_counter.clone();
/// let rt = tokio::runtime::Builder::new_multi_thread()
/// .enable_all()
/// .on_after_task_poll(move |meta| {
/// println!("task {} completed polling", meta.id());
/// })
/// .build()
/// .unwrap();
/// let task = rt.spawn(async {
/// yield_now().await;
/// });
/// let _ = rt.block_on(task);
///
/// # }
/// # }
/// ```
#[cfg(tokio_unstable)]
#[cfg_attr(docsrs, doc(cfg(tokio_unstable)))]
pub fn on_after_task_poll<F>(&mut self, f: F) -> &mut Self
where
F: Fn(&TaskMeta<'_>) + Send + Sync + 'static,
{
self.after_poll = Some(std::sync::Arc::new(f));
self
}
/// Executes function `f` just after a task is terminated.
///
/// `f` is called within the Tokio context, so functions like
/// [`tokio::spawn`](crate::spawn) can be called.
///
/// This can be used for bookkeeping or monitoring purposes.
///
/// Note: There can only be one task termination callback for a runtime; calling this
/// function more than once replaces the last callback defined, rather than adding to it.
///
/// This *does not* support [`LocalSet`](crate::task::LocalSet) at this time.
///
/// **Note**: This is an [unstable API][unstable]. The public API of this type
/// may break in 1.x releases. See [the documentation on unstable
/// features][unstable] for details.
///
/// [unstable]: crate#unstable-features
///
/// # Examples
///
/// ```
/// # use tokio::runtime;
/// # pub fn main() {
/// let runtime = runtime::Builder::new_current_thread()
/// .on_task_terminate(|_| {
/// println!("killing task");
/// })
/// .build()
/// .unwrap();
///
/// runtime.block_on(async {
/// tokio::task::spawn(std::future::ready(()));
///
/// for _ in 0..64 {
/// tokio::task::yield_now().await;
/// }
/// })
/// # }
/// ```
#[cfg(all(not(loom), tokio_unstable))]
#[cfg_attr(docsrs, doc(cfg(tokio_unstable)))]
pub fn on_task_terminate<F>(&mut self, f: F) -> &mut Self
where
F: Fn(&TaskMeta<'_>) + Send + Sync + 'static,
{
self.after_termination = Some(std::sync::Arc::new(f));
self
}
/// Creates the configured `Runtime`.
///
/// The returned `Runtime` instance is ready to spawn tasks.
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime::Builder;
///
/// let rt = Builder::new_multi_thread().build().unwrap();
///
/// rt.block_on(async {
/// println!("Hello from the Tokio runtime");
/// });
/// # }
/// ```
pub fn build(&mut self) -> io::Result<Runtime> {
match &self.kind {
Kind::CurrentThread => self.build_current_thread_runtime(),
#[cfg(feature = "rt-multi-thread")]
Kind::MultiThread => self.build_threaded_runtime(),
}
}
/// Creates the configured [`LocalRuntime`].
///
/// The returned [`LocalRuntime`] instance is ready to spawn tasks.
///
/// # Panics
///
/// This will panic if the runtime is configured with [`new_multi_thread()`].
///
/// [`new_multi_thread()`]: Builder::new_multi_thread
///
/// # Examples
///
/// ```
/// use tokio::runtime::{Builder, LocalOptions};
///
/// let rt = Builder::new_current_thread()
/// .build_local(LocalOptions::default())
/// .unwrap();
///
/// rt.spawn_local(async {
/// println!("Hello from the Tokio runtime");
/// });
/// ```
#[allow(unused_variables, unreachable_patterns)]
#[cfg(tokio_unstable)]
#[cfg_attr(docsrs, doc(cfg(tokio_unstable)))]
pub fn build_local(&mut self, options: LocalOptions) -> io::Result<LocalRuntime> {
match &self.kind {
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | true |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/process.rs | tokio/src/runtime/process.rs | #![cfg_attr(not(feature = "rt"), allow(dead_code))]
//! Process driver.
use crate::process::unix::GlobalOrphanQueue;
use crate::runtime::driver;
use crate::runtime::signal::{Driver as SignalDriver, Handle as SignalHandle};
use std::time::Duration;
/// Responsible for cleaning up orphaned child processes on Unix platforms.
#[derive(Debug)]
pub(crate) struct Driver {
    /// Underlying signal driver that performs the actual parking.
    park: SignalDriver,
    /// Handle passed to the orphan queue when reaping after each wakeup.
    signal_handle: SignalHandle,
}
// ===== impl Driver =====
impl Driver {
    /// Creates a new signal `Driver` instance that delegates wakeups to `park`.
    pub(crate) fn new(park: SignalDriver) -> Self {
        Self {
            // Grab the handle before `park` is moved into the struct.
            signal_handle: park.handle(),
            park,
        }
    }

    pub(crate) fn park(&mut self, handle: &driver::Handle) {
        self.park.park(handle);
        // Every wakeup doubles as an opportunity to reap exited children.
        GlobalOrphanQueue::reap_orphans(&self.signal_handle);
    }

    pub(crate) fn park_timeout(&mut self, handle: &driver::Handle, duration: Duration) {
        self.park.park_timeout(handle, duration);
        GlobalOrphanQueue::reap_orphans(&self.signal_handle);
    }

    pub(crate) fn shutdown(&mut self, handle: &driver::Handle) {
        self.park.shutdown(handle);
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/id.rs | tokio/src/runtime/id.rs | use std::fmt;
use std::num::NonZeroU64;
/// An opaque ID that uniquely identifies a runtime relative to all other currently
/// running runtimes.
///
/// # Notes
///
/// - Runtime IDs are unique relative to other *currently running* runtimes.
/// When a runtime completes, the same ID may be used for another runtime.
/// - Runtime IDs are *not* sequential, and do not indicate the order in which
/// runtimes are started or any other data.
/// - The runtime ID of the currently running task can be obtained from the
/// Handle.
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime::Handle;
///
/// #[tokio::main(flavor = "multi_thread", worker_threads = 4)]
/// async fn main() {
/// println!("Current runtime id: {}", Handle::current().id());
/// }
/// # }
/// ```
#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq)]
pub struct Id(NonZeroU64);

impl Id {
    /// Wraps any value convertible into a `NonZeroU64` as a runtime ID.
    pub(crate) fn new(integer: impl Into<NonZeroU64>) -> Self {
        let raw = integer.into();
        Id(raw)
    }
}

impl fmt::Display for Id {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render exactly like the underlying integer.
        write!(f, "{}", self.0)
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/thread_id.rs | tokio/src/runtime/thread_id.rs | use std::num::NonZeroU64;
// Unique identifier assigned to each thread the runtime tracks.
#[derive(Eq, PartialEq, Clone, Copy, Hash, Debug)]
pub(crate) struct ThreadId(NonZeroU64);
impl ThreadId {
    // Returns the next unique `ThreadId`, panicking (via `exhausted`) if the
    // 64-bit ID space ever overflows.
    pub(crate) fn next() -> Self {
        use crate::loom::sync::atomic::{Ordering::Relaxed, StaticAtomicU64};
        // Starts at 0, so the first ID handed out is 1 — this keeps the
        // `NonZeroU64::new(id).unwrap()` below infallible.
        static NEXT_ID: StaticAtomicU64 = StaticAtomicU64::new(0);
        let mut last = NEXT_ID.load(Relaxed);
        loop {
            let id = match last.checked_add(1) {
                Some(id) => id,
                None => exhausted(),
            };
            // `compare_exchange_weak` may fail spuriously; on failure the `Err`
            // carries the currently stored value, which seeds the retry.
            match NEXT_ID.compare_exchange_weak(last, id, Relaxed, Relaxed) {
                Ok(_) => return ThreadId(NonZeroU64::new(id).unwrap()),
                Err(id) => last = id,
            }
        }
    }
}
// Cold, out-of-line panic path for `ThreadId::next`.
// NOTE(review): `dead_code` is allowed — presumably some cfg combinations never
// reference this function; confirm before removing the attribute.
#[cold]
#[allow(dead_code)]
fn exhausted() -> ! {
    panic!("failed to generate unique thread ID: bitspace exhausted")
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/dump.rs | tokio/src/runtime/dump.rs | //! Snapshots of runtime state.
//!
//! See [`Handle::dump`][crate::runtime::Handle::dump].
use crate::task::Id;
use std::{fmt, future::Future, path::Path};
pub use crate::runtime::task::trace::Root;
/// A snapshot of a runtime's state.
///
/// See [`Handle::dump`][crate::runtime::Handle::dump].
#[derive(Debug)]
pub struct Dump {
    // All task snapshots captured when the dump was taken.
    tasks: Tasks,
}
/// Snapshots of tasks.
///
/// See [`Handle::dump`][crate::runtime::Handle::dump].
#[derive(Debug)]
pub struct Tasks {
    // One snapshot per captured task; exposed through `Tasks::iter`.
    tasks: Vec<Task>,
}
/// A snapshot of a task.
///
/// See [`Handle::dump`][crate::runtime::Handle::dump].
#[derive(Debug)]
pub struct Task {
    // Identifier of the captured task.
    id: Id,
    // Execution trace recorded for the task's last poll.
    trace: Trace,
}
/// Represents an address that should not be dereferenced.
///
/// This type exists to get the auto traits correct, the public API
/// uses raw pointers to make life easier for users.
#[derive(Copy, Clone, Debug)]
struct Address(*mut std::ffi::c_void);
// SAFETY: `Address` is never dereferenced by this module; the pointer is only
// stored and handed back to callers as a raw value, so moving or sharing it
// across threads cannot cause a data race here.
unsafe impl Send for Address {}
unsafe impl Sync for Address {}
/// A backtrace symbol.
///
/// This struct provides accessors for backtrace symbols, similar to [`backtrace::BacktraceSymbol`].
#[derive(Clone, Debug)]
pub struct BacktraceSymbol {
    // Raw (possibly mangled) symbol name bytes, if known.
    name: Option<Box<[u8]>>,
    // Demangled rendering of the same name, if known.
    name_demangled: Option<Box<str>>,
    // Starting address of the symbol, if known.
    addr: Option<Address>,
    // Source file that defined the function, when debuginfo is available.
    filename: Option<std::path::PathBuf>,
    // Line number currently executing, when debuginfo is available.
    lineno: Option<u32>,
    // Column number currently executing, when debuginfo is available.
    colno: Option<u32>,
}
impl BacktraceSymbol {
pub(crate) fn from_backtrace_symbol(sym: &backtrace::BacktraceSymbol) -> Self {
let name = sym.name();
Self {
name: name.as_ref().map(|name| name.as_bytes().into()),
name_demangled: name.map(|name| format!("{name}").into()),
addr: sym.addr().map(Address),
filename: sym.filename().map(From::from),
lineno: sym.lineno(),
colno: sym.colno(),
}
}
/// Return the raw name of the symbol.
pub fn name_raw(&self) -> Option<&[u8]> {
self.name.as_deref()
}
/// Return the demangled name of the symbol.
pub fn name_demangled(&self) -> Option<&str> {
self.name_demangled.as_deref()
}
/// Returns the starting address of this symbol.
pub fn addr(&self) -> Option<*mut std::ffi::c_void> {
self.addr.map(|addr| addr.0)
}
/// Returns the file name where this function was defined. If debuginfo
/// is missing, this is likely to return None.
pub fn filename(&self) -> Option<&Path> {
self.filename.as_deref()
}
/// Returns the line number for where this symbol is currently executing.
///
/// If debuginfo is missing, this is likely to return `None`.
pub fn lineno(&self) -> Option<u32> {
self.lineno
}
/// Returns the column number for where this symbol is currently executing.
///
/// If debuginfo is missing, this is likely to return `None`.
pub fn colno(&self) -> Option<u32> {
self.colno
}
}
/// A backtrace frame.
///
/// This struct represents one stack frame in a captured backtrace, similar to [`backtrace::BacktraceFrame`].
#[derive(Clone, Debug)]
pub struct BacktraceFrame {
    // Instruction pointer of the frame.
    ip: Address,
    // Starting address of the frame's function.
    symbol_address: Address,
    // Symbols for the frame; may hold several entries due to inlining.
    symbols: Box<[BacktraceSymbol]>,
}
impl BacktraceFrame {
pub(crate) fn from_resolved_backtrace_frame(frame: &backtrace::BacktraceFrame) -> Self {
Self {
ip: Address(frame.ip()),
symbol_address: Address(frame.symbol_address()),
symbols: frame
.symbols()
.iter()
.map(BacktraceSymbol::from_backtrace_symbol)
.collect(),
}
}
/// Return the instruction pointer of this frame.
///
/// See the ABI docs for your platform for the exact meaning.
pub fn ip(&self) -> *mut std::ffi::c_void {
self.ip.0
}
/// Returns the starting symbol address of the frame of this function.
pub fn symbol_address(&self) -> *mut std::ffi::c_void {
self.symbol_address.0
}
/// Return an iterator over the symbols of this backtrace frame.
///
/// Due to inlining, it is possible for there to be multiple [`BacktraceSymbol`] items relating
/// to a single frame. The first symbol listed is the "innermost function",
/// whereas the last symbol is the outermost (last caller).
pub fn symbols(&self) -> impl Iterator<Item = &BacktraceSymbol> {
self.symbols.iter()
}
}
/// A captured backtrace.
///
/// This struct provides access to each backtrace frame, similar to [`backtrace::Backtrace`].
#[derive(Clone, Debug)]
pub struct Backtrace {
    // Frames stored innermost-first; see `frames` below.
    frames: Box<[BacktraceFrame]>,
}
impl Backtrace {
    /// Return the frames in this backtrace, innermost (in a task dump,
    /// likely to be a leaf future's poll function) first.
    pub fn frames(&self) -> impl Iterator<Item = &BacktraceFrame> {
        self.frames.iter()
    }
}
/// An execution trace of a task's last poll.
///
/// <div class="warning">
///
/// Resolving a backtrace, either via the [`Display`][std::fmt::Display] impl or via
/// [`resolve_backtraces`][Trace::resolve_backtraces], parses debuginfo, which is
/// possibly a CPU-expensive operation that can take a platform-specific but
/// long time to run - often over 100 milliseconds, especially if the current
/// process's binary is big. In some cases, the platform might internally cache some of the
/// debuginfo, so successive calls to `resolve_backtraces` might be faster than
/// the first call, but all guarantees are platform-dependent.
///
/// To avoid blocking the runtime, it is recommended
/// that you resolve backtraces inside of a [`spawn_blocking()`][crate::task::spawn_blocking]
/// and to have some concurrency-limiting mechanism to avoid unexpected performance impact.
/// </div>
///
/// See [`Handle::dump`][crate::runtime::Handle::dump].
#[derive(Debug)]
pub struct Trace {
    // Private internal representation; the wrapper keeps it out of the public API.
    inner: super::task::trace::Trace,
}
impl Trace {
/// Resolve and return a list of backtraces that are involved in polls in this trace.
///
/// The exact backtraces included here are unstable and might change in the future,
/// but you can expect one [`Backtrace`] for every call to
/// [`poll`] to a bottom-level Tokio future - so if something like [`join!`] is
/// used, there will be a backtrace for each future in the join.
///
/// [`poll`]: std::future::Future::poll
/// [`join!`]: macro@join
pub fn resolve_backtraces(&self) -> Vec<Backtrace> {
self.inner
.backtraces()
.iter()
.map(|backtrace| {
let mut backtrace = backtrace::Backtrace::from(backtrace.clone());
backtrace.resolve();
Backtrace {
frames: backtrace
.frames()
.iter()
.map(BacktraceFrame::from_resolved_backtrace_frame)
.collect(),
}
})
.collect()
}
/// Runs the function `f` in tracing mode, and returns its result along with the resulting [`Trace`].
///
/// This is normally called with `f` being the poll function of a future, and will give you a backtrace
/// that tells you what that one future is doing.
///
/// Use [`Handle::dump`] instead if you want to know what *all the tasks* in your program are doing.
/// Also see [`Handle::dump`] for more documentation about dumps, but unlike [`Handle::dump`], this function
/// should not be much slower than calling `f` directly.
///
/// Due to the way tracing is implemented, Tokio leaf futures will usually, instead of doing their
/// actual work, do the equivalent of a `yield_now` (returning a `Poll::Pending` and scheduling the
/// current context for execution), which means forward progress will probably not happen unless
/// you eventually call your future outside of `capture`.
///
/// [`Handle::dump`]: crate::runtime::Handle::dump
///
/// Example usage:
/// ```
/// use std::future::Future;
/// use std::task::Poll;
/// use tokio::runtime::dump::Trace;
///
/// # async fn test_fn() {
/// // some future
/// let mut test_future = std::pin::pin!(async move { tokio::task::yield_now().await; 0 });
///
/// // trace it once, see what it's doing
/// let (trace, res) = Trace::root(std::future::poll_fn(|cx| {
/// let (res, trace) = Trace::capture(|| test_future.as_mut().poll(cx));
/// Poll::Ready((trace, res))
/// })).await;
///
/// // await it to let it finish, outside of a `capture`
/// let output = match res {
/// Poll::Ready(output) => output,
/// Poll::Pending => test_future.await,
/// };
///
/// println!("{trace}");
/// # }
/// ```
///
/// ### Nested calls
///
/// Nested calls to `capture` might return partial traces, but will not do any other undesirable behavior (for
/// example, they will not panic).
pub fn capture<F, R>(f: F) -> (R, Trace)
where
F: FnOnce() -> R,
{
let (res, trace) = super::task::trace::Trace::capture(f);
(res, Trace { inner: trace })
}
/// Create a root for stack traces captured using [`Trace::capture`]. Stack frames above
/// the root will not be captured.
///
/// Nesting multiple [`Root`] futures is fine. Captures will stop at the first root. Not having
/// a [`Root`] is fine as well, but there is no guarantee on where the capture will stop.
    pub fn root<F>(f: F) -> Root<F>
    where
        F: Future,
    {
        // Delegates to the task-trace implementation, which wraps `f` in a `Root`.
        crate::runtime::task::trace::Trace::root(f)
    }
}
impl Dump {
pub(crate) fn new(tasks: Vec<Task>) -> Self {
Self {
tasks: Tasks { tasks },
}
}
/// Tasks in this snapshot.
pub fn tasks(&self) -> &Tasks {
&self.tasks
}
}
impl Tasks {
    /// Iterate over tasks.
    ///
    /// Yields one [`Task`] snapshot per task captured in the dump.
    pub fn iter(&self) -> impl Iterator<Item = &Task> {
        self.tasks.iter()
    }
}
impl Task {
pub(crate) fn new(id: Id, trace: super::task::trace::Trace) -> Self {
Self {
id,
trace: Trace { inner: trace },
}
}
/// Returns a [task ID] that uniquely identifies this task relative to other
/// tasks spawned at the time of the dump.
///
/// **Note**: This is an [unstable API][unstable]. The public API of this type
/// may break in 1.x releases. See [the documentation on unstable
/// features][unstable] for details.
///
/// [task ID]: crate::task::Id
/// [unstable]: crate#unstable-features
#[cfg(tokio_unstable)]
#[cfg_attr(docsrs, doc(cfg(tokio_unstable)))]
pub fn id(&self) -> Id {
self.id
}
/// A trace of this task's state.
pub fn trace(&self) -> &Trace {
&self.trace
}
}
impl fmt::Display for Trace {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Formatting is delegated entirely to the internal trace representation.
        self.inner.fmt(f)
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/runtime.rs | tokio/src/runtime/runtime.rs | use super::BOX_FUTURE_THRESHOLD;
use crate::runtime::blocking::BlockingPool;
use crate::runtime::scheduler::CurrentThread;
use crate::runtime::{context, EnterGuard, Handle};
use crate::task::JoinHandle;
use crate::util::trace::SpawnMeta;
use std::future::Future;
use std::mem;
use std::time::Duration;
cfg_rt_multi_thread! {
use crate::runtime::Builder;
use crate::runtime::scheduler::MultiThread;
}
/// The Tokio runtime.
///
/// The runtime provides an I/O driver, task scheduler, [timer], and
/// blocking pool, necessary for running asynchronous tasks.
///
/// Instances of `Runtime` can be created using [`new`], or [`Builder`].
/// However, most users will use the [`#[tokio::main]`][main] annotation on
/// their entry point instead.
///
/// See [module level][mod] documentation for more details.
///
/// # Shutdown
///
/// Shutting down the runtime is done by dropping the value, or calling
/// [`shutdown_background`] or [`shutdown_timeout`].
///
/// Tasks spawned through [`Runtime::spawn`] keep running until they yield.
/// Then they are dropped. They are not *guaranteed* to run to completion, but
/// *might* do so if they do not yield until completion.
///
/// Blocking functions spawned through [`Runtime::spawn_blocking`] keep running
/// until they return.
///
/// The thread initiating the shutdown blocks until all spawned work has been
/// stopped. This can take an indefinite amount of time. The `Drop`
/// implementation waits forever for this.
///
/// The [`shutdown_background`] and [`shutdown_timeout`] methods can be used if
/// waiting forever is undesired. When the timeout is reached, spawned work that
/// did not stop in time and threads running it are leaked. The work continues
/// to run until one of the stopping conditions is fulfilled, but the thread
/// initiating the shutdown is unblocked.
///
/// Once the runtime has been dropped, any outstanding I/O resources bound to
/// it will no longer function. Calling any method on them will result in an
/// error.
///
/// # Sharing
///
/// There are several ways to establish shared access to a Tokio runtime:
///
/// * Using an <code>[Arc]\<Runtime></code>.
/// * Using a [`Handle`].
/// * Entering the runtime context.
///
/// Using an <code>[Arc]\<Runtime></code> or [`Handle`] allows you to do various
/// things with the runtime such as spawning new tasks or entering the runtime
/// context. Both types can be cloned to create a new handle that allows access
/// to the same runtime. By passing clones into different tasks or threads, you
/// will be able to access the runtime from those tasks or threads.
///
/// The difference between <code>[Arc]\<Runtime></code> and [`Handle`] is that
/// an <code>[Arc]\<Runtime></code> will prevent the runtime from shutting down,
/// whereas a [`Handle`] does not prevent that. This is because shutdown of the
/// runtime happens when the destructor of the `Runtime` object runs.
///
/// Calls to [`shutdown_background`] and [`shutdown_timeout`] require exclusive
/// ownership of the `Runtime` type. When using an <code>[Arc]\<Runtime></code>,
/// this can be achieved via [`Arc::try_unwrap`] when only one strong count
/// reference is left over.
///
/// The runtime context is entered using the [`Runtime::enter`] or
/// [`Handle::enter`] methods, which use a thread-local variable to store the
/// current runtime. Whenever you are inside the runtime context, methods such
/// as [`tokio::spawn`] will use the runtime whose context you are inside.
///
/// [timer]: crate::time
/// [mod]: index.html
/// [`new`]: method@Self::new
/// [`Builder`]: struct@Builder
/// [`Handle`]: struct@Handle
/// [main]: macro@crate::main
/// [`tokio::spawn`]: crate::spawn
/// [`Arc::try_unwrap`]: std::sync::Arc::try_unwrap
/// [Arc]: std::sync::Arc
/// [`shutdown_background`]: method@Runtime::shutdown_background
/// [`shutdown_timeout`]: method@Runtime::shutdown_timeout
#[derive(Debug)]
pub struct Runtime {
/// Task scheduler
scheduler: Scheduler,
/// Handle to runtime, also contains driver handles
handle: Handle,
/// Blocking pool handle, used to signal shutdown
blocking_pool: BlockingPool,
}
/// The flavor of a `Runtime`.
///
/// This is the return type for [`Handle::runtime_flavor`](crate::runtime::Handle::runtime_flavor()).
#[derive(Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum RuntimeFlavor {
/// The flavor that executes all tasks on the current thread.
CurrentThread,
/// The flavor that executes tasks across multiple threads.
MultiThread,
}
/// The runtime scheduler is either a multi-thread or a current-thread executor.
#[derive(Debug)]
pub(super) enum Scheduler {
/// Execute all tasks on the current-thread.
CurrentThread(CurrentThread),
/// Execute tasks across multiple threads.
#[cfg(feature = "rt-multi-thread")]
MultiThread(MultiThread),
}
impl Runtime {
    /// Assembles a `Runtime` from its already-constructed parts.
    ///
    /// Called by the builder once it has created the scheduler, the handle
    /// (which owns the driver handles), and the blocking thread pool.
    pub(super) fn from_parts(
        scheduler: Scheduler,
        handle: Handle,
        blocking_pool: BlockingPool,
    ) -> Runtime {
        Runtime {
            scheduler,
            handle,
            blocking_pool,
        }
    }

    /// Creates a new runtime instance with default configuration values.
    ///
    /// This results in the multi threaded scheduler, I/O driver, and time driver being
    /// initialized.
    ///
    /// Most applications will not need to call this function directly. Instead,
    /// they will use the [`#[tokio::main]` attribute][main]. When a more complex
    /// configuration is necessary, the [runtime builder] may be used.
    ///
    /// See [module level][mod] documentation for more details.
    ///
    /// # Examples
    ///
    /// Creating a new `Runtime` with default configuration values.
    ///
    /// ```
    /// use tokio::runtime::Runtime;
    ///
    /// let rt = Runtime::new()
    ///     .unwrap();
    ///
    /// // Use the runtime...
    /// ```
    ///
    /// [mod]: index.html
    /// [main]: ../attr.main.html
    /// [threaded scheduler]: index.html#threaded-scheduler
    /// [runtime builder]: crate::runtime::Builder
    #[cfg(feature = "rt-multi-thread")]
    #[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))]
    pub fn new() -> std::io::Result<Runtime> {
        Builder::new_multi_thread().enable_all().build()
    }

    /// Returns a handle to the runtime's spawner.
    ///
    /// The returned handle can be used to spawn tasks that run on this runtime, and can
    /// be cloned to allow moving the `Handle` to other threads.
    ///
    /// Calling [`Handle::block_on`] on a handle to a `current_thread` runtime is error-prone.
    /// Refer to the documentation of [`Handle::block_on`] for more.
    ///
    /// # Examples
    ///
    /// ```
    /// # #[cfg(not(target_family = "wasm"))]
    /// # {
    /// use tokio::runtime::Runtime;
    ///
    /// let rt = Runtime::new()
    ///     .unwrap();
    ///
    /// let handle = rt.handle();
    ///
    /// // Use the handle...
    /// # }
    /// ```
    pub fn handle(&self) -> &Handle {
        &self.handle
    }

    /// Spawns a future onto the Tokio runtime.
    ///
    /// This spawns the given future onto the runtime's executor, usually a
    /// thread pool. The thread pool is then responsible for polling the future
    /// until it completes.
    ///
    /// The provided future will start running in the background immediately
    /// when `spawn` is called, even if you don't await the returned
    /// `JoinHandle`.
    ///
    /// See [module level][mod] documentation for more details.
    ///
    /// [mod]: index.html
    ///
    /// # Examples
    ///
    /// ```
    /// # #[cfg(not(target_family = "wasm"))]
    /// # {
    /// use tokio::runtime::Runtime;
    ///
    /// # fn dox() {
    /// // Create the runtime
    /// let rt = Runtime::new().unwrap();
    ///
    /// // Spawn a future onto the runtime
    /// rt.spawn(async {
    ///     println!("now running on a worker thread");
    /// });
    /// # }
    /// # }
    /// ```
    #[track_caller]
    pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
    where
        F: Future + Send + 'static,
        F::Output: Send + 'static,
    {
        let fut_size = mem::size_of::<F>();
        if fut_size > BOX_FUTURE_THRESHOLD {
            // Box large futures to keep them off the stack; see
            // `BOX_FUTURE_THRESHOLD` for the boundary rationale.
            self.handle
                .spawn_named(Box::pin(future), SpawnMeta::new_unnamed(fut_size))
        } else {
            self.handle
                .spawn_named(future, SpawnMeta::new_unnamed(fut_size))
        }
    }

    /// Runs the provided function on an executor dedicated to blocking operations.
    ///
    /// # Examples
    ///
    /// ```
    /// # #[cfg(not(target_family = "wasm"))]
    /// # {
    /// use tokio::runtime::Runtime;
    ///
    /// # fn dox() {
    /// // Create the runtime
    /// let rt = Runtime::new().unwrap();
    ///
    /// // Spawn a blocking function onto the runtime
    /// rt.spawn_blocking(|| {
    ///     println!("now running on a worker thread");
    /// });
    /// # }
    /// # }
    /// ```
    #[track_caller]
    pub fn spawn_blocking<F, R>(&self, func: F) -> JoinHandle<R>
    where
        F: FnOnce() -> R + Send + 'static,
        R: Send + 'static,
    {
        self.handle.spawn_blocking(func)
    }

    /// Runs a future to completion on the Tokio runtime. This is the
    /// runtime's entry point.
    ///
    /// This runs the given future on the current thread, blocking until it is
    /// complete, and yielding its resolved result. Any tasks or timers
    /// which the future spawns internally will be executed on the runtime.
    ///
    /// # Non-worker future
    ///
    /// Note that the future required by this function does not run as a
    /// worker. The expectation is that other tasks are spawned by the future here.
    /// Awaiting on other futures from the future provided here will not
    /// perform as fast as those spawned as workers.
    ///
    /// # Multi thread scheduler
    ///
    /// When the multi thread scheduler is used this will allow futures
    /// to run within the io driver and timer context of the overall runtime.
    ///
    /// Any spawned tasks will continue running after `block_on` returns.
    ///
    /// # Current thread scheduler
    ///
    /// When the current thread scheduler is enabled `block_on`
    /// can be called concurrently from multiple threads. The first call
    /// will take ownership of the io and timer drivers. This means
    /// other threads which do not own the drivers will hook into that one.
    /// When the first `block_on` completes, other threads will be able to
    /// "steal" the driver to allow continued execution of their futures.
    ///
    /// Any spawned tasks will be suspended after `block_on` returns. Calling
    /// `block_on` again will resume previously spawned tasks.
    ///
    /// # Panics
    ///
    /// This function panics if the provided future panics, or if called within an
    /// asynchronous execution context.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # #[cfg(not(target_family = "wasm"))]
    /// # {
    /// use tokio::runtime::Runtime;
    ///
    /// // Create the runtime
    /// let rt = Runtime::new().unwrap();
    ///
    /// // Execute the future, blocking the current thread until completion
    /// rt.block_on(async {
    ///     println!("hello");
    /// });
    /// # }
    /// ```
    ///
    /// [handle]: fn@Handle::block_on
    #[track_caller]
    pub fn block_on<F: Future>(&self, future: F) -> F::Output {
        let fut_size = mem::size_of::<F>();
        if fut_size > BOX_FUTURE_THRESHOLD {
            // Box large futures to keep them off the stack; see
            // `BOX_FUTURE_THRESHOLD` for the boundary rationale.
            self.block_on_inner(Box::pin(future), SpawnMeta::new_unnamed(fut_size))
        } else {
            self.block_on_inner(future, SpawnMeta::new_unnamed(fut_size))
        }
    }

    /// Shared implementation of `block_on`: wraps the future with the
    /// taskdump/tracing instrumentation (when enabled), enters the runtime
    /// context, then dispatches to the active scheduler.
    #[track_caller]
    fn block_on_inner<F: Future>(&self, future: F, _meta: SpawnMeta<'_>) -> F::Output {
        #[cfg(all(
            tokio_unstable,
            feature = "taskdump",
            feature = "rt",
            target_os = "linux",
            any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64")
        ))]
        let future = super::task::trace::Trace::root(future);

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let future = crate::util::trace::task(
            future,
            "block_on",
            _meta,
            crate::runtime::task::Id::next().as_u64(),
        );

        // Enter the runtime context so that `tokio::spawn` and resource
        // constructors resolve to this runtime while we block.
        let _enter = self.enter();

        match &self.scheduler {
            Scheduler::CurrentThread(exec) => exec.block_on(&self.handle.inner, future),
            #[cfg(feature = "rt-multi-thread")]
            Scheduler::MultiThread(exec) => exec.block_on(&self.handle.inner, future),
        }
    }

    /// Enters the runtime context.
    ///
    /// This allows you to construct types that must have an executor
    /// available on creation such as [`Sleep`] or [`TcpStream`]. It will
    /// also allow you to call methods such as [`tokio::spawn`].
    ///
    /// [`Sleep`]: struct@crate::time::Sleep
    /// [`TcpStream`]: struct@crate::net::TcpStream
    /// [`tokio::spawn`]: fn@crate::spawn
    ///
    /// # Example
    ///
    /// ```
    /// # #[cfg(not(target_family = "wasm"))]
    /// # {
    /// use tokio::runtime::Runtime;
    /// use tokio::task::JoinHandle;
    ///
    /// fn function_that_spawns(msg: String) -> JoinHandle<()> {
    ///     // Had we not used `rt.enter` below, this would panic.
    ///     tokio::spawn(async move {
    ///         println!("{}", msg);
    ///     })
    /// }
    ///
    /// fn main() {
    ///     let rt = Runtime::new().unwrap();
    ///
    ///     let s = "Hello World!".to_string();
    ///
    ///     // By entering the context, we tie `tokio::spawn` to this executor.
    ///     let _guard = rt.enter();
    ///     let handle = function_that_spawns(s);
    ///
    ///     // Wait for the task before we end the test.
    ///     rt.block_on(handle).unwrap();
    /// }
    /// # }
    /// ```
    pub fn enter(&self) -> EnterGuard<'_> {
        self.handle.enter()
    }

    /// Shuts down the runtime, waiting for at most `duration` for all spawned
    /// work to stop.
    ///
    /// See the [struct level documentation](Runtime#shutdown) for more details.
    ///
    /// # Examples
    ///
    /// ```
    /// # #[cfg(not(target_family = "wasm"))]
    /// # {
    /// use tokio::runtime::Runtime;
    /// use tokio::task;
    ///
    /// use std::thread;
    /// use std::time::Duration;
    ///
    /// fn main() {
    ///     let runtime = Runtime::new().unwrap();
    ///
    ///     runtime.block_on(async move {
    ///         task::spawn_blocking(move || {
    ///             thread::sleep(Duration::from_secs(10_000));
    ///         });
    ///     });
    ///
    ///     runtime.shutdown_timeout(Duration::from_millis(100));
    /// }
    /// # }
    /// ```
    pub fn shutdown_timeout(mut self, duration: Duration) {
        // Wakeup and shutdown all the worker threads
        self.handle.inner.shutdown();
        self.blocking_pool.shutdown(Some(duration));
    }

    /// Shuts down the runtime, without waiting for any spawned work to stop.
    ///
    /// This can be useful if you want to drop a runtime from within another runtime.
    /// Normally, dropping a runtime will block indefinitely for spawned blocking tasks
    /// to complete, which would normally not be permitted within an asynchronous context.
    /// By calling `shutdown_background()`, you can drop the runtime from such a context.
    ///
    /// Note however, that because we do not wait for any blocking tasks to complete, this
    /// may result in a resource leak (in that any blocking tasks are still running until they
    /// return).
    ///
    /// See the [struct level documentation](Runtime#shutdown) for more details.
    ///
    /// This function is equivalent to calling `shutdown_timeout(Duration::from_nanos(0))`.
    ///
    /// ```
    /// # #[cfg(not(target_family = "wasm"))]
    /// # {
    /// use tokio::runtime::Runtime;
    ///
    /// fn main() {
    ///     let runtime = Runtime::new().unwrap();
    ///
    ///     runtime.block_on(async move {
    ///         let inner_runtime = Runtime::new().unwrap();
    ///         // ...
    ///         inner_runtime.shutdown_background();
    ///     });
    /// }
    /// # }
    /// ```
    pub fn shutdown_background(self) {
        self.shutdown_timeout(Duration::from_nanos(0));
    }

    /// Returns a view that lets you get information about how the runtime
    /// is performing.
    pub fn metrics(&self) -> crate::runtime::RuntimeMetrics {
        self.handle.metrics()
    }
}
// Dropping the runtime shuts down the scheduler; the user-facing guarantees
// are described in the struct-level "shutdown" docs.
impl Drop for Runtime {
    fn drop(&mut self) {
        match &mut self.scheduler {
            Scheduler::CurrentThread(current_thread) => {
                // This ensures that tasks spawned on the current-thread
                // runtime are dropped inside the runtime's context.
                let _guard = context::try_set_current(&self.handle.inner);
                current_thread.shutdown(&self.handle.inner);
            }
            #[cfg(feature = "rt-multi-thread")]
            Scheduler::MultiThread(multi_thread) => {
                // The threaded scheduler drops its tasks on its worker threads, which is
                // already in the runtime's context.
                multi_thread.shutdown(&self.handle.inner);
            }
        }
    }
}
// Explicitly promise unwind safety for `Runtime`, overriding the compiler's
// conservative auto-inference. NOTE(review): this relies on the runtime's
// internals remaining usable after a caught panic — an invariant upheld
// elsewhere in the crate, not visible here.
impl std::panic::UnwindSafe for Runtime {}
impl std::panic::RefUnwindSafe for Runtime {}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/mod.rs | tokio/src/runtime/mod.rs | //! The Tokio runtime.
//!
//! Unlike other Rust programs, asynchronous applications require runtime
//! support. In particular, the following runtime services are necessary:
//!
//! * An **I/O event loop**, called the driver, which drives I/O resources and
//! dispatches I/O events to tasks that depend on them.
//! * A **scheduler** to execute [tasks] that use these I/O resources.
//! * A **timer** for scheduling work to run after a set period of time.
//!
//! Tokio's [`Runtime`] bundles all of these services as a single type, allowing
//! them to be started, shut down, and configured together. However, often it is
//! not required to configure a [`Runtime`] manually, and a user may just use the
//! [`tokio::main`] attribute macro, which creates a [`Runtime`] under the hood.
//!
//! # Choose your runtime
//!
//! Here are the rules of thumb to choose the right runtime for your application.
//!
//! ```plaintext
//! +------------------------------------------------------+
//! | Do you want work-stealing or multi-thread scheduler? |
//! +------------------------------------------------------+
//! | Yes | No
//! | |
//! | |
//! v |
//! +------------------------+ |
//! | Multi-threaded Runtime | |
//! +------------------------+ |
//! |
//! V
//! +--------------------------------+
//! | Do you execute `!Send` Future? |
//! +--------------------------------+
//! | Yes | No
//! | |
//! V |
//! +--------------------------+ |
//! | Local Runtime (unstable) | |
//! +--------------------------+ |
//! |
//! v
//! +------------------------+
//! | Current-thread Runtime |
//! +------------------------+
//! ```
//!
//! The above decision tree is not exhaustive. There are other factors that
//! may influence your decision.
//!
//! ## Bridging with sync code
//!
//! See <https://tokio.rs/tokio/topics/bridging> for details.
//!
//! ## NUMA awareness
//!
//! The tokio runtime is not NUMA (Non-Uniform Memory Access) aware.
//! You may want to start multiple runtimes instead of a single runtime
//! for better performance on NUMA systems.
//!
//! # Usage
//!
//! When no fine tuning is required, the [`tokio::main`] attribute macro can be
//! used.
//!
//! ```no_run
//! # #[cfg(not(target_family = "wasm"))]
//! # {
//! use tokio::net::TcpListener;
//! use tokio::io::{AsyncReadExt, AsyncWriteExt};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let listener = TcpListener::bind("127.0.0.1:8080").await?;
//!
//! loop {
//! let (mut socket, _) = listener.accept().await?;
//!
//! tokio::spawn(async move {
//! let mut buf = [0; 1024];
//!
//! // In a loop, read data from the socket and write the data back.
//! loop {
//! let n = match socket.read(&mut buf).await {
//! // socket closed
//! Ok(0) => return,
//! Ok(n) => n,
//! Err(e) => {
//! println!("failed to read from socket; err = {:?}", e);
//! return;
//! }
//! };
//!
//! // Write the data back
//! if let Err(e) = socket.write_all(&buf[0..n]).await {
//! println!("failed to write to socket; err = {:?}", e);
//! return;
//! }
//! }
//! });
//! }
//! }
//! # }
//! ```
//!
//! From within the context of the runtime, additional tasks are spawned using
//! the [`tokio::spawn`] function. Futures spawned using this function will be
//! executed on the same thread pool used by the [`Runtime`].
//!
//! A [`Runtime`] instance can also be used directly.
//!
//! ```no_run
//! # #[cfg(not(target_family = "wasm"))]
//! # {
//! use tokio::net::TcpListener;
//! use tokio::io::{AsyncReadExt, AsyncWriteExt};
//! use tokio::runtime::Runtime;
//!
//! fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Create the runtime
//! let rt = Runtime::new()?;
//!
//! // Spawn the root task
//! rt.block_on(async {
//! let listener = TcpListener::bind("127.0.0.1:8080").await?;
//!
//! loop {
//! let (mut socket, _) = listener.accept().await?;
//!
//! tokio::spawn(async move {
//! let mut buf = [0; 1024];
//!
//! // In a loop, read data from the socket and write the data back.
//! loop {
//! let n = match socket.read(&mut buf).await {
//! // socket closed
//! Ok(0) => return,
//! Ok(n) => n,
//! Err(e) => {
//! println!("failed to read from socket; err = {:?}", e);
//! return;
//! }
//! };
//!
//! // Write the data back
//! if let Err(e) = socket.write_all(&buf[0..n]).await {
//! println!("failed to write to socket; err = {:?}", e);
//! return;
//! }
//! }
//! });
//! }
//! })
//! }
//! # }
//! ```
//!
//! ## Runtime Configurations
//!
//! Tokio provides multiple task scheduling strategies, suitable for different
//! applications. The [runtime builder] or `#[tokio::main]` attribute may be
//! used to select which scheduler to use.
//!
//! #### Multi-Thread Scheduler
//!
//! The multi-thread scheduler executes futures on a _thread pool_, using a
//! work-stealing strategy. By default, it will start a worker thread for each
//! CPU core available on the system. This tends to be the ideal configuration
//! for most applications. The multi-thread scheduler requires the `rt-multi-thread`
//! feature flag, and is selected by default:
//! ```
//! # #[cfg(not(target_family = "wasm"))]
//! # {
//! use tokio::runtime;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let threaded_rt = runtime::Runtime::new()?;
//! # Ok(()) }
//! # }
//! ```
//!
//! Most applications should use the multi-thread scheduler, except in some
//! niche use-cases, such as when running only a single thread is required.
//!
//! #### Current-Thread Scheduler
//!
//! The current-thread scheduler provides a _single-threaded_ future executor.
//! All tasks will be created and executed on the current thread. This requires
//! the `rt` feature flag.
//! ```
//! use tokio::runtime;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let rt = runtime::Builder::new_current_thread()
//! .build()?;
//! # Ok(()) }
//! ```
//!
//! #### Resource drivers
//!
//! When configuring a runtime by hand, no resource drivers are enabled by
//! default. In this case, attempting to use networking types or time types will
//! fail. In order to enable these types, the resource drivers must be enabled.
//! This is done with [`Builder::enable_io`] and [`Builder::enable_time`]. As a
//! shorthand, [`Builder::enable_all`] enables both resource drivers.
//!
//! ## Lifetime of spawned threads
//!
//! The runtime may spawn threads depending on its configuration and usage. The
//! multi-thread scheduler spawns threads to schedule tasks and for `spawn_blocking`
//! calls.
//!
//! While the `Runtime` is active, threads may shut down after periods of being
//! idle. Once `Runtime` is dropped, all runtime threads have usually been
//! terminated, but in the presence of unstoppable spawned work are not
//! guaranteed to have been terminated. See the
//! [struct level documentation](Runtime#shutdown) for more details.
//!
//! [tasks]: crate::task
//! [`Runtime`]: Runtime
//! [`tokio::spawn`]: crate::spawn
//! [`tokio::main`]: ../attr.main.html
//! [runtime builder]: crate::runtime::Builder
//! [`Runtime::new`]: crate::runtime::Runtime::new
//! [`Builder::enable_io`]: crate::runtime::Builder::enable_io
//! [`Builder::enable_time`]: crate::runtime::Builder::enable_time
//! [`Builder::enable_all`]: crate::runtime::Builder::enable_all
//!
//! # Detailed runtime behavior
//!
//! This section gives more details into how the Tokio runtime will schedule
//! tasks for execution.
//!
//! At its most basic level, a runtime has a collection of tasks that need to be
//! scheduled. It will repeatedly remove a task from that collection and
//! schedule it (by calling [`poll`]). When the collection is empty, the thread
//! will go to sleep until a task is added to the collection.
//!
//! However, the above is not sufficient to guarantee a well-behaved runtime.
//! For example, the runtime might have a single task that is always ready to be
//! scheduled, and schedule that task every time. This is a problem because it
//! starves other tasks by not scheduling them. To solve this, Tokio provides
//! the following fairness guarantee:
//!
//! > If the total number of tasks does not grow without bound, and no task is
//! > [blocking the thread], then it is guaranteed that tasks are scheduled
//! > fairly.
//!
//! Or, more formally:
//!
//! > Under the following two assumptions:
//! >
//! > * There is some number `MAX_TASKS` such that the total number of tasks on
//! > the runtime at any specific point in time never exceeds `MAX_TASKS`.
//! > * There is some number `MAX_SCHEDULE` such that calling [`poll`] on any
//! > task spawned on the runtime returns within `MAX_SCHEDULE` time units.
//! >
//! > Then, there is some number `MAX_DELAY` such that when a task is woken, it
//! > will be scheduled by the runtime within `MAX_DELAY` time units.
//!
//! (Here, `MAX_TASKS` and `MAX_SCHEDULE` can be any number and the user of
//! the runtime may choose them. The `MAX_DELAY` number is controlled by the
//! runtime, and depends on the value of `MAX_TASKS` and `MAX_SCHEDULE`.)
//!
//! Other than the above fairness guarantee, there is no guarantee about the
//! order in which tasks are scheduled. There is also no guarantee that the
//! runtime is equally fair to all tasks. For example, if the runtime has two
//! tasks A and B that are both ready, then the runtime may schedule A five
//! times before it schedules B. This is the case even if A yields using
//! [`yield_now`]. All that is guaranteed is that it will schedule B eventually.
//!
//! Normally, tasks are scheduled only if they have been woken by calling
//! [`wake`] on their waker. However, this is not guaranteed, and Tokio may
//! schedule tasks that have not been woken under some circumstances. This is
//! called a spurious wakeup.
//!
//! ## IO and timers
//!
//! Beyond just scheduling tasks, the runtime must also manage IO resources and
//! timers. It does this by periodically checking whether there are any IO
//! resources or timers that are ready, and waking the relevant task so that
//! it will be scheduled.
//!
//! These checks are performed periodically between scheduling tasks. Under the
//! same assumptions as the previous fairness guarantee, Tokio guarantees that
//! it will wake tasks with an IO or timer event within some maximum number of
//! time units.
//!
//! ## Current thread runtime (behavior at the time of writing)
//!
//! This section describes how the [current thread runtime] behaves today. This
//! behavior may change in future versions of Tokio.
//!
//! The current thread runtime maintains two FIFO queues of tasks that are ready
//! to be scheduled: the global queue and the local queue. The runtime will prefer
//! to choose the next task to schedule from the local queue, and will only pick a
//! task from the global queue if the local queue is empty, or if it has picked
//! a task from the local queue 31 times in a row. The number 31 can be
//! changed using the [`global_queue_interval`] setting.
//!
//! The runtime will check for new IO or timer events whenever there are no
//! tasks ready to be scheduled, or when it has scheduled 61 tasks in a row. The
//! number 61 may be changed using the [`event_interval`] setting.
//!
//! When a task is woken from within a task running on the runtime, then the
//! woken task is added directly to the local queue. Otherwise, the task is
//! added to the global queue. The current thread runtime does not use [the lifo
//! slot optimization].
//!
//! ## Multi threaded runtime (behavior at the time of writing)
//!
//! This section describes how the [multi thread runtime] behaves today. This
//! behavior may change in future versions of Tokio.
//!
//! A multi thread runtime has a fixed number of worker threads, which are all
//! created on startup. The multi thread runtime maintains one global queue, and
//! a local queue for each worker thread. The local queue of a worker thread can
//! fit at most 256 tasks. If more than 256 tasks are added to the local queue,
//! then half of them are moved to the global queue to make space.
//!
//! The runtime will prefer to choose the next task to schedule from the local
//! queue, and will only pick a task from the global queue if the local queue is
//! empty, or if it has picked a task from the local queue
//! [`global_queue_interval`] times in a row. If the value of
//! [`global_queue_interval`] is not explicitly set using the runtime builder,
//! then the runtime will dynamically compute it using a heuristic that targets
//! 10ms intervals between each check of the global queue (based on the
//! [`worker_mean_poll_time`] metric).
//!
//! If both the local queue and global queue is empty, then the worker thread
//! will attempt to steal tasks from the local queue of another worker thread.
//! Stealing is done by moving half of the tasks in one local queue to another
//! local queue.
//!
//! The runtime will check for new IO or timer events whenever there are no
//! tasks ready to be scheduled, or when it has scheduled 61 tasks in a row. The
//! number 61 may be changed using the [`event_interval`] setting.
//!
//! The multi thread runtime uses [the lifo slot optimization]: Whenever a task
//! wakes up another task, the other task is added to the worker thread's lifo
//! slot instead of being added to a queue. If there was already a task in the
//! lifo slot when this happened, then the lifo slot is replaced, and the task
//! that used to be in the lifo slot is placed in the thread's local queue.
//! When the runtime finishes scheduling a task, it will schedule the task in
//! the lifo slot immediately, if any. When the lifo slot is used, the [coop
//! budget] is not reset. Furthermore, if a worker thread uses the lifo slot
//! three times in a row, it is temporarily disabled until the worker thread has
//! scheduled a task that didn't come from the lifo slot. The lifo slot can be
//! disabled using the [`disable_lifo_slot`] setting. The lifo slot is separate
//! from the local queue, so other worker threads cannot steal the task in the
//! lifo slot.
//!
//! When a task is woken from a thread that is not a worker thread, then the
//! task is placed in the global queue.
//!
//! [`poll`]: std::future::Future::poll
//! [`wake`]: std::task::Waker::wake
//! [`yield_now`]: crate::task::yield_now
//! [blocking the thread]: https://ryhl.io/blog/async-what-is-blocking/
//! [current thread runtime]: crate::runtime::Builder::new_current_thread
//! [multi thread runtime]: crate::runtime::Builder::new_multi_thread
//! [`global_queue_interval`]: crate::runtime::Builder::global_queue_interval
//! [`event_interval`]: crate::runtime::Builder::event_interval
//! [`disable_lifo_slot`]: crate::runtime::Builder::disable_lifo_slot
//! [the lifo slot optimization]: crate::runtime::Builder::disable_lifo_slot
//! [coop budget]: crate::task::coop#cooperative-scheduling
//! [`worker_mean_poll_time`]: crate::runtime::RuntimeMetrics::worker_mean_poll_time
// At the top due to macros
#[cfg(test)]
#[cfg(not(target_family = "wasm"))]
#[macro_use]
mod tests;
pub(crate) mod context;
pub(crate) mod park;
pub(crate) mod driver;
pub(crate) mod scheduler;
cfg_io_driver_impl! {
pub(crate) mod io;
}
cfg_process_driver! {
mod process;
}
/// Selects which timer implementation a runtime uses.
// `dead_code` is allowed because the type is referenced only by time-related
// code when the `time` feature is off.
#[cfg_attr(not(feature = "time"), allow(dead_code))]
#[derive(Debug, Copy, Clone, PartialEq)]
pub(crate) enum TimerFlavor {
    /// The default timer driver (`runtime::time`).
    Traditional,
    /// The experimental alternative timer (`runtime::time_alt`), available
    /// only on unstable multi-thread builds.
    #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
    Alternative,
}
cfg_time! {
    pub(crate) mod time;

    #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
    pub(crate) mod time_alt;

    use std::task::{Context, Poll};
    use std::pin::Pin;

    /// Runtime-internal timer entry.
    ///
    /// Wraps one of the two timer implementations and dispatches every call
    /// to whichever variant matches the owning runtime's [`TimerFlavor`].
    #[derive(Debug)]
    pub(crate) enum Timer {
        /// Entry backed by the default timer driver (`time::TimerEntry`).
        Traditional(time::TimerEntry),
        /// Entry backed by the experimental alternative timer.
        #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
        Alternative(time_alt::Timer),
    }

    impl Timer {
        /// Creates a timer entry for `deadline`, selecting the variant that
        /// matches the timer flavor reported by `handle`.
        #[track_caller]
        pub(crate) fn new(
            handle: crate::runtime::scheduler::Handle,
            deadline: crate::time::Instant,
        ) -> Self {
            match handle.timer_flavor() {
                crate::runtime::TimerFlavor::Traditional => {
                    Timer::Traditional(time::TimerEntry::new(handle, deadline))
                }
                #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
                crate::runtime::TimerFlavor::Alternative => {
                    Timer::Alternative(time_alt::Timer::new(handle, deadline))
                }
            }
        }

        /// Returns the deadline reported by the inner entry.
        pub(crate) fn deadline(&self) -> crate::time::Instant {
            match self {
                Timer::Traditional(entry) => entry.deadline(),
                #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
                Timer::Alternative(entry) => entry.deadline(),
            }
        }

        /// Returns whether the inner entry reports itself as elapsed.
        pub(crate) fn is_elapsed(&self) -> bool {
            match self {
                Timer::Traditional(entry) => entry.is_elapsed(),
                #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
                Timer::Alternative(entry) => entry.is_elapsed(),
            }
        }

        /// Reports which timer implementation backs this entry.
        pub(crate) fn flavor(self: Pin<&Self>) -> TimerFlavor {
            match self.get_ref() {
                Timer::Traditional(_) => TimerFlavor::Traditional,
                #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
                Timer::Alternative(_) => TimerFlavor::Alternative,
            }
        }

        /// Forwards `new_time` and `reregister` to the traditional entry's
        /// `reset`.
        ///
        /// # Panics
        ///
        /// Panics for the `Alternative` flavor, which does not support
        /// resetting yet.
        pub(crate) fn reset(
            self: Pin<&mut Self>,
            new_time: crate::time::Instant,
            reregister: bool
        ) {
            // Safety: we never move the inner entries.
            let this = unsafe { self.get_unchecked_mut() };
            match this {
                Timer::Traditional(entry) => {
                    // Safety: we never move the inner entries.
                    unsafe { Pin::new_unchecked(entry).reset(new_time, reregister); }
                }
                #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
                Timer::Alternative(_) => panic!("not implemented yet"),
            }
        }

        /// Polls the inner entry for completion of the timer.
        pub(crate) fn poll_elapsed(
            self: Pin<&mut Self>,
            cx: &mut Context<'_>,
        ) -> Poll<Result<(), crate::time::error::Error>> {
            // Safety: we never move the inner entries.
            let this = unsafe { self.get_unchecked_mut() };
            match this {
                Timer::Traditional(entry) => {
                    // Safety: we never move the inner entries.
                    unsafe { Pin::new_unchecked(entry).poll_elapsed(cx) }
                }
                #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
                Timer::Alternative(entry) => {
                    // Safety: we never move the inner entries.
                    // The alternative timer's poll yields `()`; wrap it in
                    // `Ok` to match the fallible return type.
                    unsafe { Pin::new_unchecked(entry).poll_elapsed(cx).map(Ok) }
                }
            }
        }

        /// Returns the scheduler handle held by the `Alternative` entry.
        /// Must never be called on a `Traditional` entry.
        #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
        pub(crate) fn scheduler_handle(&self) -> &crate::runtime::scheduler::Handle {
            match self {
                Timer::Traditional(_) => unreachable!("we should not call this on Traditional Timer"),
                #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
                Timer::Alternative(entry) => entry.scheduler_handle(),
            }
        }

        /// Returns the time driver handle of the inner entry (tracing builds only).
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        pub(crate) fn driver(self: Pin<&Self>) -> &crate::runtime::time::Handle {
            match self.get_ref() {
                Timer::Traditional(entry) => entry.driver(),
                #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
                Timer::Alternative(entry) => entry.driver(),
            }
        }

        /// Returns the clock used by the inner entry (tracing builds only).
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        pub(crate) fn clock(self: Pin<&Self>) -> &crate::time::Clock {
            match self.get_ref() {
                Timer::Traditional(entry) => entry.clock(),
                #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
                Timer::Alternative(entry) => entry.clock(),
            }
        }
    }
}
cfg_signal_internal_and_unix! {
pub(crate) mod signal;
}
cfg_rt! {
pub(crate) mod task;
mod config;
use config::Config;
mod blocking;
#[cfg_attr(target_os = "wasi", allow(unused_imports))]
pub(crate) use blocking::spawn_blocking;
cfg_trace! {
pub(crate) use blocking::Mandatory;
}
cfg_fs! {
pub(crate) use blocking::spawn_mandatory_blocking;
}
mod builder;
pub use self::builder::Builder;
cfg_unstable! {
pub use self::builder::UnhandledPanic;
pub use crate::util::rand::RngSeed;
mod local_runtime;
pub use local_runtime::{LocalRuntime, LocalOptions};
}
cfg_taskdump! {
pub mod dump;
pub use dump::Dump;
}
mod task_hooks;
pub(crate) use task_hooks::{TaskHooks, TaskCallback};
cfg_unstable! {
pub use task_hooks::TaskMeta;
}
#[cfg(not(tokio_unstable))]
pub(crate) use task_hooks::TaskMeta;
mod handle;
pub use handle::{EnterGuard, Handle, TryCurrentError};
mod runtime;
pub use runtime::{Runtime, RuntimeFlavor};
mod id;
pub use id::Id;
/// Boundary value to prevent stack overflow caused by a large-sized
/// Future being placed in the stack.
pub(crate) const BOX_FUTURE_THRESHOLD: usize = if cfg!(debug_assertions) {
2048
} else {
16384
};
mod thread_id;
pub(crate) use thread_id::ThreadId;
pub(crate) mod metrics;
pub use metrics::RuntimeMetrics;
cfg_unstable_metrics! {
pub use metrics::{HistogramScale, HistogramConfiguration, LogHistogram, LogHistogramBuilder, InvalidHistogramConfiguration} ;
cfg_net! {
pub(crate) use metrics::IoDriverMetrics;
}
}
pub(crate) use metrics::{MetricsBatch, SchedulerMetrics, WorkerMetrics, HistogramBuilder};
/// After thread starts / before thread stops
type Callback = std::sync::Arc<dyn Fn() + Send + Sync>;
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/context.rs | tokio/src/runtime/context.rs | use crate::loom::thread::AccessError;
use crate::task::coop;
use std::cell::Cell;
#[cfg(any(feature = "rt", feature = "macros"))]
use crate::util::rand::FastRand;
cfg_rt! {
mod blocking;
pub(crate) use blocking::{disallow_block_in_place, try_enter_blocking_region, BlockingRegionGuard};
mod current;
pub(crate) use current::{with_current, try_set_current, SetCurrentGuard};
mod runtime;
pub(crate) use runtime::{EnterRuntime, enter_runtime};
mod scoped;
use scoped::Scoped;
use crate::runtime::{scheduler, task::Id};
use std::task::Waker;
cfg_taskdump! {
use crate::runtime::task::trace;
}
}
cfg_rt_multi_thread! {
mod runtime_mt;
pub(crate) use runtime_mt::{current_enter_context, exit_runtime};
}
struct Context {
/// Uniquely identifies the current thread
#[cfg(feature = "rt")]
thread_id: Cell<Option<ThreadId>>,
/// Handle to the runtime scheduler running on the current thread.
#[cfg(feature = "rt")]
current: current::HandleCell,
/// Handle to the scheduler's internal "context"
#[cfg(feature = "rt")]
scheduler: Scoped<scheduler::Context>,
#[cfg(feature = "rt")]
current_task_id: Cell<Option<Id>>,
/// Tracks if the current thread is currently driving a runtime.
/// Note, that if this is set to "entered", the current scheduler
/// handle may not reference the runtime currently executing. This
/// is because other runtime handles may be set to current from
/// within a runtime.
#[cfg(feature = "rt")]
runtime: Cell<EnterRuntime>,
#[cfg(any(feature = "rt", feature = "macros"))]
rng: Cell<Option<FastRand>>,
/// Tracks the amount of "work" a task may still do before yielding back to
/// the scheduler
budget: Cell<coop::Budget>,
#[cfg(all(
tokio_unstable,
feature = "taskdump",
feature = "rt",
target_os = "linux",
any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64")
))]
trace: trace::Context,
}
tokio_thread_local! {
static CONTEXT: Context = const {
Context {
#[cfg(feature = "rt")]
thread_id: Cell::new(None),
// Tracks the current runtime handle to use when spawning,
// accessing drivers, etc...
#[cfg(feature = "rt")]
current: current::HandleCell::new(),
// Tracks the current scheduler internal context
#[cfg(feature = "rt")]
scheduler: Scoped::new(),
#[cfg(feature = "rt")]
current_task_id: Cell::new(None),
// Tracks if the current thread is currently driving a runtime.
// Note, that if this is set to "entered", the current scheduler
// handle may not reference the runtime currently executing. This
// is because other runtime handles may be set to current from
// within a runtime.
#[cfg(feature = "rt")]
runtime: Cell::new(EnterRuntime::NotEntered),
#[cfg(any(feature = "rt", feature = "macros"))]
rng: Cell::new(None),
budget: Cell::new(coop::Budget::unconstrained()),
#[cfg(all(
tokio_unstable,
feature = "taskdump",
feature = "rt",
target_os = "linux",
any(
target_arch = "aarch64",
target_arch = "x86",
target_arch = "x86_64"
)
))]
trace: trace::Context::new(),
}
}
}
#[cfg(any(feature = "macros", all(feature = "sync", feature = "rt")))]
pub(crate) fn thread_rng_n(n: u32) -> u32 {
CONTEXT.with(|ctx| {
let mut rng = ctx.rng.get().unwrap_or_else(FastRand::new);
let ret = rng.fastrand_n(n);
ctx.rng.set(Some(rng));
ret
})
}
pub(crate) fn budget<R>(f: impl FnOnce(&Cell<coop::Budget>) -> R) -> Result<R, AccessError> {
CONTEXT.try_with(|ctx| f(&ctx.budget))
}
cfg_rt! {
use crate::runtime::ThreadId;
pub(crate) fn thread_id() -> Result<ThreadId, AccessError> {
CONTEXT.try_with(|ctx| {
match ctx.thread_id.get() {
Some(id) => id,
None => {
let id = ThreadId::next();
ctx.thread_id.set(Some(id));
id
}
}
})
}
pub(crate) fn set_current_task_id(id: Option<Id>) -> Option<Id> {
CONTEXT.try_with(|ctx| ctx.current_task_id.replace(id)).unwrap_or(None)
}
pub(crate) fn current_task_id() -> Option<Id> {
CONTEXT.try_with(|ctx| ctx.current_task_id.get()).unwrap_or(None)
}
#[track_caller]
pub(crate) fn defer(waker: &Waker) {
with_scheduler(|maybe_scheduler| {
if let Some(scheduler) = maybe_scheduler {
scheduler.defer(waker);
} else {
// Called from outside of the runtime, immediately wake the
// task.
waker.wake_by_ref();
}
});
}
pub(super) fn set_scheduler<R>(v: &scheduler::Context, f: impl FnOnce() -> R) -> R {
CONTEXT.with(|c| c.scheduler.set(v, f))
}
#[track_caller]
pub(super) fn with_scheduler<R>(f: impl FnOnce(Option<&scheduler::Context>) -> R) -> R {
let mut f = Some(f);
CONTEXT.try_with(|c| {
let f = f.take().unwrap();
if matches!(c.runtime.get(), EnterRuntime::Entered { .. }) {
c.scheduler.with(f)
} else {
f(None)
}
})
.unwrap_or_else(|_| (f.take().unwrap())(None))
}
cfg_taskdump! {
/// SAFETY: Callers of this function must ensure that trace frames always
/// form a valid linked list.
pub(crate) unsafe fn with_trace<R>(f: impl FnOnce(&trace::Context) -> R) -> Option<R> {
CONTEXT.try_with(|c| f(&c.trace)).ok()
}
}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/driver.rs | tokio/src/runtime/driver.rs | //! Abstracts out the entire chain of runtime sub-drivers into common types.
// Eventually, this file will see significant refactoring / cleanup. For now, we
// don't need to worry much about dead code with certain feature permutations.
#![cfg_attr(
any(not(all(tokio_unstable, feature = "full")), target_family = "wasm"),
allow(dead_code)
)]
use crate::runtime::park::{ParkThread, UnparkThread};
use std::io;
use std::time::Duration;
#[derive(Debug)]
pub(crate) struct Driver {
inner: TimeDriver,
}
#[derive(Debug)]
pub(crate) struct Handle {
/// IO driver handle
pub(crate) io: IoHandle,
/// Signal driver handle
#[cfg_attr(any(not(unix), loom), allow(dead_code))]
pub(crate) signal: SignalHandle,
/// Time driver handle
pub(crate) time: TimeHandle,
/// Source of `Instant::now()`
#[cfg_attr(not(all(feature = "time", feature = "test-util")), allow(dead_code))]
pub(crate) clock: Clock,
}
pub(crate) struct Cfg {
pub(crate) enable_io: bool,
pub(crate) enable_time: bool,
pub(crate) enable_pause_time: bool,
pub(crate) start_paused: bool,
pub(crate) nevents: usize,
pub(crate) timer_flavor: crate::runtime::TimerFlavor,
}
impl Driver {
pub(crate) fn new(cfg: Cfg) -> io::Result<(Self, Handle)> {
let (io_stack, io_handle, signal_handle) = create_io_stack(cfg.enable_io, cfg.nevents)?;
let clock = create_clock(cfg.enable_pause_time, cfg.start_paused);
let (time_driver, time_handle) =
create_time_driver(cfg.enable_time, cfg.timer_flavor, io_stack, &clock);
Ok((
Self { inner: time_driver },
Handle {
io: io_handle,
signal: signal_handle,
time: time_handle,
clock,
},
))
}
pub(crate) fn park(&mut self, handle: &Handle) {
self.inner.park(handle);
}
pub(crate) fn park_timeout(&mut self, handle: &Handle, duration: Duration) {
self.inner.park_timeout(handle, duration);
}
pub(crate) fn shutdown(&mut self, handle: &Handle) {
self.inner.shutdown(handle);
}
}
impl Handle {
pub(crate) fn unpark(&self) {
#[cfg(feature = "time")]
if let Some(handle) = &self.time {
handle.unpark();
}
self.io.unpark();
}
cfg_io_driver! {
#[track_caller]
pub(crate) fn io(&self) -> &crate::runtime::io::Handle {
self.io
.as_ref()
.expect("A Tokio 1.x context was found, but IO is disabled. Call `enable_io` on the runtime builder to enable IO.")
}
}
cfg_signal_internal_and_unix! {
#[track_caller]
pub(crate) fn signal(&self) -> &crate::runtime::signal::Handle {
self.signal
.as_ref()
.expect("there is no signal driver running, must be called from the context of Tokio runtime")
}
}
cfg_time! {
/// Returns a reference to the time driver handle.
///
/// Panics if no time driver is present.
#[track_caller]
pub(crate) fn time(&self) -> &crate::runtime::time::Handle {
self.time
.as_ref()
.expect("A Tokio 1.x context was found, but timers are disabled. Call `enable_time` on the runtime builder to enable timers.")
}
#[cfg(tokio_unstable)]
pub(crate) fn with_time<F, R>(&self, f: F) -> R
where
F: FnOnce(Option<&crate::runtime::time::Handle>) -> R,
{
f(self.time.as_ref())
}
pub(crate) fn clock(&self) -> &Clock {
&self.clock
}
}
}
// ===== io driver =====
cfg_io_driver! {
pub(crate) type IoDriver = crate::runtime::io::Driver;
#[derive(Debug)]
pub(crate) enum IoStack {
Enabled(ProcessDriver),
Disabled(ParkThread),
}
#[derive(Debug)]
pub(crate) enum IoHandle {
Enabled(crate::runtime::io::Handle),
Disabled(UnparkThread),
}
fn create_io_stack(enabled: bool, nevents: usize) -> io::Result<(IoStack, IoHandle, SignalHandle)> {
#[cfg(loom)]
assert!(!enabled);
let ret = if enabled {
let (io_driver, io_handle) = crate::runtime::io::Driver::new(nevents)?;
let (signal_driver, signal_handle) = create_signal_driver(io_driver, &io_handle)?;
let process_driver = create_process_driver(signal_driver);
(IoStack::Enabled(process_driver), IoHandle::Enabled(io_handle), signal_handle)
} else {
let park_thread = ParkThread::new();
let unpark_thread = park_thread.unpark();
(IoStack::Disabled(park_thread), IoHandle::Disabled(unpark_thread), Default::default())
};
Ok(ret)
}
impl IoStack {
pub(crate) fn park(&mut self, handle: &Handle) {
match self {
IoStack::Enabled(v) => v.park(handle),
IoStack::Disabled(v) => v.park(),
}
}
pub(crate) fn park_timeout(&mut self, handle: &Handle, duration: Duration) {
match self {
IoStack::Enabled(v) => v.park_timeout(handle, duration),
IoStack::Disabled(v) => v.park_timeout(duration),
}
}
pub(crate) fn shutdown(&mut self, handle: &Handle) {
match self {
IoStack::Enabled(v) => v.shutdown(handle),
IoStack::Disabled(v) => v.shutdown(),
}
}
}
impl IoHandle {
pub(crate) fn unpark(&self) {
match self {
IoHandle::Enabled(handle) => handle.unpark(),
IoHandle::Disabled(handle) => handle.unpark(),
}
}
pub(crate) fn as_ref(&self) -> Option<&crate::runtime::io::Handle> {
match self {
IoHandle::Enabled(v) => Some(v),
IoHandle::Disabled(..) => None,
}
}
}
}
cfg_not_io_driver! {
pub(crate) type IoHandle = UnparkThread;
#[derive(Debug)]
pub(crate) struct IoStack(ParkThread);
fn create_io_stack(_enabled: bool, _nevents: usize) -> io::Result<(IoStack, IoHandle, SignalHandle)> {
let park_thread = ParkThread::new();
let unpark_thread = park_thread.unpark();
Ok((IoStack(park_thread), unpark_thread, Default::default()))
}
impl IoStack {
pub(crate) fn park(&mut self, _handle: &Handle) {
self.0.park();
}
pub(crate) fn park_timeout(&mut self, _handle: &Handle, duration: Duration) {
self.0.park_timeout(duration);
}
pub(crate) fn shutdown(&mut self, _handle: &Handle) {
self.0.shutdown();
}
/// This is not a "real" driver, so it is not considered enabled.
pub(crate) fn is_enabled(&self) -> bool {
false
}
}
}
// ===== signal driver =====
cfg_signal_internal_and_unix! {
type SignalDriver = crate::runtime::signal::Driver;
pub(crate) type SignalHandle = Option<crate::runtime::signal::Handle>;
fn create_signal_driver(io_driver: IoDriver, io_handle: &crate::runtime::io::Handle) -> io::Result<(SignalDriver, SignalHandle)> {
let driver = crate::runtime::signal::Driver::new(io_driver, io_handle)?;
let handle = driver.handle();
Ok((driver, Some(handle)))
}
}
cfg_not_signal_internal! {
pub(crate) type SignalHandle = ();
cfg_io_driver! {
type SignalDriver = IoDriver;
fn create_signal_driver(io_driver: IoDriver, _io_handle: &crate::runtime::io::Handle) -> io::Result<(SignalDriver, SignalHandle)> {
Ok((io_driver, ()))
}
}
}
// ===== process driver =====
cfg_process_driver! {
type ProcessDriver = crate::runtime::process::Driver;
fn create_process_driver(signal_driver: SignalDriver) -> ProcessDriver {
ProcessDriver::new(signal_driver)
}
}
cfg_not_process_driver! {
cfg_io_driver! {
type ProcessDriver = SignalDriver;
fn create_process_driver(signal_driver: SignalDriver) -> ProcessDriver {
signal_driver
}
}
}
// ===== time driver =====
cfg_time! {
#[derive(Debug)]
pub(crate) enum TimeDriver {
Enabled {
driver: crate::runtime::time::Driver,
},
EnabledAlt(IoStack),
Disabled(IoStack),
}
pub(crate) type Clock = crate::time::Clock;
pub(crate) type TimeHandle = Option<crate::runtime::time::Handle>;
fn create_clock(enable_pausing: bool, start_paused: bool) -> Clock {
crate::time::Clock::new(enable_pausing, start_paused)
}
fn create_time_driver(
enable: bool,
timer_flavor: crate::runtime::TimerFlavor,
io_stack: IoStack,
clock: &Clock,
) -> (TimeDriver, TimeHandle) {
if enable {
match timer_flavor {
crate::runtime::TimerFlavor::Traditional => {
let (driver, handle) = crate::runtime::time::Driver::new(io_stack, clock);
(TimeDriver::Enabled { driver }, Some(handle))
}
#[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
crate::runtime::TimerFlavor::Alternative => {
(TimeDriver::EnabledAlt(io_stack), Some(crate::runtime::time::Driver::new_alt(clock)))
}
}
} else {
(TimeDriver::Disabled(io_stack), None)
}
}
impl TimeDriver {
pub(crate) fn park(&mut self, handle: &Handle) {
match self {
TimeDriver::Enabled { driver, .. } => driver.park(handle),
TimeDriver::EnabledAlt(v) => v.park(handle),
TimeDriver::Disabled(v) => v.park(handle),
}
}
pub(crate) fn park_timeout(&mut self, handle: &Handle, duration: Duration) {
match self {
TimeDriver::Enabled { driver } => driver.park_timeout(handle, duration),
TimeDriver::EnabledAlt(v) => v.park_timeout(handle, duration),
TimeDriver::Disabled(v) => v.park_timeout(handle, duration),
}
}
pub(crate) fn shutdown(&mut self, handle: &Handle) {
match self {
TimeDriver::Enabled { driver } => driver.shutdown(handle),
TimeDriver::EnabledAlt(v) => v.shutdown(handle),
TimeDriver::Disabled(v) => v.shutdown(handle),
}
}
}
}
cfg_not_time! {
type TimeDriver = IoStack;
pub(crate) type Clock = ();
pub(crate) type TimeHandle = ();
fn create_clock(_enable_pausing: bool, _start_paused: bool) -> Clock {
()
}
fn create_time_driver(
_enable: bool,
_timer_flavor: crate::runtime::TimerFlavor,
io_stack: IoStack,
_clock: &Clock,
) -> (TimeDriver, TimeHandle) {
(io_stack, ())
}
}
cfg_io_uring! {
pub(crate) mod op;
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/handle.rs | tokio/src/runtime/handle.rs | use crate::runtime;
use crate::runtime::{context, scheduler, RuntimeFlavor, RuntimeMetrics};
/// Handle to the runtime.
///
/// The handle is internally reference-counted and can be freely cloned. A handle can be
/// obtained using the [`Runtime::handle`] method.
///
/// [`Runtime::handle`]: crate::runtime::Runtime::handle()
#[derive(Debug, Clone)]
// When the `rt` feature is *not* enabled, this type is still defined, but not
// included in the public API.
pub struct Handle {
pub(crate) inner: scheduler::Handle,
}
use crate::runtime::task::JoinHandle;
use crate::runtime::BOX_FUTURE_THRESHOLD;
use crate::util::error::{CONTEXT_MISSING_ERROR, THREAD_LOCAL_DESTROYED_ERROR};
use crate::util::trace::SpawnMeta;
use std::future::Future;
use std::marker::PhantomData;
use std::{error, fmt, mem};
/// Runtime context guard.
///
/// Returned by [`Runtime::enter`] and [`Handle::enter`], the context guard exits
/// the runtime context on drop.
///
/// [`Runtime::enter`]: fn@crate::runtime::Runtime::enter
#[derive(Debug)]
#[must_use = "Creating and dropping a guard does nothing"]
pub struct EnterGuard<'a> {
_guard: context::SetCurrentGuard,
_handle_lifetime: PhantomData<&'a Handle>,
}
impl Handle {
/// Enters the runtime context. This allows you to construct types that must
/// have an executor available on creation such as [`Sleep`] or
/// [`TcpStream`]. It will also allow you to call methods such as
/// [`tokio::spawn`] and [`Handle::current`] without panicking.
///
/// # Panics
///
/// When calling `Handle::enter` multiple times, the returned guards
/// **must** be dropped in the reverse order that they were acquired.
/// Failure to do so will result in a panic and possible memory leaks.
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new().unwrap();
///
/// let _guard = rt.enter();
/// tokio::spawn(async {
/// println!("Hello world!");
/// });
/// # }
/// ```
///
/// Do **not** do the following, this shows a scenario that will result in a
/// panic and possible memory leak.
///
/// ```should_panic,ignore-wasm
/// use tokio::runtime::Runtime;
///
/// let rt1 = Runtime::new().unwrap();
/// let rt2 = Runtime::new().unwrap();
///
/// let enter1 = rt1.enter();
/// let enter2 = rt2.enter();
///
/// drop(enter1);
/// drop(enter2);
/// ```
///
/// [`Sleep`]: struct@crate::time::Sleep
/// [`TcpStream`]: struct@crate::net::TcpStream
/// [`tokio::spawn`]: fn@crate::spawn
pub fn enter(&self) -> EnterGuard<'_> {
EnterGuard {
_guard: match context::try_set_current(&self.inner) {
Some(guard) => guard,
None => panic!("{}", crate::util::error::THREAD_LOCAL_DESTROYED_ERROR),
},
_handle_lifetime: PhantomData,
}
}
/// Returns a `Handle` view over the currently running `Runtime`.
///
/// # Panics
///
/// This will panic if called outside the context of a Tokio runtime. That means that you must
/// call this on one of the threads **being run by the runtime**, or from a thread with an active
/// `EnterGuard`. Calling this from within a thread created by `std::thread::spawn` (for example)
/// will cause a panic unless that thread has an active `EnterGuard`.
///
/// # Examples
///
/// This can be used to obtain the handle of the surrounding runtime from an async
/// block or function running on that runtime.
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// # use std::thread;
/// # use tokio::runtime::Runtime;
/// # fn dox() {
/// # let rt = Runtime::new().unwrap();
/// # rt.spawn(async {
/// use tokio::runtime::Handle;
///
/// // Inside an async block or function.
/// let handle = Handle::current();
/// handle.spawn(async {
/// println!("now running in the existing Runtime");
/// });
///
/// # let handle =
/// thread::spawn(move || {
/// // Notice that the handle is created outside of this thread and then moved in
/// handle.spawn(async { /* ... */ });
/// // This next line would cause a panic because we haven't entered the runtime
/// // and created an EnterGuard
/// // let handle2 = Handle::current(); // panic
/// // So we create a guard here with Handle::enter();
/// let _guard = handle.enter();
/// // Now we can call Handle::current();
/// let handle2 = Handle::current();
/// });
/// # handle.join().unwrap();
/// # });
/// # }
/// # }
/// ```
#[track_caller]
pub fn current() -> Self {
Handle {
inner: scheduler::Handle::current(),
}
}
/// Returns a Handle view over the currently running Runtime
///
/// Returns an error if no Runtime has been started
///
/// Contrary to `current`, this never panics
pub fn try_current() -> Result<Self, TryCurrentError> {
context::with_current(|inner| Handle {
inner: inner.clone(),
})
}
/// Spawns a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// The provided future will start running in the background immediately
/// when `spawn` is called, even if you don't await the returned
/// `JoinHandle`.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
/// // Get a handle from this runtime
/// let handle = rt.handle();
///
/// // Spawn a future onto the runtime using the handle
/// handle.spawn(async {
/// println!("now running on a worker thread");
/// });
/// # }
/// # }
/// ```
#[track_caller]
pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
where
F: Future + Send + 'static,
F::Output: Send + 'static,
{
let fut_size = mem::size_of::<F>();
if fut_size > BOX_FUTURE_THRESHOLD {
self.spawn_named(Box::pin(future), SpawnMeta::new_unnamed(fut_size))
} else {
self.spawn_named(future, SpawnMeta::new_unnamed(fut_size))
}
}
/// Runs the provided function on an executor dedicated to blocking
/// operations.
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
/// // Get a handle from this runtime
/// let handle = rt.handle();
///
/// // Spawn a blocking function onto the runtime using the handle
/// handle.spawn_blocking(|| {
/// println!("now running on a worker thread");
/// });
/// # }
/// # }
/// ```
#[track_caller]
pub fn spawn_blocking<F, R>(&self, func: F) -> JoinHandle<R>
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
self.inner.blocking_spawner().spawn_blocking(self, func)
}
/// Runs a future to completion on this `Handle`'s associated `Runtime`.
///
/// This runs the given future on the current thread, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers which
/// the future spawns internally will be executed on the runtime.
///
/// When this is used on a `current_thread` runtime, only the
/// [`Runtime::block_on`] method can drive the IO and timer drivers, but the
/// `Handle::block_on` method cannot drive them. This means that, when using
/// this method on a `current_thread` runtime, anything that relies on IO or
/// timers will not work unless there is another thread currently calling
/// [`Runtime::block_on`] on the same runtime.
///
/// # If the runtime has been shut down
///
/// If the `Handle`'s associated `Runtime` has been shut down (through
/// [`Runtime::shutdown_background`], [`Runtime::shutdown_timeout`], or by
/// dropping it) and `Handle::block_on` is used it might return an error or
/// panic. Specifically IO resources will return an error and timers will
/// panic. Runtime independent futures will run as normal.
///
/// # Panics
///
/// This function will panic if any of the following conditions are met:
/// - The provided future panics.
/// - It is called from within an asynchronous context, such as inside
/// [`Runtime::block_on`], `Handle::block_on`, or from a function annotated
/// with [`tokio::main`].
/// - A timer future is executed on a runtime that has been shut down.
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime::Runtime;
///
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
///
/// // Get a handle from this runtime
/// let handle = rt.handle();
///
/// // Execute the future, blocking the current thread until completion
/// handle.block_on(async {
/// println!("hello");
/// });
/// # }
/// ```
///
/// Or using `Handle::current`:
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime::Handle;
///
/// #[tokio::main]
/// async fn main () {
/// let handle = Handle::current();
/// std::thread::spawn(move || {
/// // Using Handle::block_on to run async code in the new thread.
/// handle.block_on(async {
/// println!("hello");
/// });
/// });
/// }
/// # }
/// ```
///
/// `Handle::block_on` may be combined with [`task::block_in_place`] to
/// re-enter the async context of a multi-thread scheduler runtime:
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::task;
/// use tokio::runtime::Handle;
///
/// # async fn docs() {
/// task::block_in_place(move || {
/// Handle::current().block_on(async move {
/// // do something async
/// });
/// });
/// # }
/// # }
/// ```
///
/// [`JoinError`]: struct@crate::task::JoinError
/// [`JoinHandle`]: struct@crate::task::JoinHandle
/// [`Runtime::block_on`]: fn@crate::runtime::Runtime::block_on
/// [`Runtime::shutdown_background`]: fn@crate::runtime::Runtime::shutdown_background
/// [`Runtime::shutdown_timeout`]: fn@crate::runtime::Runtime::shutdown_timeout
/// [`spawn_blocking`]: crate::task::spawn_blocking
/// [`tokio::fs`]: crate::fs
/// [`tokio::net`]: crate::net
/// [`tokio::time`]: crate::time
/// [`tokio::main`]: ../attr.main.html
/// [`task::block_in_place`]: crate::task::block_in_place
#[track_caller]
pub fn block_on<F: Future>(&self, future: F) -> F::Output {
let fut_size = mem::size_of::<F>();
if fut_size > BOX_FUTURE_THRESHOLD {
self.block_on_inner(Box::pin(future), SpawnMeta::new_unnamed(fut_size))
} else {
self.block_on_inner(future, SpawnMeta::new_unnamed(fut_size))
}
}
#[track_caller]
fn block_on_inner<F: Future>(&self, future: F, _meta: SpawnMeta<'_>) -> F::Output {
#[cfg(all(
tokio_unstable,
feature = "taskdump",
feature = "rt",
target_os = "linux",
any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64")
))]
let future = super::task::trace::Trace::root(future);
#[cfg(all(tokio_unstable, feature = "tracing"))]
let future =
crate::util::trace::task(future, "block_on", _meta, super::task::Id::next().as_u64());
// Enter the runtime context. This sets the current driver handles and
// prevents blocking an existing runtime.
context::enter_runtime(&self.inner, true, |blocking| {
blocking.block_on(future).expect("failed to park thread")
})
}
#[track_caller]
pub(crate) fn spawn_named<F>(&self, future: F, meta: SpawnMeta<'_>) -> JoinHandle<F::Output>
where
F: Future + Send + 'static,
F::Output: Send + 'static,
{
let id = crate::runtime::task::Id::next();
#[cfg(all(
tokio_unstable,
feature = "taskdump",
feature = "rt",
target_os = "linux",
any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64")
))]
let future = super::task::trace::Trace::root(future);
#[cfg(all(tokio_unstable, feature = "tracing"))]
let future = crate::util::trace::task(future, "task", meta, id.as_u64());
self.inner.spawn(future, id, meta.spawned_at)
}
#[track_caller]
#[allow(dead_code)]
/// # Safety
///
/// This must only be called in `LocalRuntime` if the runtime has been verified to be owned
/// by the current thread.
pub(crate) unsafe fn spawn_local_named<F>(
&self,
future: F,
meta: SpawnMeta<'_>,
) -> JoinHandle<F::Output>
where
F: Future + 'static,
F::Output: 'static,
{
let id = crate::runtime::task::Id::next();
#[cfg(all(
tokio_unstable,
feature = "taskdump",
feature = "rt",
target_os = "linux",
any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64")
))]
let future = super::task::trace::Trace::root(future);
#[cfg(all(tokio_unstable, feature = "tracing"))]
let future = crate::util::trace::task(future, "task", meta, id.as_u64());
unsafe { self.inner.spawn_local(future, id, meta.spawned_at) }
}
/// Returns the flavor of the current `Runtime`.
///
/// # Examples
///
/// ```
/// use tokio::runtime::{Handle, RuntimeFlavor};
///
/// #[tokio::main(flavor = "current_thread")]
/// async fn main() {
/// assert_eq!(RuntimeFlavor::CurrentThread, Handle::current().runtime_flavor());
/// }
/// ```
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime::{Handle, RuntimeFlavor};
///
/// #[tokio::main(flavor = "multi_thread", worker_threads = 4)]
/// async fn main() {
/// assert_eq!(RuntimeFlavor::MultiThread, Handle::current().runtime_flavor());
/// }
/// # }
/// ```
pub fn runtime_flavor(&self) -> RuntimeFlavor {
match self.inner {
scheduler::Handle::CurrentThread(_) => RuntimeFlavor::CurrentThread,
#[cfg(feature = "rt-multi-thread")]
scheduler::Handle::MultiThread(_) => RuntimeFlavor::MultiThread,
}
}
/// Returns the [`Id`] of the current `Runtime`.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// #[tokio::main(flavor = "current_thread")]
/// async fn main() {
/// println!("Current runtime id: {}", Handle::current().id());
/// }
/// ```
///
/// [`Id`]: struct@crate::runtime::Id
pub fn id(&self) -> runtime::Id {
let owned_id = match &self.inner {
scheduler::Handle::CurrentThread(handle) => handle.owned_id(),
#[cfg(feature = "rt-multi-thread")]
scheduler::Handle::MultiThread(handle) => handle.owned_id(),
};
runtime::Id::new(owned_id)
}
/// Returns a view that lets you get information about how the runtime
/// is performing.
pub fn metrics(&self) -> RuntimeMetrics {
RuntimeMetrics::new(self.clone())
}
}
impl std::panic::UnwindSafe for Handle {}
impl std::panic::RefUnwindSafe for Handle {}
cfg_taskdump! {
impl Handle {
/// Captures a snapshot of the runtime's state.
///
/// If you only want to capture a snapshot of a single future's state, you can use
/// [`Trace::capture`][crate::runtime::dump::Trace].
///
/// This functionality is experimental, and comes with a number of
/// requirements and limitations.
///
/// # Examples
///
/// This can be used to get call traces of each task in the runtime.
/// Calls to `Handle::dump` should usually be enclosed in a
/// [timeout][crate::time::timeout], so that dumping does not escalate a
/// single blocked runtime thread into an entirely blocked runtime.
///
/// ```
/// # use tokio::runtime::Runtime;
/// # fn dox() {
/// # let rt = Runtime::new().unwrap();
/// # rt.spawn(async {
/// use tokio::runtime::Handle;
/// use tokio::time::{timeout, Duration};
///
/// // Inside an async block or function.
/// let handle = Handle::current();
/// if let Ok(dump) = timeout(Duration::from_secs(2), handle.dump()).await {
/// for (i, task) in dump.tasks().iter().enumerate() {
/// let trace = task.trace();
/// println!("TASK {i}:");
/// println!("{trace}\n");
/// }
/// }
/// # });
/// # }
/// ```
///
/// This produces highly detailed traces of tasks; e.g.:
///
/// ```plain
/// TASK 0:
/// ╼ dump::main::{{closure}}::a::{{closure}} at /tokio/examples/dump.rs:18:20
/// └╼ dump::main::{{closure}}::b::{{closure}} at /tokio/examples/dump.rs:23:20
/// └╼ dump::main::{{closure}}::c::{{closure}} at /tokio/examples/dump.rs:28:24
/// └╼ tokio::sync::barrier::Barrier::wait::{{closure}} at /tokio/tokio/src/sync/barrier.rs:129:10
/// └╼ <tokio::util::trace::InstrumentedAsyncOp<F> as core::future::future::Future>::poll at /tokio/tokio/src/util/trace.rs:77:46
/// └╼ tokio::sync::barrier::Barrier::wait_internal::{{closure}} at /tokio/tokio/src/sync/barrier.rs:183:36
/// └╼ tokio::sync::watch::Receiver<T>::changed::{{closure}} at /tokio/tokio/src/sync/watch.rs:604:55
/// └╼ tokio::sync::watch::changed_impl::{{closure}} at /tokio/tokio/src/sync/watch.rs:755:18
/// └╼ <tokio::sync::notify::Notified as core::future::future::Future>::poll at /tokio/tokio/src/sync/notify.rs:1103:9
/// └╼ tokio::sync::notify::Notified::poll_notified at /tokio/tokio/src/sync/notify.rs:996:32
/// ```
///
/// # Requirements
///
/// ## Debug Info Must Be Available
///
/// To produce task traces, the application must **not** be compiled
/// with `split debuginfo`. On Linux, including `debuginfo` within the
/// application binary is the (correct) default. You can further ensure
/// this behavior with the following directive in your `Cargo.toml`:
///
/// ```toml
/// [profile.*]
/// split-debuginfo = "off"
/// ```
///
/// ## Unstable Features
///
/// This functionality is **unstable**, and requires both the
/// `--cfg tokio_unstable` and cargo feature `taskdump` to be set.
///
/// You can do this by setting the `RUSTFLAGS` environment variable
/// before invoking `cargo`; e.g.:
/// ```bash
/// RUSTFLAGS="--cfg tokio_unstable cargo run --example dump
/// ```
///
/// Or by [configuring][cargo-config] `rustflags` in
/// `.cargo/config.toml`:
/// ```text
/// [build]
/// rustflags = ["--cfg", "tokio_unstable"]
/// ```
///
/// [cargo-config]:
/// https://doc.rust-lang.org/cargo/reference/config.html
///
/// ## Platform Requirements
///
/// Task dumps are supported on Linux atop `aarch64`, `x86` and `x86_64`.
///
/// ## Current Thread Runtime Requirements
///
/// On the `current_thread` runtime, task dumps may only be requested
/// from *within* the context of the runtime being dumped. Do not, for
/// example, await `Handle::dump()` on a different runtime.
///
/// # Limitations
///
/// ## Performance
///
/// Although enabling the `taskdump` feature imposes virtually no
/// additional runtime overhead, actually calling `Handle::dump` is
/// expensive. The runtime must synchronize and pause its workers, then
/// re-poll every task in a special tracing mode. Avoid requesting dumps
/// often.
///
/// ## Local Executors
///
/// Tasks managed by local executors (e.g., `FuturesUnordered` and
/// [`LocalSet`][crate::task::LocalSet]) may not appear in task dumps.
///
/// ## Non-Termination When Workers Are Blocked
///
/// The future produced by `Handle::dump` may never produce `Ready` if
/// another runtime worker is blocked for more than 250ms. This may
/// occur if a dump is requested during shutdown, or if another runtime
/// worker is infinite looping or synchronously deadlocked. For these
/// reasons, task dumping should usually be paired with an explicit
/// [timeout][crate::time::timeout].
pub async fn dump(&self) -> crate::runtime::Dump {
match &self.inner {
scheduler::Handle::CurrentThread(handle) => handle.dump(),
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
scheduler::Handle::MultiThread(handle) => {
// perform the trace in a separate thread so that the
// trace itself does not appear in the taskdump.
let handle = handle.clone();
spawn_thread(async {
let handle = handle;
handle.dump().await
}).await
},
}
}
/// Produces `true` if the current task is being traced for a dump;
/// otherwise false. This function is only public for integration
/// testing purposes. Do not rely on it.
#[doc(hidden)]
pub fn is_tracing() -> bool {
super::task::trace::Context::is_tracing()
}
}
cfg_rt_multi_thread! {
/// Spawn a new thread and asynchronously await on its result.
async fn spawn_thread<F>(f: F) -> <F as Future>::Output
where
F: Future + Send + 'static,
<F as Future>::Output: Send + 'static
{
let (tx, rx) = crate::sync::oneshot::channel();
crate::loom::thread::spawn(|| {
let rt = crate::runtime::Builder::new_current_thread().build().unwrap();
rt.block_on(async {
let _ = tx.send(f.await);
});
});
rx.await.unwrap()
}
}
}
/// Error returned by `try_current` when no Runtime has been started
#[derive(Debug)]
pub struct TryCurrentError {
kind: TryCurrentErrorKind,
}
impl TryCurrentError {
pub(crate) fn new_no_context() -> Self {
Self {
kind: TryCurrentErrorKind::NoContext,
}
}
pub(crate) fn new_thread_local_destroyed() -> Self {
Self {
kind: TryCurrentErrorKind::ThreadLocalDestroyed,
}
}
/// Returns true if the call failed because there is currently no runtime in
/// the Tokio context.
pub fn is_missing_context(&self) -> bool {
matches!(self.kind, TryCurrentErrorKind::NoContext)
}
/// Returns true if the call failed because the Tokio context thread-local
/// had been destroyed. This can usually only happen if in the destructor of
/// other thread-locals.
pub fn is_thread_local_destroyed(&self) -> bool {
matches!(self.kind, TryCurrentErrorKind::ThreadLocalDestroyed)
}
}
enum TryCurrentErrorKind {
NoContext,
ThreadLocalDestroyed,
}
impl fmt::Debug for TryCurrentErrorKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TryCurrentErrorKind::NoContext => f.write_str("NoContext"),
TryCurrentErrorKind::ThreadLocalDestroyed => f.write_str("ThreadLocalDestroyed"),
}
}
}
impl fmt::Display for TryCurrentError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use TryCurrentErrorKind as E;
match self.kind {
E::NoContext => f.write_str(CONTEXT_MISSING_ERROR),
E::ThreadLocalDestroyed => f.write_str(THREAD_LOCAL_DESTROYED_ERROR),
}
}
}
impl error::Error for TryCurrentError {}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/task_hooks.rs | tokio/src/runtime/task_hooks.rs | use super::Config;
use std::marker::PhantomData;
impl TaskHooks {
pub(crate) fn spawn(&self, meta: &TaskMeta<'_>) {
if let Some(f) = self.task_spawn_callback.as_ref() {
f(meta)
}
}
#[allow(dead_code)]
pub(crate) fn from_config(config: &Config) -> Self {
Self {
task_spawn_callback: config.before_spawn.clone(),
task_terminate_callback: config.after_termination.clone(),
#[cfg(tokio_unstable)]
before_poll_callback: config.before_poll.clone(),
#[cfg(tokio_unstable)]
after_poll_callback: config.after_poll.clone(),
}
}
#[cfg(tokio_unstable)]
#[inline]
pub(crate) fn poll_start_callback(&self, meta: &TaskMeta<'_>) {
if let Some(poll_start) = &self.before_poll_callback {
(poll_start)(meta);
}
}
#[cfg(tokio_unstable)]
#[inline]
pub(crate) fn poll_stop_callback(&self, meta: &TaskMeta<'_>) {
if let Some(poll_stop) = &self.after_poll_callback {
(poll_stop)(meta);
}
}
}
#[derive(Clone)]
pub(crate) struct TaskHooks {
pub(crate) task_spawn_callback: Option<TaskCallback>,
pub(crate) task_terminate_callback: Option<TaskCallback>,
#[cfg(tokio_unstable)]
pub(crate) before_poll_callback: Option<TaskCallback>,
#[cfg(tokio_unstable)]
pub(crate) after_poll_callback: Option<TaskCallback>,
}
/// Task metadata supplied to user-provided hooks for task events.
///
/// **Note**: This is an [unstable API][unstable]. The public API of this type
/// may break in 1.x releases. See [the documentation on unstable
/// features][unstable] for details.
///
/// [unstable]: crate#unstable-features
#[allow(missing_debug_implementations)]
#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))]
pub struct TaskMeta<'a> {
/// The opaque ID of the task.
pub(crate) id: super::task::Id,
/// The location where the task was spawned.
#[cfg_attr(not(tokio_unstable), allow(unreachable_pub, dead_code))]
pub(crate) spawned_at: crate::runtime::task::SpawnLocation,
pub(crate) _phantom: PhantomData<&'a ()>,
}
impl<'a> TaskMeta<'a> {
/// Return the opaque ID of the task.
#[cfg_attr(not(tokio_unstable), allow(unreachable_pub, dead_code))]
pub fn id(&self) -> super::task::Id {
self.id
}
/// Return the source code location where the task was spawned.
#[cfg(tokio_unstable)]
pub fn spawned_at(&self) -> &'static std::panic::Location<'static> {
self.spawned_at.0
}
}
/// Runs on specific task-related events
pub(crate) type TaskCallback = std::sync::Arc<dyn Fn(&TaskMeta<'_>) + Send + Sync>;
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/tests/inject.rs | tokio/src/runtime/tests/inject.rs | use crate::runtime::scheduler::inject;
#[test]
fn push_and_pop() {
const N: usize = 2;
let (inject, mut synced) = inject::Shared::new();
for i in 0..N {
assert_eq!(inject.len(), i);
let (task, _) = super::unowned(async {});
unsafe { inject.push(&mut synced, task) };
}
for i in 0..N {
assert_eq!(inject.len(), N - i);
assert!(unsafe { inject.pop(&mut synced) }.is_some());
}
println!("--------------");
assert!(unsafe { inject.pop(&mut synced) }.is_none());
}
#[test]
fn push_batch_and_pop() {
let (inject, mut inject_synced) = inject::Shared::new();
unsafe {
inject.push_batch(
&mut inject_synced,
(0..10).map(|_| super::unowned(async {}).0),
);
assert_eq!(5, inject.pop_n(&mut inject_synced, 5).count());
assert_eq!(5, inject.pop_n(&mut inject_synced, 5).count());
assert_eq!(0, inject.pop_n(&mut inject_synced, 5).count());
}
}
#[test]
fn pop_n_drains_on_drop() {
let (inject, mut inject_synced) = inject::Shared::new();
unsafe {
inject.push_batch(
&mut inject_synced,
(0..10).map(|_| super::unowned(async {}).0),
);
let _ = inject.pop_n(&mut inject_synced, 10);
assert_eq!(inject.len(), 0);
}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/tests/loom_current_thread.rs | tokio/src/runtime/tests/loom_current_thread.rs | mod yield_now;
use crate::loom::sync::atomic::{AtomicUsize, Ordering};
use crate::loom::sync::Arc;
use crate::loom::thread;
use crate::runtime::{Builder, Runtime};
use crate::sync::oneshot::{self, Receiver};
use crate::task;
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::Ordering::{Acquire, Release};
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
fn assert_at_most_num_polls(rt: Arc<Runtime>, at_most_polls: usize) {
let (tx, rx) = oneshot::channel();
let num_polls = Arc::new(AtomicUsize::new(0));
rt.spawn(async move {
for _ in 0..12 {
task::yield_now().await;
}
tx.send(()).unwrap();
});
rt.block_on(async {
BlockedFuture {
rx,
num_polls: num_polls.clone(),
}
.await;
});
let polls = num_polls.load(Acquire);
assert!(polls <= at_most_polls);
}
#[test]
fn block_on_num_polls() {
loom::model(|| {
// we expect at most 4 number of polls because there are three points at
// which we poll the future and an opportunity for a false-positive.. At
// any of these points it can be ready:
//
// - when we fail to steal the parker and we block on a notification
// that it is available.
//
// - when we steal the parker and we schedule the future
//
// - when the future is woken up and we have ran the max number of tasks
// for the current tick or there are no more tasks to run.
//
// - a thread is notified that the parker is available but a third
// thread acquires it before the notified thread can.
//
let at_most = 4;
let rt1 = Arc::new(Builder::new_current_thread().build().unwrap());
let rt2 = rt1.clone();
let rt3 = rt1.clone();
let th1 = thread::spawn(move || assert_at_most_num_polls(rt1, at_most));
let th2 = thread::spawn(move || assert_at_most_num_polls(rt2, at_most));
let th3 = thread::spawn(move || assert_at_most_num_polls(rt3, at_most));
th1.join().unwrap();
th2.join().unwrap();
th3.join().unwrap();
});
}
#[test]
fn assert_no_unnecessary_polls() {
loom::model(|| {
// // After we poll outer future, woken should reset to false
let rt = Builder::new_current_thread().build().unwrap();
let (tx, rx) = oneshot::channel();
let pending_cnt = Arc::new(AtomicUsize::new(0));
rt.spawn(async move {
for _ in 0..24 {
task::yield_now().await;
}
tx.send(()).unwrap();
});
let pending_cnt_clone = pending_cnt.clone();
rt.block_on(async move {
// use task::yield_now() to ensure woken set to true
// ResetFuture will be polled at most once
// Here comes two cases
// 1. recv no message from channel, ResetFuture will be polled
// but get Pending and we record ResetFuture.pending_cnt ++.
// Then when message arrive, ResetFuture returns Ready. So we
// expect ResetFuture.pending_cnt = 1
// 2. recv message from channel, ResetFuture returns Ready immediately.
// We expect ResetFuture.pending_cnt = 0
task::yield_now().await;
ResetFuture {
rx,
pending_cnt: pending_cnt_clone,
}
.await;
});
let pending_cnt = pending_cnt.load(Acquire);
assert!(pending_cnt <= 1);
});
}
#[test]
fn drop_jh_during_schedule() {
unsafe fn waker_clone(ptr: *const ()) -> RawWaker {
let atomic = unsafe { &*(ptr as *const AtomicUsize) };
atomic.fetch_add(1, Ordering::Relaxed);
RawWaker::new(ptr, &VTABLE)
}
unsafe fn waker_drop(ptr: *const ()) {
let atomic = unsafe { &*(ptr as *const AtomicUsize) };
atomic.fetch_sub(1, Ordering::Relaxed);
}
unsafe fn waker_nop(_ptr: *const ()) {}
static VTABLE: RawWakerVTable =
RawWakerVTable::new(waker_clone, waker_drop, waker_nop, waker_drop);
loom::model(|| {
let rt = Builder::new_current_thread().build().unwrap();
let mut jh = rt.spawn(async {});
// Using AbortHandle to increment task refcount. This ensures that the waker is not
// destroyed due to the refcount hitting zero.
let task_refcnt = jh.abort_handle();
let waker_refcnt = AtomicUsize::new(1);
{
// Set up the join waker.
use std::future::Future;
use std::pin::Pin;
// SAFETY: Before `waker_refcnt` goes out of scope, this test asserts that the refcnt
// has dropped to zero.
let join_waker = unsafe {
Waker::from_raw(RawWaker::new(
(&waker_refcnt) as *const AtomicUsize as *const (),
&VTABLE,
))
};
assert!(Pin::new(&mut jh)
.poll(&mut Context::from_waker(&join_waker))
.is_pending());
}
assert_eq!(waker_refcnt.load(Ordering::Relaxed), 1);
let bg_thread = loom::thread::spawn(move || drop(jh));
rt.block_on(crate::task::yield_now());
bg_thread.join().unwrap();
assert_eq!(waker_refcnt.load(Ordering::Relaxed), 0);
drop(task_refcnt);
});
}
struct BlockedFuture {
rx: Receiver<()>,
num_polls: Arc<AtomicUsize>,
}
impl Future for BlockedFuture {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.num_polls.fetch_add(1, Release);
match Pin::new(&mut self.rx).poll(cx) {
Poll::Pending => Poll::Pending,
_ => Poll::Ready(()),
}
}
}
struct ResetFuture {
rx: Receiver<()>,
pending_cnt: Arc<AtomicUsize>,
}
impl Future for ResetFuture {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match Pin::new(&mut self.rx).poll(cx) {
Poll::Pending => {
self.pending_cnt.fetch_add(1, Release);
Poll::Pending
}
_ => Poll::Ready(()),
}
}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/tests/loom_oneshot.rs | tokio/src/runtime/tests/loom_oneshot.rs | use crate::loom::sync::{Arc, Mutex};
use loom::sync::Notify;
pub(crate) fn channel<T>() -> (Sender<T>, Receiver<T>) {
let inner = Arc::new(Inner {
notify: Notify::new(),
value: Mutex::new(None),
});
let tx = Sender {
inner: inner.clone(),
};
let rx = Receiver { inner };
(tx, rx)
}
pub(crate) struct Sender<T> {
inner: Arc<Inner<T>>,
}
pub(crate) struct Receiver<T> {
inner: Arc<Inner<T>>,
}
struct Inner<T> {
notify: Notify,
value: Mutex<Option<T>>,
}
impl<T> Sender<T> {
pub(crate) fn send(self, value: T) {
*self.inner.value.lock() = Some(value);
self.inner.notify.notify();
}
}
impl<T> Receiver<T> {
pub(crate) fn recv(self) -> T {
loop {
if let Some(v) = self.inner.value.lock().take() {
return v;
}
self.inner.notify.wait();
}
}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/tests/loom_multi_thread.rs | tokio/src/runtime/tests/loom_multi_thread.rs | mod queue;
mod shutdown;
mod yield_now;
/// Full runtime loom tests. These are heavy tests and take significant time to
/// run on CI.
///
/// Use `LOOM_MAX_PREEMPTIONS=1` to do a "quick" run as a smoke test.
///
/// In order to speed up the C
use crate::runtime::tests::loom_oneshot as oneshot;
use crate::runtime::{self, Runtime};
use crate::{spawn, task};
use tokio_test::assert_ok;
use loom::sync::atomic::{AtomicBool, AtomicUsize};
use loom::sync::Arc;
use pin_project_lite::pin_project;
use std::future::{poll_fn, Future};
use std::pin::Pin;
use std::sync::atomic::Ordering::{Relaxed, SeqCst};
use std::task::{ready, Context, Poll};
mod atomic_take {
use loom::sync::atomic::AtomicBool;
use std::mem::MaybeUninit;
use std::sync::atomic::Ordering::SeqCst;
pub(super) struct AtomicTake<T> {
inner: MaybeUninit<T>,
taken: AtomicBool,
}
impl<T> AtomicTake<T> {
pub(super) fn new(value: T) -> Self {
Self {
inner: MaybeUninit::new(value),
taken: AtomicBool::new(false),
}
}
pub(super) fn take(&self) -> Option<T> {
// safety: Only one thread will see the boolean change from false
// to true, so that thread is able to take the value.
match self.taken.fetch_or(true, SeqCst) {
false => unsafe { Some(std::ptr::read(self.inner.as_ptr())) },
true => None,
}
}
}
impl<T> Drop for AtomicTake<T> {
fn drop(&mut self) {
drop(self.take());
}
}
}
#[derive(Clone)]
struct AtomicOneshot<T> {
value: std::sync::Arc<atomic_take::AtomicTake<oneshot::Sender<T>>>,
}
impl<T> AtomicOneshot<T> {
fn new(sender: oneshot::Sender<T>) -> Self {
Self {
value: std::sync::Arc::new(atomic_take::AtomicTake::new(sender)),
}
}
fn assert_send(&self, value: T) {
self.value.take().unwrap().send(value);
}
}
/// Tests are divided into groups to make the runs faster on CI.
mod group_a {
use super::*;
#[test]
fn racy_shutdown() {
loom::model(|| {
let pool = mk_pool(1);
// here's the case we want to exercise:
//
// a worker that still has tasks in its local queue gets sent to the blocking pool (due to
// block_in_place). the blocking pool is shut down, so drops the worker. the worker's
// shutdown method never gets run.
//
// we do this by spawning two tasks on one worker, the first of which does block_in_place,
// and then immediately drop the pool.
pool.spawn(track(async {
crate::task::block_in_place(|| {});
}));
pool.spawn(track(async {}));
drop(pool);
});
}
#[test]
fn pool_multi_spawn() {
loom::model(|| {
let pool = mk_pool(2);
let c1 = Arc::new(AtomicUsize::new(0));
let (tx, rx) = oneshot::channel();
let tx1 = AtomicOneshot::new(tx);
// Spawn a task
let c2 = c1.clone();
let tx2 = tx1.clone();
pool.spawn(track(async move {
spawn(track(async move {
if 1 == c1.fetch_add(1, Relaxed) {
tx1.assert_send(());
}
}));
}));
// Spawn a second task
pool.spawn(track(async move {
spawn(track(async move {
if 1 == c2.fetch_add(1, Relaxed) {
tx2.assert_send(());
}
}));
}));
rx.recv();
});
}
fn only_blocking_inner(first_pending: bool) {
loom::model(move || {
let pool = mk_pool(1);
let (block_tx, block_rx) = oneshot::channel();
pool.spawn(track(async move {
crate::task::block_in_place(move || {
block_tx.send(());
});
if first_pending {
task::yield_now().await
}
}));
block_rx.recv();
drop(pool);
});
}
#[test]
fn only_blocking_without_pending() {
only_blocking_inner(false)
}
#[test]
fn only_blocking_with_pending() {
only_blocking_inner(true)
}
}
mod group_b {
use super::*;
fn blocking_and_regular_inner(first_pending: bool) {
const NUM: usize = 3;
loom::model(move || {
let pool = mk_pool(1);
let cnt = Arc::new(AtomicUsize::new(0));
let (block_tx, block_rx) = oneshot::channel();
let (done_tx, done_rx) = oneshot::channel();
let done_tx = AtomicOneshot::new(done_tx);
pool.spawn(track(async move {
crate::task::block_in_place(move || {
block_tx.send(());
});
if first_pending {
task::yield_now().await
}
}));
for _ in 0..NUM {
let cnt = cnt.clone();
let done_tx = done_tx.clone();
pool.spawn(track(async move {
if NUM == cnt.fetch_add(1, Relaxed) + 1 {
done_tx.assert_send(());
}
}));
}
done_rx.recv();
block_rx.recv();
drop(pool);
});
}
#[test]
fn blocking_and_regular() {
blocking_and_regular_inner(false);
}
#[test]
fn blocking_and_regular_with_pending() {
blocking_and_regular_inner(true);
}
#[test]
fn join_output() {
loom::model(|| {
let rt = mk_pool(1);
rt.block_on(async {
let t = crate::spawn(track(async { "hello" }));
let out = assert_ok!(t.await);
assert_eq!("hello", out.into_inner());
});
});
}
#[test]
fn poll_drop_handle_then_drop() {
loom::model(|| {
let rt = mk_pool(1);
rt.block_on(async move {
let mut t = crate::spawn(track(async { "hello" }));
poll_fn(|cx| {
let _ = Pin::new(&mut t).poll(cx);
Poll::Ready(())
})
.await;
});
})
}
#[test]
fn complete_block_on_under_load() {
loom::model(|| {
let pool = mk_pool(1);
pool.block_on(async {
// Trigger a re-schedule
crate::spawn(track(async {
for _ in 0..2 {
task::yield_now().await;
}
}));
gated2(true).await
});
});
}
#[test]
fn shutdown_with_notification() {
use crate::sync::oneshot;
loom::model(|| {
let rt = mk_pool(2);
let (done_tx, done_rx) = oneshot::channel::<()>();
rt.spawn(track(async move {
let (tx, rx) = oneshot::channel::<()>();
crate::spawn(async move {
crate::task::spawn_blocking(move || {
let _ = tx.send(());
});
let _ = done_rx.await;
});
let _ = rx.await;
let _ = done_tx.send(());
}));
});
}
}
mod group_c {
use super::*;
#[test]
fn pool_shutdown() {
loom::model(|| {
let pool = mk_pool(2);
pool.spawn(track(async move {
gated2(true).await;
}));
pool.spawn(track(async move {
gated2(false).await;
}));
drop(pool);
});
}
}
mod group_d {
use super::*;
#[test]
fn pool_multi_notify() {
loom::model(|| {
let pool = mk_pool(2);
let c1 = Arc::new(AtomicUsize::new(0));
let (done_tx, done_rx) = oneshot::channel();
let done_tx1 = AtomicOneshot::new(done_tx);
let done_tx2 = done_tx1.clone();
// Spawn a task
let c2 = c1.clone();
pool.spawn(track(async move {
multi_gated().await;
if 1 == c1.fetch_add(1, Relaxed) {
done_tx1.assert_send(());
}
}));
// Spawn a second task
pool.spawn(track(async move {
multi_gated().await;
if 1 == c2.fetch_add(1, Relaxed) {
done_tx2.assert_send(());
}
}));
done_rx.recv();
});
}
}
fn mk_pool(num_threads: usize) -> Runtime {
runtime::Builder::new_multi_thread()
.worker_threads(num_threads)
// Set the intervals to avoid tuning logic
.event_interval(2)
.build()
.unwrap()
}
fn gated2(thread: bool) -> impl Future<Output = &'static str> {
use loom::thread;
use std::sync::Arc;
let gate = Arc::new(AtomicBool::new(false));
let mut fired = false;
poll_fn(move |cx| {
if !fired {
let gate = gate.clone();
let waker = cx.waker().clone();
if thread {
thread::spawn(move || {
gate.store(true, SeqCst);
waker.wake_by_ref();
});
} else {
spawn(track(async move {
gate.store(true, SeqCst);
waker.wake_by_ref();
}));
}
fired = true;
return Poll::Pending;
}
if gate.load(SeqCst) {
Poll::Ready("hello world")
} else {
Poll::Pending
}
})
}
async fn multi_gated() {
struct Gate {
waker: loom::future::AtomicWaker,
count: AtomicUsize,
}
let gate = Arc::new(Gate {
waker: loom::future::AtomicWaker::new(),
count: AtomicUsize::new(0),
});
{
let gate = gate.clone();
spawn(track(async move {
for i in 1..3 {
gate.count.store(i, SeqCst);
gate.waker.wake();
}
}));
}
poll_fn(move |cx| {
gate.waker.register_by_ref(cx.waker());
if gate.count.load(SeqCst) < 2 {
Poll::Pending
} else {
Poll::Ready(())
}
})
.await;
}
fn track<T: Future>(f: T) -> Track<T> {
Track {
inner: f,
arc: Arc::new(()),
}
}
pin_project! {
struct Track<T> {
#[pin]
inner: T,
// Arc is used to hook into loom's leak tracking.
arc: Arc<()>,
}
}
impl<T> Track<T> {
fn into_inner(self) -> T {
self.inner
}
}
impl<T: Future> Future for Track<T> {
type Output = Track<T::Output>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let me = self.project();
Poll::Ready(Track {
inner: ready!(me.inner.poll(cx)),
arc: me.arc.clone(),
})
}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/tests/mod.rs | tokio/src/runtime/tests/mod.rs | // Enable dead_code / unreachable_pub here. It has been disabled in lib.rs for
// other code when running loom tests.
#![cfg_attr(loom, warn(dead_code, unreachable_pub))]
use self::noop_scheduler::NoopSchedule;
use self::unowned_wrapper::unowned;
mod noop_scheduler {
use crate::runtime::task::{self, Task, TaskHarnessScheduleHooks};
/// `task::Schedule` implementation that does nothing, for testing.
pub(crate) struct NoopSchedule;
impl task::Schedule for NoopSchedule {
fn release(&self, _task: &Task<Self>) -> Option<Task<Self>> {
None
}
fn schedule(&self, _task: task::Notified<Self>) {
unreachable!();
}
fn hooks(&self) -> TaskHarnessScheduleHooks {
TaskHarnessScheduleHooks {
task_terminate_callback: None,
}
}
}
}
mod unowned_wrapper {
use crate::runtime::task::{Id, JoinHandle, Notified, SpawnLocation};
use crate::runtime::tests::NoopSchedule;
#[cfg(all(tokio_unstable, feature = "tracing"))]
#[track_caller]
pub(crate) fn unowned<T>(task: T) -> (Notified<NoopSchedule>, JoinHandle<T::Output>)
where
T: std::future::Future + Send + 'static,
T::Output: Send + 'static,
{
use tracing::Instrument;
let span = tracing::trace_span!("test_span");
let task = task.instrument(span);
let (task, handle) =
crate::runtime::task::unowned(task, NoopSchedule, Id::next(), SpawnLocation::capture());
(task.into_notified(), handle)
}
#[cfg(not(all(tokio_unstable, feature = "tracing")))]
#[track_caller]
pub(crate) fn unowned<T>(task: T) -> (Notified<NoopSchedule>, JoinHandle<T::Output>)
where
T: std::future::Future + Send + 'static,
T::Output: Send + 'static,
{
let (task, handle) =
crate::runtime::task::unowned(task, NoopSchedule, Id::next(), SpawnLocation::capture());
(task.into_notified(), handle)
}
}
cfg_loom! {
mod loom_blocking;
mod loom_current_thread;
mod loom_join_set;
mod loom_local;
mod loom_multi_thread;
mod loom_oneshot;
// Make sure debug assertions are enabled
#[cfg(not(debug_assertions))]
compile_error!("these tests require debug assertions to be enabled");
}
cfg_not_loom! {
mod inject;
mod queue;
#[cfg(not(miri))] // takes a really long time with miri
mod task_combinations;
#[cfg(miri)]
mod task;
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/tests/loom_join_set.rs | tokio/src/runtime/tests/loom_join_set.rs | use crate::runtime::Builder;
use crate::task::JoinSet;
#[test]
fn test_join_set() {
loom::model(|| {
let rt = Builder::new_multi_thread()
.worker_threads(1)
.build()
.unwrap();
let mut set = JoinSet::new();
rt.block_on(async {
assert_eq!(set.len(), 0);
set.spawn(async { () });
assert_eq!(set.len(), 1);
set.spawn(async { () });
assert_eq!(set.len(), 2);
let () = set.join_next().await.unwrap().unwrap();
assert_eq!(set.len(), 1);
set.spawn(async { () });
assert_eq!(set.len(), 2);
let () = set.join_next().await.unwrap().unwrap();
assert_eq!(set.len(), 1);
let () = set.join_next().await.unwrap().unwrap();
assert_eq!(set.len(), 0);
set.spawn(async { () });
assert_eq!(set.len(), 1);
});
drop(set);
drop(rt);
});
}
#[test]
fn abort_all_during_completion() {
use std::sync::{
atomic::{AtomicBool, Ordering::SeqCst},
Arc,
};
// These booleans assert that at least one execution had the task complete first, and that at
// least one execution had the task be cancelled before it completed.
let complete_happened = Arc::new(AtomicBool::new(false));
let cancel_happened = Arc::new(AtomicBool::new(false));
{
let complete_happened = complete_happened.clone();
let cancel_happened = cancel_happened.clone();
loom::model(move || {
let rt = Builder::new_multi_thread()
.worker_threads(1)
.build()
.unwrap();
let mut set = JoinSet::new();
rt.block_on(async {
set.spawn(async { () });
set.abort_all();
match set.join_next().await {
Some(Ok(())) => complete_happened.store(true, SeqCst),
Some(Err(err)) if err.is_cancelled() => cancel_happened.store(true, SeqCst),
Some(Err(err)) => panic!("fail: {}", err),
None => {
unreachable!("Aborting the task does not remove it from the JoinSet.")
}
}
assert!(matches!(set.join_next().await, None));
});
drop(set);
drop(rt);
});
}
assert!(complete_happened.load(SeqCst));
assert!(cancel_happened.load(SeqCst));
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/tests/task.rs | tokio/src/runtime/tests/task.rs | use crate::runtime::task::{
self, unowned, Id, JoinHandle, OwnedTasks, Schedule, SpawnLocation, Task,
TaskHarnessScheduleHooks,
};
use crate::runtime::tests::NoopSchedule;
use std::collections::VecDeque;
use std::future::Future;
#[cfg(tokio_unstable)]
use std::panic::Location;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
struct AssertDropHandle {
is_dropped: Arc<AtomicBool>,
}
impl AssertDropHandle {
#[track_caller]
fn assert_dropped(&self) {
assert!(self.is_dropped.load(Ordering::SeqCst));
}
#[track_caller]
fn assert_not_dropped(&self) {
assert!(!self.is_dropped.load(Ordering::SeqCst));
}
}
struct AssertDrop {
is_dropped: Arc<AtomicBool>,
}
impl AssertDrop {
fn new() -> (Self, AssertDropHandle) {
let shared = Arc::new(AtomicBool::new(false));
(
AssertDrop {
is_dropped: shared.clone(),
},
AssertDropHandle {
is_dropped: shared.clone(),
},
)
}
}
impl Drop for AssertDrop {
fn drop(&mut self) {
self.is_dropped.store(true, Ordering::SeqCst);
}
}
// A Notified does not shut down on drop, but it is dropped once the ref-count
// hits zero.
#[test]
fn create_drop1() {
let (ad, handle) = AssertDrop::new();
let (notified, join) = unowned(
async {
drop(ad);
unreachable!()
},
NoopSchedule,
Id::next(),
SpawnLocation::capture(),
);
drop(notified);
handle.assert_not_dropped();
drop(join);
handle.assert_dropped();
}
#[test]
fn create_drop2() {
let (ad, handle) = AssertDrop::new();
let (notified, join) = unowned(
async {
drop(ad);
unreachable!()
},
NoopSchedule,
Id::next(),
SpawnLocation::capture(),
);
drop(join);
handle.assert_not_dropped();
drop(notified);
handle.assert_dropped();
}
#[test]
fn drop_abort_handle1() {
let (ad, handle) = AssertDrop::new();
let (notified, join) = unowned(
async {
drop(ad);
unreachable!()
},
NoopSchedule,
Id::next(),
SpawnLocation::capture(),
);
let abort = join.abort_handle();
drop(join);
handle.assert_not_dropped();
drop(notified);
handle.assert_not_dropped();
drop(abort);
handle.assert_dropped();
}
#[test]
fn drop_abort_handle2() {
let (ad, handle) = AssertDrop::new();
let (notified, join) = unowned(
async {
drop(ad);
unreachable!()
},
NoopSchedule,
Id::next(),
SpawnLocation::capture(),
);
let abort = join.abort_handle();
drop(notified);
handle.assert_not_dropped();
drop(abort);
handle.assert_not_dropped();
drop(join);
handle.assert_dropped();
}
#[test]
fn drop_abort_handle_clone() {
let (ad, handle) = AssertDrop::new();
let (notified, join) = unowned(
async {
drop(ad);
unreachable!()
},
NoopSchedule,
Id::next(),
SpawnLocation::capture(),
);
let abort = join.abort_handle();
let abort_clone = abort.clone();
drop(join);
handle.assert_not_dropped();
drop(notified);
handle.assert_not_dropped();
drop(abort);
handle.assert_not_dropped();
drop(abort_clone);
handle.assert_dropped();
}
// Shutting down through Notified works
#[test]
fn create_shutdown1() {
let (ad, handle) = AssertDrop::new();
let (notified, join) = unowned(
async {
drop(ad);
unreachable!()
},
NoopSchedule,
Id::next(),
SpawnLocation::capture(),
);
drop(join);
handle.assert_not_dropped();
notified.shutdown();
handle.assert_dropped();
}
#[test]
fn create_shutdown2() {
let (ad, handle) = AssertDrop::new();
let (notified, join) = unowned(
async {
drop(ad);
unreachable!()
},
NoopSchedule,
Id::next(),
SpawnLocation::capture(),
);
handle.assert_not_dropped();
notified.shutdown();
handle.assert_dropped();
drop(join);
}
#[test]
fn unowned_poll() {
let (task, _) = unowned(async {}, NoopSchedule, Id::next(), SpawnLocation::capture());
task.run();
}
#[test]
fn schedule() {
with(|rt| {
rt.spawn(async {
crate::task::yield_now().await;
});
assert_eq!(2, rt.tick());
rt.shutdown();
})
}
#[test]
fn shutdown() {
with(|rt| {
rt.spawn(async {
loop {
crate::task::yield_now().await;
}
});
rt.tick_max(1);
rt.shutdown();
})
}
#[test]
fn shutdown_immediately() {
with(|rt| {
rt.spawn(async {
loop {
crate::task::yield_now().await;
}
});
rt.shutdown();
})
}
// Test for https://github.com/tokio-rs/tokio/issues/6729
#[test]
fn spawn_niche_in_task() {
use std::future::poll_fn;
use std::task::{Context, Poll, Waker};
with(|rt| {
let state = Arc::new(Mutex::new(State::new()));
let mut subscriber = Subscriber::new(Arc::clone(&state), 1);
rt.spawn(async move {
subscriber.wait().await;
subscriber.wait().await;
});
rt.spawn(async move {
state.lock().unwrap().set_version(2);
state.lock().unwrap().set_version(0);
});
rt.tick_max(10);
assert!(rt.is_empty());
rt.shutdown();
});
pub(crate) struct Subscriber {
state: Arc<Mutex<State>>,
observed_version: u64,
waker_key: Option<usize>,
}
impl Subscriber {
pub(crate) fn new(state: Arc<Mutex<State>>, version: u64) -> Self {
Self {
state,
observed_version: version,
waker_key: None,
}
}
pub(crate) async fn wait(&mut self) {
poll_fn(|cx| {
self.state
.lock()
.unwrap()
.poll_update(&mut self.observed_version, &mut self.waker_key, cx)
.map(|_| ())
})
.await;
}
}
struct State {
version: u64,
wakers: Vec<Waker>,
}
impl State {
pub(crate) fn new() -> Self {
Self {
version: 1,
wakers: Vec::new(),
}
}
pub(crate) fn poll_update(
&mut self,
observed_version: &mut u64,
waker_key: &mut Option<usize>,
cx: &Context<'_>,
) -> Poll<Option<()>> {
if self.version == 0 {
*waker_key = None;
Poll::Ready(None)
} else if *observed_version < self.version {
*waker_key = None;
*observed_version = self.version;
Poll::Ready(Some(()))
} else {
self.wakers.push(cx.waker().clone());
*waker_key = Some(self.wakers.len());
Poll::Pending
}
}
pub(crate) fn set_version(&mut self, version: u64) {
self.version = version;
for waker in self.wakers.drain(..) {
waker.wake();
}
}
}
}
#[test]
fn spawn_during_shutdown() {
static DID_SPAWN: AtomicBool = AtomicBool::new(false);
struct SpawnOnDrop(Runtime);
impl Drop for SpawnOnDrop {
fn drop(&mut self) {
DID_SPAWN.store(true, Ordering::SeqCst);
self.0.spawn(async {});
}
}
with(|rt| {
let rt2 = rt.clone();
rt.spawn(async move {
let _spawn_on_drop = SpawnOnDrop(rt2);
loop {
crate::task::yield_now().await;
}
});
rt.tick_max(1);
rt.shutdown();
});
assert!(DID_SPAWN.load(Ordering::SeqCst));
}
// Builds a fresh test `Runtime`, installs it as `CURRENT`, runs `f` with it,
// and clears `CURRENT` afterwards — even if `f` panics, via the drop guard.
fn with(f: impl FnOnce(Runtime)) {
    // Guard that empties the `CURRENT` slot when this scope unwinds or ends.
    struct ClearCurrent;
    impl Drop for ClearCurrent {
        fn drop(&mut self) {
            let _rt = CURRENT.try_lock().unwrap().take();
        }
    }
    let _guard = ClearCurrent;
    let inner = Inner {
        owned: OwnedTasks::new(16),
        core: Mutex::new(Core {
            queue: VecDeque::new(),
        }),
    };
    let rt = Runtime(Arc::new(inner));
    *CURRENT.try_lock().unwrap() = Some(rt.clone());
    f(rt)
}
// Minimal single-queue scheduler used only by these tests.
#[derive(Clone)]
struct Runtime(Arc<Inner>);
struct Inner {
    // FIFO of notified tasks awaiting a `tick`.
    core: Mutex<Core>,
    // Registry of all tasks bound to this runtime.
    owned: OwnedTasks<Runtime>,
}
struct Core {
    queue: VecDeque<task::Notified<Runtime>>,
}
// The runtime installed by `with`; cleared when the test scope ends.
static CURRENT: Mutex<Option<Runtime>> = Mutex::new(None);
impl Runtime {
    /// Binds `future` into the owned-task list and schedules it if the bind
    /// produced an initial notification. Returns the task's join handle.
    #[track_caller]
    fn spawn<T>(&self, future: T) -> JoinHandle<T::Output>
    where
        T: 'static + Send + Future,
        T::Output: 'static + Send,
    {
        let (handle, notified) =
            self.0
                .owned
                .bind(future, self.clone(), Id::next(), SpawnLocation::capture());
        if let Some(notified) = notified {
            self.schedule(notified);
        }
        handle
    }
    /// Runs queued tasks until the queue is empty.
    fn tick(&self) -> usize {
        self.tick_max(usize::MAX)
    }
    /// Runs at most `max` queued tasks; returns how many ran.
    fn tick_max(&self, max: usize) -> usize {
        let mut n = 0;
        while !self.is_empty() && n < max {
            let task = self.next_task();
            n += 1;
            let task = self.0.owned.assert_owner(task);
            task.run();
        }
        n
    }
    fn is_empty(&self) -> bool {
        self.0.core.try_lock().unwrap().queue.is_empty()
    }
    /// Pops the next notified task; panics if the queue is empty.
    fn next_task(&self) -> task::Notified<Runtime> {
        self.0.core.try_lock().unwrap().queue.pop_front().unwrap()
    }
    /// Shuts the runtime down: closes/cancels all owned tasks while holding
    /// the core lock, then drains remaining notifications back-to-front.
    /// Afterwards no task may remain registered.
    fn shutdown(&self) {
        let mut core = self.0.core.try_lock().unwrap();
        self.0.owned.close_and_shutdown_all(0);
        while let Some(task) = core.queue.pop_back() {
            drop(task);
        }
        drop(core);
        assert!(self.0.owned.is_empty());
    }
}
impl Schedule for Runtime {
    // Unregisters a completed task from the owned-task list.
    fn release(&self, task: &Task<Self>) -> Option<Task<Self>> {
        self.0.owned.remove(task)
    }
    // Enqueues a notified task at the back of the FIFO.
    fn schedule(&self, task: task::Notified<Self>) {
        self.0.core.try_lock().unwrap().queue.push_back(task);
    }
    // No terminate callback is needed for these tests.
    fn hooks(&self) -> TaskHarnessScheduleHooks {
        TaskHarnessScheduleHooks {
            task_terminate_callback: None,
        }
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/tests/loom_blocking.rs | tokio/src/runtime/tests/loom_blocking.rs | use crate::runtime::{self, Runtime};
use std::sync::Arc;
// Loom model: dropping the runtime must wait for in-flight blocking tasks.
// Each blocking closure clones the Arc witness; after `drop(rt)` the strong
// count must be back to exactly 1, proving both closures finished (or were
// dropped) before shutdown completed.
#[test]
fn blocking_shutdown() {
    loom::model(|| {
        let v = Arc::new(());
        let rt = mk_runtime(1);
        {
            let _enter = rt.enter();
            for _ in 0..2 {
                let v = v.clone();
                crate::task::spawn_blocking(move || {
                    // Still at least the task's clone plus the test's handle.
                    assert!(1 < Arc::strong_count(&v));
                });
            }
        }
        drop(rt);
        assert_eq!(1, Arc::strong_count(&v));
    });
}
// Loom model: a task accepted by `spawn_mandatory_blocking` must run even if
// the runtime is dropped right after it is submitted. `rx.recv()` deadlocks
// (and fails the model) if the mandatory task never executes.
#[test]
fn spawn_mandatory_blocking_should_always_run() {
    use crate::runtime::tests::loom_oneshot;
    loom::model(|| {
        let rt = runtime::Builder::new_current_thread().build().unwrap();
        let (tx, rx) = loom_oneshot::channel();
        let _enter = rt.enter();
        // A plain (non-mandatory) task occupies the pool first.
        runtime::spawn_blocking(|| {});
        runtime::spawn_mandatory_blocking(move || {
            let _ = tx.send(());
        })
        .unwrap();
        drop(rt);
        // This call will deadlock if `spawn_mandatory_blocking` doesn't run.
        let () = rx.recv();
    });
}
// Loom model: same guarantee as above, but the runtime is dropped on a
// different thread concurrently with the submission. If the submission wins
// the race (`handle.is_some()`), the task is promised to run and the recv
// must complete; if shutdown wins, submission fails and nothing is owed.
#[test]
fn spawn_mandatory_blocking_should_run_even_when_shutting_down_from_other_thread() {
    use crate::runtime::tests::loom_oneshot;
    loom::model(|| {
        let rt = runtime::Builder::new_current_thread().build().unwrap();
        let handle = rt.handle().clone();
        // Drop the runtime in a different thread
        {
            loom::thread::spawn(move || {
                drop(rt);
            });
        }
        let _enter = handle.enter();
        let (tx, rx) = loom_oneshot::channel();
        let handle = runtime::spawn_mandatory_blocking(move || {
            let _ = tx.send(());
        });
        // handle.is_some() means that `spawn_mandatory_blocking`
        // promised us to run the blocking task
        if handle.is_some() {
            // This call will deadlock if `spawn_mandatory_blocking` doesn't run.
            let () = rx.recv();
        }
    });
}
// Loom model: with the clock paused (`start_paused(true)`), blocking tasks
// must still complete within a 1 ms virtual timeout — i.e. auto-advance of
// the paused clock must not fire the timeout while blocking work is pending.
#[test]
fn spawn_blocking_when_paused() {
    use std::time::Duration;
    loom::model(|| {
        let rt = crate::runtime::Builder::new_current_thread()
            .enable_time()
            .start_paused(true)
            .build()
            .unwrap();
        let handle = rt.handle();
        let _enter = handle.enter();
        let a = crate::task::spawn_blocking(|| {});
        let b = crate::task::spawn_blocking(|| {});
        rt.block_on(crate::time::timeout(Duration::from_millis(1), async move {
            a.await.expect("blocking task should finish");
            b.await.expect("blocking task should finish");
        }))
        .expect("timeout should not trigger");
    });
}
// Builds a multi-threaded runtime with exactly `num_threads` workers.
fn mk_runtime(num_threads: usize) -> Runtime {
    let mut builder = runtime::Builder::new_multi_thread();
    builder.worker_threads(num_threads);
    builder.build().unwrap()
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/tests/task_combinations.rs | tokio/src/runtime/tests/task_combinations.rs | use std::fmt;
use std::future::Future;
use std::panic;
use std::pin::Pin;
use std::task::{Context, Poll};
use crate::runtime::task::AbortHandle;
use crate::runtime::Builder;
use crate::sync::oneshot;
use crate::task::JoinHandle;
use futures::future::FutureExt;
// Enums for each option in the combinations being tested
// Which runtime flavor the combination runs on.
#[derive(Copy, Clone, Debug, PartialEq)]
enum CombiRuntime {
    CurrentThread,
    Multi1,
    Multi2,
}
// Whether the task is spawned through a `LocalSet`.
#[derive(Copy, Clone, Debug, PartialEq)]
enum CombiLocalSet {
    Yes,
    No,
}
// Where (if anywhere) the spawned task panics.
#[derive(Copy, Clone, Debug, PartialEq)]
enum CombiTask {
    PanicOnRun,
    PanicOnDrop,
    PanicOnRunAndDrop,
    NoPanic,
}
// Whether the task's output panics when dropped.
#[derive(Copy, Clone, Debug, PartialEq)]
enum CombiOutput {
    PanicOnDrop,
    NoPanic,
}
// Whether the JoinHandle is polled once before anything else happens.
#[derive(Copy, Clone, Debug, PartialEq)]
enum CombiJoinInterest {
    Polled,
    NotPolled,
}
// At which stage the JoinHandle (or AbortHandle) is dropped. The explicit
// discriminants order the stages so they can be compared numerically against
// `CombiAbort` stages in `test_combination`.
#[allow(clippy::enum_variant_names)] // we aren't using glob imports
#[derive(Copy, Clone, Debug, PartialEq)]
enum CombiJoinHandle {
    DropImmediately = 1,
    DropFirstPoll = 2,
    DropAfterNoConsume = 3,
    DropAfterConsume = 4,
}
// At which stage (if any) the task is aborted. Discriminants line up with
// `CombiJoinHandle` stages for the "handle dropped before abort" filter.
#[derive(Copy, Clone, Debug, PartialEq)]
enum CombiAbort {
    NotAborted = 0,
    AbortedImmediately = 1,
    AbortedFirstPoll = 2,
    AbortedAfterFinish = 3,
    AbortedAfterConsumeOutput = 4,
}
// Which handle type performs the abort.
#[derive(Copy, Clone, Debug, PartialEq)]
enum CombiAbortSource {
    JoinHandle,
    AbortHandle,
}
// Driver: enumerates the cartesian product of all combination dimensions and
// runs `test_combination` for each. Under Miri only the current-thread
// runtime is exercised to keep the run time tolerable. Skipped under
// panic=abort because many combinations intentionally panic and recover.
#[test]
#[cfg_attr(panic = "abort", ignore)]
fn test_combinations() {
    let mut rt = &[
        CombiRuntime::CurrentThread,
        CombiRuntime::Multi1,
        CombiRuntime::Multi2,
    ][..];
    if cfg!(miri) {
        rt = &[CombiRuntime::CurrentThread];
    }
    let ls = [CombiLocalSet::Yes, CombiLocalSet::No];
    let task = [
        CombiTask::NoPanic,
        CombiTask::PanicOnRun,
        CombiTask::PanicOnDrop,
        CombiTask::PanicOnRunAndDrop,
    ];
    let output = [CombiOutput::NoPanic, CombiOutput::PanicOnDrop];
    let ji = [CombiJoinInterest::Polled, CombiJoinInterest::NotPolled];
    let jh = [
        CombiJoinHandle::DropImmediately,
        CombiJoinHandle::DropFirstPoll,
        CombiJoinHandle::DropAfterNoConsume,
        CombiJoinHandle::DropAfterConsume,
    ];
    let abort = [
        CombiAbort::NotAborted,
        CombiAbort::AbortedImmediately,
        CombiAbort::AbortedFirstPoll,
        CombiAbort::AbortedAfterFinish,
        CombiAbort::AbortedAfterConsumeOutput,
    ];
    let ah = [
        None,
        Some(CombiJoinHandle::DropImmediately),
        Some(CombiJoinHandle::DropFirstPoll),
        Some(CombiJoinHandle::DropAfterNoConsume),
        Some(CombiJoinHandle::DropAfterConsume),
    ];
    for rt in rt.iter().copied() {
        for ls in ls.iter().copied() {
            for task in task.iter().copied() {
                for output in output.iter().copied() {
                    for ji in ji.iter().copied() {
                        for jh in jh.iter().copied() {
                            for abort in abort.iter().copied() {
                                // abort via join handle --- abort handles
                                // may be dropped at any point
                                for ah in ah.iter().copied() {
                                    test_combination(
                                        rt,
                                        ls,
                                        task,
                                        output,
                                        ji,
                                        jh,
                                        ah,
                                        abort,
                                        CombiAbortSource::JoinHandle,
                                    );
                                }
                                // if aborting via AbortHandle, it will
                                // never be dropped.
                                test_combination(
                                    rt,
                                    ls,
                                    task,
                                    output,
                                    ji,
                                    jh,
                                    None,
                                    abort,
                                    CombiAbortSource::AbortHandle,
                                );
                            }
                        }
                    }
                }
            }
        }
    }
}
fn is_debug<T: fmt::Debug>(_: &T) {}
/// Runs one combination of the task-lifecycle matrix: spawns an instrumented
/// task, then drives aborts, handle drops, and output consumption in the
/// order dictated by the combination parameters, asserting at each stage
/// that panics and output creation/destruction happen exactly when expected.
/// Impossible or double-panic combinations are filtered out up front.
#[allow(clippy::too_many_arguments)]
fn test_combination(
    rt: CombiRuntime,
    ls: CombiLocalSet,
    task: CombiTask,
    output: CombiOutput,
    ji: CombiJoinInterest,
    jh: CombiJoinHandle,
    ah: Option<CombiJoinHandle>,
    abort: CombiAbort,
    abort_src: CombiAbortSource,
) {
    // Filter combinations that cannot be driven (relies on the numeric
    // ordering of the CombiJoinHandle/CombiAbort discriminants).
    match (abort_src, ah) {
        (CombiAbortSource::JoinHandle, _) if (jh as usize) < (abort as usize) => {
            // join handle dropped prior to abort
            return;
        }
        (CombiAbortSource::AbortHandle, Some(_)) => {
            // abort handle dropped, we can't abort through the
            // abort handle
            return;
        }
        _ => {}
    }
    if (task == CombiTask::PanicOnDrop) && (output == CombiOutput::PanicOnDrop) {
        // this causes double panic
        return;
    }
    if (task == CombiTask::PanicOnRunAndDrop) && (abort != CombiAbort::AbortedImmediately) {
        // this causes double panic
        return;
    }
    is_debug(&rt);
    is_debug(&ls);
    is_debug(&task);
    is_debug(&output);
    is_debug(&ji);
    is_debug(&jh);
    is_debug(&ah);
    is_debug(&abort);
    is_debug(&abort_src);
    // A runtime optionally with a LocalSet
    struct Rt {
        rt: crate::runtime::Runtime,
        ls: Option<crate::task::LocalSet>,
    }
    impl Rt {
        fn new(rt: CombiRuntime, ls: CombiLocalSet) -> Self {
            let rt = match rt {
                CombiRuntime::CurrentThread => Builder::new_current_thread().build().unwrap(),
                CombiRuntime::Multi1 => Builder::new_multi_thread()
                    .worker_threads(1)
                    .build()
                    .unwrap(),
                CombiRuntime::Multi2 => Builder::new_multi_thread()
                    .worker_threads(2)
                    .build()
                    .unwrap(),
            };
            let ls = match ls {
                CombiLocalSet::Yes => Some(crate::task::LocalSet::new()),
                CombiLocalSet::No => None,
            };
            Self { rt, ls }
        }
        fn block_on<T>(&self, task: T) -> T::Output
        where
            T: Future,
        {
            match &self.ls {
                Some(ls) => ls.block_on(&self.rt, task),
                None => self.rt.block_on(task),
            }
        }
        fn spawn<T>(&self, task: T) -> JoinHandle<T::Output>
        where
            T: Future + Send + 'static,
            T::Output: Send + 'static,
        {
            match &self.ls {
                Some(ls) => ls.spawn_local(task),
                None => self.rt.spawn(task),
            }
        }
    }
    // The type used for the output of the future
    struct Output {
        panic_on_drop: bool,
        on_drop: Option<oneshot::Sender<()>>,
    }
    impl Output {
        fn disarm(&mut self) {
            self.panic_on_drop = false;
        }
    }
    impl Drop for Output {
        fn drop(&mut self) {
            let _ = self.on_drop.take().unwrap().send(());
            if self.panic_on_drop {
                panic!("Panicking in Output");
            }
        }
    }
    // A wrapper around the future that is spawned
    struct FutWrapper<F> {
        inner: F,
        on_drop: Option<oneshot::Sender<()>>,
        panic_on_drop: bool,
    }
    impl<F: Future> Future for FutWrapper<F> {
        type Output = F::Output;
        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<F::Output> {
            // SAFETY: structural pinning — `inner` is never moved out of `me`.
            unsafe {
                let me = Pin::into_inner_unchecked(self);
                let inner = Pin::new_unchecked(&mut me.inner);
                inner.poll(cx)
            }
        }
    }
    impl<F> Drop for FutWrapper<F> {
        fn drop(&mut self) {
            let _: Result<(), ()> = self.on_drop.take().unwrap().send(());
            if self.panic_on_drop {
                panic!("Panicking in FutWrapper");
            }
        }
    }
    // The channels passed to the task
    struct Signals {
        on_first_poll: Option<oneshot::Sender<()>>,
        wait_complete: Option<oneshot::Receiver<()>>,
        on_output_drop: Option<oneshot::Sender<()>>,
    }
    // The task we will spawn
    async fn my_task(mut signal: Signals, task: CombiTask, out: CombiOutput) -> Output {
        // Signal that we have been polled once
        let _ = signal.on_first_poll.take().unwrap().send(());
        // Wait for a signal, then complete the future
        let _ = signal.wait_complete.take().unwrap().await;
        // If the task gets past wait_complete without yielding, then aborts
        // may not be caught without this yield_now.
        crate::task::yield_now().await;
        if task == CombiTask::PanicOnRun || task == CombiTask::PanicOnRunAndDrop {
            panic!("Panicking in my_task on {:?}", std::thread::current().id());
        }
        Output {
            panic_on_drop: out == CombiOutput::PanicOnDrop,
            on_drop: signal.on_output_drop.take(),
        }
    }
    let rt = Rt::new(rt, ls);
    let (on_first_poll, wait_first_poll) = oneshot::channel();
    let (on_complete, wait_complete) = oneshot::channel();
    let (on_future_drop, wait_future_drop) = oneshot::channel();
    let (on_output_drop, wait_output_drop) = oneshot::channel();
    let signal = Signals {
        on_first_poll: Some(on_first_poll),
        wait_complete: Some(wait_complete),
        on_output_drop: Some(on_output_drop),
    };
    // === Spawn task ===
    let mut handle = Some(rt.spawn(FutWrapper {
        inner: my_task(signal, task, output),
        on_drop: Some(on_future_drop),
        panic_on_drop: task == CombiTask::PanicOnDrop || task == CombiTask::PanicOnRunAndDrop,
    }));
    // Keep track of whether the task has been killed with an abort
    let mut aborted = false;
    // If we want to poll the JoinHandle, do it now
    if ji == CombiJoinInterest::Polled {
        assert!(
            handle.as_mut().unwrap().now_or_never().is_none(),
            "Polling handle succeeded"
        );
    }
    // If we are either aborting the task via an abort handle, or dropping via
    // an abort handle, do that now.
    let mut abort_handle = if ah.is_some() || abort_src == CombiAbortSource::AbortHandle {
        handle.as_ref().map(JoinHandle::abort_handle)
    } else {
        None
    };
    let do_abort = |abort_handle: &mut Option<AbortHandle>,
                    join_handle: Option<&mut JoinHandle<_>>| {
        match abort_src {
            CombiAbortSource::AbortHandle => abort_handle.take().unwrap().abort(),
            CombiAbortSource::JoinHandle => join_handle.unwrap().abort(),
        }
    };
    if abort == CombiAbort::AbortedImmediately {
        do_abort(&mut abort_handle, handle.as_mut());
        aborted = true;
    }
    if jh == CombiJoinHandle::DropImmediately {
        drop(handle.take().unwrap());
    }
    // === Wait for first poll ===
    let got_polled = rt.block_on(wait_first_poll).is_ok();
    if !got_polled {
        // it's possible that we are aborted but still got polled
        assert!(
            aborted,
            "Task completed without ever being polled but was not aborted."
        );
    }
    if abort == CombiAbort::AbortedFirstPoll {
        do_abort(&mut abort_handle, handle.as_mut());
        aborted = true;
    }
    if jh == CombiJoinHandle::DropFirstPoll {
        drop(handle.take().unwrap());
    }
    if ah == Some(CombiJoinHandle::DropFirstPoll) {
        drop(abort_handle.take().unwrap());
    }
    // Signal the future that it can return now
    let _ = on_complete.send(());
    // === Wait for future to be dropped ===
    assert!(
        rt.block_on(wait_future_drop).is_ok(),
        "The future should always be dropped."
    );
    if abort == CombiAbort::AbortedAfterFinish {
        // Don't set aborted to true here as the task already finished
        do_abort(&mut abort_handle, handle.as_mut());
    }
    if jh == CombiJoinHandle::DropAfterNoConsume {
        if ah == Some(CombiJoinHandle::DropAfterNoConsume) {
            drop(handle.take().unwrap());
            // The runtime will usually have dropped every ref-count at this point,
            // in which case dropping the AbortHandle drops the output.
            //
            // (But it might race and still hold a ref-count)
            let panic = panic::catch_unwind(panic::AssertUnwindSafe(|| {
                drop(abort_handle.take().unwrap());
            }));
            if panic.is_err() {
                assert!(
                    (output == CombiOutput::PanicOnDrop)
                        && (!matches!(task, CombiTask::PanicOnRun | CombiTask::PanicOnRunAndDrop))
                        && !aborted,
                    "Dropping AbortHandle shouldn't panic here"
                );
            }
        } else {
            // The runtime will usually have dropped every ref-count at this point,
            // in which case dropping the JoinHandle drops the output.
            //
            // (But it might race and still hold a ref-count)
            let panic = panic::catch_unwind(panic::AssertUnwindSafe(|| {
                drop(handle.take().unwrap());
            }));
            if panic.is_err() {
                assert!(
                    (output == CombiOutput::PanicOnDrop)
                        && (!matches!(task, CombiTask::PanicOnRun | CombiTask::PanicOnRunAndDrop))
                        && !aborted,
                    "Dropping JoinHandle shouldn't panic here"
                );
            }
        }
    }
    // Check whether we drop after consuming the output
    if jh == CombiJoinHandle::DropAfterConsume {
        // Using as_mut() to not immediately drop the handle
        let result = rt.block_on(handle.as_mut().unwrap());
        match result {
            Ok(mut output) => {
                // Don't panic here.
                output.disarm();
                assert!(!aborted, "Task was aborted but returned output");
            }
            Err(err) if err.is_cancelled() => assert!(aborted, "Cancelled output but not aborted"),
            Err(err) if err.is_panic() => {
                assert!(
                    (task == CombiTask::PanicOnRun)
                        || (task == CombiTask::PanicOnDrop)
                        || (task == CombiTask::PanicOnRunAndDrop)
                        || (output == CombiOutput::PanicOnDrop),
                    "Panic but nothing should panic"
                );
            }
            _ => unreachable!(),
        }
        let mut handle = handle.take().unwrap();
        if abort == CombiAbort::AbortedAfterConsumeOutput {
            do_abort(&mut abort_handle, Some(&mut handle));
        }
        drop(handle);
        if ah == Some(CombiJoinHandle::DropAfterConsume) {
            drop(abort_handle.take());
        }
    }
    // The output should have been dropped now. Check whether the output
    // object was created at all.
    let output_created = rt.block_on(wait_output_drop).is_ok();
    assert_eq!(
        output_created,
        (!matches!(task, CombiTask::PanicOnRun | CombiTask::PanicOnRunAndDrop)) && !aborted,
        "Creation of output object"
    );
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/tests/queue.rs | tokio/src/runtime/tests/queue.rs | use crate::runtime::scheduler::multi_thread::{queue, Stats};
use std::cell::RefCell;
use std::thread;
use std::time::Duration;
#[allow(unused)]
// Submits `$stats` to a fresh `WorkerMetrics` and asserts that the named
// metric `$field` equals `$v`. Expands to an empty block on targets without
// 64-bit atomics, where the metric counters are unavailable.
macro_rules! assert_metrics {
    ($stats:ident, $field:ident == $v:expr) => {
        #[cfg(target_has_atomic = "64")]
        {
            use crate::runtime::WorkerMetrics;
            use std::sync::atomic::Ordering::Relaxed;
            let worker = WorkerMetrics::new();
            $stats.submit(&worker);
            let expect = $v;
            let actual = worker.$field.load(Relaxed);
            assert!(actual == expect, "expect = {}; actual = {}", expect, actual)
        }
    };
}
// Fresh per-worker stats backed by a throwaway `WorkerMetrics`.
fn new_stats() -> Stats {
    use crate::runtime::WorkerMetrics;
    Stats::new(&WorkerMetrics::new())
}
// Pushing 256 tasks one at a time must all fit in the local queue: nothing
// may spill into the `inject` overflow and the overflow counter stays 0.
#[test]
fn fits_256_one_at_a_time() {
    let (_, mut local) = queue::local();
    let inject = RefCell::new(vec![]);
    let mut stats = new_stats();
    for _ in 0..256 {
        let (task, _) = super::unowned(async {});
        local.push_back_or_overflow(task, &inject, &mut stats);
    }
    cfg_unstable_metrics! {
        assert_metrics!(stats, overflow_count == 0);
    }
    assert!(inject.borrow_mut().pop().is_none());
    // Drain so the queued tasks are released before the test ends.
    while local.pop().is_some() {}
}
// A single batched push of 256 tasks must fit entirely in the local queue;
// popping must yield all 256 back.
#[test]
fn fits_256_all_at_once() {
    let (_, mut local) = queue::local();
    let mut batch: Vec<_> = (0..256).map(|_| super::unowned(async {}).0).collect();
    local.push_back(batch.drain(..));
    let mut popped = 0;
    loop {
        if local.pop().is_none() {
            break;
        }
        popped += 1;
    }
    assert_eq!(popped, 256);
}
// Batched pushes in uneven chunks (10 + 100 + 46 + 100 = 256) must also fit
// in the local queue, and popping must return all 256 tasks.
#[test]
fn fits_256_all_in_chunks() {
    let (_, mut local) = queue::local();
    let mut tasks = (0..256)
        .map(|_| super::unowned(async {}).0)
        .collect::<Vec<_>>();
    local.push_back(tasks.drain(..10));
    local.push_back(tasks.drain(..100));
    local.push_back(tasks.drain(..46));
    local.push_back(tasks.drain(..100));
    let mut i = 0;
    while local.pop().is_some() {
        i += 1;
    }
    assert_eq!(i, 256);
}
// Pushing a 257th task must overflow into `inject` and bump the overflow
// counter; all 257 tasks remain reachable between the two queues.
#[test]
fn overflow() {
    let (_, mut local) = queue::local();
    let inject = RefCell::new(vec![]);
    let mut stats = new_stats();
    for _ in 0..257 {
        let (task, _) = super::unowned(async {});
        local.push_back_or_overflow(task, &inject, &mut stats);
    }
    cfg_unstable_metrics! {
        assert_metrics!(stats, overflow_count == 1);
    }
    // Account for every task, spilled or local.
    let mut n = 0;
    n += inject.borrow_mut().drain(..).count();
    while local.pop().is_some() {
        n += 1;
    }
    assert_eq!(n, 257);
}
// Stealing from a victim holding four tasks moves two of them: one is handed
// back directly by `steal_into` and one lands in the thief's queue. The two
// remaining tasks stay with the victim.
#[test]
fn steal_batch() {
    let mut stats = new_stats();
    let (steal1, mut local1) = queue::local();
    let (_, mut local2) = queue::local();
    let inject = RefCell::new(vec![]);
    for _ in 0..4 {
        let (task, _) = super::unowned(async {});
        local1.push_back_or_overflow(task, &inject, &mut stats);
    }
    assert!(steal1.steal_into(&mut local2, &mut stats).is_some());
    cfg_unstable_metrics! {
        assert_metrics!(stats, steal_count == 2);
    }
    // Exactly one stolen task was queued on the thief.
    assert!(local2.pop().is_some());
    assert!(local2.pop().is_none());
    // Exactly two tasks remain on the victim.
    assert!(local1.pop().is_some());
    assert!(local1.pop().is_some());
    assert!(local1.pop().is_none());
}
// Picks a smaller workload size when compiled for Miri, whose interpreter
// makes the full-size stress loops impractically slow.
const fn normal_or_miri(normal: usize, miri: usize) -> usize {
    match cfg!(miri) {
        true => miri,
        false => normal,
    }
}
// Producer/stealer stress: a producer pushes and pops locally while another
// thread steals repeatedly. Every pushed task must be accounted for exactly
// once across local pops, overflow spills, and the stealer's haul.
#[test]
fn stress1() {
    const NUM_ITER: usize = 5;
    const NUM_STEAL: usize = normal_or_miri(1_000, 10);
    const NUM_LOCAL: usize = normal_or_miri(1_000, 10);
    const NUM_PUSH: usize = normal_or_miri(500, 10);
    const NUM_POP: usize = normal_or_miri(250, 10);
    let mut stats = new_stats();
    for _ in 0..NUM_ITER {
        let (steal, mut local) = queue::local();
        let inject = RefCell::new(vec![]);
        let th = thread::spawn(move || {
            let mut stats = new_stats();
            let (_, mut local) = queue::local();
            let mut n = 0;
            for _ in 0..NUM_STEAL {
                if steal.steal_into(&mut local, &mut stats).is_some() {
                    n += 1;
                }
                while local.pop().is_some() {
                    n += 1;
                }
                thread::yield_now();
            }
            cfg_unstable_metrics! {
                assert_metrics!(stats, steal_count == n as _);
            }
            n
        });
        let mut n = 0;
        for _ in 0..NUM_LOCAL {
            for _ in 0..NUM_PUSH {
                let (task, _) = super::unowned(async {});
                local.push_back_or_overflow(task, &inject, &mut stats);
            }
            for _ in 0..NUM_POP {
                if local.pop().is_some() {
                    n += 1;
                } else {
                    break;
                }
            }
        }
        // Everything left over: overflow spills plus the stealer's total.
        n += inject.borrow_mut().drain(..).count();
        n += th.join().unwrap();
        assert_eq!(n, NUM_LOCAL * NUM_PUSH);
    }
}
// Second stress shape: one long push stream with occasional local pops while
// a slow stealer (sleeping between attempts) drains concurrently. Total pops
// across all sources must equal the number of tasks pushed.
#[test]
fn stress2() {
    const NUM_ITER: usize = 1;
    const NUM_TASKS: usize = normal_or_miri(1_000_000, 50);
    const NUM_STEAL: usize = normal_or_miri(1_000, 10);
    let mut stats = new_stats();
    for _ in 0..NUM_ITER {
        let (steal, mut local) = queue::local();
        let inject = RefCell::new(vec![]);
        let th = thread::spawn(move || {
            let mut stats = new_stats();
            let (_, mut local) = queue::local();
            let mut n = 0;
            for _ in 0..NUM_STEAL {
                if steal.steal_into(&mut local, &mut stats).is_some() {
                    n += 1;
                }
                while local.pop().is_some() {
                    n += 1;
                }
                thread::sleep(Duration::from_micros(10));
            }
            n
        });
        let mut num_pop = 0;
        for i in 0..NUM_TASKS {
            let (task, _) = super::unowned(async {});
            local.push_back_or_overflow(task, &inject, &mut stats);
            if i % 128 == 0 && local.pop().is_some() {
                num_pop += 1;
            }
            // Drain any overflow spills as we go.
            num_pop += inject.borrow_mut().drain(..).count();
        }
        num_pop += th.join().unwrap();
        while local.pop().is_some() {
            num_pop += 1;
        }
        num_pop += inject.borrow_mut().drain(..).count();
        assert_eq!(num_pop, NUM_TASKS);
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/tests/loom_local.rs | tokio/src/runtime/tests/loom_local.rs | use crate::runtime::tests::loom_oneshot as oneshot;
use crate::runtime::Builder;
use crate::task::LocalSet;
use std::task::Poll;
/// Waking a runtime will attempt to push a task into a queue of notifications
/// in the runtime, however the tasks in such a queue usually have a reference
/// to the runtime itself. This means that if they are not properly removed at
/// runtime shutdown, this will cause a memory leak.
///
/// This test verifies that waking something during shutdown of a `LocalSet` does
/// not result in tasks lingering in the queue once shutdown is complete. This
/// is verified using loom's leak finder.
// Loom model (see module comment above): a task hands its waker to another
// thread, then the LocalSet is dropped while that thread wakes it. Loom's
// leak detector fails the model if the wake leaves a task stuck in the
// notification queue after shutdown.
#[test]
fn wake_during_shutdown() {
    loom::model(|| {
        let rt = Builder::new_current_thread().build().unwrap();
        let ls = LocalSet::new();
        let (send, recv) = oneshot::channel();
        ls.spawn_local(async move {
            let mut send = Some(send);
            // Ship the waker out on first poll, then stay Pending forever.
            let () = std::future::poll_fn(|cx| {
                if let Some(send) = send.take() {
                    send.send(cx.waker().clone());
                }
                Poll::Pending
            })
            .await;
        });
        let handle = loom::thread::spawn(move || {
            let waker = recv.recv();
            // May race with the LocalSet being dropped below.
            waker.wake();
        });
        ls.block_on(&rt, crate::task::yield_now());
        drop(ls);
        handle.join().unwrap();
        drop(rt);
    });
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/tests/loom_current_thread/yield_now.rs | tokio/src/runtime/tests/loom_current_thread/yield_now.rs | use crate::runtime::park;
use crate::runtime::{self, Runtime};
// Loom model: after `yield_now`, the current-thread scheduler must park once
// before polling the task again (yield must not busy-spin). The park-count
// check only applies if the task resumed on the same thread.
#[test]
fn yield_calls_park_before_scheduling_again() {
    // Don't need to check all permutations
    let mut loom = loom::model::Builder::default();
    loom.max_permutations = Some(1);
    loom.check(|| {
        let rt = mk_runtime();
        let jh = rt.spawn(async {
            let tid = loom::thread::current().id();
            let park_count = park::current_thread_park_count();
            crate::task::yield_now().await;
            if tid == loom::thread::current().id() {
                let new_park_count = park::current_thread_park_count();
                assert_eq!(park_count + 1, new_park_count);
            }
        });
        rt.block_on(jh).unwrap();
    });
}
// Builds the current-thread runtime used by the model above.
fn mk_runtime() -> Runtime {
    let mut builder = runtime::Builder::new_current_thread();
    builder.build().unwrap()
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/tests/loom_multi_thread/shutdown.rs | tokio/src/runtime/tests/loom_multi_thread/shutdown.rs | use crate::runtime::{Builder, Handle};
// Loom model: join handles resolve as cancelled both for a task that was
// pending when the runtime shut down (jh1) and for a spawn attempted after
// shutdown (jh2).
#[test]
fn join_handle_cancel_on_shutdown() {
    let mut builder = loom::model::Builder::new();
    // Bound preemptions to keep the state space tractable.
    builder.preemption_bound = Some(2);
    builder.check(|| {
        use futures::future::FutureExt;
        let rt = Builder::new_multi_thread()
            .worker_threads(2)
            .build()
            .unwrap();
        let handle = rt.block_on(async move { Handle::current() });
        let jh1 = handle.spawn(futures::future::pending::<()>());
        drop(rt);
        let jh2 = handle.spawn(futures::future::pending::<()>());
        let err1 = jh1.now_or_never().unwrap().unwrap_err();
        let err2 = jh2.now_or_never().unwrap().unwrap_err();
        assert!(err1.is_cancelled());
        assert!(err2.is_cancelled());
    });
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/tests/loom_multi_thread/yield_now.rs | tokio/src/runtime/tests/loom_multi_thread/yield_now.rs | use crate::runtime::park;
use crate::runtime::tests::loom_oneshot as oneshot;
use crate::runtime::{self, Runtime};
// Loom model, multi-threaded flavor: after `yield_now` a worker must park
// once before re-polling the task. The check is skipped when the task was
// migrated to a different worker, since park counts are per-thread.
#[test]
fn yield_calls_park_before_scheduling_again() {
    // Don't need to check all permutations
    let mut loom = loom::model::Builder::default();
    loom.max_permutations = Some(1);
    loom.check(|| {
        let rt = mk_runtime(2);
        let (tx, rx) = oneshot::channel::<()>();
        rt.spawn(async {
            let tid = loom::thread::current().id();
            let park_count = park::current_thread_park_count();
            crate::task::yield_now().await;
            if tid == loom::thread::current().id() {
                let new_park_count = park::current_thread_park_count();
                assert_eq!(park_count + 1, new_park_count);
            }
            tx.send(());
        });
        // Block the model until the spawned task has finished its checks.
        rx.recv();
    });
}
// Builds a multi-threaded runtime sized for the loom model above.
fn mk_runtime(num_threads: usize) -> Runtime {
    let mut builder = runtime::Builder::new_multi_thread();
    builder.worker_threads(num_threads);
    builder.build().unwrap()
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/tests/loom_multi_thread/queue.rs | tokio/src/runtime/tests/loom_multi_thread/queue.rs | use crate::runtime::scheduler::multi_thread::{queue, Stats};
use crate::runtime::tests::{unowned, NoopSchedule};
use loom::thread;
use std::cell::RefCell;
// Fresh per-worker stats backed by a throwaway `WorkerMetrics`.
fn new_stats() -> Stats {
    Stats::new(&crate::runtime::WorkerMetrics::new())
}
// Loom model: a producer pushes/pops on its local queue while another thread
// repeatedly steals. Across local pops, overflow spills, and steals, exactly
// the six pushed tasks must be consumed.
#[test]
fn basic() {
    loom::model(|| {
        let (steal, mut local) = queue::local();
        let inject = RefCell::new(vec![]);
        let mut stats = new_stats();
        let th = thread::spawn(move || {
            let mut stats = new_stats();
            let (_, mut local) = queue::local();
            let mut n = 0;
            for _ in 0..3 {
                if steal.steal_into(&mut local, &mut stats).is_some() {
                    n += 1;
                }
                while local.pop().is_some() {
                    n += 1;
                }
            }
            n
        });
        let mut n = 0;
        for _ in 0..2 {
            for _ in 0..2 {
                let (task, _) = unowned(async {});
                local.push_back_or_overflow(task, &inject, &mut stats);
            }
            if local.pop().is_some() {
                n += 1;
            }
            // Push another task
            let (task, _) = unowned(async {});
            local.push_back_or_overflow(task, &inject, &mut stats);
            while local.pop().is_some() {
                n += 1;
            }
        }
        // Account for spills and the stealer's haul.
        n += inject.borrow_mut().drain(..).count();
        n += th.join().unwrap();
        assert_eq!(6, n);
    });
}
// Loom model: stealing races with pushes that may overflow into `inject`.
// All seven pushed tasks must be consumed exactly once in total.
#[test]
fn steal_overflow() {
    loom::model(|| {
        let (steal, mut local) = queue::local();
        let inject = RefCell::new(vec![]);
        let mut stats = new_stats();
        let th = thread::spawn(move || {
            let mut stats = new_stats();
            let (_, mut local) = queue::local();
            let mut n = 0;
            if steal.steal_into(&mut local, &mut stats).is_some() {
                n += 1;
            }
            while local.pop().is_some() {
                n += 1;
            }
            n
        });
        let mut n = 0;
        // push a task, pop a task
        let (task, _) = unowned(async {});
        local.push_back_or_overflow(task, &inject, &mut stats);
        if local.pop().is_some() {
            n += 1;
        }
        for _ in 0..6 {
            let (task, _) = unowned(async {});
            local.push_back_or_overflow(task, &inject, &mut stats);
        }
        n += th.join().unwrap();
        while local.pop().is_some() {
            n += 1;
        }
        n += inject.borrow_mut().drain(..).count();
        assert_eq!(7, n);
    });
}
// Loom model: two stealer threads race against each other and the owner.
// Every one of the NUM_TASKS pushed tasks must be consumed exactly once.
#[test]
fn multi_stealer() {
    const NUM_TASKS: usize = 5;
    // Steals once, then drains what landed locally; returns tasks consumed
    // (the task returned directly by `steal_into` counts as the first).
    fn steal_tasks(steal: queue::Steal<NoopSchedule>) -> usize {
        let mut stats = new_stats();
        let (_, mut local) = queue::local();
        if steal.steal_into(&mut local, &mut stats).is_none() {
            return 0;
        }
        let mut n = 1;
        while local.pop().is_some() {
            n += 1;
        }
        n
    }
    loom::model(|| {
        let (steal, mut local) = queue::local();
        let inject = RefCell::new(vec![]);
        let mut stats = new_stats();
        // Push work
        for _ in 0..NUM_TASKS {
            let (task, _) = unowned(async {});
            local.push_back_or_overflow(task, &inject, &mut stats);
        }
        let th1 = {
            let steal = steal.clone();
            thread::spawn(move || steal_tasks(steal))
        };
        let th2 = thread::spawn(move || steal_tasks(steal));
        let mut n = 0;
        while local.pop().is_some() {
            n += 1;
        }
        n += inject.borrow_mut().drain(..).count();
        n += th1.join().unwrap();
        n += th2.join().unwrap();
        assert_eq!(n, NUM_TASKS);
    });
}
// Loom model: while another thread steals from queue 1, the owner of queue 1
// simultaneously steals from queue 2 — exercising the steal-while-being-
// stolen-from interleavings. Only absence of deadlock/corruption is checked.
#[test]
fn chained_steal() {
    loom::model(|| {
        let mut stats = new_stats();
        let (s1, mut l1) = queue::local();
        let (s2, mut l2) = queue::local();
        let inject = RefCell::new(vec![]);
        // Load up some tasks
        for _ in 0..4 {
            let (task, _) = unowned(async {});
            l1.push_back_or_overflow(task, &inject, &mut stats);
            let (task, _) = unowned(async {});
            l2.push_back_or_overflow(task, &inject, &mut stats);
        }
        // Spawn a task to steal from **our** queue
        let th = thread::spawn(move || {
            let mut stats = new_stats();
            let (_, mut local) = queue::local();
            s1.steal_into(&mut local, &mut stats);
            while local.pop().is_some() {}
        });
        // Drain our tasks, then attempt to steal
        while l1.pop().is_some() {}
        s2.steal_into(&mut l1, &mut stats);
        th.join().unwrap();
        // Drain everything so no tasks leak out of the model.
        while l1.pop().is_some() {}
        while l2.pop().is_some() {}
    });
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/metrics/io.rs | tokio/src/runtime/metrics/io.rs | #![cfg_attr(not(feature = "net"), allow(dead_code))]
use crate::util::metric_atomics::MetricAtomicU64;
use std::sync::atomic::Ordering::Relaxed;
/// Counters maintained by the I/O driver. Each counter is monotonically
/// increasing — deregistrations are tracked as their own counter rather than
/// by decrementing the registration count.
#[derive(Default)]
pub(crate) struct IoDriverMetrics {
    // Total file descriptors ever registered with the driver.
    pub(super) fd_registered_count: MetricAtomicU64,
    // Total file descriptors ever deregistered from the driver.
    pub(super) fd_deregistered_count: MetricAtomicU64,
    // Total readiness events observed.
    pub(super) ready_count: MetricAtomicU64,
}
impl IoDriverMetrics {
    /// Records one fd registration.
    pub(crate) fn incr_fd_count(&self) {
        self.fd_registered_count.add(1, Relaxed);
    }
    /// Records one fd deregistration (increments the deregistered counter).
    pub(crate) fn dec_fd_count(&self) {
        self.fd_deregistered_count.add(1, Relaxed);
    }
    /// Records `amt` readiness events in one step.
    pub(crate) fn incr_ready_count_by(&self, amt: u64) {
        self.ready_count.add(amt, Relaxed);
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/metrics/batch.rs | tokio/src/runtime/metrics/batch.rs | use crate::runtime::metrics::WorkerMetrics;
cfg_unstable_metrics! {
use crate::runtime::metrics::HistogramBatch;
}
use std::sync::atomic::Ordering::Relaxed;
use std::time::{Duration, Instant};
/// Worker-local batch of metric counters. Counters are accumulated here
/// without synchronization and periodically flushed into the shared
/// `WorkerMetrics` by `MetricsBatch::submit`.
pub(crate) struct MetricsBatch {
    /// The total busy duration in nanoseconds.
    busy_duration_total: u64,
    /// Instant at which work last resumed (continued after park).
    /// `None` when `Instant` is unavailable on the target (see `now()`).
    processing_scheduled_tasks_started_at: Option<Instant>,
    /// Number of times the worker parked.
    park_count: u64,
    /// Number of times the worker parked and unparked.
    park_unpark_count: u64,
    #[cfg(tokio_unstable)]
    /// Number of times the worker woke w/o doing work.
    noop_count: u64,
    #[cfg(tokio_unstable)]
    /// Number of tasks stolen.
    steal_count: u64,
    #[cfg(tokio_unstable)]
    /// Number of times tasks were stolen.
    steal_operations: u64,
    #[cfg(tokio_unstable)]
    /// Number of tasks that were polled by the worker.
    poll_count: u64,
    #[cfg(tokio_unstable)]
    /// Number of tasks polled when the worker entered park. This is used to
    /// track the noop count.
    poll_count_on_last_park: u64,
    #[cfg(tokio_unstable)]
    /// Number of tasks that were scheduled locally on this worker.
    local_schedule_count: u64,
    #[cfg(tokio_unstable)]
    /// Number of tasks moved to the global queue to make space in the local
    /// queue
    overflow_count: u64,
    #[cfg(tokio_unstable)]
    /// If `Some`, tracks poll times in nanoseconds
    poll_timer: Option<PollTimer>,
}
cfg_unstable_metrics! {
    // Only constructed when both `Instant` is available and the runtime's
    // poll-time histogram was configured (see `MetricsBatch::new_unstable`).
    struct PollTimer {
        /// Histogram of poll counts within each band.
        poll_counts: HistogramBatch,
        /// Instant when the most recent task started polling.
        poll_started_at: Instant,
    }
}
impl MetricsBatch {
    // Create a zeroed batch. `now()` is `None` on targets without a usable
    // `Instant` (wasm32-unknown-unknown), which disables time-based metrics.
    pub(crate) fn new(worker_metrics: &WorkerMetrics) -> MetricsBatch {
        let maybe_now = now();
        Self::new_unstable(worker_metrics, maybe_now)
    }
    cfg_metrics_variant! {
        stable: {
            #[inline(always)]
            fn new_unstable(_worker_metrics: &WorkerMetrics, maybe_now: Option<Instant>) -> MetricsBatch {
                MetricsBatch {
                    busy_duration_total: 0,
                    processing_scheduled_tasks_started_at: maybe_now,
                    park_count: 0,
                    park_unpark_count: 0,
                }
            }
        },
        unstable: {
            #[inline(always)]
            fn new_unstable(worker_metrics: &WorkerMetrics, maybe_now: Option<Instant>) -> MetricsBatch {
                // Poll timing is tracked only when a time source exists AND
                // the worker was configured with a poll-count histogram.
                let poll_timer = maybe_now.and_then(|now| {
                    worker_metrics
                        .poll_count_histogram
                        .as_ref()
                        .map(|worker_poll_counts| PollTimer {
                            poll_counts: HistogramBatch::from_histogram(worker_poll_counts),
                            poll_started_at: now,
                        })
                });
                MetricsBatch {
                    park_count: 0,
                    park_unpark_count: 0,
                    noop_count: 0,
                    steal_count: 0,
                    steal_operations: 0,
                    poll_count: 0,
                    poll_count_on_last_park: 0,
                    local_schedule_count: 0,
                    overflow_count: 0,
                    busy_duration_total: 0,
                    processing_scheduled_tasks_started_at: maybe_now,
                    poll_timer,
                }
            }
        }
    }
    // Flush the batched counters into the shared `WorkerMetrics`. The busy
    // duration is stored in both stable and unstable builds.
    pub(crate) fn submit(&mut self, worker: &WorkerMetrics, mean_poll_time: u64) {
        worker
            .busy_duration_total
            .store(self.busy_duration_total, Relaxed);
        self.submit_unstable(worker, mean_poll_time);
    }
    cfg_metrics_variant! {
        stable: {
            #[inline(always)]
            fn submit_unstable(&mut self, worker: &WorkerMetrics, _mean_poll_time: u64) {
                worker.park_count.store(self.park_count, Relaxed);
                worker
                    .park_unpark_count
                    .store(self.park_unpark_count, Relaxed);
            }
        },
        unstable: {
            #[inline(always)]
            fn submit_unstable(&mut self, worker: &WorkerMetrics, mean_poll_time: u64) {
                worker.mean_poll_time.store(mean_poll_time, Relaxed);
                worker.park_count.store(self.park_count, Relaxed);
                worker
                    .park_unpark_count
                    .store(self.park_unpark_count, Relaxed);
                worker.noop_count.store(self.noop_count, Relaxed);
                worker.steal_count.store(self.steal_count, Relaxed);
                worker
                    .steal_operations
                    .store(self.steal_operations, Relaxed);
                worker.poll_count.store(self.poll_count, Relaxed);
                worker
                    .local_schedule_count
                    .store(self.local_schedule_count, Relaxed);
                worker.overflow_count.store(self.overflow_count, Relaxed);
                // `poll_timer` is only `Some` when the histogram exists (see
                // `new_unstable`), so the `unwrap` here cannot fail.
                if let Some(poll_timer) = &self.poll_timer {
                    let dst = worker.poll_count_histogram.as_ref().unwrap();
                    poll_timer.poll_counts.submit(dst);
                }
            }
        }
    }
    cfg_metrics_variant! {
        stable: {
            /// The worker is about to park.
            pub(crate) fn about_to_park(&mut self) {
                self.park_count += 1;
                self.park_unpark_count += 1;
            }
        },
        unstable: {
            /// The worker is about to park.
            pub(crate) fn about_to_park(&mut self) {
                {
                    self.park_count += 1;
                    self.park_unpark_count += 1;
                    // No polls since the previous park means this wakeup did
                    // no useful work; count it as a noop.
                    if self.poll_count_on_last_park == self.poll_count {
                        self.noop_count += 1;
                    } else {
                        self.poll_count_on_last_park = self.poll_count;
                    }
                }
            }
        }
    }
    /// The worker was unparked.
    pub(crate) fn unparked(&mut self) {
        self.park_unpark_count += 1;
    }
    /// Start processing a batch of tasks
    pub(crate) fn start_processing_scheduled_tasks(&mut self) {
        self.processing_scheduled_tasks_started_at = now();
    }
    /// Stop processing a batch of tasks
    pub(crate) fn end_processing_scheduled_tasks(&mut self) {
        // `None` when `Instant` is unsupported on the target; busy-time
        // accounting is skipped there.
        if let Some(processing_scheduled_tasks_started_at) =
            self.processing_scheduled_tasks_started_at
        {
            let busy_duration = processing_scheduled_tasks_started_at.elapsed();
            self.busy_duration_total += duration_as_u64(busy_duration);
        }
    }
    cfg_metrics_variant! {
        stable: {
            /// Start polling an individual task
            pub(crate) fn start_poll(&mut self) {}
        },
        unstable: {
            /// Start polling an individual task
            pub(crate) fn start_poll(&mut self) {
                self.poll_count += 1;
                if let Some(poll_timer) = &mut self.poll_timer {
                    poll_timer.poll_started_at = Instant::now();
                }
            }
        }
    }
    cfg_metrics_variant! {
        stable: {
            /// Stop polling an individual task
            pub(crate) fn end_poll(&mut self) {}
        },
        unstable: {
            /// Stop polling an individual task
            pub(crate) fn end_poll(&mut self) {
                if let Some(poll_timer) = &mut self.poll_timer {
                    let elapsed = duration_as_u64(poll_timer.poll_started_at.elapsed());
                    poll_timer.poll_counts.measure(elapsed, 1);
                }
            }
        }
    }
    cfg_metrics_variant! {
        stable: {
            pub(crate) fn inc_local_schedule_count(&mut self) {}
        },
        unstable: {
            pub(crate) fn inc_local_schedule_count(&mut self) {
                self.local_schedule_count += 1;
            }
        }
    }
}
// Steal and overflow counters only exist on the multi-threaded runtime;
// stable (non-`tokio_unstable`) builds compile these methods to no-ops.
cfg_rt_multi_thread! {
    impl MetricsBatch {
        cfg_metrics_variant! {
            stable: {
                pub(crate) fn incr_steal_count(&mut self, _by: u16) {}
            },
            unstable: {
                pub(crate) fn incr_steal_count(&mut self, by: u16) {
                    self.steal_count += by as u64;
                }
            }
        }
        cfg_metrics_variant! {
            stable: {
                pub(crate) fn incr_steal_operations(&mut self) {}
            },
            unstable: {
                pub(crate) fn incr_steal_operations(&mut self) {
                    self.steal_operations += 1;
                }
            }
        }
        cfg_metrics_variant! {
            stable: {
                pub(crate) fn incr_overflow_count(&mut self) {}
            },
            unstable: {
                pub(crate) fn incr_overflow_count(&mut self) {
                    self.overflow_count += 1;
                }
            }
        }
    }
}
/// Converts a `Duration` to whole nanoseconds, saturating at `u64::MAX`
/// when the 128-bit nanosecond count does not fit in 64 bits.
pub(crate) fn duration_as_u64(dur: Duration) -> u64 {
    match u64::try_from(dur.as_nanos()) {
        Ok(nanos) => nanos,
        Err(_) => u64::MAX,
    }
}
/// Returns the current `Instant`, or `None` on `wasm32-unknown-unknown`
/// where `Instant` is unsupported and time-based metrics must be gated off.
/// <https://github.com/tokio-rs/tokio/issues/7319>
fn now() -> Option<Instant> {
    let supported = !cfg!(all(
        target_arch = "wasm32",
        target_os = "unknown",
        target_vendor = "unknown"
    ));
    supported.then(Instant::now)
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/metrics/worker.rs | tokio/src/runtime/metrics/worker.rs | use crate::runtime::Config;
use crate::util::metric_atomics::{MetricAtomicU64, MetricAtomicUsize};
use std::sync::atomic::Ordering::Relaxed;
use std::sync::Mutex;
use std::thread::ThreadId;
cfg_unstable_metrics! {
use crate::runtime::metrics::Histogram;
}
/// Retrieve runtime worker metrics.
///
/// **Note**: This is an [unstable API][unstable]. The public API of this type
/// may break in 1.x releases. See [the documentation on unstable
/// features][unstable] for details.
///
/// [unstable]: crate#unstable-features
#[derive(Debug, Default)]
// 128-byte alignment keeps each worker's metrics on its own cache line(s),
// presumably to avoid false sharing between workers -- NOTE(review): confirm.
#[repr(align(128))]
pub(crate) struct WorkerMetrics {
    /// Amount of time the worker spent doing work vs. parking.
    pub(crate) busy_duration_total: MetricAtomicU64,
    /// Number of tasks currently in the local queue. Used only by the
    /// current-thread scheduler.
    pub(crate) queue_depth: MetricAtomicUsize,
    /// Thread id of worker thread.
    thread_id: Mutex<Option<ThreadId>>,
    /// Number of times the worker parked.
    pub(crate) park_count: MetricAtomicU64,
    /// Number of times the worker parked and unparked.
    pub(crate) park_unpark_count: MetricAtomicU64,
    #[cfg(tokio_unstable)]
    /// Number of times the worker woke then parked again without doing work.
    pub(crate) noop_count: MetricAtomicU64,
    #[cfg(tokio_unstable)]
    /// Number of tasks the worker stole.
    pub(crate) steal_count: MetricAtomicU64,
    #[cfg(tokio_unstable)]
    /// Number of times the worker stole
    pub(crate) steal_operations: MetricAtomicU64,
    #[cfg(tokio_unstable)]
    /// Number of tasks the worker polled.
    pub(crate) poll_count: MetricAtomicU64,
    #[cfg(tokio_unstable)]
    /// EWMA task poll time, in nanoseconds.
    pub(crate) mean_poll_time: MetricAtomicU64,
    #[cfg(tokio_unstable)]
    /// Number of tasks scheduled for execution on the worker's local queue.
    pub(crate) local_schedule_count: MetricAtomicU64,
    #[cfg(tokio_unstable)]
    /// Number of tasks moved from the local queue to the global queue to free space.
    pub(crate) overflow_count: MetricAtomicU64,
    #[cfg(tokio_unstable)]
    /// If `Some`, tracks the number of polls by duration range.
    pub(super) poll_count_histogram: Option<Histogram>,
}
impl WorkerMetrics {
    /// Creates a zeroed set of worker metrics.
    pub(crate) fn new() -> WorkerMetrics {
        WorkerMetrics::default()
    }
    /// Records the current length of the worker's local queue.
    pub(crate) fn set_queue_depth(&self, len: usize) {
        self.queue_depth.store(len, Relaxed);
    }
    /// Records the id of the OS thread running this worker.
    pub(crate) fn set_thread_id(&self, thread_id: ThreadId) {
        *self.thread_id.lock().unwrap() = Some(thread_id);
    }
    cfg_metrics_variant! {
        stable: {
            pub(crate) fn from_config(_: &Config) -> WorkerMetrics {
                WorkerMetrics::new()
            }
        },
        unstable: {
            // Unstable builds additionally build the optional poll-count
            // histogram when one was configured on the runtime builder.
            pub(crate) fn from_config(config: &Config) -> WorkerMetrics {
                let mut worker_metrics = WorkerMetrics::new();
                worker_metrics.poll_count_histogram = config
                    .metrics_poll_count_histogram
                    .as_ref()
                    .map(|histogram_builder| histogram_builder.build());
                worker_metrics
            }
        }
    }
    cfg_unstable_metrics! {
        // Read-side accessors, exposed only with unstable metrics enabled.
        pub(crate) fn queue_depth(&self) -> usize {
            self.queue_depth.load(Relaxed)
        }
        pub(crate) fn thread_id(&self) -> Option<ThreadId> {
            *self.thread_id.lock().unwrap()
        }
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/metrics/runtime.rs | tokio/src/runtime/metrics/runtime.rs | use crate::runtime::Handle;
use std::time::Duration;
cfg_64bit_metrics! {
use std::sync::atomic::Ordering::Relaxed;
}
cfg_unstable_metrics! {
use std::ops::Range;
use std::thread::ThreadId;
}
/// Handle to the runtime's metrics.
///
/// This handle is internally reference-counted and can be freely cloned. A
/// `RuntimeMetrics` handle is obtained using the [`Runtime::metrics`] method.
///
/// [`Runtime::metrics`]: crate::runtime::Runtime::metrics()
#[derive(Clone, Debug)]
pub struct RuntimeMetrics {
    // All metric reads delegate through this runtime handle; cloning the
    // handle is what makes this type cheaply cloneable.
    handle: Handle,
}
impl RuntimeMetrics {
pub(crate) fn new(handle: Handle) -> RuntimeMetrics {
RuntimeMetrics { handle }
}
/// Returns the number of worker threads used by the runtime.
///
/// The number of workers is set by configuring `worker_threads` on
/// `runtime::Builder`. When using the `current_thread` runtime, the return
/// value is always `1`.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let metrics = Handle::current().metrics();
///
/// let n = metrics.num_workers();
/// println!("Runtime is using {} workers", n);
/// # }
/// ```
pub fn num_workers(&self) -> usize {
self.handle.inner.num_workers()
}
/// Returns the current number of alive tasks in the runtime.
///
/// This counter increases when a task is spawned and decreases when a
/// task exits.
///
    /// Note: When using the multi-threaded runtime this number may not
    /// have strong consistency, i.e. no tasks may be running but the metric
    /// reports otherwise.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let metrics = Handle::current().metrics();
///
/// let n = metrics.num_alive_tasks();
/// println!("Runtime has {} alive tasks", n);
/// # }
/// ```
pub fn num_alive_tasks(&self) -> usize {
self.handle.inner.num_alive_tasks()
}
/// Returns the number of tasks currently scheduled in the runtime's
/// global queue.
///
/// Tasks that are spawned or notified from a non-runtime thread are
/// scheduled using the runtime's global queue. This metric returns the
/// **current** number of tasks pending in the global queue. As such, the
/// returned value may increase or decrease as new tasks are scheduled and
/// processed.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let metrics = Handle::current().metrics();
///
/// let n = metrics.global_queue_depth();
/// println!("{} tasks currently pending in the runtime's global queue", n);
/// # }
/// ```
pub fn global_queue_depth(&self) -> usize {
self.handle.inner.injection_queue_depth()
}
cfg_64bit_metrics! {
/// Returns the amount of time the given worker thread has been busy.
///
/// The worker busy duration starts at zero when the runtime is created and
/// increases whenever the worker is spending time processing work. Using
/// this value can indicate the load of the given worker. If a lot of time
/// is spent busy, then the worker is under load and will check for inbound
/// events less often.
///
/// The timer is monotonically increasing. It is never decremented or reset
/// to zero.
///
/// # Arguments
///
/// `worker` is the index of the worker being queried. The given value must
/// be between 0 and `num_workers()`. The index uniquely identifies a single
/// worker and will continue to identify the worker throughout the lifetime
/// of the runtime instance.
///
/// # Panics
///
/// The method panics when `worker` represents an invalid worker, i.e. is
/// greater than or equal to `num_workers()`.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let metrics = Handle::current().metrics();
///
/// let n = metrics.worker_total_busy_duration(0);
/// println!("worker 0 was busy for a total of {:?}", n);
/// # }
/// ```
pub fn worker_total_busy_duration(&self, worker: usize) -> Duration {
let nanos = self
.handle
.inner
.worker_metrics(worker)
.busy_duration_total
.load(Relaxed);
Duration::from_nanos(nanos)
}
/// Returns the total number of times the given worker thread has parked.
///
/// The worker park count starts at zero when the runtime is created and
/// increases by one each time the worker parks the thread waiting for new
/// inbound events to process. This usually means the worker has processed
/// all pending work and is currently idle.
///
/// The counter is monotonically increasing. It is never decremented or
/// reset to zero.
///
/// # Arguments
///
/// `worker` is the index of the worker being queried. The given value must
/// be between 0 and `num_workers()`. The index uniquely identifies a single
/// worker and will continue to identify the worker throughout the lifetime
/// of the runtime instance.
///
/// # Panics
///
/// The method panics when `worker` represents an invalid worker, i.e. is
/// greater than or equal to `num_workers()`.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let metrics = Handle::current().metrics();
///
/// let n = metrics.worker_park_count(0);
/// println!("worker 0 parked {} times", n);
/// # }
/// ```
pub fn worker_park_count(&self, worker: usize) -> u64 {
self.handle
.inner
.worker_metrics(worker)
.park_count
.load(Relaxed)
}
/// Returns the total number of times the given worker thread has parked
/// and unparked.
///
/// The worker park/unpark count starts at zero when the runtime is created
/// and increases by one each time the worker parks the thread waiting for
/// new inbound events to process. This usually means the worker has processed
/// all pending work and is currently idle. When new work becomes available,
/// the worker is unparked and the park/unpark count is again increased by one.
///
/// An odd count means that the worker is currently parked.
/// An even count means that the worker is currently active.
///
/// The counter is monotonically increasing. It is never decremented or
/// reset to zero.
///
/// # Arguments
///
/// `worker` is the index of the worker being queried. The given value must
/// be between 0 and `num_workers()`. The index uniquely identifies a single
/// worker and will continue to identify the worker throughout the lifetime
/// of the runtime instance.
///
/// # Panics
///
/// The method panics when `worker` represents an invalid worker, i.e. is
/// greater than or equal to `num_workers()`.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let metrics = Handle::current().metrics();
/// let n = metrics.worker_park_unpark_count(0);
///
/// println!("worker 0 parked and unparked {} times", n);
///
/// if n % 2 == 0 {
/// println!("worker 0 is active");
/// } else {
/// println!("worker 0 is parked");
/// }
/// # }
/// ```
pub fn worker_park_unpark_count(&self, worker: usize) -> u64 {
self.handle
.inner
.worker_metrics(worker)
.park_unpark_count
.load(Relaxed)
}
}
cfg_unstable_metrics! {
/// Returns the number of additional threads spawned by the runtime.
///
/// The number of workers is set by configuring `max_blocking_threads` on
/// `runtime::Builder`.
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let _ = tokio::task::spawn_blocking(move || {
/// // Stand-in for compute-heavy work or using synchronous APIs
/// 1 + 1
/// }).await;
/// let metrics = Handle::current().metrics();
///
/// let n = metrics.num_blocking_threads();
/// println!("Runtime has created {} threads", n);
/// # }
/// # }
/// ```
pub fn num_blocking_threads(&self) -> usize {
self.handle.inner.num_blocking_threads()
}
#[deprecated = "Renamed to num_alive_tasks"]
/// Renamed to [`RuntimeMetrics::num_alive_tasks`]
pub fn active_tasks_count(&self) -> usize {
self.num_alive_tasks()
}
    /// Returns the number of idle threads that have been spawned by the runtime
    /// for `spawn_blocking` calls.
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime::Handle;
///
/// #[tokio::main]
/// async fn main() {
/// let _ = tokio::task::spawn_blocking(move || {
/// // Stand-in for compute-heavy work or using synchronous APIs
/// 1 + 1
/// }).await;
/// let metrics = Handle::current().metrics();
///
/// let n = metrics.num_idle_blocking_threads();
/// println!("Runtime has {} idle blocking thread pool threads", n);
/// }
/// # }
/// ```
pub fn num_idle_blocking_threads(&self) -> usize {
self.handle.inner.num_idle_blocking_threads()
}
/// Returns the thread id of the given worker thread.
///
/// The returned value is `None` if the worker thread has not yet finished
/// starting up.
///
/// If additional information about the thread, such as its native id, are
/// required, those can be collected in [`on_thread_start`] and correlated
/// using the thread id.
///
/// [`on_thread_start`]: crate::runtime::Builder::on_thread_start
///
/// # Arguments
///
/// `worker` is the index of the worker being queried. The given value must
/// be between 0 and `num_workers()`. The index uniquely identifies a single
/// worker and will continue to identify the worker throughout the lifetime
/// of the runtime instance.
///
/// # Panics
///
/// The method panics when `worker` represents an invalid worker, i.e. is
/// greater than or equal to `num_workers()`.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let metrics = Handle::current().metrics();
///
/// let id = metrics.worker_thread_id(0);
/// println!("worker 0 has id {:?}", id);
/// # }
/// ```
pub fn worker_thread_id(&self, worker: usize) -> Option<ThreadId> {
self.handle
.inner
.worker_metrics(worker)
.thread_id()
}
/// Renamed to [`RuntimeMetrics::global_queue_depth`]
#[deprecated = "Renamed to global_queue_depth"]
#[doc(hidden)]
pub fn injection_queue_depth(&self) -> usize {
self.handle.inner.injection_queue_depth()
}
/// Returns the number of tasks currently scheduled in the given worker's
/// local queue.
///
/// Tasks that are spawned or notified from within a runtime thread are
/// scheduled using that worker's local queue. This metric returns the
/// **current** number of tasks pending in the worker's local queue. As
/// such, the returned value may increase or decrease as new tasks are
/// scheduled and processed.
///
/// # Arguments
///
/// `worker` is the index of the worker being queried. The given value must
/// be between 0 and `num_workers()`. The index uniquely identifies a single
/// worker and will continue to identify the worker throughout the lifetime
/// of the runtime instance.
///
/// # Panics
///
/// The method panics when `worker` represents an invalid worker, i.e. is
/// greater than or equal to `num_workers()`.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let metrics = Handle::current().metrics();
///
/// let n = metrics.worker_local_queue_depth(0);
/// println!("{} tasks currently pending in worker 0's local queue", n);
/// # }
/// ```
pub fn worker_local_queue_depth(&self, worker: usize) -> usize {
self.handle.inner.worker_local_queue_depth(worker)
}
/// Returns `true` if the runtime is tracking the distribution of task poll
/// times.
///
/// Task poll times are not instrumented by default as doing so requires
/// calling [`Instant::now()`] twice per task poll. The feature is enabled
/// by calling [`enable_metrics_poll_time_histogram()`] when building the
/// runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::{self, Handle};
///
/// fn main() {
/// runtime::Builder::new_current_thread()
/// .enable_metrics_poll_time_histogram()
/// .build()
/// .unwrap()
/// .block_on(async {
/// let metrics = Handle::current().metrics();
/// let enabled = metrics.poll_time_histogram_enabled();
///
/// println!("Tracking task poll time distribution: {:?}", enabled);
/// });
/// }
/// ```
///
/// [`enable_metrics_poll_time_histogram()`]: crate::runtime::Builder::enable_metrics_poll_time_histogram
/// [`Instant::now()`]: std::time::Instant::now
pub fn poll_time_histogram_enabled(&self) -> bool {
self.handle
.inner
.worker_metrics(0)
.poll_count_histogram
.is_some()
}
#[deprecated(note = "Renamed to `poll_time_histogram_enabled`")]
#[doc(hidden)]
pub fn poll_count_histogram_enabled(&self) -> bool {
self.poll_time_histogram_enabled()
}
/// Returns the number of histogram buckets tracking the distribution of
/// task poll times.
///
/// This value is configured by calling
/// [`metrics_poll_time_histogram_configuration()`] when building the runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::{self, Handle};
///
/// fn main() {
/// runtime::Builder::new_current_thread()
/// .enable_metrics_poll_time_histogram()
/// .build()
/// .unwrap()
/// .block_on(async {
/// let metrics = Handle::current().metrics();
/// let buckets = metrics.poll_time_histogram_num_buckets();
///
/// println!("Histogram buckets: {:?}", buckets);
/// });
/// }
/// ```
///
/// [`metrics_poll_time_histogram_configuration()`]:
/// crate::runtime::Builder::metrics_poll_time_histogram_configuration
pub fn poll_time_histogram_num_buckets(&self) -> usize {
self.handle
.inner
.worker_metrics(0)
.poll_count_histogram
.as_ref()
.map(|histogram| histogram.num_buckets())
.unwrap_or_default()
}
/// Deprecated. Use [`poll_time_histogram_num_buckets()`] instead.
///
/// [`poll_time_histogram_num_buckets()`]: Self::poll_time_histogram_num_buckets
#[doc(hidden)]
#[deprecated(note = "renamed to `poll_time_histogram_num_buckets`.")]
pub fn poll_count_histogram_num_buckets(&self) -> usize {
self.poll_time_histogram_num_buckets()
}
/// Returns the range of task poll times tracked by the given bucket.
///
/// This value is configured by calling
/// [`metrics_poll_time_histogram_configuration()`] when building the runtime.
///
/// # Panics
///
/// The method panics if `bucket` represents an invalid bucket index, i.e.
/// is greater than or equal to `poll_time_histogram_num_buckets()`.
///
/// # Examples
///
/// ```
/// use tokio::runtime::{self, Handle};
///
/// fn main() {
/// runtime::Builder::new_current_thread()
/// .enable_metrics_poll_time_histogram()
/// .build()
/// .unwrap()
/// .block_on(async {
/// let metrics = Handle::current().metrics();
/// let buckets = metrics.poll_time_histogram_num_buckets();
///
/// for i in 0..buckets {
/// let range = metrics.poll_time_histogram_bucket_range(i);
/// println!("Histogram bucket {} range: {:?}", i, range);
/// }
/// });
/// }
/// ```
///
/// [`metrics_poll_time_histogram_configuration()`]:
/// crate::runtime::Builder::metrics_poll_time_histogram_configuration
#[track_caller]
pub fn poll_time_histogram_bucket_range(&self, bucket: usize) -> Range<Duration> {
self.handle
.inner
.worker_metrics(0)
.poll_count_histogram
.as_ref()
.map(|histogram| {
let range = histogram.bucket_range(bucket);
std::ops::Range {
start: Duration::from_nanos(range.start),
end: Duration::from_nanos(range.end),
}
})
.unwrap_or_default()
}
/// Deprecated. Use [`poll_time_histogram_bucket_range()`] instead.
///
/// [`poll_time_histogram_bucket_range()`]: Self::poll_time_histogram_bucket_range
#[track_caller]
#[doc(hidden)]
#[deprecated(note = "renamed to `poll_time_histogram_bucket_range`")]
pub fn poll_count_histogram_bucket_range(&self, bucket: usize) -> Range<Duration> {
self.poll_time_histogram_bucket_range(bucket)
}
/// Returns the number of tasks currently scheduled in the blocking
/// thread pool, spawned using `spawn_blocking`.
///
/// This metric returns the **current** number of tasks pending in
/// blocking thread pool. As such, the returned value may increase
/// or decrease as new tasks are scheduled and processed.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let metrics = Handle::current().metrics();
///
/// let n = metrics.blocking_queue_depth();
/// println!("{} tasks currently pending in the blocking thread pool", n);
/// # }
/// ```
pub fn blocking_queue_depth(&self) -> usize {
self.handle.inner.blocking_queue_depth()
}
}
feature! {
#![all(
tokio_unstable,
target_has_atomic = "64"
)]
/// Returns the number of tasks spawned in this runtime since it was created.
///
/// This count starts at zero when the runtime is created and increases by one each time a task is spawned.
///
/// The counter is monotonically increasing. It is never decremented or
/// reset to zero.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let metrics = Handle::current().metrics();
///
/// let n = metrics.spawned_tasks_count();
/// println!("Runtime has had {} tasks spawned", n);
/// # }
/// ```
pub fn spawned_tasks_count(&self) -> u64 {
self.handle.inner.spawned_tasks_count()
}
/// Returns the number of tasks scheduled from **outside** of the runtime.
///
/// The remote schedule count starts at zero when the runtime is created and
/// increases by one each time a task is woken from **outside** of the
/// runtime. This usually means that a task is spawned or notified from a
/// non-runtime thread and must be queued using the Runtime's injection
/// queue, which tends to be slower.
///
/// The counter is monotonically increasing. It is never decremented or
/// reset to zero.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let metrics = Handle::current().metrics();
///
/// let n = metrics.remote_schedule_count();
/// println!("{} tasks were scheduled from outside the runtime", n);
/// # }
/// ```
pub fn remote_schedule_count(&self) -> u64 {
self.handle
.inner
.scheduler_metrics()
.remote_schedule_count
.load(Relaxed)
}
/// Returns the number of times that tasks have been forced to yield back to the scheduler
/// after exhausting their task budgets.
///
/// This count starts at zero when the runtime is created and increases by one each time a task yields due to exhausting its budget.
///
/// The counter is monotonically increasing. It is never decremented or
/// reset to zero.
pub fn budget_forced_yield_count(&self) -> u64 {
self.handle
.inner
.scheduler_metrics()
.budget_forced_yield_count
.load(Relaxed)
}
/// Returns the number of times the given worker thread unparked but
/// performed no work before parking again.
///
/// The worker no-op count starts at zero when the runtime is created and
/// increases by one each time the worker unparks the thread but finds no
/// new work and goes back to sleep. This indicates a false-positive wake up.
///
/// The counter is monotonically increasing. It is never decremented or
/// reset to zero.
///
/// # Arguments
///
/// `worker` is the index of the worker being queried. The given value must
/// be between 0 and `num_workers()`. The index uniquely identifies a single
/// worker and will continue to identify the worker throughout the lifetime
/// of the runtime instance.
///
/// # Panics
///
/// The method panics when `worker` represents an invalid worker, i.e. is
/// greater than or equal to `num_workers()`.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let metrics = Handle::current().metrics();
///
/// let n = metrics.worker_noop_count(0);
/// println!("worker 0 had {} no-op unparks", n);
/// # }
/// ```
pub fn worker_noop_count(&self, worker: usize) -> u64 {
self.handle
.inner
.worker_metrics(worker)
.noop_count
.load(Relaxed)
}
/// Returns the number of tasks the given worker thread stole from
/// another worker thread.
///
/// This metric only applies to the **multi-threaded** runtime and will
/// always return `0` when using the current thread runtime.
///
/// The worker steal count starts at zero when the runtime is created and
/// increases by `N` each time the worker has processed its scheduled queue
/// and successfully steals `N` more pending tasks from another worker.
///
/// The counter is monotonically increasing. It is never decremented or
/// reset to zero.
///
/// # Arguments
///
/// `worker` is the index of the worker being queried. The given value must
/// be between 0 and `num_workers()`. The index uniquely identifies a single
/// worker and will continue to identify the worker throughout the lifetime
/// of the runtime instance.
///
/// # Panics
///
/// The method panics when `worker` represents an invalid worker, i.e. is
/// greater than or equal to `num_workers()`.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let metrics = Handle::current().metrics();
///
/// let n = metrics.worker_steal_count(0);
/// println!("worker 0 has stolen {} tasks", n);
/// # }
/// ```
pub fn worker_steal_count(&self, worker: usize) -> u64 {
self.handle
.inner
.worker_metrics(worker)
.steal_count
.load(Relaxed)
}
/// Returns the number of times the given worker thread stole tasks from
/// another worker thread.
///
/// This metric only applies to the **multi-threaded** runtime and will
/// always return `0` when using the current thread runtime.
///
    /// The worker steal operation count starts at zero when the runtime is created and
/// increases by one each time the worker has processed its scheduled queue
/// and successfully steals more pending tasks from another worker.
///
/// The counter is monotonically increasing. It is never decremented or
/// reset to zero.
///
/// # Arguments
///
/// `worker` is the index of the worker being queried. The given value must
/// be between 0 and `num_workers()`. The index uniquely identifies a single
/// worker and will continue to identify the worker throughout the lifetime
/// of the runtime instance.
///
/// # Panics
///
/// The method panics when `worker` represents an invalid worker, i.e. is
/// greater than or equal to `num_workers()`.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let metrics = Handle::current().metrics();
///
/// let n = metrics.worker_steal_operations(0);
/// println!("worker 0 has stolen tasks {} times", n);
/// # }
/// ```
pub fn worker_steal_operations(&self, worker: usize) -> u64 {
self.handle
.inner
.worker_metrics(worker)
.steal_operations
.load(Relaxed)
}
/// Returns the number of tasks the given worker thread has polled.
///
/// Starting at zero when the runtime is created, the counter goes up by one
/// every time the worker polls a scheduled task. It is monotonically
/// increasing and is never decremented or reset to zero.
///
/// # Arguments
///
/// `worker` is the index of the worker being queried. The given value must
/// be between 0 and `num_workers()`. The index uniquely identifies a single
/// worker and will continue to identify the worker throughout the lifetime
/// of the runtime instance.
///
/// # Panics
///
/// The method panics when `worker` represents an invalid worker, i.e. is
/// greater than or equal to `num_workers()`.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let metrics = Handle::current().metrics();
///
/// let n = metrics.worker_poll_count(0);
/// println!("worker 0 has polled {} tasks", n);
/// # }
/// ```
pub fn worker_poll_count(&self, worker: usize) -> u64 {
    // Relaxed load: statistics only, no ordering required.
    let worker_metrics = self.handle.inner.worker_metrics(worker);
    worker_metrics.poll_count.load(Relaxed)
}
/// Returns the number of tasks scheduled from **within** the runtime onto the
/// given worker's local queue.
///
/// Starting at zero when the runtime is created, the counter goes up by one
/// each time a task is woken from **inside** of the runtime on the given
/// worker. This usually means that a task is spawned or notified from within
/// a runtime thread and will be queued on the worker-local queue.
///
/// The counter is monotonically increasing. It is never decremented or
/// reset to zero.
///
/// # Arguments
///
/// `worker` is the index of the worker being queried. The given value must
/// be between 0 and `num_workers()`. The index uniquely identifies a single
/// worker and will continue to identify the worker throughout the lifetime
/// of the runtime instance.
///
/// # Panics
///
/// The method panics when `worker` represents an invalid worker, i.e. is
/// greater than or equal to `num_workers()`.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Handle;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let metrics = Handle::current().metrics();
///
/// let n = metrics.worker_local_schedule_count(0);
/// println!("{} tasks were scheduled on the worker's local queue", n);
/// # }
/// ```
pub fn worker_local_schedule_count(&self, worker: usize) -> u64 {
    // Relaxed load: statistics only, no ordering required.
    let worker_metrics = self.handle.inner.worker_metrics(worker);
    worker_metrics.local_schedule_count.load(Relaxed)
}
/// Returns the number of times the given worker thread saturated its local
/// queue.
///
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | true |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/metrics/mod.rs | tokio/src/runtime/metrics/mod.rs | //! This module contains information need to view information about how the
//! runtime is performing.
//!
//! **Note**: This is an [unstable API][unstable]. The public API of types in
//! this module may break in 1.x releases. See [the documentation on unstable
//! features][unstable] for details.
//!
//! [unstable]: crate#unstable-features
#![allow(clippy::module_inception)]
mod runtime;
pub use runtime::RuntimeMetrics;
mod batch;
pub(crate) use batch::MetricsBatch;
mod worker;
pub(crate) use worker::WorkerMetrics;
cfg_unstable_metrics! {
mod histogram;
pub(crate) use histogram::{Histogram, HistogramBatch, HistogramBuilder};
#[allow(unreachable_pub)] // rust-lang/rust#57411
pub use histogram::{HistogramScale, HistogramConfiguration, LogHistogram, LogHistogramBuilder, InvalidHistogramConfiguration};
mod scheduler;
pub(crate) use scheduler::SchedulerMetrics;
cfg_net! {
mod io;
pub(crate) use io::IoDriverMetrics;
}
}
cfg_not_unstable_metrics! {
mod mock;
pub(crate) use mock::{SchedulerMetrics, HistogramBuilder};
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/metrics/mock.rs | tokio/src/runtime/metrics/mock.rs | //! This file contains mocks of the types in src/runtime/metrics
/// Zero-sized stand-in for the real `SchedulerMetrics`, compiled in when the
/// unstable-metrics feature set is disabled (see `cfg_not_unstable_metrics!`).
pub(crate) struct SchedulerMetrics {}

/// Zero-sized stand-in for the real `HistogramBuilder`.
#[derive(Clone, Default)]
pub(crate) struct HistogramBuilder {}

impl SchedulerMetrics {
    pub(crate) fn new() -> Self {
        SchedulerMetrics {}
    }

    /// Increment the number of tasks scheduled externally (no-op in the mock)
    pub(crate) fn inc_remote_schedule_count(&self) {}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/metrics/histogram.rs | tokio/src/runtime/metrics/histogram.rs | mod h2_histogram;
pub use h2_histogram::{InvalidHistogramConfiguration, LogHistogram, LogHistogramBuilder};
use crate::util::metric_atomics::MetricAtomicU64;
use std::sync::atomic::Ordering::Relaxed;
use crate::runtime::metrics::batch::duration_as_u64;
use std::cmp;
use std::ops::Range;
use std::time::Duration;
/// A bucketed counter histogram whose buckets are atomic `u64`s, so counts can
/// be read while a worker publishes new values (see `HistogramBatch::submit`).
#[derive(Debug)]
pub(crate) struct Histogram {
    /// The histogram buckets
    buckets: Box<[MetricAtomicU64]>,
    /// The type of the histogram
    ///
    /// This handles `fn(bucket) -> Range` and `fn(value) -> bucket`
    histogram_type: HistogramType,
}
/// Internal builder for [`Histogram`].
#[derive(Debug, Clone)]
pub(crate) struct HistogramBuilder {
    /// Bucketing strategy to use when `legacy` is not set.
    pub(crate) histogram_type: HistogramType,
    /// Legacy configuration; when `Some`, it takes precedence over
    /// `histogram_type` in `build()`.
    pub(crate) legacy: Option<LegacyBuilder>,
}
/// Parameters accepted by the legacy histogram configuration API
/// (resolution + scale + bucket count).
#[derive(Debug, Clone)]
pub(crate) struct LegacyBuilder {
    // Width of the first bucket (must be > 0; asserted in `build()`).
    pub(crate) resolution: u64,
    // Linear or legacy-log bucket layout.
    pub(crate) scale: HistogramScale,
    // Total number of buckets.
    pub(crate) num_buckets: usize,
}
impl Default for LegacyBuilder {
fn default() -> Self {
Self {
resolution: 100_000,
num_buckets: 10,
scale: HistogramScale::Linear,
}
}
}
/// Unsynchronized staging copy of a [`Histogram`]'s buckets.
///
/// Counts are accumulated locally via `measure` and published to the shared,
/// atomic `Histogram` with `submit`.
#[derive(Debug)]
pub(crate) struct HistogramBatch {
    buckets: Box<[u64]>,
    configuration: HistogramType,
}
// NOTE(review): the items below are public API exposed only when the project's
// `cfg_unstable!` macro enables them.
cfg_unstable! {
    /// Whether the histogram used to aggregate a metric uses a linear or
    /// logarithmic scale.
    #[derive(Debug, Copy, Clone, Eq, PartialEq)]
    #[non_exhaustive]
    pub enum HistogramScale {
        /// Linear bucket scale
        Linear,
        /// Logarithmic bucket scale
        Log,
    }

    /// Configuration for the poll count histogram
    #[derive(Debug, Clone)]
    pub struct HistogramConfiguration {
        pub(crate) inner: HistogramType
    }

    impl HistogramConfiguration {
        /// Create a linear bucketed histogram
        ///
        /// # Arguments
        ///
        /// * `bucket_width`: The width of each bucket
        /// * `num_buckets`: The number of buckets
        pub fn linear(bucket_width: Duration, num_buckets: usize) -> Self {
            Self {
                inner: HistogramType::Linear(LinearHistogram {
                    num_buckets,
                    // Durations are stored as u64 internally.
                    bucket_width: duration_as_u64(bucket_width),
                }),
            }
        }

        /// Creates a log-scaled bucketed histogram
        ///
        /// See [`LogHistogramBuilder`] for information about configuration & defaults
        pub fn log(configuration: impl Into<LogHistogram>) -> Self {
            Self {
                inner: HistogramType::H2(configuration.into()),
            }
        }
    }
}
/// Bucketing strategy: maps a recorded value to a bucket index
/// (`value_to_bucket`) and a bucket index back to its value range
/// (`bucket_range`).
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub(crate) enum HistogramType {
    /// Linear histogram with fixed width buckets
    Linear(LinearHistogram),
    /// Old log histogram where each bucket doubles in size
    LogLegacy(LegacyLogHistogram),
    /// Log histogram implementation based on H2 Histograms
    H2(LogHistogram),
}
impl HistogramType {
    /// Total number of buckets for this configuration.
    pub(crate) fn num_buckets(&self) -> usize {
        match self {
            HistogramType::Linear(linear) => linear.num_buckets,
            HistogramType::LogLegacy(log) => log.num_buckets,
            HistogramType::H2(h2) => h2.num_buckets,
        }
    }

    /// Maps `value` to the index of the bucket that counts it. Out-of-range
    /// values are clamped into the final bucket.
    fn value_to_bucket(&self, value: u64) -> usize {
        match self {
            HistogramType::Linear(LinearHistogram {
                num_buckets,
                bucket_width,
            }) => {
                // bucket = value / width, clamped to the last bucket.
                let max = num_buckets - 1;
                cmp::min(value / *bucket_width, max as u64) as usize
            }
            HistogramType::LogLegacy(LegacyLogHistogram {
                num_buckets,
                first_bucket_width,
            }) => {
                let max = num_buckets - 1;
                if value < *first_bucket_width {
                    0
                } else {
                    // Each additional significant bit above the first bucket's
                    // width moves the value one bucket up. `first_bucket_width`
                    // is expected to be a power of two (the builder rounds it
                    // via `next_power_of_two`), so `(width - 1).leading_zeros()`
                    // yields the bit count covered by bucket 0.
                    let significant_digits = 64 - value.leading_zeros();
                    let bucket_digits = 64 - (first_bucket_width - 1).leading_zeros();
                    cmp::min(significant_digits as usize - bucket_digits as usize, max)
                }
            }
            HistogramType::H2(log_histogram) => log_histogram.value_to_bucket(value),
        }
    }

    /// Inverse of `value_to_bucket`: the half-open value range covered by
    /// `bucket`. The last bucket always extends to `u64::MAX`.
    fn bucket_range(&self, bucket: usize) -> Range<u64> {
        match self {
            HistogramType::Linear(LinearHistogram {
                num_buckets,
                bucket_width,
            }) => Range {
                start: bucket_width * bucket as u64,
                end: if bucket == num_buckets - 1 {
                    // Final bucket is the catch-all.
                    u64::MAX
                } else {
                    bucket_width * (bucket as u64 + 1)
                },
            },
            HistogramType::LogLegacy(LegacyLogHistogram {
                num_buckets,
                first_bucket_width,
            }) => Range {
                start: if bucket == 0 {
                    0
                } else {
                    // Bucket n (n >= 1) starts at width * 2^(n-1).
                    first_bucket_width << (bucket - 1)
                },
                end: if bucket == num_buckets - 1 {
                    // Final bucket is the catch-all.
                    u64::MAX
                } else {
                    first_bucket_width << bucket
                },
            },
            HistogramType::H2(log) => log.bucket_range(bucket),
        }
    }
}
/// Fixed-width bucketing: bucket `i` covers `[i * bucket_width, (i + 1) * bucket_width)`,
/// with the final bucket extended to `u64::MAX`.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub(crate) struct LinearHistogram {
    num_buckets: usize,
    bucket_width: u64,
}
/// Legacy log bucketing: bucket 0 covers `[0, first_bucket_width)` and every
/// subsequent bucket doubles the previous width; the final bucket extends to
/// `u64::MAX`. `first_bucket_width` is expected to be a power of two (the
/// builder rounds it up with `next_power_of_two`).
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub(crate) struct LegacyLogHistogram {
    num_buckets: usize,
    first_bucket_width: u64,
}
impl Histogram {
    /// Number of buckets in this histogram.
    pub(crate) fn num_buckets(&self) -> usize {
        self.buckets.len()
    }

    cfg_64bit_metrics! {
        /// Current count in `bucket` (relaxed atomic load).
        pub(crate) fn get(&self, bucket: usize) -> u64 {
            self.buckets[bucket].load(Relaxed)
        }
    }

    /// Value range covered by `bucket`, per the configured bucketing strategy.
    pub(crate) fn bucket_range(&self, bucket: usize) -> Range<u64> {
        self.histogram_type.bucket_range(bucket)
    }
}
impl HistogramBatch {
    /// Creates a zeroed staging batch matching `histogram`'s layout.
    pub(crate) fn from_histogram(histogram: &Histogram) -> HistogramBatch {
        HistogramBatch {
            // One local (non-atomic) zero per shared bucket.
            buckets: histogram.buckets.iter().map(|_| 0_u64).collect(),
            configuration: histogram.histogram_type,
        }
    }

    /// Adds `count` occurrences of `value` to the matching local bucket.
    pub(crate) fn measure(&mut self, value: u64, count: u64) {
        let bucket = self.value_to_bucket(value);
        self.buckets[bucket] += count;
    }

    /// Publishes the local counts into the shared, atomic histogram.
    pub(crate) fn submit(&self, histogram: &Histogram) {
        debug_assert_eq!(self.configuration, histogram.histogram_type);
        debug_assert_eq!(self.buckets.len(), histogram.buckets.len());
        for (i, &count) in self.buckets.iter().enumerate() {
            histogram.buckets[i].store(count, Relaxed);
        }
    }

    fn value_to_bucket(&self, value: u64) -> usize {
        self.configuration.value_to_bucket(value)
    }
}
impl HistogramBuilder {
    /// Creates a builder with the default layout: ten linear buckets,
    /// 100,000 units wide each.
    pub(crate) fn new() -> HistogramBuilder {
        HistogramBuilder {
            histogram_type: HistogramType::Linear(LinearHistogram {
                num_buckets: 10,
                bucket_width: 100_000,
            }),
            legacy: None,
        }
    }

    /// Applies `f` to the legacy configuration, creating it with defaults on
    /// first use.
    pub(crate) fn legacy_mut(&mut self, f: impl Fn(&mut LegacyBuilder)) {
        f(self.legacy.get_or_insert_with(LegacyBuilder::default));
    }

    /// Builds the histogram; a legacy configuration, when present, overrides
    /// `histogram_type`.
    pub(crate) fn build(&self) -> Histogram {
        let histogram_type = if let Some(legacy) = &self.legacy {
            assert!(legacy.resolution > 0);
            match legacy.scale {
                HistogramScale::Linear => HistogramType::Linear(LinearHistogram {
                    num_buckets: legacy.num_buckets,
                    bucket_width: legacy.resolution,
                }),
                HistogramScale::Log => HistogramType::LogLegacy(LegacyLogHistogram {
                    num_buckets: legacy.num_buckets,
                    // Legacy log buckets require a power-of-two width.
                    first_bucket_width: legacy.resolution.next_power_of_two(),
                }),
            }
        } else {
            self.histogram_type
        };

        let buckets = std::iter::repeat_with(|| MetricAtomicU64::new(0))
            .take(histogram_type.num_buckets())
            .collect();

        Histogram {
            buckets,
            histogram_type,
        }
    }
}
impl Default for HistogramBuilder {
fn default() -> HistogramBuilder {
HistogramBuilder::new()
}
}
// Unit tests for the histogram bucketing math. These only run on targets with
// 64-bit atomics, since `Histogram` stores `MetricAtomicU64` buckets.
#[cfg(all(test, target_has_atomic = "64"))]
mod test {
    use super::*;

    // Asserts on the *staged* (non-atomic) buckets of a `HistogramBatch`.
    macro_rules! assert_bucket_eq {
        ($h:expr, $bucket:expr, $val:expr) => {{
            assert_eq!($h.buckets[$bucket], $val);
        }};
    }

    // Convenience constructor for a linear histogram.
    fn linear(resolution: u64, num_buckets: usize) -> Histogram {
        HistogramBuilder {
            histogram_type: HistogramType::Linear(LinearHistogram {
                bucket_width: resolution,
                num_buckets,
            }),
            legacy: None,
        }
        .build()
    }

    // Legacy configuration takes precedence over `histogram_type`.
    #[test]
    fn test_legacy_builder() {
        let mut builder = HistogramBuilder::new();
        builder.legacy_mut(|b| b.num_buckets = 20);
        assert_eq!(builder.build().num_buckets(), 20);
    }

    // Legacy-log layout, first bucket width 1: 0..1, 1..2, 2..4, 4..8, ...
    #[test]
    fn log_scale_resolution_1() {
        let h = HistogramBuilder {
            histogram_type: HistogramType::LogLegacy(LegacyLogHistogram {
                first_bucket_width: 1,
                num_buckets: 10,
            }),
            legacy: None,
        }
        .build();

        assert_eq!(h.bucket_range(0), 0..1);
        assert_eq!(h.bucket_range(1), 1..2);
        assert_eq!(h.bucket_range(2), 2..4);
        assert_eq!(h.bucket_range(3), 4..8);
        assert_eq!(h.bucket_range(9), 256..u64::MAX);

        let mut b = HistogramBatch::from_histogram(&h);

        b.measure(0, 1);
        assert_bucket_eq!(b, 0, 1);
        assert_bucket_eq!(b, 1, 0);

        b.measure(1, 1);
        assert_bucket_eq!(b, 0, 1);
        assert_bucket_eq!(b, 1, 1);
        assert_bucket_eq!(b, 2, 0);

        b.measure(2, 1);
        assert_bucket_eq!(b, 0, 1);
        assert_bucket_eq!(b, 1, 1);
        assert_bucket_eq!(b, 2, 1);

        b.measure(3, 1);
        assert_bucket_eq!(b, 0, 1);
        assert_bucket_eq!(b, 1, 1);
        assert_bucket_eq!(b, 2, 2);

        b.measure(4, 1);
        assert_bucket_eq!(b, 0, 1);
        assert_bucket_eq!(b, 1, 1);
        assert_bucket_eq!(b, 2, 2);
        assert_bucket_eq!(b, 3, 1);

        b.measure(100, 1);
        assert_bucket_eq!(b, 7, 1);

        b.measure(128, 1);
        assert_bucket_eq!(b, 8, 1);

        b.measure(4096, 1);
        assert_bucket_eq!(b, 9, 1);

        // Values beyond the last bucket clamp into it.
        b.measure(u64::MAX, 1);
        assert_bucket_eq!(b, 9, 2);
    }

    // Legacy-log layout, first bucket width 2: 0..2, 2..4, 4..8, 8..16, ...
    #[test]
    fn log_scale_resolution_2() {
        let h = HistogramBuilder {
            histogram_type: HistogramType::LogLegacy(LegacyLogHistogram {
                num_buckets: 10,
                first_bucket_width: 2,
            }),
            legacy: None,
        }
        .build();

        assert_eq!(h.bucket_range(0), 0..2);
        assert_eq!(h.bucket_range(1), 2..4);
        assert_eq!(h.bucket_range(2), 4..8);
        assert_eq!(h.bucket_range(3), 8..16);
        assert_eq!(h.bucket_range(9), 512..u64::MAX);

        let mut b = HistogramBatch::from_histogram(&h);

        b.measure(0, 1);
        assert_bucket_eq!(b, 0, 1);
        assert_bucket_eq!(b, 1, 0);

        b.measure(1, 1);
        assert_bucket_eq!(b, 0, 2);
        assert_bucket_eq!(b, 1, 0);

        b.measure(2, 1);
        assert_bucket_eq!(b, 0, 2);
        assert_bucket_eq!(b, 1, 1);
        assert_bucket_eq!(b, 2, 0);

        b.measure(3, 1);
        assert_bucket_eq!(b, 0, 2);
        assert_bucket_eq!(b, 1, 2);
        assert_bucket_eq!(b, 2, 0);

        b.measure(4, 1);
        assert_bucket_eq!(b, 0, 2);
        assert_bucket_eq!(b, 1, 2);
        assert_bucket_eq!(b, 2, 1);

        b.measure(5, 1);
        assert_bucket_eq!(b, 0, 2);
        assert_bucket_eq!(b, 1, 2);
        assert_bucket_eq!(b, 2, 2);

        b.measure(6, 1);
        assert_bucket_eq!(b, 0, 2);
        assert_bucket_eq!(b, 1, 2);
        assert_bucket_eq!(b, 2, 3);

        b.measure(7, 1);
        assert_bucket_eq!(b, 0, 2);
        assert_bucket_eq!(b, 1, 2);
        assert_bucket_eq!(b, 2, 4);

        b.measure(8, 1);
        assert_bucket_eq!(b, 0, 2);
        assert_bucket_eq!(b, 1, 2);
        assert_bucket_eq!(b, 2, 4);
        assert_bucket_eq!(b, 3, 1);

        b.measure(100, 1);
        assert_bucket_eq!(b, 6, 1);

        b.measure(128, 1);
        assert_bucket_eq!(b, 7, 1);

        b.measure(4096, 1);
        assert_bucket_eq!(b, 9, 1);

        // Nothing is visible in the shared histogram until `submit`.
        for bucket in h.buckets.iter() {
            assert_eq!(bucket.load(Relaxed), 0);
        }

        b.submit(&h);

        for i in 0..h.buckets.len() {
            assert_eq!(h.buckets[i].load(Relaxed), b.buckets[i]);
        }

        // `submit` stores absolute counts, so re-submitting is idempotent.
        b.submit(&h);

        for i in 0..h.buckets.len() {
            assert_eq!(h.buckets[i].load(Relaxed), b.buckets[i]);
        }
    }

    // Linear layout with width 1: bucket i covers exactly value i.
    #[test]
    fn linear_scale_resolution_1() {
        let h = linear(1, 10);

        assert_eq!(h.bucket_range(0), 0..1);
        assert_eq!(h.bucket_range(1), 1..2);
        assert_eq!(h.bucket_range(2), 2..3);
        assert_eq!(h.bucket_range(3), 3..4);
        assert_eq!(h.bucket_range(9), 9..u64::MAX);

        let mut b = HistogramBatch::from_histogram(&h);

        b.measure(0, 1);
        assert_bucket_eq!(b, 0, 1);
        assert_bucket_eq!(b, 1, 0);

        b.measure(1, 1);
        assert_bucket_eq!(b, 0, 1);
        assert_bucket_eq!(b, 1, 1);
        assert_bucket_eq!(b, 2, 0);

        b.measure(2, 1);
        assert_bucket_eq!(b, 0, 1);
        assert_bucket_eq!(b, 1, 1);
        assert_bucket_eq!(b, 2, 1);
        assert_bucket_eq!(b, 3, 0);

        b.measure(3, 1);
        assert_bucket_eq!(b, 0, 1);
        assert_bucket_eq!(b, 1, 1);
        assert_bucket_eq!(b, 2, 1);
        assert_bucket_eq!(b, 3, 1);

        b.measure(5, 1);
        assert_bucket_eq!(b, 5, 1);

        b.measure(4096, 1);
        assert_bucket_eq!(b, 9, 1);

        for bucket in h.buckets.iter() {
            assert_eq!(bucket.load(Relaxed), 0);
        }

        b.submit(&h);

        for i in 0..h.buckets.len() {
            assert_eq!(h.buckets[i].load(Relaxed), b.buckets[i]);
        }

        b.submit(&h);

        for i in 0..h.buckets.len() {
            assert_eq!(h.buckets[i].load(Relaxed), b.buckets[i]);
        }
    }

    // Linear layout with width 100.
    #[test]
    fn linear_scale_resolution_100() {
        let h = linear(100, 10);

        assert_eq!(h.bucket_range(0), 0..100);
        assert_eq!(h.bucket_range(1), 100..200);
        assert_eq!(h.bucket_range(2), 200..300);
        assert_eq!(h.bucket_range(3), 300..400);
        assert_eq!(h.bucket_range(9), 900..u64::MAX);

        let mut b = HistogramBatch::from_histogram(&h);

        b.measure(0, 1);
        assert_bucket_eq!(b, 0, 1);
        assert_bucket_eq!(b, 1, 0);

        b.measure(50, 1);
        assert_bucket_eq!(b, 0, 2);
        assert_bucket_eq!(b, 1, 0);

        b.measure(100, 1);
        assert_bucket_eq!(b, 0, 2);
        assert_bucket_eq!(b, 1, 1);
        assert_bucket_eq!(b, 2, 0);

        b.measure(101, 1);
        assert_bucket_eq!(b, 0, 2);
        assert_bucket_eq!(b, 1, 2);
        assert_bucket_eq!(b, 2, 0);

        b.measure(200, 1);
        assert_bucket_eq!(b, 0, 2);
        assert_bucket_eq!(b, 1, 2);
        assert_bucket_eq!(b, 2, 1);

        b.measure(299, 1);
        assert_bucket_eq!(b, 0, 2);
        assert_bucket_eq!(b, 1, 2);
        assert_bucket_eq!(b, 2, 2);

        b.measure(222, 1);
        assert_bucket_eq!(b, 0, 2);
        assert_bucket_eq!(b, 1, 2);
        assert_bucket_eq!(b, 2, 3);

        b.measure(300, 1);
        assert_bucket_eq!(b, 0, 2);
        assert_bucket_eq!(b, 1, 2);
        assert_bucket_eq!(b, 2, 3);
        assert_bucket_eq!(b, 3, 1);

        b.measure(888, 1);
        assert_bucket_eq!(b, 8, 1);

        b.measure(4096, 1);
        assert_bucket_eq!(b, 9, 1);

        for bucket in h.buckets.iter() {
            assert_eq!(bucket.load(Relaxed), 0);
        }

        b.submit(&h);

        for i in 0..h.buckets.len() {
            assert_eq!(h.buckets[i].load(Relaxed), b.buckets[i]);
        }

        b.submit(&h);

        for i in 0..h.buckets.len() {
            assert_eq!(h.buckets[i].load(Relaxed), b.buckets[i]);
        }
    }

    // `measure` adds `count`, not just 1.
    #[test]
    fn inc_by_more_than_one() {
        let h = linear(100, 10);

        let mut b = HistogramBatch::from_histogram(&h);

        b.measure(0, 3);
        assert_bucket_eq!(b, 0, 3);
        assert_bucket_eq!(b, 1, 0);

        b.measure(50, 5);
        assert_bucket_eq!(b, 0, 8);
        assert_bucket_eq!(b, 1, 0);

        b.measure(100, 2);
        assert_bucket_eq!(b, 0, 8);
        assert_bucket_eq!(b, 1, 2);
        assert_bucket_eq!(b, 2, 0);

        b.measure(101, 19);
        assert_bucket_eq!(b, 0, 8);
        assert_bucket_eq!(b, 1, 21);
        assert_bucket_eq!(b, 2, 0);

        for bucket in h.buckets.iter() {
            assert_eq!(bucket.load(Relaxed), 0);
        }

        b.submit(&h);

        for i in 0..h.buckets.len() {
            assert_eq!(h.buckets[i].load(Relaxed), b.buckets[i]);
        }

        b.submit(&h);

        for i in 0..h.buckets.len() {
            assert_eq!(h.buckets[i].load(Relaxed), b.buckets[i]);
        }
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/metrics/scheduler.rs | tokio/src/runtime/metrics/scheduler.rs | use crate::loom::sync::atomic::Ordering::Relaxed;
use crate::util::metric_atomics::MetricAtomicU64;
/// Scheduler-level counters shared across a runtime.
///
/// This is internal storage written by the scheduler (see the `inc_*`
/// methods below); it does not itself "retrieve" anything — the previous
/// header appears to have been copied from the public `RuntimeMetrics`
/// handle type. Presumably these counters are surfaced through the public
/// metrics API — confirm against `RuntimeMetrics`.
///
/// **Note**: exposed behind an [unstable API][unstable]; the related public
/// API may break in 1.x releases. See [the documentation on unstable
/// features][unstable] for details.
///
/// [unstable]: crate#unstable-features
#[derive(Debug)]
pub(crate) struct SchedulerMetrics {
    /// Number of tasks that are scheduled from outside the runtime.
    pub(super) remote_schedule_count: MetricAtomicU64,
    /// Number of tasks forced to yield because they exhausted their budget
    /// (incremented by `inc_budget_forced_yield_count`).
    pub(super) budget_forced_yield_count: MetricAtomicU64,
}
impl SchedulerMetrics {
    /// Creates a fresh set of counters, all starting at zero.
    pub(crate) fn new() -> SchedulerMetrics {
        Self {
            budget_forced_yield_count: MetricAtomicU64::new(0),
            remote_schedule_count: MetricAtomicU64::new(0),
        }
    }

    /// Increment the number of tasks scheduled externally
    pub(crate) fn inc_remote_schedule_count(&self) {
        // Relaxed: pure statistics counter, no synchronization implied.
        self.remote_schedule_count.add(1, Relaxed);
    }

    /// Increment the number of tasks forced to yield due to budget exhaustion
    pub(crate) fn inc_budget_forced_yield_count(&self) {
        self.budget_forced_yield_count.add(1, Relaxed);
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/metrics/histogram/h2_histogram.rs | tokio/src/runtime/metrics/histogram/h2_histogram.rs | use crate::runtime::metrics::batch::duration_as_u64;
use std::cmp;
use std::error::Error;
use std::fmt::{Display, Formatter};
use std::time::Duration;
/// Default smallest accurately-measurable value (upper bound of bucket 0).
const DEFAULT_MIN_VALUE: Duration = Duration::from_nanos(100);
/// Default largest non-truncated value; larger values fall in the final
/// catch-all bucket.
const DEFAULT_MAX_VALUE: Duration = Duration::from_secs(60);

/// Default precision is 2^-2 = 25% max error
const DEFAULT_PRECISION: u32 = 2;
/// Upper bound on `p` (error bound 2^-10); enforced by
/// `LogHistogramBuilder::precision_exact`.
const MAX_PRECISION: u32 = 10;
/// Log Histogram
///
/// This implements an [H2 Histogram](https://iop.systems/blog/h2-histogram/), a histogram similar
/// to HdrHistogram, but with better performance. It guarantees an error bound of `2^-p`.
///
/// Unlike a traditional H2 histogram this has two small changes:
/// 1. The 0th bucket runs for `0..min_value`. This allows truncating a large number of buckets that
///    would cover extremely short timescales that customers usually don't care about.
/// 2. The final bucket runs all the way to `u64::MAX` — traditional H2 histograms would truncate
///    or reject these values.
///
/// For information about the default configuration, see [`LogHistogramBuilder`].
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct LogHistogram {
    /// Number of buckets in the histogram
    pub(crate) num_buckets: usize,

    /// Precision of histogram. Error is bounded to 2^-p.
    pub(crate) p: u32,

    /// All buckets `idx < bucket_offset` are grouped into bucket 0.
    ///
    /// This increases the smallest measurable value of the histogram.
    pub(crate) bucket_offset: usize,
}
impl Default for LogHistogram {
    /// Builds the default configuration (see [`LogHistogramBuilder`]).
    fn default() -> Self {
        let builder = LogHistogramBuilder::default();
        builder.build()
    }
}
impl LogHistogram {
    /// Create a Histogram configuration directly from values for `n` and `p`.
    ///
    /// # Panics
    /// - If `bucket_offset` is greater than the specified number of buckets, `(n - p + 1) * 2^p`
    fn from_n_p(n: u32, p: u32, bucket_offset: usize) -> Self {
        assert!(n >= p, "{n} (n) must be at least as large as {p} (p)");
        // Full H2 layout has (n - p + 1) groups of 2^p buckets; the first
        // `bucket_offset` of them are folded into bucket 0.
        let num_buckets = ((n - p + 1) << p) as usize - bucket_offset;
        Self {
            num_buckets,
            p,
            bucket_offset,
        }
    }

    /// Drops trailing buckets until the histogram no longer represents
    /// `max_value` exactly, then restores one so the final (catch-all) bucket
    /// covers `max_value..=u64::MAX`.
    fn truncate_to_max_value(&self, max_value: u64) -> LogHistogram {
        let mut hist = *self;
        while hist.max_value() >= max_value {
            hist.num_buckets -= 1;
        }
        hist.num_buckets += 1;
        hist
    }

    /// Creates a builder for [`LogHistogram`]
    pub fn builder() -> LogHistogramBuilder {
        LogHistogramBuilder::default()
    }

    /// The maximum value that can be stored before truncation in this histogram
    pub fn max_value(&self) -> u64 {
        // End of the second-to-last bucket: the last bucket itself always
        // stretches to u64::MAX.
        self.bucket_range(self.num_buckets - 2).end
    }

    pub(crate) fn value_to_bucket(&self, value: u64) -> usize {
        // Global H2 index, shifted down by the offset (indices below the
        // offset collapse into bucket 0 via saturating_sub) and clamped into
        // the final bucket.
        let index = bucket_index(value, self.p);
        let offset_bucket = index.saturating_sub(self.bucket_offset as u64);
        let max = self.num_buckets - 1;
        offset_bucket.min(max as u64) as usize
    }

    pub(crate) fn bucket_range(&self, bucket: usize) -> std::ops::Range<u64> {
        let LogHistogram {
            p,
            bucket_offset,
            num_buckets,
        } = self;
        let input_bucket = bucket;
        // Convert back to the global (un-offset) H2 index.
        let bucket = bucket + bucket_offset;
        // Special-case endpoints: bucket 0 always starts at 0 (it absorbs the
        // offset buckets) and the last bucket always ends at u64::MAX.
        let range_start_0th_bucket = match input_bucket {
            0 => Some(0_u64),
            _ => None,
        };
        let range_end_last_bucket = match input_bucket {
            n if n == num_buckets - 1 => Some(u64::MAX),
            _ => None,
        };
        if bucket < 1 << p {
            // The first set of buckets are all size 1
            let bucket = bucket as u64;
            range_start_0th_bucket.unwrap_or(bucket)..range_end_last_bucket.unwrap_or(bucket + 1)
        } else {
            // Determine which range of buckets we're in, then determine which bucket in the range it is
            let bucket = bucket as u64;
            let p = *p as u64;
            // w: log2 width of each bucket in this group; s: group start value.
            let w = (bucket >> p) - 1;
            let base_bucket = (w + 1) * (1_u64 << p);
            let offset = bucket - base_bucket;
            let s = 1_u64 << (w + p);
            let start = s + (offset << w);
            let end = s + ((offset + 1) << w);
            range_start_0th_bucket.unwrap_or(start)..range_end_last_bucket.unwrap_or(end)
        }
    }
}
/// Configuration for a [`LogHistogram`]
///
/// The log-scaled histogram implements an H2 histogram where the first bucket covers
/// the range from 0 to [`LogHistogramBuilder::min_value`] and the final bucket covers
/// [`LogHistogramBuilder::max_value`] to infinity. The precision is bounded to the specified
/// [`LogHistogramBuilder::max_error`]. Specifically, the precision is the next smallest value
/// of `2^-p` such that it is smaller than the requested max error. You can also select `p` directly
/// with [`LogHistogramBuilder::precision_exact`].
///
/// Depending on the selected parameters, the number of buckets required is variable. To ensure
/// that the histogram size is acceptable, callers may call [`LogHistogramBuilder::max_buckets`].
/// If the resulting histogram would require more buckets, then the method will return an error.
///
/// ## Default values
/// The default configuration provides the following settings:
/// 1. `min_value`: 100ns
/// 2. `max_value`: 68 seconds. The final bucket covers all values >68 seconds
/// 3. `precision`: max error of 25%
///
/// This uses 237 64-bit buckets.
#[derive(Default, Debug, Copy, Clone)]
pub struct LogHistogramBuilder {
    // `None` means use DEFAULT_MAX_VALUE.
    max_value: Option<Duration>,
    // `None` means use DEFAULT_MIN_VALUE.
    min_value: Option<Duration>,
    // `None` means use DEFAULT_PRECISION.
    precision: Option<u32>,
}
impl From<LogHistogramBuilder> for LogHistogram {
    /// Builds the histogram described by the builder.
    fn from(builder: LogHistogramBuilder) -> Self {
        builder.build()
    }
}
impl LogHistogramBuilder {
    /// Set the precision for this histogram
    ///
    /// This function determines the smallest value of `p` that would satisfy the requested precision
    /// such that `2^-p` is less than `precision`. To set `p` directly, use
    /// [`LogHistogramBuilder::precision_exact`].
    ///
    /// Precision controls the size of the "bucket groups" (consecutive buckets with identical
    /// ranges). When `p` is 0, each bucket will be twice the size of the previous bucket. To match
    /// the behavior of the legacy log histogram implementation, use `builder.precision_exact(0)`.
    ///
    /// The default value is 25% (2^-2)
    ///
    /// The highest supported precision is `0.0977%` `(2^-10)`. Provided values
    /// less than this will be truncated.
    ///
    /// # Panics
    /// - `max_error` <= 0
    /// - `max_error` >= 1
    pub fn max_error(mut self, max_error: f64) -> Self {
        assert!(max_error > 0.0, "max_error must be greater than 0");
        assert!(max_error < 1.0, "max_error must be less than 1");
        let mut p = 2;
        // Grow `p` until 2^-p satisfies the requested bound, but never past
        // MAX_PRECISION. The guard must be `p < MAX_PRECISION` (not `<=`):
        // with `<=`, a max_error below 2^-10 produced p == 11, exceeding the
        // documented 2^-10 cap and the invariant `precision_exact` asserts.
        while 2_f64.powf(-(p as f64)) > max_error && p < MAX_PRECISION {
            p += 1;
        }
        self.precision = Some(p);
        self
    }

    /// Sets the precision of this histogram directly.
    ///
    /// The precision (meaning: the ratio `n/bucket_range(n)` for some given `n`) will be `2^-p`.
    ///
    /// Precision controls the number consecutive buckets with identically sized ranges.
    /// When `p` is 0, each bucket will be twice the size of the previous bucket (bucket groups are
    /// only a single bucket wide).
    ///
    /// To match the behavior of the legacy implementation ([`HistogramScale::Log`]), use `builder.precision_exact(0)`.
    ///
    /// # Panics
    /// - `p` > 10
    ///
    /// [`HistogramScale::Log`]: [crate::runtime::HistogramScale]
    pub fn precision_exact(mut self, p: u32) -> Self {
        assert!(p <= MAX_PRECISION, "precision must be <= {MAX_PRECISION}");
        self.precision = Some(p);
        self
    }

    /// Sets the minimum duration that can be accurately stored by this histogram.
    ///
    /// This sets the resolution. The first bucket will be no larger than
    /// the provided duration. Setting this value will reduce the number of required buckets,
    /// sometimes quite significantly.
    pub fn min_value(mut self, duration: Duration) -> Self {
        self.min_value = Some(duration);
        self
    }

    /// Sets the maximum value that can by this histogram without truncation
    ///
    /// Values greater than this fall in the final bucket that stretches to `u64::MAX`.
    ///
    /// # Panics
    /// The provided value is 0
    pub fn max_value(mut self, duration: Duration) -> Self {
        if duration.is_zero() {
            panic!("max value must be greater than 0");
        }
        self.max_value = Some(duration);
        self
    }

    /// Builds the log histogram, enforcing the max buckets requirement
    ///
    /// # Errors
    /// Returns [`InvalidHistogramConfiguration::TooManyBuckets`] when the
    /// configuration would need more than `max_buckets` buckets.
    pub fn max_buckets(
        &mut self,
        max_buckets: usize,
    ) -> Result<LogHistogram, InvalidHistogramConfiguration> {
        let histogram = self.build();
        if histogram.num_buckets > max_buckets {
            return Err(InvalidHistogramConfiguration::TooManyBuckets {
                required_bucket_count: histogram.num_buckets,
            });
        }
        Ok(histogram)
    }

    /// Builds the log histogram
    pub fn build(&self) -> LogHistogram {
        let requested_max_value = duration_as_u64(self.max_value.unwrap_or(DEFAULT_MAX_VALUE));
        let max_value = requested_max_value.next_power_of_two();
        let min_value = duration_as_u64(self.min_value.unwrap_or(DEFAULT_MIN_VALUE));
        let p = self.precision.unwrap_or(DEFAULT_PRECISION);
        // determine the bucket offset by finding the bucket for the minimum value. We need to lower
        // this by one to ensure we are at least as granular as requested.
        let bucket_offset = cmp::max(bucket_index(min_value, p), 1) - 1;
        // n must be at least as large as p
        let n = max_value.ilog2().max(p) + 1;
        LogHistogram::from_n_p(n, p, bucket_offset as usize)
            .truncate_to_max_value(requested_max_value)
    }
}
/// Error constructing a histogram
///
/// Returned by [`LogHistogramBuilder::max_buckets`] when the requested
/// configuration exceeds the caller's bucket budget.
#[derive(Debug)]
pub enum InvalidHistogramConfiguration {
    /// This histogram required more than the specified number of buckets
    TooManyBuckets {
        /// The number of buckets that would have been required
        required_bucket_count: usize,
    },
}
impl Display for InvalidHistogramConfiguration {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            InvalidHistogramConfiguration::TooManyBuckets {
                required_bucket_count: count,
            } => write!(
                f,
                "The configuration for this histogram would have required {count} buckets"
            ),
        }
    }
}

impl Error for InvalidHistogramConfiguration {}
/// Compute the index for a given value + p combination
///
/// This function does NOT enforce that the value is within the number of expected buckets.
fn bucket_index(value: u64, p: u32) -> u64 {
    // Algorithm described here: https://iop.systems/blog/h2-histogram/
    if value == 0 {
        return 0;
    }
    // Position of the highest set bit.
    let h = 63 - value.leading_zeros();
    if h <= p {
        // Small values get one bucket each: the index is the value itself.
        return value;
    }
    // Group of 2^p buckets selected by h; w is the log2 width of each bucket
    // in that group, and the low bits of `value` pick the bucket within it.
    let w = h - p;
    ((w + 1) * (1_u32 << p)) as u64 + ((value - (1_u64 << h)) >> w)
}
#[cfg(test)]
mod test {
use super::InvalidHistogramConfiguration;
use crate::runtime::metrics::histogram::h2_histogram::LogHistogram;
use crate::runtime::metrics::histogram::HistogramType;
#[cfg(not(target_family = "wasm"))]
mod proptests {
use super::*;
use crate::runtime::metrics::batch::duration_as_u64;
use crate::runtime::metrics::histogram::h2_histogram::MAX_PRECISION;
use proptest::prelude::*;
use std::time::Duration;
fn valid_log_histogram_strategy() -> impl Strategy<Value = LogHistogram> {
(1..=50u32, 0..=MAX_PRECISION, 0..100usize).prop_map(|(n, p, bucket_offset)| {
let p = p.min(n);
let base = LogHistogram::from_n_p(n, p, 0);
LogHistogram::from_n_p(n, p, bucket_offset.min(base.num_buckets - 1))
})
}
fn log_histogram_settings() -> impl Strategy<Value = (u64, u64, u32)> {
(
duration_as_u64(Duration::from_nanos(1))..duration_as_u64(Duration::from_secs(20)),
duration_as_u64(Duration::from_secs(1))..duration_as_u64(Duration::from_secs(1000)),
0..MAX_PRECISION,
)
}
// test against a wide assortment of different histogram configurations to ensure invariants hold
proptest! {
    #[test]
    fn log_histogram_settings_maintain_invariants((min_value, max_value, p) in log_histogram_settings()) {
        if max_value < min_value {
            return Ok(())
        }
        let (min_value, max_value) = (Duration::from_nanos(min_value), Duration::from_nanos(max_value));
        let histogram = LogHistogram::builder().min_value(min_value).max_value(max_value).precision_exact(p).build();
        let first_bucket_end = Duration::from_nanos(histogram.bucket_range(0).end);
        let last_bucket_start = Duration::from_nanos(histogram.bucket_range(histogram.num_buckets - 1).start);
        let second_last_bucket_start = Duration::from_nanos(histogram.bucket_range(histogram.num_buckets - 2).start);
        prop_assert!(
            first_bucket_end <= min_value,
            "first bucket end ({first_bucket_end:?}) must be less than or equal to `min_value` ({min_value:?})"
        );
        prop_assert!(
            last_bucket_start > max_value,
            "last bucket start ({last_bucket_start:?}) must be greater than `max_value` ({max_value:?})"
        );

        // We should have the exact right number of buckets. The second to last bucket should be strictly less than max value.
        prop_assert!(
            second_last_bucket_start < max_value,
            "second last bucket start ({second_last_bucket_start:?}) must be strictly less than `max_value` ({max_value:?})"
        );
    }

    #[test]
    fn proptest_log_histogram_invariants(histogram in valid_log_histogram_strategy()) {
        // 1. Assert that the first bucket always starts at 0
        let first_range = histogram.bucket_range(0);
        prop_assert_eq!(first_range.start, 0, "First bucket doesn't start at 0");

        // Check that bucket ranges are disjoint and contiguous
        let mut prev_end = 0;
        let mut prev_size = 0;
        for bucket in 0..histogram.num_buckets {
            let range = histogram.bucket_range(bucket);
            prop_assert_eq!(range.start, prev_end, "Bucket ranges are not contiguous");
            prop_assert!(range.start < range.end, "Bucket range is empty or reversed");

            let size = range.end - range.start;

            // 2. Assert that the sizes of the buckets are always powers of 2
            if bucket > 0 && bucket < histogram.num_buckets - 1 {
                prop_assert!(size.is_power_of_two(), "Bucket size is not a power of 2");
            }

            if bucket > 1 {
                // 3. Assert that the sizes of the buckets are monotonically increasing
                // (after the first bucket, which may be smaller than the 0 bucket)
                prop_assert!(size >= prev_size, "Bucket sizes are not monotonically increasing: This size {size} (previous: {prev_size}). Bucket: {bucket}");
            }

            // 4. Assert that the size of the buckets is always within the error bound of 2^-p
            if bucket > 0 && bucket < histogram.num_buckets - 1 {
                let p = histogram.p as f64;
                let error_bound = 2.0_f64.powf(-p);
                // the most it could be wrong is by the length of the range / 2
                let relative_error = ((size as f64 - 1.0) / 2.0) / range.start as f64;
                prop_assert!(
                    relative_error <= error_bound,
                    "Bucket size error exceeds bound: {:?} > {:?} ({range:?})",
                    relative_error,
                    error_bound
                );
            }

            prev_end = range.end;
            prev_size = size;
        }
        prop_assert_eq!(prev_end, u64::MAX, "Last bucket should end at u64::MAX");

        // Check bijection between value_to_bucket and bucket_range
        for bucket in 0..histogram.num_buckets {
            let range = histogram.bucket_range(bucket);
            for value in [range.start, range.end - 1] {
                prop_assert_eq!(
                    histogram.value_to_bucket(value),
                    bucket,
                    "value_to_bucket is not consistent with bucket_range"
                );
            }
        }
    }
}
}
#[test]
fn bucket_ranges_are_correct() {
    let p = 2;
    let config = HistogramType::H2(LogHistogram {
        num_buckets: 1024,
        p,
        bucket_offset: 0,
    });

    // The first 2^(p+1) buckets are "precise": each holds exactly one value.
    let precise = 2_usize.pow(p + 1);
    for i in 0..precise {
        assert_eq!(
            config.value_to_bucket(i as u64),
            i,
            "{i} should be in bucket {i}"
        );
    }

    // Immediately past the precise range, consecutive pairs of values share
    // a bucket, so the bucket index advances at half the rate of the value.
    for value in precise..precise * 2 {
        assert_eq!(
            config.value_to_bucket(value as u64),
            precise + ((value - precise) / 2),
            "bucket for {value}"
        );
    }
}
// test buckets against known values
#[test]
fn bucket_computation_spot_check() {
    let p = 9;
    let config = HistogramType::H2(LogHistogram {
        num_buckets: 4096,
        p,
        bucket_offset: 0,
    });

    // (value, expected bucket) pairs checked against known-good outputs.
    let cases: [(u64, usize); 5] = [
        (1, 1),
        (1023, 1023),
        (1024, 1024),
        (2048, 1536),
        (2052, 1537),
    ];
    for (value, expected_bucket) in cases {
        assert_eq!(config.value_to_bucket(value), expected_bucket);
    }
}
#[test]
fn last_bucket_goes_to_infinity() {
    // Regardless of configuration, the final bucket must extend to u64::MAX
    // so that no value falls outside the histogram.
    let conf = HistogramType::H2(LogHistogram::from_n_p(16, 3, 10));
    let last_bucket = conf.bucket_range(conf.num_buckets() - 1);
    assert_eq!(last_bucket.end, u64::MAX);
}
#[test]
fn bucket_offset() {
    // Configure the histogram to skip its first 10 buckets.
    let conf = HistogramType::H2(LogHistogram::from_n_p(16, 3, 10));

    // Any value that would have landed in a skipped bucket collapses into
    // bucket 0.
    for value in 0..=10 {
        assert_eq!(conf.value_to_bucket(value), 0);
    }

    // There are 16 1-element buckets. We skipped 10 of them. The first
    // 2-element bucket therefore starts at value 16, which maps to bucket 6.
    assert_eq!(conf.value_to_bucket(16), 6);
    assert_eq!(conf.value_to_bucket(17), 6);
    assert_eq!(conf.bucket_range(6), 16..18);
}
#[test]
fn max_buckets_enforcement() {
    // Requesting a very tight error bound with a tiny bucket budget must be
    // rejected, and the error should report how many buckets were needed.
    let error = LogHistogram::builder()
        .max_error(0.001)
        .max_buckets(5)
        .expect_err("this produces way more than 5 buckets");

    let required = match error {
        InvalidHistogramConfiguration::TooManyBuckets {
            required_bucket_count,
        } => required_bucket_count,
    };
    assert_eq!(required, 27291);
}
#[test]
fn default_configuration_size() {
    // Guard against accidental changes to the default bucket count.
    let histogram = LogHistogram::builder().build();
    assert_eq!(histogram.num_buckets, 119);
}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time/source.rs | tokio/src/runtime/time/source.rs | use super::MAX_SAFE_MILLIS_DURATION;
use crate::time::{Clock, Duration, Instant};
/// A structure which handles conversion from Instants to `u64` timestamps.
#[derive(Debug)]
pub(crate) struct TimeSource {
    /// The instant this time source was created; ticks are whole
    /// milliseconds elapsed since this point (see `instant_to_tick`).
    start_time: Instant,
}
impl TimeSource {
    /// Creates a time source anchored at the clock's current time.
    pub(crate) fn new(clock: &Clock) -> Self {
        Self {
            start_time: clock.now(),
        }
    }

    /// Converts a deadline into a tick, rounding up so a timer never fires
    /// before its deadline.
    pub(crate) fn deadline_to_tick(&self, t: Instant) -> u64 {
        // Round up to the end of a ms
        self.instant_to_tick(t + Duration::from_nanos(999_999))
    }

    /// Converts an instant into a tick: whole milliseconds since
    /// `start_time`, clamped to `MAX_SAFE_MILLIS_DURATION`.
    pub(crate) fn instant_to_tick(&self, t: Instant) -> u64 {
        // round up
        // (`as_millis` itself truncates; the round-up is achieved by the
        // 999_999ns offset added in `deadline_to_tick` before calling here.)
        let dur: Duration = t.saturating_duration_since(self.start_time);
        let ms = dur
            .as_millis()
            .try_into()
            .unwrap_or(MAX_SAFE_MILLIS_DURATION);

        ms.min(MAX_SAFE_MILLIS_DURATION)
    }

    /// Converts a tick back into a `Duration` relative to `start_time`.
    pub(crate) fn tick_to_duration(&self, t: u64) -> Duration {
        Duration::from_millis(t)
    }

    /// Returns the clock's current time expressed as a tick.
    pub(crate) fn now(&self, clock: &Clock) -> u64 {
        self.instant_to_tick(clock.now())
    }

    /// Test-only accessor for the anchor instant.
    #[cfg(test)]
    #[allow(dead_code)]
    pub(super) fn start_time(&self) -> Instant {
        self.start_time
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time/mod.rs | tokio/src/runtime/time/mod.rs | // Currently, rust warns when an unsafe fn contains an unsafe {} block. However,
// in the future, this will change to the reverse. For now, suppress this
// warning and generally stick with being explicit about unsafety.
#![allow(unused_unsafe)]
#![cfg_attr(not(feature = "rt"), allow(dead_code))]
//! Time driver.
mod entry;
pub(crate) use entry::TimerEntry;
use entry::{EntryList, TimerHandle, TimerShared, MAX_SAFE_MILLIS_DURATION};
mod handle;
pub(crate) use self::handle::Handle;
mod source;
pub(crate) use source::TimeSource;
mod wheel;
#[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
use super::time_alt;
use crate::loom::sync::atomic::{AtomicBool, Ordering};
use crate::loom::sync::Mutex;
use crate::runtime::driver::{self, IoHandle, IoStack};
use crate::time::error::Error;
use crate::time::{Clock, Duration};
use crate::util::WakeList;
use std::fmt;
use std::{num::NonZeroU64, ptr::NonNull};
/// Time implementation that drives [`Sleep`][sleep], [`Interval`][interval], and [`Timeout`][timeout].
///
/// A `Driver` instance tracks the state necessary for managing time and
/// notifying the [`Sleep`][sleep] instances once their deadlines are reached.
///
/// It is expected that a single instance manages many individual [`Sleep`][sleep]
/// instances. The `Driver` implementation is thread-safe and, as such, is able
/// to handle callers from across threads.
///
/// After creating the `Driver` instance, the caller must repeatedly call `park`
/// or `park_timeout`. The time driver will perform no work unless `park` or
/// `park_timeout` is called repeatedly.
///
/// The driver has a resolution of one millisecond. Any unit of time that falls
/// between milliseconds are rounded up to the next millisecond.
///
/// When an instance is dropped, any outstanding [`Sleep`][sleep] instance that has not
/// elapsed will be notified with an error. At this point, calling `poll` on the
/// [`Sleep`][sleep] instance will result in panic.
///
/// # Implementation
///
/// The time driver is based on the [paper by Varghese and Lauck][paper].
///
/// A hashed timing wheel is a vector of slots, where each slot handles a time
/// slice. As time progresses, the timer walks over the slot for the current
/// instant, and processes each entry for that slot. When the timer reaches the
/// end of the wheel, it starts again at the beginning.
///
/// The implementation maintains six wheels arranged in a set of levels. As the
/// levels go up, the slots of the associated wheel represent larger intervals
/// of time. At each level, the wheel has 64 slots. Each slot covers a range of
/// time equal to the wheel at the lower level. At level zero, each slot
/// represents one millisecond of time.
///
/// The wheels are:
///
/// * Level 0: 64 x 1 millisecond slots.
/// * Level 1: 64 x 64 millisecond slots.
/// * Level 2: 64 x ~4 second slots.
/// * Level 3: 64 x ~4 minute slots.
/// * Level 4: 64 x ~4 hour slots.
/// * Level 5: 64 x ~12 day slots.
///
/// When the timer processes entries at level zero, it will notify all the
/// `Sleep` instances as their deadlines have been reached. For all higher
/// levels, all entries will be redistributed across the wheel at the next level
/// down. Eventually, as time progresses, entries with [`Sleep`][sleep] instances will
/// either be canceled (dropped) or their associated entries will reach level
/// zero and be notified.
///
/// [paper]: http://www.cs.columbia.edu/~nahum/w6998/papers/ton97-timing-wheels.pdf
/// [sleep]: crate::time::Sleep
/// [timeout]: crate::time::Timeout
/// [interval]: crate::time::Interval
#[derive(Debug)]
pub(crate) struct Driver {
    /// Parker to delegate to. Blocking (`park`/`park_timeout`) and shutdown
    /// are forwarded to the underlying I/O driver stack.
    park: IoStack,
}
/// Timer-driver state shared through `Handle`, in one of two layouts
/// depending on the runtime flavor.
enum Inner {
    /// A single timer wheel shared by all workers, protected by a mutex.
    Traditional {
        // The state is split like this so `Handle` can access `is_shutdown` without locking the mutex
        state: Mutex<InnerState>,

        /// True if the driver is being shutdown.
        is_shutdown: AtomicBool,

        // When `true`, a call to `park_timeout` should immediately return and time
        // should not advance. One reason for this to be `true` is if the task
        // passed to `Runtime::block_on` called `task::yield_now()`.
        //
        // While it may look racy, it only has any effect when the clock is paused
        // and pausing the clock is restricted to a single-threaded runtime.
        #[cfg(feature = "test-util")]
        did_wake: AtomicBool,
    },
    /// Alternative driver: only flags are stored here — the wheel is owned
    /// elsewhere and passed in by the caller (see `process_at_time_alt`).
    #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
    Alternative {
        /// True if the driver is being shutdown.
        is_shutdown: AtomicBool,

        // When `true`, a call to `park_timeout` should immediately return and time
        // should not advance. One reason for this to be `true` is if the task
        // passed to `Runtime::block_on` called `task::yield_now()`.
        //
        // While it may look racy, it only has any effect when the clock is paused
        // and pausing the clock is restricted to a single-threaded runtime.
        #[cfg(feature = "test-util")]
        did_wake: AtomicBool,
    },
}
/// Time state shared which must be protected by a `Mutex`
struct InnerState {
    /// The earliest time at which we promise to wake up without unparking.
    ///
    /// Stored as `NonZeroU64`: a real tick of 0 is encoded as 1 (see
    /// `park_internal` / `process_at_time`), and `None` means no timer
    /// is scheduled.
    next_wake: Option<NonZeroU64>,

    /// Timer wheel.
    wheel: wheel::Wheel,
}
// ===== impl Driver =====
impl Driver {
    /// Creates a new `Driver` instance that uses `park` to block the current
    /// thread and `time_source` to get the current time and convert to ticks.
    ///
    /// Specifying the source of time is useful when testing.
    pub(crate) fn new(park: IoStack, clock: &Clock) -> (Driver, Handle) {
        let time_source = TimeSource::new(clock);

        let handle = Handle {
            time_source,
            inner: Inner::Traditional {
                state: Mutex::new(InnerState {
                    next_wake: None,
                    wheel: wheel::Wheel::new(),
                }),
                is_shutdown: AtomicBool::new(false),
                #[cfg(feature = "test-util")]
                did_wake: AtomicBool::new(false),
            },
        };

        let driver = Driver { park };

        (driver, handle)
    }

    /// Creates a `Handle` for the alternative timer. No wheel is stored in
    /// the handle; callers own the wheel and pass it to
    /// `Handle::process_at_time_alt`.
    #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
    pub(crate) fn new_alt(clock: &Clock) -> Handle {
        let time_source = TimeSource::new(clock);

        Handle {
            time_source,
            inner: Inner::Alternative {
                is_shutdown: AtomicBool::new(false),
                #[cfg(feature = "test-util")]
                did_wake: AtomicBool::new(false),
            },
        }
    }

    /// Parks the thread until a timer is due or the driver is unparked.
    pub(crate) fn park(&mut self, handle: &driver::Handle) {
        self.park_internal(handle, None);
    }

    /// Parks the thread for at most `duration`.
    pub(crate) fn park_timeout(&mut self, handle: &driver::Handle, duration: Duration) {
        self.park_internal(handle, Some(duration));
    }

    /// Shuts the driver down: marks it shut down, advances time to the end
    /// of time so every registered timer fires, then shuts down the parker.
    /// Idempotent — a second call returns immediately.
    pub(crate) fn shutdown(&mut self, rt_handle: &driver::Handle) {
        let handle = rt_handle.time();

        if handle.is_shutdown() {
            return;
        }

        match &handle.inner {
            Inner::Traditional { is_shutdown, .. } => {
                is_shutdown.store(true, Ordering::SeqCst);
            }
            #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
            Inner::Alternative { is_shutdown, .. } => {
                is_shutdown.store(true, Ordering::SeqCst);
            }
        }

        // Advance time forward to the end of time.
        handle.process_at_time(u64::MAX);

        self.park.shutdown(rt_handle);
    }

    /// Common park path: records the next promised wakeup, parks (bounded by
    /// `limit` if given), then fires any timers that are now due.
    fn park_internal(&mut self, rt_handle: &driver::Handle, limit: Option<Duration>) {
        let handle = rt_handle.time();
        let mut lock = handle.inner.lock();

        assert!(!handle.is_shutdown());

        let next_wake = lock.wheel.next_expiration_time();
        // `next_wake` is stored as `NonZeroU64`; a real tick of 0 is encoded
        // as 1. This is conservative: it can cause a spurious unpark, never
        // a missed one.
        lock.next_wake =
            next_wake.map(|t| NonZeroU64::new(t).unwrap_or_else(|| NonZeroU64::new(1).unwrap()));

        drop(lock);

        match next_wake {
            Some(when) => {
                let now = handle.time_source.now(rt_handle.clock());
                // Note that we effectively round up to 1ms here - this avoids
                // very short-duration microsecond-resolution sleeps that the OS
                // might treat as zero-length.
                let mut duration = handle
                    .time_source
                    .tick_to_duration(when.saturating_sub(now));

                if duration > Duration::from_millis(0) {
                    if let Some(limit) = limit {
                        duration = std::cmp::min(limit, duration);
                    }

                    self.park_thread_timeout(rt_handle, duration);
                } else {
                    // A timer is already due: poll the I/O driver without blocking.
                    self.park.park_timeout(rt_handle, Duration::from_secs(0));
                }
            }
            None => {
                if let Some(duration) = limit {
                    self.park_thread_timeout(rt_handle, duration);
                } else {
                    // No timers and no limit: block until unparked.
                    self.park.park(rt_handle);
                }
            }
        }

        // Process pending timers after waking up
        handle.process(rt_handle.clock());
    }

    cfg_test_util! {
        fn park_thread_timeout(&mut self, rt_handle: &driver::Handle, duration: Duration) {
            let handle = rt_handle.time();
            let clock = rt_handle.clock();

            if clock.can_auto_advance() {
                // With a paused clock, don't actually sleep; poll the I/O
                // driver without blocking and advance simulated time instead.
                self.park.park_timeout(rt_handle, Duration::from_secs(0));

                // If the time driver was woken, then the park completed
                // before the "duration" elapsed (usually caused by a
                // yield in `Runtime::block_on`). In this case, we don't
                // advance the clock.
                if !handle.did_wake() {
                    // Simulate advancing time
                    if let Err(msg) = clock.advance(duration) {
                        panic!("{}", msg);
                    }
                }
            } else {
                self.park.park_timeout(rt_handle, duration);
            }
        }
    }

    cfg_not_test_util! {
        fn park_thread_timeout(&mut self, rt_handle: &driver::Handle, duration: Duration) {
            self.park.park_timeout(rt_handle, duration);
        }
    }
}
impl Handle {
    /// Fires all timers that are due as of the clock's current time.
    pub(self) fn process(&self, clock: &Clock) {
        let now = self.time_source().now(clock);

        self.process_at_time(now);
    }

    /// Fires all timers whose deadline is at or before tick `now`.
    ///
    /// Wakers are invoked in batches with the driver lock released, so a
    /// task that touches its timer from the wakeup path cannot deadlock.
    pub(self) fn process_at_time(&self, mut now: u64) {
        let mut waker_list = WakeList::new();

        let mut lock = self.inner.lock();

        if now < lock.wheel.elapsed() {
            // Time went backwards! This normally shouldn't happen as the Rust language
            // guarantees that an Instant is monotonic, but can happen when running
            // Linux in a VM on a Windows host due to std incorrectly trusting the
            // hardware clock to be monotonic.
            //
            // See <https://github.com/tokio-rs/tokio/issues/3619> for more information.
            now = lock.wheel.elapsed();
        }

        while let Some(entry) = lock.wheel.poll(now) {
            debug_assert!(unsafe { entry.is_pending() });

            // SAFETY: We hold the driver lock, and just removed the entry from any linked lists.
            if let Some(waker) = unsafe { entry.fire(Ok(())) } {
                waker_list.push(waker);

                if !waker_list.can_push() {
                    // Wake a batch of wakers. To avoid deadlock, we must do this with the lock temporarily dropped.
                    drop(lock);

                    waker_list.wake_all();

                    lock = self.inner.lock();
                }
            }
        }

        // Record the next promised wakeup; a real tick of 0 is encoded as 1.
        lock.next_wake = lock
            .wheel
            .poll_at()
            .map(|t| NonZeroU64::new(t).unwrap_or_else(|| NonZeroU64::new(1).unwrap()));

        drop(lock);

        waker_list.wake_all();
    }

    /// Alternative-driver variant of `process_at_time`: expired entries are
    /// collected into `wake_queue` rather than woken here.
    #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
    pub(crate) fn process_at_time_alt(
        &self,
        wheel: &mut time_alt::Wheel,
        mut now: u64,
        wake_queue: &mut time_alt::WakeQueue,
    ) {
        if now < wheel.elapsed() {
            // Time went backwards! This normally shouldn't happen as the Rust language
            // guarantees that an Instant is monotonic, but can happen when running
            // Linux in a VM on a Windows host due to std incorrectly trusting the
            // hardware clock to be monotonic.
            //
            // See <https://github.com/tokio-rs/tokio/issues/3619> for more information.
            now = wheel.elapsed();
        }

        wheel.take_expired(now, wake_queue);
    }

    /// Shuts down an alternative-driver wheel by advancing to the end of
    /// time, firing (and waking) every remaining timer.
    #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
    pub(crate) fn shutdown_alt(&self, wheel: &mut time_alt::Wheel) {
        // self.is_shutdown.store(true, Ordering::SeqCst);

        // Advance time forward to the end of time.
        // This will ensure that all timers are fired.
        let max_tick = u64::MAX;
        let mut wake_queue = time_alt::WakeQueue::new();
        self.process_at_time_alt(wheel, max_tick, &mut wake_queue);
        wake_queue.wake_all();
    }

    /// Removes a registered timer from the driver.
    ///
    /// The timer will be moved to the cancelled state. Wakers will _not_ be
    /// invoked. If the timer is already completed, this function is a no-op.
    ///
    /// This function always acquires the driver lock, even if the entry does
    /// not appear to be registered.
    ///
    /// SAFETY: The timer must not be registered with some other driver, and
    /// `add_entry` must not be called concurrently.
    pub(self) unsafe fn clear_entry(&self, entry: NonNull<TimerShared>) {
        unsafe {
            let mut lock = self.inner.lock();

            if entry.as_ref().might_be_registered() {
                lock.wheel.remove(entry);
            }

            // The waker returned by `fire` (if any) is dropped rather than
            // woken, matching the "wakers will not be invoked" contract above.
            entry.as_ref().handle().fire(Ok(()));
        }
    }

    /// Removes and re-adds an entry to the driver.
    ///
    /// SAFETY: The timer must be either unregistered, or registered with this
    /// driver. No other threads are allowed to concurrently manipulate the
    /// timer at all (the current thread should hold an exclusive reference to
    /// the `TimerEntry`)
    pub(self) unsafe fn reregister(
        &self,
        unpark: &IoHandle,
        new_tick: u64,
        entry: NonNull<TimerShared>,
    ) {
        // `waker` is `Some` only when the timer fired synchronously during
        // reregistration (shutdown or already-elapsed deadline).
        let waker = unsafe {
            let mut lock = self.inner.lock();

            // We may have raced with a firing/deregistration, so check before
            // deregistering.
            if unsafe { entry.as_ref().might_be_registered() } {
                lock.wheel.remove(entry);
            }

            // Now that we have exclusive control of this entry, mint a handle to reinsert it.
            let entry = entry.as_ref().handle();

            if self.is_shutdown() {
                unsafe { entry.fire(Err(crate::time::error::Error::shutdown())) }
            } else {
                entry.set_expiration(new_tick);

                // Note: We don't have to worry about racing with some other resetting
                // thread, because add_entry and reregister require exclusive control of
                // the timer entry.
                match unsafe { lock.wheel.insert(entry) } {
                    Ok(when) => {
                        // If this timer becomes the new earliest deadline,
                        // unpark the driver so it doesn't sleep past it.
                        if lock
                            .next_wake
                            .map(|next_wake| when < next_wake.get())
                            .unwrap_or(true)
                        {
                            unpark.unpark();
                        }

                        None
                    }
                    Err((entry, crate::time::error::InsertError::Elapsed)) => unsafe {
                        entry.fire(Ok(()))
                    },
                }
            }

            // Must release lock before invoking waker to avoid the risk of deadlock.
        };

        // The timer was fired synchronously as a result of the reregistration.
        // Wake the waker; this is needed because we might reset _after_ a poll,
        // and otherwise the task won't be awoken to poll again.
        if let Some(waker) = waker {
            waker.wake();
        }
    }

    cfg_test_util! {
        /// Returns whether the driver was woken since the last check,
        /// clearing the flag.
        pub(super) fn did_wake(&self) -> bool {
            match &self.inner {
                Inner::Traditional { did_wake, .. } => did_wake.swap(false, Ordering::SeqCst),
                #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
                Inner::Alternative { did_wake, .. } => did_wake.swap(false, Ordering::SeqCst),
            }
        }
    }
}
// ===== impl Inner =====
impl Inner {
    /// Locks the driver's inner structure
    ///
    /// # Panics
    ///
    /// Unreachable for the alternative driver, which keeps no shared
    /// mutex-protected state in this enum.
    pub(super) fn lock(&self) -> crate::loom::sync::MutexGuard<'_, InnerState> {
        match self {
            Inner::Traditional { state, .. } => state.lock(),
            #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
            Inner::Alternative { .. } => unreachable!("unreachable in alternative timer"),
        }
    }

    // Check whether the driver has been shutdown
    pub(super) fn is_shutdown(&self) -> bool {
        match self {
            Inner::Traditional { is_shutdown, .. } => is_shutdown.load(Ordering::SeqCst),
            #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
            Inner::Alternative { is_shutdown, .. } => is_shutdown.load(Ordering::SeqCst),
        }
    }
}
impl fmt::Debug for Inner {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The interesting state lives behind a lock; render just the type
        // name, exactly as an empty `debug_struct` would.
        fmt.write_str("Inner")
    }
}
#[cfg(test)]
mod tests;
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time/handle.rs | tokio/src/runtime/time/handle.rs | use crate::runtime::time::TimeSource;
use std::fmt;
/// Handle to time driver instance.
pub(crate) struct Handle {
    /// Converts `Instant`s to millisecond ticks for the wheel.
    pub(super) time_source: TimeSource,
    /// Shared driver state (wheel and shutdown flag).
    pub(super) inner: super::Inner,
}
impl Handle {
    /// Returns the time source associated with this handle.
    pub(crate) fn time_source(&self) -> &TimeSource {
        &self.time_source
    }

    /// Checks whether the driver has been shutdown.
    pub(super) fn is_shutdown(&self) -> bool {
        self.inner.is_shutdown()
    }

    /// Track that the driver is being unparked
    ///
    /// With `test-util` enabled this sets the `did_wake` flag, which the
    /// driver consults to avoid auto-advancing a paused clock after a park
    /// that was cut short by a wakeup. Without `test-util` this is a no-op.
    pub(crate) fn unpark(&self) {
        #[cfg(feature = "test-util")]
        match self.inner {
            super::Inner::Traditional { ref did_wake, .. } => {
                did_wake.store(true, std::sync::atomic::Ordering::SeqCst);
            }
            #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
            super::Inner::Alternative { ref did_wake, .. } => {
                did_wake.store(true, std::sync::atomic::Ordering::SeqCst);
            }
        }
    }
}
cfg_not_rt! {
    impl Handle {
        /// Tries to get a handle to the current timer.
        ///
        /// # Panics
        ///
        /// This function panics if there is no current timer set.
        ///
        /// It can be triggered when [`Builder::enable_time`] or
        /// [`Builder::enable_all`] are not included in the builder.
        ///
        /// It can also panic whenever a timer is created outside of a
        /// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic,
        /// since the function is executed outside of the runtime.
        /// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic.
        /// And this is because wrapping the function on an async makes it lazy,
        /// and so gets executed inside the runtime successfully without
        /// panicking.
        ///
        /// [`Builder::enable_time`]: crate::runtime::Builder::enable_time
        /// [`Builder::enable_all`]: crate::runtime::Builder::enable_all
        #[track_caller]
        pub(crate) fn current() -> Self {
            // Without the `rt` feature there is no runtime context to look
            // up, so this unconditionally reports the missing-context error.
            panic!("{}", crate::util::error::CONTEXT_MISSING_ERROR)
        }
    }
}
impl fmt::Debug for Handle {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Opaque representation: just the type name.
        f.write_str("Handle")
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time/entry.rs | tokio/src/runtime/time/entry.rs | //! Timer state structures.
//!
//! This module contains the heart of the intrusive timer implementation, and as
//! such the structures inside are full of tricky concurrency and unsafe code.
//!
//! # Ground rules
//!
//! The heart of the timer implementation here is the [`TimerShared`] structure,
//! shared between the [`TimerEntry`] and the driver. Generally, we permit access
//! to [`TimerShared`] ONLY via either 1) a mutable reference to [`TimerEntry`] or
//! 2) a held driver lock.
//!
//! It follows from this that any changes made while holding BOTH 1 and 2 will
//! be reliably visible, regardless of ordering. This is because of the `acq/rel`
//! fences on the driver lock ensuring ordering with 2, and rust mutable
//! reference rules for 1 (a mutable reference to an object can't be passed
//! between threads without an `acq/rel` barrier, and same-thread we have local
//! happens-before ordering).
//!
//! # State field
//!
//! Each timer has a state field associated with it. This field contains either
//! the current scheduled time, or a special flag value indicating its state.
//! This state can either indicate that the timer is on the 'pending' queue (and
//! thus will be fired with an `Ok(())` result soon) or that it has already been
//! fired/deregistered.
//!
//! This single state field allows for code that is firing the timer to
//! synchronize with any racing `reset` calls reliably.
//!
//! # Registered vs true timeouts
//!
//! To allow for the use case of a timeout that is periodically reset before
//! expiration to be as lightweight as possible, we support optimistically
//! lock-free timer resets, in the case where a timer is rescheduled to a later
//! point than it was originally scheduled for.
//!
//! This is accomplished by lazily rescheduling timers. That is, we update the
//! state field with the true expiration of the timer from the holder of
//! the [`TimerEntry`]. When the driver services timers (ie, whenever it's
//! walking lists of timers), it checks this "true when" value, and reschedules
//! based on it.
//!
//! We do, however, also need to track what the expiration time was when we
//! originally registered the timer; this is used to locate the right linked
//! list when the timer is being cancelled.
//! This is referred to as the `registered_when` internally.
//!
//! There is of course a race condition between timer reset and timer
//! expiration. If the driver fails to observe the updated expiration time, it
//! could trigger expiration of the timer too early. However, because
//! [`mark_pending`][mark_pending] performs a compare-and-swap, it will identify this race and
//! refuse to mark the timer as pending.
//!
//! [mark_pending]: TimerHandle::mark_pending
use crate::loom::cell::UnsafeCell;
use crate::loom::sync::atomic::AtomicU64;
use crate::loom::sync::atomic::Ordering;
use crate::runtime::scheduler;
use crate::sync::AtomicWaker;
use crate::time::Instant;
use crate::util::linked_list;
use pin_project_lite::pin_project;
use std::task::{Context, Poll, Waker};
use std::{marker::PhantomPinned, pin::Pin, ptr::NonNull};
/// Outcome delivered when a timer completes: `Ok(())` on normal firing, or
/// an error (e.g. driver shutdown) otherwise.
type TimerResult = Result<(), crate::time::error::Error>;

/// State value meaning the timer has been fired or deregistered.
pub(in crate::runtime::time) const STATE_DEREGISTERED: u64 = u64::MAX;
/// State value meaning the timer has been queued to fire imminently.
const STATE_PENDING_FIRE: u64 = STATE_DEREGISTERED - 1;
/// Smallest state value that is a flag rather than a scheduled tick.
const STATE_MIN_VALUE: u64 = STATE_PENDING_FIRE;
/// The largest safe integer to use for ticks.
///
/// This value should be updated if any other signal values are added above.
pub(super) const MAX_SAFE_MILLIS_DURATION: u64 = STATE_MIN_VALUE - 1;
/// This structure holds the current shared state of the timer - its scheduled
/// time (if registered), or otherwise the result of the timer completing, as
/// well as the registered waker.
///
/// Generally, the `StateCell` is only permitted to be accessed from two contexts:
/// Either a thread holding the corresponding `&mut TimerEntry`, or a thread
/// holding the timer driver lock. The write actions on the `StateCell` amount to
/// passing "ownership" of the `StateCell` between these contexts; moving a timer
/// from the `TimerEntry` to the driver requires _both_ holding the `&mut
/// TimerEntry` and the driver lock, while moving it back (firing the timer)
/// requires only the driver lock.
pub(super) struct StateCell {
    /// Holds either the scheduled expiration time for this timer, or (if the
    /// timer has been fired and is unregistered), `u64::MAX`.
    ///
    /// Values below `STATE_MIN_VALUE` are scheduled ticks; values at or above
    /// it are the `STATE_PENDING_FIRE` / `STATE_DEREGISTERED` flags.
    state: AtomicU64,
    /// If the timer is fired (an Acquire order read on state shows
    /// `u64::MAX`), holds the result that should be returned from
    /// polling the timer. Otherwise, the contents are unspecified and reading
    /// without holding the driver lock is undefined behavior.
    result: UnsafeCell<TimerResult>,
    /// The currently-registered waker
    waker: AtomicWaker,
}
impl Default for StateCell {
fn default() -> Self {
Self::new()
}
}
impl std::fmt::Debug for StateCell {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Only the poll-visible state is shown; the raw result/waker fields
        // cannot be read safely without the driver lock.
        let state = self.read_state();
        write!(f, "StateCell({state:?})")
    }
}
impl StateCell {
    /// Creates a cell in the deregistered (unscheduled) state.
    fn new() -> Self {
        Self {
            state: AtomicU64::new(STATE_DEREGISTERED),
            result: UnsafeCell::new(Ok(())),
            waker: AtomicWaker::new(),
        }
    }

    /// Returns true if the timer has been queued to fire
    /// (state is `STATE_PENDING_FIRE`).
    fn is_pending(&self) -> bool {
        self.state.load(Ordering::Relaxed) == STATE_PENDING_FIRE
    }

    /// Returns the current expiration time, or None if not currently scheduled.
    ///
    /// NOTE(review): a timer in the pending-fire state reports
    /// `Some(STATE_PENDING_FIRE)` here rather than its original deadline.
    fn when(&self) -> Option<u64> {
        let cur_state = self.state.load(Ordering::Relaxed);

        if cur_state == STATE_DEREGISTERED {
            None
        } else {
            Some(cur_state)
        }
    }

    /// If the timer is completed, returns the result of the timer. Otherwise,
    /// returns None and registers the waker.
    fn poll(&self, waker: &Waker) -> Poll<TimerResult> {
        // We must register first. This ensures that either `fire` will
        // observe the new waker, or we will observe a racing fire to have set
        // the state, or both.
        self.waker.register_by_ref(waker);

        self.read_state()
    }

    /// Reads the fired/not-fired state; `Ready` carries the stored result.
    fn read_state(&self) -> Poll<TimerResult> {
        let cur_state = self.state.load(Ordering::Acquire);

        if cur_state == STATE_DEREGISTERED {
            // SAFETY: The driver has fired this timer; this involves writing
            // the result, and then writing (with release ordering) the state
            // field.
            Poll::Ready(unsafe { self.result.with(|p| *p) })
        } else {
            Poll::Pending
        }
    }

    /// Marks this timer as being moved to the pending list, if its scheduled
    /// time is not after `not_after`.
    ///
    /// If the timer is scheduled for a time after `not_after`, returns an Err
    /// containing the current scheduled time.
    ///
    /// SAFETY: Must hold the driver lock.
    unsafe fn mark_pending(&self, not_after: u64) -> Result<(), u64> {
        // Quick initial debug check to see if the timer is already fired. Since
        // firing the timer can only happen with the driver lock held, we know
        // we shouldn't be able to "miss" a transition to a fired state, even
        // with relaxed ordering.
        let mut cur_state = self.state.load(Ordering::Relaxed);
        loop {
            // improve the error message for things like
            // https://github.com/tokio-rs/tokio/issues/3675
            assert!(
                cur_state < STATE_MIN_VALUE,
                "mark_pending called when the timer entry is in an invalid state"
            );

            if cur_state > not_after {
                break Err(cur_state);
            }

            // CAS (rather than a plain store) detects a concurrent `reset` to
            // a later deadline; on failure we re-check against `not_after`.
            match self.state.compare_exchange_weak(
                cur_state,
                STATE_PENDING_FIRE,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => break Ok(()),
                Err(actual_state) => cur_state = actual_state,
            }
        }
    }

    /// Fires the timer, setting the result to the provided result.
    ///
    /// Returns:
    /// * `Some(waker)` - if fired and a waker needs to be invoked once the
    ///   driver lock is released
    /// * `None` - if fired and a waker does not need to be invoked, or if
    ///   already fired
    ///
    /// SAFETY: The driver lock must be held.
    unsafe fn fire(&self, result: TimerResult) -> Option<Waker> {
        // Quick initial check to see if the timer is already fired. Since
        // firing the timer can only happen with the driver lock held, we know
        // we shouldn't be able to "miss" a transition to a fired state, even
        // with relaxed ordering.
        let cur_state = self.state.load(Ordering::Relaxed);
        if cur_state == STATE_DEREGISTERED {
            return None;
        }

        // SAFETY: We assume the driver lock is held and the timer is not
        // fired, so only the driver is accessing this field.
        //
        // We perform a release-ordered store to state below, to ensure this
        // write is visible before the state update is visible.
        unsafe { self.result.with_mut(|p| *p = result) };

        self.state.store(STATE_DEREGISTERED, Ordering::Release);

        self.waker.take_waker()
    }

    /// Marks the timer as registered (poll will return None) and sets the
    /// expiration time.
    ///
    /// While this function is memory-safe, it should only be called from a
    /// context holding both `&mut TimerEntry` and the driver lock.
    fn set_expiration(&self, timestamp: u64) {
        // Ticks must stay below the flag values (see `MAX_SAFE_MILLIS_DURATION`).
        debug_assert!(timestamp < STATE_MIN_VALUE);

        // We can use relaxed ordering because we hold the driver lock and will
        // fence when we release the lock.
        self.state.store(timestamp, Ordering::Relaxed);
    }

    /// Attempts to adjust the timer to a new timestamp.
    ///
    /// If the timer has already been fired, is pending firing, or the new
    /// timestamp is earlier than the old timestamp, (or occasionally
    /// spuriously) returns Err without changing the timer's state. In this
    /// case, the timer must be deregistered and re-registered.
    fn extend_expiration(&self, new_timestamp: u64) -> Result<(), ()> {
        let mut prior = self.state.load(Ordering::Relaxed);
        loop {
            // Only a *later* deadline can be applied lock-free; everything
            // else (earlier deadline, pending, fired) needs the slow path.
            if new_timestamp < prior || prior >= STATE_MIN_VALUE {
                return Err(());
            }

            match self.state.compare_exchange_weak(
                prior,
                new_timestamp,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => return Ok(()),
                Err(true_prior) => prior = true_prior,
            }
        }
    }

    /// Returns true if the state of this timer indicates that the timer might
    /// be registered with the driver. This check is performed with relaxed
    /// ordering, but is conservative - if it returns false, the timer is
    /// definitely _not_ registered.
    pub(super) fn might_be_registered(&self) -> bool {
        self.state.load(Ordering::Relaxed) != STATE_DEREGISTERED
    }
}
pin_project! {
    // A timer entry.
    //
    // This is the handle to a timer that is controlled by the requester of the
    // timer. As this participates in intrusive data structures, it must be pinned
    // before polling.
    #[derive(Debug)]
    pub(crate) struct TimerEntry {
        // Arc reference to the runtime handle. We can only free the driver after
        // deregistering everything from their respective timer wheels.
        driver: scheduler::Handle,
        // Shared inner structure; this is part of an intrusive linked list, and
        // therefore other references can exist to it while mutable references to
        // Entry exist.
        //
        // This is manipulated only under the inner mutex.
        #[pin]
        inner: Option<TimerShared>,
        // Deadline for the timer. This is used to register on the first
        // poll, as we can't register prior to being pinned.
        deadline: Instant,
        // Whether the deadline has been registered.
        registered: bool,
    }

    impl PinnedDrop for TimerEntry {
        fn drop(this: Pin<&mut Self>) {
            // NOTE(review): `cancel` presumably unlinks the entry from the
            // driver's wheel before the pinned memory is released — confirm
            // against `TimerEntry::cancel` (defined elsewhere in this file).
            this.cancel();
        }
    }
}
// SAFETY(review): `TimerEntry` is mutated only through `Pin<&mut Self>` or
// under the driver lock, and the shared `TimerShared` state is itself declared
// `Send`/`Sync` below — confirm against the driver's locking discipline.
unsafe impl Send for TimerEntry {}
// SAFETY(review): see the note on the `Send` impl above.
unsafe impl Sync for TimerEntry {}
/// A `TimerHandle` is the (non-enforced) "unique" pointer from the driver to the
/// timer entry. Generally, at most one `TimerHandle` exists for a timer at a time
/// (enforced by the timer state machine).
///
/// SAFETY: A `TimerHandle` is essentially a raw pointer, and the usual caveats
/// of pointer safety apply. In particular, `TimerHandle` does not itself enforce
/// that the timer does still exist; however, normally a `TimerHandle` is created
/// immediately before registering the timer, and is consumed when firing the
/// timer, to help minimize mistakes. Still, because `TimerHandle` cannot enforce
/// memory safety, all operations are unsafe.
#[derive(Debug)]
pub(crate) struct TimerHandle {
    inner: NonNull<TimerShared>,
}
/// Intrusive doubly-linked list of `TimerShared` entries, linked through each
/// entry's own `pointers` field.
pub(super) type EntryList = crate::util::linked_list::LinkedList<TimerShared, TimerShared>;
/// The shared state structure of a timer. This structure is shared between the
/// frontend (`Entry`) and driver backend.
///
/// Note that this structure is located inside the `TimerEntry` structure.
pub(crate) struct TimerShared {
    /// A link within the doubly-linked list of timers on a particular level and
    /// slot. Valid only if state is equal to Registered.
    ///
    /// Only accessed under the entry lock.
    pointers: linked_list::Pointers<TimerShared>,
    /// The time when the [`TimerEntry`] was registered into the Wheel,
    /// [`STATE_DEREGISTERED`] means it is not registered.
    ///
    /// Generally owned by the driver, but is accessed by the entry when not
    /// registered.
    ///
    /// We use relaxed ordering for both loading and storing since this value
    /// is only accessed either when holding the driver lock or through mutable
    /// references to [`TimerEntry`].
    registered_when: AtomicU64,
    /// Current state. This records whether the timer entry is currently under
    /// the ownership of the driver, and if not, its current state (not
    /// complete, fired, error, etc).
    state: StateCell,
    /// Keeps `TimerShared` `!Unpin`: the entry participates in an intrusive
    /// linked list and must not move while registered.
    _p: PhantomPinned,
}
// SAFETY(review): cross-thread access to `TimerShared` goes through the atomic
// `state`/`registered_when` fields; the list `pointers` are only touched under
// a lock (see the field docs above) — confirm against the driver internals.
unsafe impl Send for TimerShared {}
// SAFETY(review): see the note on the `Send` impl above.
unsafe impl Sync for TimerShared {}
impl std::fmt::Debug for TimerShared {
    /// Renders the registration tick and state; the intrusive list pointers
    /// are not shown.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Snapshot the atomic once so the rendered value is consistent.
        let registered_when = self.registered_when.load(Ordering::Relaxed);
        let mut dbg = f.debug_struct("TimerShared");
        dbg.field("registered_when", &registered_when);
        dbg.field("state", &self.state);
        dbg.finish()
    }
}
// Generates `TimerShared::addr_of_pointers`, used by the `linked_list::Link`
// implementation below to obtain the address of the entry's list pointers
// from a `NonNull<Self>`.
generate_addr_of_methods! {
    impl<> TimerShared {
        unsafe fn addr_of_pointers(self: NonNull<Self>) -> NonNull<linked_list::Pointers<TimerShared>> {
            &self.pointers
        }
    }
}
impl TimerShared {
    /// Creates new, unregistered shared timer state.
    pub(super) fn new() -> Self {
        Self {
            registered_when: AtomicU64::new(0),
            pointers: linked_list::Pointers::new(),
            state: StateCell::default(),
            _p: PhantomPinned,
        }
    }
    /// Gets the cached time-of-expiration value.
    pub(super) fn registered_when(&self) -> u64 {
        // Cached-when is only accessed under the driver lock, so we can use relaxed
        self.registered_when.load(Ordering::Relaxed)
    }
    /// Gets the true time-of-expiration value, and copies it into the cached
    /// time-of-expiration value.
    ///
    /// SAFETY: Must be called with the driver lock held, and when this entry is
    /// not in any timer wheel lists.
    pub(super) unsafe fn sync_when(&self) -> u64 {
        let true_when = self.true_when();
        self.registered_when.store(true_when, Ordering::Relaxed);
        true_when
    }
    /// Sets the cached time-of-expiration value.
    ///
    /// SAFETY: Must be called with the driver lock held, and when this entry is
    /// not in any timer wheel lists.
    unsafe fn set_registered_when(&self, when: u64) {
        self.registered_when.store(when, Ordering::Relaxed);
    }
    /// Returns the true time-of-expiration value, with relaxed memory ordering.
    ///
    /// # Panics
    ///
    /// Panics if the timer has already fired (the state no longer carries a
    /// tick value).
    pub(super) fn true_when(&self) -> u64 {
        self.state.when().expect("Timer already fired")
    }
    /// Sets the true time-of-expiration value, even if it is less than the
    /// current expiration or the timer is deregistered.
    ///
    /// SAFETY: Must only be called with the driver lock held and the entry not
    /// in the timer wheel.
    pub(super) unsafe fn set_expiration(&self, t: u64) {
        self.state.set_expiration(t);
        self.registered_when.store(t, Ordering::Relaxed);
    }
    /// Sets the true time-of-expiration only if it is after the current.
    pub(super) fn extend_expiration(&self, t: u64) -> Result<(), ()> {
        self.state.extend_expiration(t)
    }
    /// Returns a `TimerHandle` for this timer.
    pub(super) fn handle(&self) -> TimerHandle {
        TimerHandle {
            inner: NonNull::from(self),
        }
    }
    /// Returns true if the state of this timer indicates that the timer might
    /// be registered with the driver. This check is performed with relaxed
    /// ordering, but is conservative - if it returns false, the timer is
    /// definitely _not_ registered.
    pub(super) fn might_be_registered(&self) -> bool {
        self.state.might_be_registered()
    }
}
// SAFETY: `as_raw`/`from_raw` are exact inverses on the wrapped `NonNull`, and
// `pointers` returns the address of this entry's own `pointers` field, which
// stays valid for as long as the entry itself does.
unsafe impl linked_list::Link for TimerShared {
    type Handle = TimerHandle;
    type Target = TimerShared;
    fn as_raw(handle: &Self::Handle) -> NonNull<Self::Target> {
        handle.inner
    }
    unsafe fn from_raw(ptr: NonNull<Self::Target>) -> Self::Handle {
        TimerHandle { inner: ptr }
    }
    unsafe fn pointers(
        target: NonNull<Self::Target>,
    ) -> NonNull<linked_list::Pointers<Self::Target>> {
        unsafe { TimerShared::addr_of_pointers(target) }
    }
}
// ===== impl Entry =====
impl TimerEntry {
    /// Creates a new timer entry with the given deadline. The entry is not
    /// registered with the driver until first polled (see `poll_elapsed`).
    ///
    /// # Panics
    ///
    /// Panics if the runtime's time driver is not enabled.
    #[track_caller]
    pub(crate) fn new(handle: scheduler::Handle, deadline: Instant) -> Self {
        // Panic if the time driver is not enabled
        let _ = handle.driver().time();
        Self {
            driver: handle,
            inner: None,
            deadline,
            registered: false,
        }
    }
    /// Returns the shared state, if it has been initialized.
    fn inner(&self) -> Option<&TimerShared> {
        self.inner.as_ref()
    }
    /// Lazily initializes the shared state; a no-op if already initialized.
    fn init_inner(self: Pin<&mut Self>) {
        match self.inner {
            Some(_) => {}
            None => self.project().inner.set(Some(TimerShared::new())),
        }
    }
    /// Returns the deadline this entry was created with or last reset to.
    pub(crate) fn deadline(&self) -> Instant {
        self.deadline
    }
    /// Returns true if the timer was registered with the driver and has since
    /// been taken out of the wheel (i.e. it has fired).
    pub(crate) fn is_elapsed(&self) -> bool {
        let Some(inner) = self.inner() else {
            // Shared state never initialized, so the timer was never
            // registered and cannot have fired.
            return false;
        };
        // Is this timer still in the timer wheel?
        let deregistered = !inner.might_be_registered();
        // Once the timer has expired,
        // it will be taken out of the wheel and be fired.
        //
        // So if we have already registered the timer into the wheel,
        // but now it is not in the wheel, it means that it has been
        // fired.
        //
        // +--------------+-----------------+----------+
        // | deregistered | self.registered | output   |
        // +--------------+-----------------+----------+
        // |   true       |      false      |  false   | <- never been registered
        // +--------------+-----------------+----------+
        // |   false      |      false      |  false   | <- never been registered
        // +--------------+-----------------+----------+
        // |   true       |      true       |  true    | <- registered into the wheel,
        // |              |                 |          |    and then taken out of the wheel.
        // +--------------+-----------------+----------+
        // |   false      |      true       |  false   | <- still registered in the wheel
        // +--------------+-----------------+----------+
        deregistered && self.registered
    }
    /// Cancels and deregisters the timer. This operation is irreversible.
    pub(crate) fn cancel(self: Pin<&mut Self>) {
        // If the shared state was never initialized, the timer was never
        // registered with the driver, so there is nothing to clear.
        let Some(inner) = self.inner() else {
            return;
        };
        // We need to perform an acq/rel fence with the driver thread, and the
        // simplest way to do so is to grab the driver lock.
        //
        // Why is this necessary? We're about to release this timer's memory for
        // some other non-timer use. However, we've been doing a bunch of
        // relaxed (or even non-atomic) writes from the driver thread, and we'll
        // be doing more from _this thread_ (as this memory is interpreted as
        // something else).
        //
        // It is critical to ensure that, from the point of view of the driver,
        // those future non-timer writes happen-after the timer is fully fired,
        // and from the purpose of this thread, the driver's writes all
        // happen-before we drop the timer. This in turn requires us to perform
        // an acquire-release barrier in _both_ directions between the driver
        // and dropping thread.
        //
        // The lock acquisition in clear_entry serves this purpose. All of the
        // driver manipulations happen with the lock held, so we can just take
        // the lock and be sure that this drop happens-after everything the
        // driver did so far and happens-before everything the driver does in
        // the future. While we have the lock held, we also go ahead and
        // deregister the entry if necessary.
        unsafe { self.driver().clear_entry(NonNull::from(inner)) };
    }
    /// Moves the deadline to `new_time`. If `reregister` is true and the cheap
    /// in-place extension fails, the entry is re-registered with the driver.
    pub(crate) fn reset(mut self: Pin<&mut Self>, new_time: Instant, reregister: bool) {
        let this = self.as_mut().project();
        *this.deadline = new_time;
        *this.registered = reregister;
        let tick = self.driver().time_source().deadline_to_tick(new_time);
        let inner = match self.inner() {
            Some(inner) => inner,
            None => {
                self.as_mut().init_inner();
                self.inner()
                    .expect("inner should already be initialized by `this.init_inner()`")
            }
        };
        // Fast path: a later deadline can be applied without touching the
        // driver (see `StateCell::extend_expiration`).
        if inner.extend_expiration(tick).is_ok() {
            return;
        }
        if reregister {
            unsafe {
                self.driver()
                    .reregister(&self.driver.driver().io, tick, inner.into());
            }
        }
    }
    /// Polls for deadline elapse, registering with the driver on first poll.
    ///
    /// # Panics
    ///
    /// Panics if the runtime is shutting down.
    pub(crate) fn poll_elapsed(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), super::Error>> {
        assert!(
            !self.driver().is_shutdown(),
            "{}",
            crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR
        );
        // First poll: register the stored deadline with the driver.
        if !self.registered {
            let deadline = self.deadline;
            self.as_mut().reset(deadline, true);
        }
        let inner = self
            .inner()
            .expect("inner should already be initialized by `self.reset()`");
        inner.state.poll(cx.waker())
    }
    /// Returns the time driver handle for this entry's runtime.
    pub(crate) fn driver(&self) -> &super::Handle {
        self.driver.driver().time()
    }
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    pub(crate) fn clock(&self) -> &super::Clock {
        self.driver.driver().clock()
    }
}
impl TimerHandle {
    /// Reads the cached registration tick.
    ///
    /// SAFETY: The caller must ensure the pointed-to entry is still alive.
    pub(super) unsafe fn registered_when(&self) -> u64 {
        unsafe { self.inner.as_ref().registered_when() }
    }
    /// Syncs and returns the true expiration tick (see `TimerShared::sync_when`).
    ///
    /// SAFETY: The caller must ensure the entry is alive, the driver lock is
    /// held, and the entry is not in any wheel lists.
    pub(super) unsafe fn sync_when(&self) -> u64 {
        unsafe { self.inner.as_ref().sync_when() }
    }
    /// Returns whether the entry is in the pending-fire state.
    ///
    /// SAFETY: The caller must ensure the pointed-to entry is still alive.
    pub(super) unsafe fn is_pending(&self) -> bool {
        unsafe { self.inner.as_ref().state.is_pending() }
    }
    /// Forcibly sets the true and cached expiration times to the given tick.
    ///
    /// SAFETY: The caller must ensure that the handle remains valid, the driver
    /// lock is held, and that the timer is not in any wheel linked lists.
    pub(super) unsafe fn set_expiration(&self, tick: u64) {
        unsafe {
            self.inner.as_ref().set_expiration(tick);
        }
    }
    /// Attempts to mark this entry as pending. If the expiration time is after
    /// `not_after`, however, returns an Err with the current expiration time.
    ///
    /// If an `Err` is returned, the `registered_when` value will be updated to this
    /// new expiration time.
    ///
    /// SAFETY: The caller must ensure that the handle remains valid, the driver
    /// lock is held, and that the timer is not in any wheel linked lists.
    /// After returning Ok, the entry must be added to the pending list.
    pub(super) unsafe fn mark_pending(&self, not_after: u64) -> Result<(), u64> {
        match unsafe { self.inner.as_ref().state.mark_pending(not_after) } {
            Ok(()) => {
                // mark this as being on the pending queue in registered_when
                unsafe {
                    self.inner.as_ref().set_registered_when(STATE_DEREGISTERED);
                }
                Ok(())
            }
            Err(tick) => {
                unsafe {
                    self.inner.as_ref().set_registered_when(tick);
                }
                Err(tick)
            }
        }
    }
    /// Attempts to transition to a terminal state. If the state is already a
    /// terminal state, does nothing.
    ///
    /// Because the entry might be dropped after the state is moved to a
    /// terminal state, this function consumes the handle to ensure we don't
    /// access the entry afterwards.
    ///
    /// Returns the last-registered waker, if any.
    ///
    /// SAFETY: The driver lock must be held while invoking this function, and
    /// the entry must not be in any wheel linked lists.
    pub(super) unsafe fn fire(self, completed_state: TimerResult) -> Option<Waker> {
        unsafe { self.inner.as_ref().state.fire(completed_state) }
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time/tests/mod.rs | tokio/src/runtime/time/tests/mod.rs | #![cfg(not(target_os = "wasi"))]
use std::{task::Context, time::Duration};
#[cfg(not(loom))]
use futures::task::noop_waker_ref;
use crate::loom::sync::atomic::{AtomicBool, Ordering};
use crate::loom::sync::Arc;
use crate::loom::thread;
use super::TimerEntry;
/// Runs a future to completion: on the loom executor under `--cfg loom`,
/// otherwise on a freshly-built current-thread runtime.
fn block_on<T>(f: impl std::future::Future<Output = T>) -> T {
    #[cfg(loom)]
    return loom::future::block_on(f);
    #[cfg(not(loom))]
    {
        let rt = crate::runtime::Builder::new_current_thread()
            .build()
            .unwrap();
        rt.block_on(f)
    }
}
/// Runs `f` under the loom model checker when built with `--cfg loom`;
/// otherwise just runs it once.
fn model(f: impl Fn() + Send + Sync + 'static) {
    #[cfg(loom)]
    loom::model(f);
    #[cfg(not(loom))]
    f();
}
/// Builds a current-thread runtime with the time driver enabled, optionally
/// starting with the clock paused.
fn rt(start_paused: bool) -> crate::runtime::Runtime {
    let mut builder = crate::runtime::Builder::new_current_thread();
    builder.enable_time().start_paused(start_paused);
    builder.build().unwrap()
}
/// A timer awaited on one thread completes once another thread advances the
/// driver past its deadline.
#[test]
fn single_timer() {
    model(|| {
        let rt = rt(false);
        let handle = rt.handle();
        let handle_ = handle.clone();
        let jh = thread::spawn(move || {
            let entry = TimerEntry::new(
                handle_.inner.clone(),
                handle_.inner.driver().clock().now() + Duration::from_secs(1),
            );
            pin!(entry);
            block_on(std::future::poll_fn(|cx| entry.as_mut().poll_elapsed(cx))).unwrap();
        });
        thread::yield_now();
        let time = handle.inner.driver().time();
        let clock = handle.inner.driver().clock();
        // advance 2s
        time.process_at_time(time.time_source().now(clock) + 2_000_000_000);
        jh.join().unwrap();
    })
}
/// Polling a timer twice and then dropping it (without awaiting completion)
/// is safe while the driver processes its deadline concurrently.
#[test]
fn drop_timer() {
    model(|| {
        let rt = rt(false);
        let handle = rt.handle();
        let handle_ = handle.clone();
        let jh = thread::spawn(move || {
            let entry = TimerEntry::new(
                handle_.inner.clone(),
                handle_.inner.driver().clock().now() + Duration::from_secs(1),
            );
            pin!(entry);
            let _ = entry
                .as_mut()
                .poll_elapsed(&mut Context::from_waker(futures::task::noop_waker_ref()));
            let _ = entry
                .as_mut()
                .poll_elapsed(&mut Context::from_waker(futures::task::noop_waker_ref()));
        });
        thread::yield_now();
        let time = handle.inner.driver().time();
        let clock = handle.inner.driver().clock();
        // advance 2s in the future.
        time.process_at_time(time.time_source().now(clock) + 2_000_000_000);
        jh.join().unwrap();
    })
}
/// A timer first polled with a noop waker and then awaited with a real waker
/// still completes when the driver fires it.
#[test]
fn change_waker() {
    model(|| {
        let rt = rt(false);
        let handle = rt.handle();
        let handle_ = handle.clone();
        let jh = thread::spawn(move || {
            let entry = TimerEntry::new(
                handle_.inner.clone(),
                handle_.inner.driver().clock().now() + Duration::from_secs(1),
            );
            pin!(entry);
            let _ = entry
                .as_mut()
                .poll_elapsed(&mut Context::from_waker(futures::task::noop_waker_ref()));
            block_on(std::future::poll_fn(|cx| entry.as_mut().poll_elapsed(cx))).unwrap();
        });
        thread::yield_now();
        let time = handle.inner.driver().time();
        let clock = handle.inner.driver().clock();
        // advance 2s
        time.process_at_time(time.time_source().now(clock) + 2_000_000_000);
        jh.join().unwrap();
    })
}
/// Resetting a pending timer to a later deadline delays completion until the
/// new deadline: the timer must not fire at the original (1s) deadline.
#[test]
fn reset_future() {
    model(|| {
        let finished_early = Arc::new(AtomicBool::new(false));
        let rt = rt(false);
        let handle = rt.handle();
        let handle_ = handle.clone();
        let finished_early_ = finished_early.clone();
        let start = handle.inner.driver().clock().now();
        let jh = thread::spawn(move || {
            let entry = TimerEntry::new(handle_.inner.clone(), start + Duration::from_secs(1));
            pin!(entry);
            let _ = entry
                .as_mut()
                .poll_elapsed(&mut Context::from_waker(futures::task::noop_waker_ref()));
            entry.as_mut().reset(start + Duration::from_secs(2), true);
            // shouldn't complete before 2s
            block_on(std::future::poll_fn(|cx| entry.as_mut().poll_elapsed(cx))).unwrap();
            finished_early_.store(true, Ordering::Relaxed);
        });
        thread::yield_now();
        let handle = handle.inner.driver().time();
        // Advance to 1.5s: before the reset deadline, so the timer must still
        // be pending.
        handle.process_at_time(
            handle
                .time_source()
                .instant_to_tick(start + Duration::from_millis(1500)),
        );
        assert!(!finished_early.load(Ordering::Relaxed));
        // Advance past 2s: now it must fire.
        handle.process_at_time(
            handle
                .time_source()
                .instant_to_tick(start + Duration::from_millis(2500)),
        );
        jh.join().unwrap();
        assert!(finished_early.load(Ordering::Relaxed));
    })
}
/// Picks a smaller work size when running under Miri, where execution is
/// drastically slower; otherwise returns the normal size.
#[cfg(not(loom))]
fn normal_or_miri<T>(normal: T, miri: T) -> T {
    match cfg!(miri) {
        true => miri,
        false => normal,
    }
}
/// Registers many timers spanning multiple wheel levels, then advances one
/// tick at a time and checks each timer completes exactly once its deadline
/// has passed — and not before.
#[test]
#[cfg(not(loom))]
fn poll_process_levels() {
    let rt = rt(true);
    let handle = rt.handle();
    let mut entries = vec![];
    for i in 0..normal_or_miri(1024, 64) {
        let mut entry = Box::pin(TimerEntry::new(
            handle.inner.clone(),
            handle.inner.driver().clock().now() + Duration::from_millis(i),
        ));
        // First poll registers the entry with the driver.
        let _ = entry
            .as_mut()
            .poll_elapsed(&mut Context::from_waker(noop_waker_ref()));
        entries.push(entry);
    }
    for t in 1..normal_or_miri(1024, 64) {
        handle.inner.driver().time().process_at_time(t as u64);
        for (deadline, future) in entries.iter_mut().enumerate() {
            let mut context = Context::from_waker(noop_waker_ref());
            if deadline <= t {
                assert!(future.as_mut().poll_elapsed(&mut context).is_ready());
            } else {
                assert!(future.as_mut().poll_elapsed(&mut context).is_pending());
            }
        }
    }
}
/// Exercises processing at specific ticks around a 193ms timer to hit
/// slot/level boundary handling (the timer must stay pending at tick 62).
#[test]
#[cfg(not(loom))]
fn poll_process_levels_targeted() {
    let mut context = Context::from_waker(noop_waker_ref());
    let rt = rt(true);
    let handle = rt.handle();
    let e1 = TimerEntry::new(
        handle.inner.clone(),
        handle.inner.driver().clock().now() + Duration::from_millis(193),
    );
    pin!(e1);
    let handle = handle.inner.driver().time();
    handle.process_at_time(62);
    assert!(e1.as_mut().poll_elapsed(&mut context).is_pending());
    // Processing the same tick twice must be harmless.
    handle.process_at_time(192);
    handle.process_at_time(192);
}
/// Instants farther away than `MAX_SAFE_MILLIS_DURATION` must be clamped by
/// `instant_to_tick` rather than producing an out-of-range tick.
#[test]
#[cfg(not(loom))]
fn instant_to_tick_max() {
    use crate::runtime::time::entry::MAX_SAFE_MILLIS_DURATION;
    let rt = rt(true);
    let handle = rt.handle().inner.driver().time();
    let start_time = handle.time_source.start_time();
    let long_future = start_time + std::time::Duration::from_millis(MAX_SAFE_MILLIS_DURATION + 1);
    assert!(handle.time_source.instant_to_tick(long_future) <= MAX_SAFE_MILLIS_DURATION);
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time/wheel/level.rs | tokio/src/runtime/time/wheel/level.rs | use crate::runtime::time::{EntryList, TimerHandle, TimerShared};
use std::{array, fmt, ptr::NonNull};
/// Wheel for a single level in the timer. This wheel contains 64 slots.
pub(crate) struct Level {
    /// Index of this level within the wheel (0 = finest granularity).
    level: usize,
    /// Bit field tracking which slots currently contain entries.
    ///
    /// Using a bit field to track slots that contain entries allows avoiding a
    /// scan to find entries. This field is updated when entries are added or
    /// removed from a slot.
    ///
    /// The least-significant bit represents slot zero.
    occupied: u64,
    /// Slots: one intrusive entry list per slot.
    // NOTE(review): an earlier comment claimed these are also accessed via
    // `EntryInner::current_list` through an `UnsafeCell`; the field is a plain
    // array here — confirm that comment was stale before relying on it.
    slot: [EntryList; LEVEL_MULT],
}
/// Indicates when a slot must be processed next.
#[derive(Debug)]
pub(crate) struct Expiration {
    /// The level containing the slot.
    pub(crate) level: usize,
    /// The slot index.
    pub(crate) slot: usize,
    /// The instant (tick) at which the slot needs to be processed.
    pub(crate) deadline: u64,
}
/// Level multiplier: the number of slots per level.
///
/// Being a power of 2 is very important: slot and range computations rely on
/// masking/shifting (see `slot_for` and `Level::next_expiration`).
const LEVEL_MULT: usize = 64;
impl Level {
    /// Creates an empty level with the given level index.
    pub(crate) fn new(level: usize) -> Level {
        Level {
            level,
            occupied: 0,
            slot: array::from_fn(|_| EntryList::default()),
        }
    }
    /// Finds the slot that needs to be processed next and returns the slot and
    /// `Instant` at which this slot must be processed.
    pub(crate) fn next_expiration(&self, now: u64) -> Option<Expiration> {
        // Use the `occupied` bit field to get the index of the next slot that
        // needs to be processed.
        let slot = self.next_occupied_slot(now)?;
        // From the slot index, calculate the `Instant` at which it needs to be
        // processed. This value *must* be in the future with respect to `now`.
        let level_range = level_range(self.level);
        let slot_range = slot_range(self.level);
        // Compute the start date of the current level by masking the low bits
        // of `now` (`level_range` is a power of 2).
        let level_start = now & !(level_range - 1);
        let mut deadline = level_start + slot as u64 * slot_range;
        if deadline <= now {
            // A timer is in a slot "prior" to the current time. This can occur
            // because we do not have an infinite hierarchy of timer levels, and
            // eventually a timer scheduled for a very distant time might end up
            // being placed in a slot that is beyond the end of all of the
            // arrays.
            //
            // To deal with this, we first limit timers to being scheduled no
            // more than MAX_DURATION ticks in the future; that is, they're at
            // most one rotation of the top level away. Then, we force timers
            // that logically would go into the top+1 level, to instead go into
            // the top level's slots.
            //
            // What this means is that the top level's slots act as a
            // pseudo-ring buffer, and we rotate around them indefinitely. If we
            // compute a deadline before now, and it's the top level, it
            // therefore means we're actually looking at a slot in the future.
            debug_assert_eq!(self.level, super::NUM_LEVELS - 1);
            deadline += level_range;
        }
        debug_assert!(
            deadline >= now,
            "deadline={:016X}; now={:016X}; level={}; lr={:016X}, sr={:016X}, slot={}; occupied={:b}",
            deadline,
            now,
            self.level,
            level_range,
            slot_range,
            slot,
            self.occupied
        );
        Some(Expiration {
            level: self.level,
            slot,
            deadline,
        })
    }
    /// Returns the index of the first occupied slot at or (cyclically) after
    /// the slot corresponding to `now`, or `None` if the level is empty.
    fn next_occupied_slot(&self, now: u64) -> Option<usize> {
        if self.occupied == 0 {
            return None;
        }
        // Get the slot for now using Maths
        let now_slot = (now / slot_range(self.level)) as usize;
        // Rotate the bitmap so the current slot becomes bit zero, find the
        // first set bit, then map back to an absolute slot index.
        let occupied = self.occupied.rotate_right(now_slot as u32);
        let zeros = occupied.trailing_zeros() as usize;
        let slot = (zeros + now_slot) % LEVEL_MULT;
        Some(slot)
    }
    /// Links `item` into the slot derived from its registered tick and marks
    /// the slot occupied.
    ///
    /// SAFETY: The entry must be valid, pinned, and not already linked into
    /// any wheel list (see `Wheel::insert`).
    pub(crate) unsafe fn add_entry(&mut self, item: TimerHandle) {
        let slot = slot_for(unsafe { item.registered_when() }, self.level);
        self.slot[slot].push_front(item);
        self.occupied |= occupied_bit(slot);
    }
    /// Unlinks `item` from its slot, clearing the occupied bit if the slot
    /// becomes empty.
    ///
    /// SAFETY: The entry must currently be linked into this level.
    pub(crate) unsafe fn remove_entry(&mut self, item: NonNull<TimerShared>) {
        let slot = slot_for(unsafe { item.as_ref().registered_when() }, self.level);
        unsafe { self.slot[slot].remove(item) };
        if self.slot[slot].is_empty() {
            // The bit is currently set
            debug_assert!(self.occupied & occupied_bit(slot) != 0);
            // Unset the bit
            self.occupied ^= occupied_bit(slot);
        }
    }
    /// Removes and returns the entire entry list for `slot`, clearing its
    /// occupied bit.
    pub(crate) fn take_slot(&mut self, slot: usize) -> EntryList {
        self.occupied &= !occupied_bit(slot);
        std::mem::take(&mut self.slot[slot])
    }
}
impl fmt::Debug for Level {
    /// Renders only the `occupied` bit field; the slot lists are not shown.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = fmt.debug_struct("Level");
        dbg.field("occupied", &self.occupied);
        dbg.finish()
    }
}
/// Returns the `occupied` bitmask with only the bit for `slot` set.
fn occupied_bit(slot: usize) -> u64 {
    let bit: u64 = 1;
    bit << slot
}
/// Number of ticks covered by a single slot at `level` (64^level).
fn slot_range(level: usize) -> u64 {
    (LEVEL_MULT as u64).pow(level as u32)
}
/// Total number of ticks covered by `level` (64^(level + 1)).
fn level_range(level: usize) -> u64 {
    slot_range(level) * LEVEL_MULT as u64
}
/// Converts an expiration tick (milliseconds) and a level to a slot index.
fn slot_for(duration: u64, level: usize) -> usize {
    // Each level consumes 6 bits of the tick value (64 slots per level).
    let shifted = duration >> (level * 6);
    (shifted % LEVEL_MULT as u64) as usize
}
#[cfg(all(test, not(loom)))]
mod test {
    use super::*;
    /// `slot_for` maps each multiple of 64^level to the expected slot index
    /// at that level.
    #[test]
    fn test_slot_for() {
        for pos in 0..64 {
            assert_eq!(pos as usize, slot_for(pos, 0));
        }
        for level in 1..5 {
            for pos in level..64 {
                let a = pos * 64_usize.pow(level as u32);
                assert_eq!(pos, slot_for(a as u64, level));
            }
        }
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time/wheel/mod.rs | tokio/src/runtime/time/wheel/mod.rs | use crate::runtime::time::{TimerHandle, TimerShared};
use crate::time::error::InsertError;
mod level;
pub(crate) use self::level::Expiration;
use self::level::Level;
use std::{array, ptr::NonNull};
use super::entry::STATE_DEREGISTERED;
use super::EntryList;
/// Timing wheel implementation.
///
/// This type provides the hashed timing wheel implementation that backs
/// [`Driver`].
///
/// See [`Driver`] documentation for some implementation notes.
///
/// [`Driver`]: crate::runtime::time::Driver
#[derive(Debug)]
pub(crate) struct Wheel {
    /// The number of milliseconds elapsed since the wheel started.
    elapsed: u64,
    /// Timer wheel.
    ///
    /// Levels:
    ///
    /// * 1 ms slots / 64 ms range
    /// * 64 ms slots / ~ 4 sec range
    /// * ~ 4 sec slots / ~ 4 min range
    /// * ~ 4 min slots / ~ 4 hr range
    /// * ~ 4 hr slots / ~ 12 day range
    /// * ~ 12 day slots / ~ 2 yr range
    levels: Box<[Level; NUM_LEVELS]>,
    /// Entries queued for firing (drained by `poll`).
    pending: EntryList,
}
/// Number of levels. Each level has 64 slots. By using 6 levels with 64 slots
/// each, the timer is able to track time up to 2 years into the future with a
/// precision of 1 millisecond.
const NUM_LEVELS: usize = 6;
/// The maximum duration of a `Sleep`: one full rotation of the top level,
/// i.e. 2^(6 * NUM_LEVELS) - 1 ticks.
pub(super) const MAX_DURATION: u64 = (1 << (6 * NUM_LEVELS)) - 1;
impl Wheel {
    /// Creates a new timing wheel.
    pub(crate) fn new() -> Wheel {
        Wheel {
            elapsed: 0,
            levels: Box::new(array::from_fn(Level::new)),
            pending: EntryList::new(),
        }
    }
    /// Returns the number of milliseconds that have elapsed since the timing
    /// wheel's creation.
    pub(crate) fn elapsed(&self) -> u64 {
        self.elapsed
    }
    /// Inserts an entry into the timing wheel.
    ///
    /// # Arguments
    ///
    /// * `item`: The item to insert into the wheel.
    ///
    /// # Return
    ///
    /// Returns `Ok` when the item is successfully inserted, `Err` otherwise.
    ///
    /// `Err(Elapsed)` indicates that `when` represents an instant that has
    /// already passed. In this case, the caller should fire the timeout
    /// immediately.
    ///
    /// `Err(Invalid)` indicates an invalid `when` argument has been supplied.
    ///
    /// # Safety
    ///
    /// This function registers item into an intrusive linked list. The caller
    /// must ensure that `item` is pinned and will not be dropped without first
    /// being deregistered.
    pub(crate) unsafe fn insert(
        &mut self,
        item: TimerHandle,
    ) -> Result<u64, (TimerHandle, InsertError)> {
        let when = unsafe { item.sync_when() };
        if when <= self.elapsed {
            return Err((item, InsertError::Elapsed));
        }
        // Get the level at which the entry should be stored
        let level = self.level_for(when);
        unsafe {
            self.levels[level].add_entry(item);
        }
        debug_assert!({
            self.levels[level]
                .next_expiration(self.elapsed)
                .map(|e| e.deadline >= self.elapsed)
                .unwrap_or(true)
        });
        Ok(when)
    }
    /// Removes `item` from the timing wheel.
    ///
    /// SAFETY: `item` must point to a live entry currently registered in this
    /// wheel (either in a level slot or on the pending list).
    pub(crate) unsafe fn remove(&mut self, item: NonNull<TimerShared>) {
        unsafe {
            let when = item.as_ref().registered_when();
            if when == STATE_DEREGISTERED {
                // `STATE_DEREGISTERED` marks an entry that has been moved onto
                // the pending-fire list (see `TimerHandle::mark_pending`).
                self.pending.remove(item);
            } else {
                debug_assert!(
                    self.elapsed <= when,
                    "elapsed={}; when={}",
                    self.elapsed,
                    when
                );
                let level = self.level_for(when);
                self.levels[level].remove_entry(item);
            }
        }
    }
    /// Instant at which to poll.
    pub(crate) fn poll_at(&self) -> Option<u64> {
        self.next_expiration().map(|expiration| expiration.deadline)
    }
    /// Advances the timer up to the instant represented by `now`, returning
    /// the next entry that should be fired (call repeatedly until `None`).
    pub(crate) fn poll(&mut self, now: u64) -> Option<TimerHandle> {
        loop {
            if let Some(handle) = self.pending.pop_back() {
                return Some(handle);
            }
            match self.next_expiration() {
                Some(ref expiration) if expiration.deadline <= now => {
                    self.process_expiration(expiration);
                    self.set_elapsed(expiration.deadline);
                }
                _ => {
                    // in this case the poll did not indicate an expiration
                    // _and_ we were not able to find a next expiration in
                    // the current list of timers. advance to the poll's
                    // current time and do nothing else.
                    self.set_elapsed(now);
                    break;
                }
            }
        }
        self.pending.pop_back()
    }
    /// Returns the instant at which the next timeout expires.
    fn next_expiration(&self) -> Option<Expiration> {
        if !self.pending.is_empty() {
            // Expire immediately as we have things pending firing
            return Some(Expiration {
                level: 0,
                slot: 0,
                deadline: self.elapsed,
            });
        }
        // Check all levels
        for (level_num, level) in self.levels.iter().enumerate() {
            if let Some(expiration) = level.next_expiration(self.elapsed) {
                // There cannot be any expirations at a higher level that happen
                // before this one.
                debug_assert!(self.no_expirations_before(level_num + 1, expiration.deadline));
                return Some(expiration);
            }
        }
        None
    }
    /// Returns the tick at which this timer wheel next needs to perform some
    /// processing, or None if there are no timers registered.
    pub(super) fn next_expiration_time(&self) -> Option<u64> {
        self.next_expiration().map(|ex| ex.deadline)
    }
    /// Used for debug assertions
    fn no_expirations_before(&self, start_level: usize, before: u64) -> bool {
        let mut res = true;
        for level in &self.levels[start_level..] {
            if let Some(e2) = level.next_expiration(self.elapsed) {
                if e2.deadline < before {
                    res = false;
                }
            }
        }
        res
    }
    /// iteratively find entries that are between the wheel's current
    /// time and the expiration time. for each in that population either
    /// queue it for notification (in the case of the last level) or tier
    /// it down to the next level (in all other cases).
    pub(crate) fn process_expiration(&mut self, expiration: &Expiration) {
        // Note that we need to take _all_ of the entries off the list before
        // processing any of them. This is important because it's possible that
        // those entries might need to be reinserted into the same slot.
        //
        // This happens only on the highest level, when an entry is inserted
        // more than MAX_DURATION into the future. When this happens, we wrap
        // around, and process some entries a multiple of MAX_DURATION before
        // they actually need to be dropped down a level. We then reinsert them
        // back into the same position; we must make sure we don't then process
        // those entries again or we'll end up in an infinite loop.
        let mut entries = self.take_entries(expiration);
        while let Some(item) = entries.pop_back() {
            if expiration.level == 0 {
                debug_assert_eq!(unsafe { item.registered_when() }, expiration.deadline);
            }
            // Try to expire the entry; this is cheap (doesn't synchronize) if
            // the timer is not expired, and updates registered_when.
            match unsafe { item.mark_pending(expiration.deadline) } {
                Ok(()) => {
                    // Item was expired
                    self.pending.push_front(item);
                }
                Err(expiration_tick) => {
                    // Not yet due: cascade it down to the appropriate level.
                    let level = level_for(expiration.deadline, expiration_tick);
                    unsafe {
                        self.levels[level].add_entry(item);
                    }
                }
            }
        }
    }
    /// Advances `elapsed` to `when` (monotonic; asserts time never moves
    /// backwards).
    fn set_elapsed(&mut self, when: u64) {
        assert!(
            self.elapsed <= when,
            "elapsed={:?}; when={:?}",
            self.elapsed,
            when
        );
        if when > self.elapsed {
            self.elapsed = when;
        }
    }
    /// Obtains the list of entries that need processing for the given expiration.
    fn take_entries(&mut self, expiration: &Expiration) -> EntryList {
        self.levels[expiration.level].take_slot(expiration.slot)
    }
    /// Computes the level a timer expiring at `when` belongs to, relative to
    /// the wheel's current elapsed tick.
    fn level_for(&self, when: u64) -> usize {
        level_for(self.elapsed, when)
    }
}
/// Computes the wheel level for a timer expiring at tick `when`, given the
/// wheel's current tick `elapsed`: the level is determined by the highest
/// differing 6-bit group between the two ticks.
fn level_for(elapsed: u64, when: u64) -> usize {
    const SLOT_MASK: u64 = (1 << 6) - 1;
    // OR in the low slot bits so `leading_zeros` is capped, then clamp values
    // at or beyond MAX_DURATION into the top level.
    let masked = ((elapsed ^ when) | SLOT_MASK).min(MAX_DURATION - 1);
    let significant = 63 - masked.leading_zeros() as usize;
    significant / NUM_LEVELS
}
#[cfg(all(test, not(loom)))]
mod test {
    use super::*;
    /// `level_for` maps ticks to the expected level, including values just
    /// below and above each 64^level boundary.
    #[test]
    fn test_level_for() {
        for pos in 0..64 {
            assert_eq!(0, level_for(0, pos), "level_for({pos}) -- binary = {pos:b}");
        }
        for level in 1..5 {
            for pos in level..64 {
                let a = pos * 64_usize.pow(level as u32);
                assert_eq!(
                    level,
                    level_for(0, a as u64),
                    "level_for({a}) -- binary = {a:b}"
                );
                if pos > level {
                    let a = a - 1;
                    assert_eq!(
                        level,
                        level_for(0, a as u64),
                        "level_for({a}) -- binary = {a:b}"
                    );
                }
                if pos < 64 {
                    let a = a + 1;
                    assert_eq!(
                        level,
                        level_for(0, a as u64),
                        "level_for({a}) -- binary = {a:b}"
                    );
                }
            }
        }
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/inject.rs | tokio/src/runtime/scheduler/inject.rs | //! Inject queue used to send wakeups to a work-stealing scheduler
use crate::loom::sync::Mutex;
use crate::runtime::task;
mod pop;
pub(crate) use pop::Pop;
mod shared;
pub(crate) use shared::Shared;
mod synced;
pub(crate) use synced::Synced;
cfg_rt_multi_thread! {
mod rt_multi_thread;
}
mod metrics;
/// Growable, MPMC queue used to inject new tasks into the scheduler and as an
/// overflow queue when the local, fixed-size, array queue overflows.
pub(crate) struct Inject<T: 'static> {
    /// Lock-free metadata (the length counter).
    shared: Shared<T>,

    /// Mutex-protected linked list of queued tasks.
    synced: Mutex<Synced>,
}

impl<T: 'static> Inject<T> {
    /// Creates an empty inject queue.
    pub(crate) fn new() -> Inject<T> {
        let (shared, synced) = Shared::new();
        let synced = Mutex::new(synced);

        Inject { shared, synced }
    }

    // Kind of annoying to have to include the cfg here
    #[cfg(feature = "taskdump")]
    pub(crate) fn is_closed(&self) -> bool {
        let guard = self.synced.lock();
        self.shared.is_closed(&guard)
    }

    /// Closes the injection queue, returns `true` if the queue is open when the
    /// transition is made.
    pub(crate) fn close(&self) -> bool {
        // The guard deref-coerces to `&mut Synced`.
        self.shared.close(&mut self.synced.lock())
    }

    /// Pushes a value into the queue.
    ///
    /// This does nothing if the queue is closed.
    pub(crate) fn push(&self, task: task::Notified<T>) {
        let mut guard = self.synced.lock();

        // safety: `guard` is the `Synced` that was created together with
        // `self.shared` in `new`.
        unsafe { self.shared.push(&mut guard, task) }
    }

    /// Pops a task, if any, taking the lock only when the queue is non-empty.
    pub(crate) fn pop(&self) -> Option<task::Notified<T>> {
        // Fast path: the atomic length check avoids the mutex entirely
        // when nothing is queued.
        if self.shared.is_empty() {
            None
        } else {
            let mut guard = self.synced.lock();

            // safety: `guard` is the `Synced` that was created together with
            // `self.shared` in `new`.
            unsafe { self.shared.pop(&mut guard) }
        }
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/lock.rs | tokio/src/runtime/scheduler/lock.rs | /// A lock (mutex) yielding generic data.
/// A lock (mutex) yielding generic data.
pub(crate) trait Lock<T> {
    /// Guard type returned by `lock`, granting mutable access to the `T`.
    type Handle: AsMut<T>;

    /// Acquires the lock, consuming `self` and returning the guard.
    fn lock(self) -> Self::Handle;
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/block_in_place.rs | tokio/src/runtime/scheduler/block_in_place.rs | use crate::runtime::scheduler;
/// Runs the provided closure `f`, forwarding to the multi-thread
/// scheduler's `block_in_place` implementation.
#[track_caller]
pub(crate) fn block_in_place<F, R>(f: F) -> R
where
    F: FnOnce() -> R,
{
    scheduler::multi_thread::block_in_place(f)
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/defer.rs | tokio/src/runtime/scheduler/defer.rs | use std::cell::RefCell;
use std::task::Waker;
/// A list of wakers for tasks that deferred being woken (usually ones that
/// called `task::yield_now()`), flushed by the scheduler at a later point.
pub(crate) struct Defer {
    /// Wakers waiting to be invoked by `wake`.
    deferred: RefCell<Vec<Waker>>,
}

impl Defer {
    /// Creates an empty deferred-waker list.
    pub(crate) fn new() -> Defer {
        Defer {
            deferred: RefCell::default(),
        }
    }

    /// Queues `waker` to be woken later by `wake`.
    ///
    /// Consecutive registrations by the same task are coalesced into one
    /// entry (checked via `Waker::will_wake` against the last entry).
    pub(crate) fn defer(&self, waker: &Waker) {
        let mut deferred = self.deferred.borrow_mut();

        // If the same task adds itself a bunch of times, then only add it once.
        if let Some(last) = deferred.last() {
            if last.will_wake(waker) {
                return;
            }
        }

        deferred.push(waker.clone());
    }

    /// Returns `true` if no wakers are queued.
    pub(crate) fn is_empty(&self) -> bool {
        self.deferred.borrow().is_empty()
    }

    /// Wakes all queued wakers, draining the list.
    pub(crate) fn wake(&self) {
        // Take each waker out *before* invoking it so the `RefCell` borrow
        // is released while the waker runs. A waker can execute arbitrary
        // code — if it re-entered `defer`/`is_empty` while a `while let`
        // scrutinee held the borrow (the scrutinee temporary lives through
        // the loop body), this would panic with a `BorrowMutError`.
        loop {
            // The borrow ends at this semicolon, before `wake()` is called.
            let waker = self.deferred.borrow_mut().pop();

            match waker {
                Some(waker) => waker.wake(),
                None => return,
            }
        }
    }

    /// Removes and returns all queued wakers without waking them.
    #[cfg(feature = "taskdump")]
    pub(crate) fn take_deferred(&self) -> Vec<Waker> {
        let mut deferred = self.deferred.borrow_mut();
        std::mem::take(&mut *deferred)
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/mod.rs | tokio/src/runtime/scheduler/mod.rs | cfg_rt! {
pub(crate) mod current_thread;
pub(crate) use current_thread::CurrentThread;
mod defer;
use defer::Defer;
pub(crate) mod inject;
pub(crate) use inject::Inject;
use crate::runtime::TaskHooks;
use crate::runtime::WorkerMetrics;
}
cfg_rt_multi_thread! {
mod block_in_place;
pub(crate) use block_in_place::block_in_place;
mod lock;
use lock::Lock;
pub(crate) mod multi_thread;
pub(crate) use multi_thread::MultiThread;
}
pub(super) mod util;
use crate::runtime::driver;
/// Handle to a scheduler flavor; cloning is cheap (each variant wraps an `Arc`).
#[derive(Debug, Clone)]
pub(crate) enum Handle {
    /// Single-threaded (current-thread) scheduler.
    #[cfg(feature = "rt")]
    CurrentThread(Arc<current_thread::Handle>),

    /// Multi-threaded, work-stealing scheduler.
    #[cfg(feature = "rt-multi-thread")]
    MultiThread(Arc<multi_thread::Handle>),

    // TODO: This is to avoid triggering "dead code" warnings many other places
    // in the codebase. Remove this during a later cleanup
    #[cfg(not(feature = "rt"))]
    #[allow(dead_code)]
    Disabled,
}
/// Thread-local scheduler context, one variant per scheduler flavor.
#[cfg(feature = "rt")]
pub(super) enum Context {
    CurrentThread(current_thread::Context),

    #[cfg(feature = "rt-multi-thread")]
    MultiThread(multi_thread::Context),
}
impl Handle {
    /// Returns the resource driver handle owned by this scheduler.
    #[cfg_attr(not(feature = "full"), allow(dead_code))]
    pub(crate) fn driver(&self) -> &driver::Handle {
        match *self {
            #[cfg(feature = "rt")]
            Handle::CurrentThread(ref h) => &h.driver,

            #[cfg(feature = "rt-multi-thread")]
            Handle::MultiThread(ref h) => &h.driver,

            // With no runtime feature enabled this handle is never
            // constructed with a driver to return.
            #[cfg(not(feature = "rt"))]
            Handle::Disabled => unreachable!(),
        }
    }
}
cfg_rt! {
use crate::future::Future;
use crate::loom::sync::Arc;
use crate::runtime::{blocking, task::{Id, SpawnLocation}};
use crate::runtime::context;
use crate::task::JoinHandle;
use crate::util::RngSeedGenerator;
use std::task::Waker;
/// Dispatches expression `$e` across every scheduler flavor of `$ty`
/// (`Handle` or `Context`), binding the inner value to `$h`. Avoids
/// repeating the per-variant match in methods that treat flavors uniformly.
macro_rules! match_flavor {
    ($self:expr, $ty:ident($h:ident) => $e:expr) => {
        match $self {
            $ty::CurrentThread($h) => $e,

            #[cfg(feature = "rt-multi-thread")]
            $ty::MultiThread($h) => $e,
        }
    }
}
impl Handle {
    /// Returns the handle of the currently-entered runtime.
    ///
    /// # Panics
    ///
    /// Panics when called from outside a runtime context.
    #[track_caller]
    pub(crate) fn current() -> Handle {
        match context::with_current(Clone::clone) {
            Ok(handle) => handle,
            Err(e) => panic!("{}", e),
        }
    }

    /// Returns the spawner for the blocking thread pool.
    pub(crate) fn blocking_spawner(&self) -> &blocking::Spawner {
        match_flavor!(self, Handle(h) => &h.blocking_spawner)
    }

    /// Returns `true` if this is a `LocalRuntime` (a current-thread runtime
    /// with an owning thread id recorded).
    pub(crate) fn is_local(&self) -> bool {
        match self {
            Handle::CurrentThread(h) => h.local_tid.is_some(),

            // Multi-thread runtimes are never local.
            #[cfg(feature = "rt-multi-thread")]
            Handle::MultiThread(_) => false,
        }
    }

    /// Returns which timer implementation this runtime uses.
    #[cfg(feature = "time")]
    pub(crate) fn timer_flavor(&self) -> crate::runtime::TimerFlavor {
        match self {
            // The current-thread scheduler always uses the traditional timer.
            Handle::CurrentThread(_) => crate::runtime::TimerFlavor::Traditional,

            #[cfg(feature = "rt-multi-thread")]
            Handle::MultiThread(h) => h.timer_flavor,
        }
    }

    #[cfg(all(tokio_unstable, feature = "rt-multi-thread", feature = "time"))]
    /// Returns true if both handles belong to the same runtime instance.
    pub(crate) fn is_same_runtime(&self, other: &Handle) -> bool {
        match (self, other) {
            // Same flavor: compare the identity of the shared allocation.
            (Handle::CurrentThread(a), Handle::CurrentThread(b)) => Arc::ptr_eq(a, b),
            #[cfg(feature = "rt-multi-thread")]
            (Handle::MultiThread(a), Handle::MultiThread(b)) => Arc::ptr_eq(a, b),
            #[cfg(feature = "rt-multi-thread")]
            _ => false, // different runtime types
        }
    }

    #[cfg(all(tokio_unstable, feature = "rt-multi-thread", feature = "time"))]
    /// Returns true if the runtime is shutting down.
    pub(crate) fn is_shutdown(&self) -> bool {
        match self {
            Handle::CurrentThread(_) => panic!("the alternative timer implementation is not supported on CurrentThread runtime"),
            Handle::MultiThread(h) => h.is_shutdown(),
        }
    }

    #[cfg(all(tokio_unstable, feature = "rt-multi-thread", feature = "time"))]
    /// Push a timer entry that was created outside of this runtime
    /// into the runtime-global queue. The pushed timer will be
    /// processed by a random worker thread.
    pub(crate) fn push_remote_timer(&self, entry_hdl: crate::runtime::time_alt::EntryHandle) {
        match self {
            Handle::CurrentThread(_) => panic!("the alternative timer implementation is not supported on CurrentThread runtime"),
            Handle::MultiThread(h) => h.push_remote_timer(entry_hdl),
        }
    }

    /// Returns true if this is a local runtime and the runtime is owned by the current thread.
    pub(crate) fn can_spawn_local_on_local_runtime(&self) -> bool {
        match self {
            Handle::CurrentThread(h) => h.local_tid.map(|x| std::thread::current().id() == x).unwrap_or(false),

            #[cfg(feature = "rt-multi-thread")]
            Handle::MultiThread(_) => false,
        }
    }

    /// Spawns a `Send` future onto this runtime, dispatching by flavor.
    pub(crate) fn spawn<F>(&self, future: F, id: Id, spawned_at: SpawnLocation) -> JoinHandle<F::Output>
    where
        F: Future + Send + 'static,
        F::Output: Send + 'static,
    {
        match self {
            Handle::CurrentThread(h) => current_thread::Handle::spawn(h, future, id, spawned_at),

            #[cfg(feature = "rt-multi-thread")]
            Handle::MultiThread(h) => multi_thread::Handle::spawn(h, future, id, spawned_at),
        }
    }

    /// Spawn a local task
    ///
    /// # Safety
    ///
    /// This should only be called in `LocalRuntime` if the runtime has been verified to be owned
    /// by the current thread.
    #[allow(irrefutable_let_patterns)]
    #[track_caller]
    pub(crate) unsafe fn spawn_local<F>(&self, future: F, id: Id, spawned_at: SpawnLocation) -> JoinHandle<F::Output>
    where
        F: Future + 'static,
        F::Output: 'static,
    {
        if let Handle::CurrentThread(h) = self {
            // Safety: caller guarantees that this is a `LocalRuntime`.
            unsafe { current_thread::Handle::spawn_local(h, future, id, spawned_at) }
        } else {
            panic!("Only current_thread and LocalSet have spawn_local internals implemented")
        }
    }

    /// Requests scheduler shutdown; a no-op on the current-thread flavor.
    pub(crate) fn shutdown(&self) {
        match *self {
            Handle::CurrentThread(_) => {},

            #[cfg(feature = "rt-multi-thread")]
            Handle::MultiThread(ref h) => h.shutdown(),
        }
    }

    /// Returns the runtime's RNG seed generator.
    pub(crate) fn seed_generator(&self) -> &RngSeedGenerator {
        match_flavor!(self, Handle(h) => &h.seed_generator)
    }

    /// Returns the current-thread handle.
    ///
    /// # Panics
    ///
    /// Panics if this handle is not the `CurrentThread` flavor.
    pub(crate) fn as_current_thread(&self) -> &Arc<current_thread::Handle> {
        match self {
            Handle::CurrentThread(handle) => handle,

            #[cfg(feature = "rt-multi-thread")]
            _ => panic!("not a CurrentThread handle"),
        }
    }

    /// Returns the user-supplied task lifecycle hooks.
    pub(crate) fn hooks(&self) -> &TaskHooks {
        match self {
            Handle::CurrentThread(h) => &h.task_hooks,

            #[cfg(feature = "rt-multi-thread")]
            Handle::MultiThread(h) => &h.task_hooks,
        }
    }
}
impl Handle {
    /// Returns the number of worker threads (always 1 for current-thread).
    pub(crate) fn num_workers(&self) -> usize {
        match self {
            Handle::CurrentThread(_) => 1,
            #[cfg(feature = "rt-multi-thread")]
            Handle::MultiThread(handle) => handle.num_workers(),
        }
    }

    /// Returns the number of tasks currently alive on this runtime.
    pub(crate) fn num_alive_tasks(&self) -> usize {
        match_flavor!(self, Handle(handle) => handle.num_alive_tasks())
    }

    /// Returns the number of tasks in the injection (global) queue.
    pub(crate) fn injection_queue_depth(&self) -> usize {
        match_flavor!(self, Handle(handle) => handle.injection_queue_depth())
    }

    /// Returns the metrics recorder for the worker at index `worker`.
    pub(crate) fn worker_metrics(&self, worker: usize) -> &WorkerMetrics {
        match_flavor!(self, Handle(handle) => handle.worker_metrics(worker))
    }
}
cfg_unstable_metrics! {
    use crate::runtime::SchedulerMetrics;

    impl Handle {
        cfg_64bit_metrics! {
            /// Returns the total number of tasks spawned on this runtime.
            pub(crate) fn spawned_tasks_count(&self) -> u64 {
                match_flavor!(self, Handle(handle) => handle.spawned_tasks_count())
            }
        }

        /// Returns the number of threads in the blocking pool.
        pub(crate) fn num_blocking_threads(&self) -> usize {
            match_flavor!(self, Handle(handle) => handle.num_blocking_threads())
        }

        /// Returns the number of idle threads in the blocking pool.
        pub(crate) fn num_idle_blocking_threads(&self) -> usize {
            match_flavor!(self, Handle(handle) => handle.num_idle_blocking_threads())
        }

        /// Returns the scheduler-wide metrics recorder.
        pub(crate) fn scheduler_metrics(&self) -> &SchedulerMetrics {
            match_flavor!(self, Handle(handle) => handle.scheduler_metrics())
        }

        /// Returns the local run-queue depth of the worker at `worker`.
        pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> usize {
            match_flavor!(self, Handle(handle) => handle.worker_local_queue_depth(worker))
        }

        /// Returns the number of tasks queued for the blocking pool.
        pub(crate) fn blocking_queue_depth(&self) -> usize {
            match_flavor!(self, Handle(handle) => handle.blocking_queue_depth())
        }
    }
}
impl Context {
    /// Returns the current-thread context.
    ///
    /// # Panics
    ///
    /// Panics if this is not a `CurrentThread` context.
    #[track_caller]
    // NOTE(review): the return type below was mangled to `¤t_thread` in
    // the dump (HTML entity for `&curren`); restored to `&current_thread`.
    pub(crate) fn expect_current_thread(&self) -> &current_thread::Context {
        match self {
            Context::CurrentThread(context) => context,
            #[cfg(feature = "rt-multi-thread")]
            _ => panic!("expected `CurrentThread::Context`")
        }
    }

    /// Defers waking `waker` via the flavor's deferred-waker list.
    pub(crate) fn defer(&self, waker: &Waker) {
        match_flavor!(self, Context(context) => context.defer(waker));
    }

    #[cfg(all(tokio_unstable, feature = "time", feature = "rt-multi-thread"))]
    pub(crate) fn with_time_temp_local_context<F, R>(&self, f: F) -> R
    where
        F: FnOnce(Option<crate::runtime::time_alt::TempLocalContext<'_>>) -> R,
    {
        match self {
            Context::CurrentThread(_) => panic!("the alternative timer implementation is not supported on CurrentThread runtime"),
            Context::MultiThread(context) => context.with_time_temp_local_context(f),
        }
    }

    cfg_rt_multi_thread! {
        /// Returns the multi-thread context.
        ///
        /// # Panics
        ///
        /// Panics if this is not a `MultiThread` context.
        #[track_caller]
        pub(crate) fn expect_multi_thread(&self) -> &multi_thread::Context {
            match self {
                Context::MultiThread(context) => context,
                _ => panic!("expected `MultiThread::Context`")
            }
        }
    }
}
}
cfg_not_rt! {
    #[cfg(any(
        feature = "net",
        all(unix, feature = "process"),
        all(unix, feature = "signal"),
        feature = "time",
    ))]
    impl Handle {
        /// Always panics: without the `rt` feature there is no runtime context.
        #[track_caller]
        pub(crate) fn current() -> Handle {
            panic!("{}", crate::util::error::CONTEXT_MISSING_ERROR)
        }

        /// Always panics: without the `rt` feature there is no runtime context.
        #[cfg_attr(not(feature = "time"), allow(dead_code))]
        #[track_caller]
        pub(crate) fn timer_flavor(&self) -> crate::runtime::TimerFlavor {
            panic!("{}", crate::util::error::CONTEXT_MISSING_ERROR)
        }
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/inject/rt_multi_thread.rs | tokio/src/runtime/scheduler/inject/rt_multi_thread.rs | use super::{Shared, Synced};
use crate::runtime::scheduler::Lock;
use crate::runtime::task;
use std::sync::atomic::Ordering::Release;
// A `&mut Synced` can act as its own lock: the exclusive mutable borrow
// already guarantees sole access, so `lock` is the identity function.
impl<'a> Lock<Synced> for &'a mut Synced {
    type Handle = &'a mut Synced;

    fn lock(self) -> Self::Handle {
        self
    }
}

// Allows a bare `Synced` to satisfy the `Handle: AsMut<Synced>` bound.
impl AsMut<Synced> for Synced {
    fn as_mut(&mut self) -> &mut Synced {
        self
    }
}
impl<T: 'static> Shared<T> {
    /// Pushes several values into the queue.
    ///
    /// # Safety
    ///
    /// Must be called with the same `Synced` instance returned by `Inject::new`
    #[inline]
    pub(crate) unsafe fn push_batch<L, I>(&self, shared: L, mut iter: I)
    where
        L: Lock<Synced>,
        I: Iterator<Item = task::Notified<T>>,
    {
        // An empty batch is a no-op.
        let first = match iter.next() {
            Some(first) => first.into_raw(),
            None => return,
        };

        // Link up all the tasks.
        let mut prev = first;
        let mut counter = 1;

        // We are going to be called with an `std::iter::Chain`, and that
        // iterator overrides `for_each` to something that is easier for the
        // compiler to optimize than a loop.
        iter.for_each(|next| {
            let next = next.into_raw();

            // safety: Holding the Notified for a task guarantees exclusive
            // access to the `queue_next` field.
            unsafe { prev.set_queue_next(Some(next)) };
            prev = next;
            counter += 1;
        });

        // Now that the tasks are linked together, insert them into the
        // linked list.
        //
        // Safety: exactly the same safety requirements as `push_batch` method.
        unsafe {
            self.push_batch_inner(shared, first, prev, counter);
        }
    }

    /// Inserts several tasks that have been linked together into the queue.
    ///
    /// The provided head and tail may be the same task. In this case, a
    /// single task is inserted.
    ///
    /// # Safety
    ///
    /// Must be called with the same `Synced` instance returned by `Inject::new`
    #[inline]
    unsafe fn push_batch_inner<L>(
        &self,
        shared: L,
        batch_head: task::RawTask,
        batch_tail: task::RawTask,
        num: usize,
    ) where
        L: Lock<Synced>,
    {
        // The batch must already be terminated.
        debug_assert!(unsafe { batch_tail.get_queue_next().is_none() });

        let mut synced = shared.lock();

        // A closed queue must not accept tasks; instead walk the batch and
        // reconstitute (and thereby drop) each `Notified`.
        if synced.as_mut().is_closed {
            // Release the lock before dropping the tasks.
            drop(synced);

            let mut curr = Some(batch_head);

            while let Some(task) = curr {
                // Safety: exactly the same safety requirements as `push_batch_inner`.
                curr = unsafe { task.get_queue_next() };

                let _ = unsafe { task::Notified::<T>::from_raw(task) };
            }

            return;
        }

        let synced = synced.as_mut();

        // Splice the batch onto the tail of the existing list, or make it
        // the entire list if the queue was empty.
        if let Some(tail) = synced.tail {
            unsafe {
                tail.set_queue_next(Some(batch_head));
            }
        } else {
            synced.head = Some(batch_head);
        }

        synced.tail = Some(batch_tail);

        // Increment the count.
        //
        // safety: All updates to the len atomic are guarded by the mutex. As
        // such, a non-atomic load followed by a store is safe.
        let len = unsafe { self.len.unsync_load() };

        self.len.store(len + num, Release);
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/inject/synced.rs | tokio/src/runtime/scheduler/inject/synced.rs | #![cfg_attr(
any(not(all(tokio_unstable, feature = "full")), target_family = "wasm"),
allow(dead_code)
)]
use crate::runtime::task;
/// The lock-protected half of the inject queue: an intrusive linked list
/// of raw task pointers plus the closed flag.
pub(crate) struct Synced {
    /// True if the queue is closed.
    pub(super) is_closed: bool,

    /// Linked-list head.
    pub(super) head: Option<task::RawTask>,

    /// Linked-list tail.
    pub(super) tail: Option<task::RawTask>,
}

// SAFETY: NOTE(review) — `RawTask` pointers appear to only be dereferenced
// while the owning queue's mutex is held (see `inject.rs`); confirm that
// invariant before relying on these impls elsewhere.
unsafe impl Send for Synced {}
unsafe impl Sync for Synced {}
impl Synced {
    /// Pops the task at the head of the linked list, if any.
    pub(super) fn pop<T: 'static>(&mut self) -> Option<task::Notified<T>> {
        // Empty list: nothing to pop.
        let task = self.head?;

        // Advance the head to the next task.
        self.head = unsafe { task.get_queue_next() };

        // Popped the last element: clear the tail as well.
        if self.head.is_none() {
            self.tail = None;
        }

        // Detach the popped task from the list.
        unsafe { task.set_queue_next(None) };

        // safety: a `Notified` is pushed into the queue and now it is popped!
        Some(unsafe { task::Notified::from_raw(task) })
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/inject/pop.rs | tokio/src/runtime/scheduler/inject/pop.rs | use super::Synced;
use crate::runtime::task;
use std::marker::PhantomData;
/// Batch-pop iterator over the inject queue's `Synced` list.
///
/// Yields at most `remaining` tasks; anything the caller does not consume
/// is still drained (and dropped) when the iterator is dropped.
pub(crate) struct Pop<'a, T: 'static> {
    /// How many more tasks this iterator is allowed to yield.
    remaining: usize,
    synced: &'a mut Synced,
    _p: PhantomData<T>,
}

impl<'a, T: 'static> Pop<'a, T> {
    pub(super) fn new(len: usize, synced: &'a mut Synced) -> Pop<'a, T> {
        Pop {
            remaining: len,
            synced,
            _p: PhantomData,
        }
    }
}

impl<'a, T: 'static> Iterator for Pop<'a, T> {
    type Item = task::Notified<T>;

    fn next(&mut self) -> Option<Self::Item> {
        // Stop once the budget is exhausted, even if more tasks are queued.
        self.remaining = self.remaining.checked_sub(1)?;

        let task = self.synced.pop();

        // The caller sized the budget from the queue length, so a task must
        // be available here.
        debug_assert!(task.is_some());

        task
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.remaining, Some(self.remaining))
    }
}

impl<'a, T: 'static> ExactSizeIterator for Pop<'a, T> {
    fn len(&self) -> usize {
        self.remaining
    }
}

impl<'a, T: 'static> Drop for Pop<'a, T> {
    fn drop(&mut self) {
        // Exhaust the iterator so un-consumed tasks are still popped out of
        // the queue (and therefore dropped) rather than left behind.
        for _ in self.by_ref() {}
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/inject/metrics.rs | tokio/src/runtime/scheduler/inject/metrics.rs | use super::Inject;
impl<T: 'static> Inject<T> {
    /// Returns the number of tasks currently in the inject queue.
    pub(crate) fn len(&self) -> usize {
        self.shared.len()
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/inject/shared.rs | tokio/src/runtime/scheduler/inject/shared.rs | use super::{Pop, Synced};
use crate::loom::sync::atomic::AtomicUsize;
use crate::runtime::task;
use std::marker::PhantomData;
use std::sync::atomic::Ordering::{Acquire, Release};
/// The lock-free half of the inject queue.
pub(crate) struct Shared<T: 'static> {
    /// Number of pending tasks in the queue. This helps prevent unnecessary
    /// locking in the hot path.
    pub(super) len: AtomicUsize,

    // Marks the queue as logically owning `T` tasks; the task pointers
    // themselves live in `Synced`.
    _p: PhantomData<T>,
}

// SAFETY: NOTE(review) — `Shared` itself only holds an atomic counter; the
// `PhantomData<T>` would otherwise make these impls depend on `T`. Confirm
// tasks are only touched with the queue's lock held.
unsafe impl<T> Send for Shared<T> {}
unsafe impl<T> Sync for Shared<T> {}
impl<T: 'static> Shared<T> {
    /// Creates the queue, returning its lock-free and lock-protected halves.
    pub(crate) fn new() -> (Shared<T>, Synced) {
        let inject = Shared {
            len: AtomicUsize::new(0),
            _p: PhantomData,
        };

        let synced = Synced {
            is_closed: false,
            head: None,
            tail: None,
        };

        (inject, synced)
    }

    /// Returns `true` if no tasks are queued (atomic check, no lock taken).
    pub(crate) fn is_empty(&self) -> bool {
        self.len() == 0
    }

    // Kind of annoying to have to include the cfg here
    #[cfg(any(feature = "taskdump", feature = "rt-multi-thread"))]
    pub(crate) fn is_closed(&self, synced: &Synced) -> bool {
        synced.is_closed
    }

    /// Closes the injection queue, returns `true` if the queue is open when the
    /// transition is made.
    pub(crate) fn close(&self, synced: &mut Synced) -> bool {
        if synced.is_closed {
            return false;
        }

        synced.is_closed = true;
        true
    }

    /// Returns the queue length.
    pub(crate) fn len(&self) -> usize {
        self.len.load(Acquire)
    }

    /// Pushes a value into the queue.
    ///
    /// This does nothing if the queue is closed.
    ///
    /// # Safety
    ///
    /// Must be called with the same `Synced` instance returned by `Inject::new`
    pub(crate) unsafe fn push(&self, synced: &mut Synced, task: task::Notified<T>) {
        // Closed queues silently drop new tasks (the `Notified` is consumed
        // and dropped here).
        if synced.is_closed {
            return;
        }

        // safety: only mutated with the lock held
        let len = unsafe { self.len.unsync_load() };
        let task = task.into_raw();

        // The next pointer should already be null
        debug_assert!(unsafe { task.get_queue_next().is_none() });

        // Append to the tail of the intrusive list.
        if let Some(tail) = synced.tail {
            // safety: Holding the Notified for a task guarantees exclusive
            // access to the `queue_next` field.
            unsafe { tail.set_queue_next(Some(task)) };
        } else {
            synced.head = Some(task);
        }

        synced.tail = Some(task);

        self.len.store(len + 1, Release);
    }

    /// Pop a value from the queue.
    ///
    /// # Safety
    ///
    /// Must be called with the same `Synced` instance returned by `Inject::new`
    pub(crate) unsafe fn pop(&self, synced: &mut Synced) -> Option<task::Notified<T>> {
        unsafe { self.pop_n(synced, 1).next() }
    }

    /// Pop `n` values from the queue
    ///
    /// # Safety
    ///
    /// Must be called with the same `Synced` instance returned by `Inject::new`
    pub(crate) unsafe fn pop_n<'a>(&'a self, synced: &'a mut Synced, n: usize) -> Pop<'a, T> {
        use std::cmp;

        debug_assert!(n > 0);

        // safety: All updates to the len atomic are guarded by the mutex. As
        // such, a non-atomic load followed by a store is safe.
        let len = unsafe { self.len.unsync_load() };
        // Never hand out a budget larger than what is actually queued.
        let n = cmp::min(n, len);

        // Decrement the count.
        self.len.store(len - n, Release);

        Pop::new(n, synced)
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/util/mod.rs | tokio/src/runtime/scheduler/util/mod.rs | #[cfg(all(tokio_unstable, feature = "time", feature = "rt-multi-thread"))]
pub(in crate::runtime) mod time_alt;
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/util/time_alt.rs | tokio/src/runtime/scheduler/util/time_alt.rs | use crate::runtime::scheduler::driver;
use crate::runtime::time_alt::cancellation_queue::{Receiver, Sender};
use crate::runtime::time_alt::{EntryHandle, RegistrationQueue, WakeQueue, Wheel};
use std::time::Duration;
/// Returns the smaller of two optional durations.
///
/// `None` means "no deadline" and never wins over a concrete duration; the
/// result is `None` only when both inputs are `None`.
pub(crate) fn min_duration(a: Option<Duration>, b: Option<Duration>) -> Option<Duration> {
    match (a, b) {
        (Some(x), Some(y)) => Some(x.min(y)),
        (x @ Some(_), None) => x,
        (None, y) => y,
    }
}
/// Drains newly registered timer entries into the wheel, sending entries
/// whose deadline has already passed straight to the wake queue.
pub(crate) fn process_registration_queue(
    registration_queue: &mut RegistrationQueue,
    wheel: &mut Wheel,
    tx: &Sender,
    wake_queue: &mut WakeQueue,
) {
    while let Some(hdl) = registration_queue.pop_front() {
        if hdl.deadline() <= wheel.elapsed() {
            // Already due: wake instead of inserting into the wheel.
            unsafe {
                wake_queue.push_front(hdl);
            }
        } else {
            // Safety: the entry is not registered yet
            unsafe {
                wheel.insert(hdl, tx.clone());
            }
        }
    }
}
/// Inserts timer entries handed over via the runtime-global inject list
/// into this worker's wheel; already-due entries go to the wake queue.
///
/// NOTE(review): the loop body mirrors `process_registration_queue` —
/// keep the two in sync if either changes.
pub(crate) fn insert_inject_timers(
    wheel: &mut Wheel,
    tx: &Sender,
    inject: Vec<EntryHandle>,
    wake_queue: &mut WakeQueue,
) {
    for hdl in inject {
        if hdl.deadline() <= wheel.elapsed() {
            // Already due: wake instead of inserting into the wheel.
            unsafe {
                wake_queue.push_front(hdl);
            }
        } else {
            // Safety: the entry is not registered yet
            unsafe {
                wheel.insert(hdl, tx.clone());
            }
        }
    }
}
/// Removes every cancelled timer delivered on `rx` from the wheel.
pub(crate) fn remove_cancelled_timers(wheel: &mut Wheel, rx: &mut Receiver) {
    for hdl in rx.recv_all() {
        debug_assert!(hdl.is_cancelled());

        // NOTE(review): entries at or before `elapsed` are presumably no
        // longer stored in the wheel, hence the guard — confirm.
        if hdl.deadline() > wheel.elapsed() {
            // Safety: the entry is registered in THIS wheel
            unsafe {
                wheel.remove(hdl);
            }
        }
    }
}
/// Returns the duration from "now" until the wheel's next timer expiration,
/// or `None` if the time driver is disabled or no timers are pending.
pub(crate) fn next_expiration_time(wheel: &Wheel, drv_hdl: &driver::Handle) -> Option<Duration> {
    drv_hdl.with_time(|maybe_time_hdl| {
        let Some(time_hdl) = maybe_time_hdl else {
            // time driver is not enabled, nothing to do.
            return None;
        };

        let clock = drv_hdl.clock();
        let time_source = time_hdl.time_source();

        wheel.next_expiration_time().map(|tick| {
            let now = time_source.now(clock);
            // `saturating_sub` clamps already-due expirations to zero.
            time_source.tick_to_duration(tick.saturating_sub(now))
        })
    })
}
/// Returns `true` when the mocked clock may auto-advance: the time driver
/// exists, a park duration was requested, and the clock allows advancing.
#[cfg(feature = "test-util")]
pub(crate) fn pre_auto_advance(drv_hdl: &driver::Handle, duration: Option<Duration>) -> bool {
    drv_hdl.with_time(|maybe_time_hdl| {
        // Without a time driver there is never anything to auto-advance.
        if maybe_time_hdl.is_none() {
            return false;
        }

        // `&&` keeps the clock lookup lazy, exactly as the nested-if form
        // did: the clock is only consulted when a duration was supplied.
        duration.is_some() && drv_hdl.clock().can_auto_advance()
    })
}
/// Advances the wheel to the time source's current tick and queues all
/// entries that expired onto `wake_queue`. No-op without a time driver.
pub(crate) fn process_expired_timers(
    wheel: &mut Wheel,
    drv_hdl: &driver::Handle,
    wake_queue: &mut WakeQueue,
) {
    drv_hdl.with_time(|maybe_time_hdl| {
        let Some(time_hdl) = maybe_time_hdl else {
            // time driver is not enabled, nothing to do.
            return;
        };

        let clock = drv_hdl.clock();
        let time_source = time_hdl.time_source();
        let now = time_source.now(clock);

        time_hdl.process_at_time_alt(wheel, now, wake_queue);
    });
}
/// Shuts down this worker's timers: drops cancelled entries, shuts the
/// wheel down via the time driver, then wakes every not-yet-registered
/// (non-cancelled) timer from `inject`. No-op without a time driver.
pub(crate) fn shutdown_local_timers(
    wheel: &mut Wheel,
    rx: &mut Receiver,
    inject: Vec<EntryHandle>,
    drv_hdl: &driver::Handle,
) {
    drv_hdl.with_time(|maybe_time_hdl| {
        let Some(time_hdl) = maybe_time_hdl else {
            // time driver is not enabled, nothing to do.
            return;
        };

        // Discard cancelled timers first so they are not woken below.
        remove_cancelled_timers(wheel, rx);
        time_hdl.shutdown_alt(wheel);

        let mut wake_queue = WakeQueue::new();

        // simply wake all unregistered timers
        for hdl in inject.into_iter().filter(|hdl| !hdl.is_cancelled()) {
            unsafe {
                wake_queue.push_front(hdl);
            }
        }
        wake_queue.wake_all();
    });
}
/// After parking: if a park duration was requested, the clock is paused
/// (auto-advance enabled), and the time driver did not wake, advance the
/// mocked clock by the parked duration.
///
/// # Panics
///
/// Panics if the clock refuses to advance.
#[cfg(feature = "test-util")]
pub(crate) fn post_auto_advance(drv_hdl: &driver::Handle, duration: Option<Duration>) {
    drv_hdl.with_time(|maybe_time_hdl| {
        let Some(time_hdl) = maybe_time_hdl else {
            // time driver is not enabled, nothing to do.
            return;
        };

        if let Some(park_duration) = duration {
            let clock = drv_hdl.clock();
            if clock.can_auto_advance() && !time_hdl.did_wake() {
                if let Err(msg) = clock.advance(park_duration) {
                    panic!("{msg}");
                }
            }
        }
    })
}
/// Without `test-util` the clock can never auto-advance.
#[cfg(not(feature = "test-util"))]
pub(crate) fn pre_auto_advance(_drv_hdl: &driver::Handle, _duration: Option<Duration>) -> bool {
    false
}

/// Without `test-util` there is no mocked clock to advance.
#[cfg(not(feature = "test-util"))]
pub(crate) fn post_auto_advance(_drv_hdl: &driver::Handle, _duration: Option<Duration>) {
    // No-op in non-test util builds
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/current_thread/mod.rs | tokio/src/runtime/scheduler/current_thread/mod.rs | use crate::loom::sync::atomic::AtomicBool;
use crate::loom::sync::Arc;
use crate::runtime::driver::{self, Driver};
use crate::runtime::scheduler::{self, Defer, Inject};
use crate::runtime::task::{
self, JoinHandle, OwnedTasks, Schedule, SpawnLocation, Task, TaskHarnessScheduleHooks,
};
use crate::runtime::{
blocking, context, Config, MetricsBatch, SchedulerMetrics, TaskHooks, TaskMeta, WorkerMetrics,
};
use crate::sync::notify::Notify;
use crate::util::atomic_cell::AtomicCell;
use crate::util::{waker_ref, RngSeedGenerator, Wake, WakerRef};
use std::cell::RefCell;
use std::collections::VecDeque;
use std::future::{poll_fn, Future};
use std::sync::atomic::Ordering::{AcqRel, Release};
use std::task::Poll::{Pending, Ready};
use std::task::Waker;
use std::thread::ThreadId;
use std::time::Duration;
use std::{fmt, thread};
/// Executes tasks on the current thread
pub(crate) struct CurrentThread {
    /// Core scheduler data is acquired by a thread entering `block_on`.
    core: AtomicCell<Core>,

    /// Notifier for waking up other threads to steal the
    /// driver.
    notify: Notify,
}

/// Handle to the current thread scheduler
pub(crate) struct Handle {
    /// Scheduler state shared across threads
    shared: Shared,

    /// Resource driver handles
    pub(crate) driver: driver::Handle,

    /// Blocking pool spawner
    pub(crate) blocking_spawner: blocking::Spawner,

    /// Current random number generator seed
    pub(crate) seed_generator: RngSeedGenerator,

    /// User-supplied hooks to invoke for things
    pub(crate) task_hooks: TaskHooks,

    /// If this is a `LocalRuntime`, flags the owning thread ID.
    pub(crate) local_tid: Option<ThreadId>,
}

/// Data required for executing the scheduler. The struct is passed around to
/// a function that will perform the scheduling work and acts as a capability token.
struct Core {
    /// Scheduler run queue
    tasks: VecDeque<Notified>,

    /// Current tick
    tick: u32,

    /// Runtime driver
    ///
    /// The driver is removed before starting to park the thread
    driver: Option<Driver>,

    /// Metrics batch
    metrics: MetricsBatch,

    /// How often to check the global queue
    global_queue_interval: u32,

    /// True if a task panicked without being handled and the runtime is
    /// configured to shutdown on unhandled panic.
    unhandled_panic: bool,
}

/// Scheduler state shared between threads.
struct Shared {
    /// Remote run queue
    inject: Inject<Arc<Handle>>,

    /// Collection of all active tasks spawned onto this executor.
    owned: OwnedTasks<Arc<Handle>>,

    /// Indicates whether the blocked on thread was woken.
    woken: AtomicBool,

    /// Scheduler configuration options
    config: Config,

    /// Keeps track of various runtime metrics.
    scheduler_metrics: SchedulerMetrics,

    /// This scheduler only has one worker.
    worker_metrics: WorkerMetrics,
}

/// Thread-local context.
///
/// pub(crate) to store in `runtime::context`.
pub(crate) struct Context {
    /// Scheduler handle
    handle: Arc<Handle>,

    /// Scheduler core, enabling the holder of `Context` to execute the
    /// scheduler.
    core: RefCell<Option<Box<Core>>>,

    /// Deferred tasks, usually ones that called `task::yield_now()`.
    pub(crate) defer: Defer,
}

/// A task notified to run on this scheduler.
type Notified = task::Notified<Arc<Handle>>;

/// Initial queue capacity.
const INITIAL_CAPACITY: usize = 64;

/// Used if none is specified. This is a temporary constant and will be removed
/// as we unify tuning logic between the multi-thread and current-thread
/// schedulers.
const DEFAULT_GLOBAL_QUEUE_INTERVAL: u32 = 31;
impl CurrentThread {
pub(crate) fn new(
driver: Driver,
driver_handle: driver::Handle,
blocking_spawner: blocking::Spawner,
seed_generator: RngSeedGenerator,
config: Config,
local_tid: Option<ThreadId>,
) -> (CurrentThread, Arc<Handle>) {
let worker_metrics = WorkerMetrics::from_config(&config);
worker_metrics.set_thread_id(thread::current().id());
// Get the configured global queue interval, or use the default.
let global_queue_interval = config
.global_queue_interval
.unwrap_or(DEFAULT_GLOBAL_QUEUE_INTERVAL);
let handle = Arc::new(Handle {
task_hooks: TaskHooks {
task_spawn_callback: config.before_spawn.clone(),
task_terminate_callback: config.after_termination.clone(),
#[cfg(tokio_unstable)]
before_poll_callback: config.before_poll.clone(),
#[cfg(tokio_unstable)]
after_poll_callback: config.after_poll.clone(),
},
shared: Shared {
inject: Inject::new(),
owned: OwnedTasks::new(1),
woken: AtomicBool::new(false),
config,
scheduler_metrics: SchedulerMetrics::new(),
worker_metrics,
},
driver: driver_handle,
blocking_spawner,
seed_generator,
local_tid,
});
let core = AtomicCell::new(Some(Box::new(Core {
tasks: VecDeque::with_capacity(INITIAL_CAPACITY),
tick: 0,
driver: Some(driver),
metrics: MetricsBatch::new(&handle.shared.worker_metrics),
global_queue_interval,
unhandled_panic: false,
})));
let scheduler = CurrentThread {
core,
notify: Notify::new(),
};
(scheduler, handle)
}
#[track_caller]
pub(crate) fn block_on<F: Future>(&self, handle: &scheduler::Handle, future: F) -> F::Output {
pin!(future);
crate::runtime::context::enter_runtime(handle, false, |blocking| {
let handle = handle.as_current_thread();
// Attempt to steal the scheduler core and block_on the future if we can
// there, otherwise, lets select on a notification that the core is
// available or the future is complete.
loop {
if let Some(core) = self.take_core(handle) {
handle
.shared
.worker_metrics
.set_thread_id(thread::current().id());
return core.block_on(future);
} else {
let notified = self.notify.notified();
pin!(notified);
if let Some(out) = blocking
.block_on(poll_fn(|cx| {
if notified.as_mut().poll(cx).is_ready() {
return Ready(None);
}
if let Ready(out) = future.as_mut().poll(cx) {
return Ready(Some(out));
}
Pending
}))
.expect("Failed to `Enter::block_on`")
{
return out;
}
}
}
})
}
/// Attempts to take exclusive ownership of the scheduler core.
///
/// Returns `None` when another thread currently holds the core. On
/// success, the returned guard ensures the core is placed back into the
/// scheduler slot even if the driven future panics.
fn take_core(&self, handle: &Arc<Handle>) -> Option<CoreGuard<'_>> {
    self.core.take().map(|core| CoreGuard {
        context: scheduler::Context::CurrentThread(Context {
            handle: handle.clone(),
            core: RefCell::new(Some(core)),
            defer: Defer::new(),
        }),
        scheduler: self,
    })
}
/// Shuts the scheduler down, draining and dropping all remaining tasks.
pub(crate) fn shutdown(&mut self, handle: &scheduler::Handle) {
    let handle = handle.as_current_thread();

    // Avoid a double panic if we are currently panicking and
    // the lock may be poisoned.
    let core = match self.take_core(handle) {
        Some(core) => core,
        None if std::thread::panicking() => return,
        None => panic!("Oh no! We never placed the Core back, this is a bug!"),
    };

    // Check that the thread-local is not being destroyed
    let tls_available = context::with_current(|_| ()).is_ok();

    if tls_available {
        core.enter(|core, _context| {
            let core = shutdown2(core, handle);
            (core, ())
        });
    } else {
        // Shutdown without setting the context. `tokio::spawn` calls will
        // fail, but those will fail either way because the thread-local is
        // not available anymore.
        let context = core.context.expect_current_thread();
        let core = context.core.borrow_mut().take().unwrap();

        let core = shutdown2(core, handle);
        *context.core.borrow_mut() = Some(core);
    }
}
}
/// Drains every task source (owned set, local queue, injection queue) and
/// shuts down the resource drivers, returning the core to the caller.
fn shutdown2(mut core: Box<Core>, handle: &Handle) -> Box<Core> {
    // Drain the OwnedTasks collection. This call also closes the
    // collection, ensuring that no tasks are ever pushed after this
    // call returns.
    handle.shared.owned.close_and_shutdown_all(0);

    // Drain local queue
    // We already shut down every task, so we just need to drop the task.
    while let Some(task) = core.next_local_task(handle) {
        drop(task);
    }

    // Close the injection queue
    handle.shared.inject.close();

    // Drain remote queue
    while let Some(task) = handle.shared.inject.pop() {
        drop(task);
    }

    // Every task must have been released by this point.
    assert!(handle.shared.owned.is_empty());

    // Submit metrics
    core.submit_metrics(handle);

    // Shutdown the resource drivers
    if let Some(driver) = core.driver.as_mut() {
        driver.shutdown(&handle.driver);
    }

    core
}
impl fmt::Debug for CurrentThread {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // No interesting state to expose; render only the type name.
        let mut dbg = f.debug_struct("CurrentThread");
        dbg.finish()
    }
}
// ===== impl Core =====
impl Core {
    /// Advances the scheduler tick counter, wrapping on overflow.
    fn tick(&mut self) {
        self.tick = self.tick.wrapping_add(1);
    }

    /// Picks the next task, alternating between the local and the remote
    /// (injection) queue.
    ///
    /// Every `global_queue_interval` ticks the remote queue is consulted
    /// first so injected tasks cannot be starved by local ones.
    fn next_task(&mut self, handle: &Handle) -> Option<Notified> {
        let check_remote_first = self.tick % self.global_queue_interval == 0;

        if check_remote_first {
            handle
                .next_remote_task()
                .or_else(|| self.next_local_task(handle))
        } else {
            self.next_local_task(handle)
                .or_else(|| handle.next_remote_task())
        }
    }

    /// Pops the next task from the local queue, keeping the queue-depth
    /// metric in sync.
    fn next_local_task(&mut self, handle: &Handle) -> Option<Notified> {
        let task = self.tasks.pop_front();
        let depth = self.tasks.len();
        handle.shared.worker_metrics.set_queue_depth(depth);
        task
    }

    /// Appends a task to the local queue, recording the schedule event and
    /// updating the queue-depth metric.
    fn push_task(&mut self, handle: &Handle, task: Notified) {
        self.tasks.push_back(task);
        self.metrics.inc_local_schedule_count();
        let depth = self.tasks.len();
        handle.shared.worker_metrics.set_queue_depth(depth);
    }

    /// Flushes this worker's batched metrics to the shared metrics store.
    fn submit_metrics(&mut self, handle: &Handle) {
        self.metrics.submit(&handle.shared.worker_metrics, 0);
    }
}
#[cfg(feature = "taskdump")]
fn wake_deferred_tasks_and_free(context: &Context) {
    // Taking the deferred wakers out releases the backing allocation
    // once each one has been woken.
    for waker in context.defer.take_deferred() {
        waker.wake();
    }
}
// ===== impl Context =====
impl Context {
    /// Execute the closure with the given scheduler core stored in the
    /// thread-local context.
    fn run_task<R>(&self, mut core: Box<Core>, f: impl FnOnce() -> R) -> (Box<Core>, R) {
        core.metrics.start_poll();
        let mut ret = self.enter(core, || crate::task::coop::budget(f));
        ret.0.metrics.end_poll();
        ret
    }

    /// Blocks the current thread until an event is received by the driver,
    /// including I/O events, timer events, ...
    fn park(&self, mut core: Box<Core>, handle: &Handle) -> Box<Core> {
        // Take the driver out of the core so the user callbacks below run
        // without it; it is put back before returning.
        let mut driver = core.driver.take().expect("driver missing");

        if let Some(f) = &handle.shared.config.before_park {
            let (c, ()) = self.enter(core, || f());
            core = c;
        }

        // This check will fail if `before_park` spawns a task for us to run
        // instead of parking the thread
        if core.tasks.is_empty() {
            // Park until the thread is signaled
            core.metrics.about_to_park();
            core.submit_metrics(handle);

            core = self.park_internal(core, handle, &mut driver, None);

            core.metrics.unparked();
            core.submit_metrics(handle);
        }

        if let Some(f) = &handle.shared.config.after_unpark {
            let (c, ()) = self.enter(core, || f());
            core = c;
        }

        core.driver = Some(driver);
        core
    }

    /// Checks the driver for new events without blocking the thread.
    fn park_yield(&self, mut core: Box<Core>, handle: &Handle) -> Box<Core> {
        let mut driver = core.driver.take().expect("driver missing");

        core.submit_metrics(handle);

        // A zero-duration timeout polls the driver without sleeping.
        core = self.park_internal(core, handle, &mut driver, Some(Duration::from_millis(0)));
        core.driver = Some(driver);
        core
    }

    /// Parks on the driver (optionally with a timeout), then wakes any
    /// wakers that were deferred while tasks were running.
    fn park_internal(
        &self,
        core: Box<Core>,
        handle: &Handle,
        driver: &mut Driver,
        duration: Option<Duration>,
    ) -> Box<Core> {
        let (core, ()) = self.enter(core, || {
            match duration {
                Some(dur) => driver.park_timeout(&handle.driver, dur),
                None => driver.park(&handle.driver),
            }
            self.defer.wake();
        });
        core
    }

    /// Stores `core` in the thread-local context for the duration of `f`,
    /// then takes it back out.
    fn enter<R>(&self, core: Box<Core>, f: impl FnOnce() -> R) -> (Box<Core>, R) {
        // Store the scheduler core in the thread-local context
        //
        // A drop-guard is employed at a higher level.
        *self.core.borrow_mut() = Some(core);

        // Execute the closure while tracking the execution budget
        let ret = f();

        // Take the scheduler core back
        let core = self.core.borrow_mut().take().expect("core missing");
        (core, ret)
    }

    /// Defers `waker` so it is woken at the next driver park.
    pub(crate) fn defer(&self, waker: &Waker) {
        self.defer.defer(waker);
    }
}
// ===== impl Handle =====
impl Handle {
    /// Spawns a future onto the `CurrentThread` scheduler
    #[track_caller]
    pub(crate) fn spawn<F>(
        me: &Arc<Self>,
        future: F,
        id: crate::runtime::task::Id,
        spawned_at: SpawnLocation,
    ) -> JoinHandle<F::Output>
    where
        F: crate::future::Future + Send + 'static,
        F::Output: Send + 'static,
    {
        let (handle, notified) = me.shared.owned.bind(future, me.clone(), id, spawned_at);

        // Run the user's spawn hook before the task can be scheduled.
        me.task_hooks.spawn(&TaskMeta {
            id,
            spawned_at,
            _phantom: Default::default(),
        });

        // `notified` is `Some` when the new task is immediately runnable.
        if let Some(notified) = notified {
            me.schedule(notified);
        }

        handle
    }

    /// Spawn a task which isn't safe to send across thread boundaries onto the runtime.
    ///
    /// # Safety
    ///
    /// This should only be used when this is a `LocalRuntime` or in another case where the runtime
    /// provably cannot be driven from or moved to different threads from the one on which the task
    /// is spawned.
    #[track_caller]
    pub(crate) unsafe fn spawn_local<F>(
        me: &Arc<Self>,
        future: F,
        id: crate::runtime::task::Id,
        spawned_at: SpawnLocation,
    ) -> JoinHandle<F::Output>
    where
        F: crate::future::Future + 'static,
        F::Output: 'static,
    {
        // Safety: the caller guarantees that this is only called on a `LocalRuntime`.
        let (handle, notified) = unsafe {
            me.shared
                .owned
                .bind_local(future, me.clone(), id, spawned_at)
        };

        me.task_hooks.spawn(&TaskMeta {
            id,
            spawned_at,
            _phantom: Default::default(),
        });

        if let Some(notified) = notified {
            me.schedule(notified);
        }

        handle
    }

    /// Capture a snapshot of this runtime's state.
    #[cfg(all(
        tokio_unstable,
        feature = "taskdump",
        target_os = "linux",
        any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64")
    ))]
    pub(crate) fn dump(&self) -> crate::runtime::Dump {
        use crate::runtime::dump;
        use task::trace::trace_current_thread;

        let mut traces = vec![];

        // todo: how to make this work outside of a runtime context?
        context::with_scheduler(|maybe_context| {
            // drain the local queue
            let context = if let Some(context) = maybe_context {
                context.expect_current_thread()
            } else {
                return;
            };
            let mut maybe_core = context.core.borrow_mut();
            let core = if let Some(core) = maybe_core.as_mut() {
                core
            } else {
                return;
            };
            let local = &mut core.tasks;

            if self.shared.inject.is_closed() {
                return;
            }

            traces = trace_current_thread(&self.shared.owned, local, &self.shared.inject)
                .into_iter()
                .map(|(id, trace)| dump::Task::new(id, trace))
                .collect();

            // Avoid double borrow panic
            drop(maybe_core);

            // Taking a taskdump could wakes every task, but we probably don't want
            // the `yield_now` vector to be that large under normal circumstances.
            // Therefore, we free its allocation.
            wake_deferred_tasks_and_free(context);
        });

        dump::Dump::new(traces)
    }

    /// Pops one task from the injection (remote) queue.
    fn next_remote_task(&self) -> Option<Notified> {
        self.shared.inject.pop()
    }

    fn waker_ref(me: &Arc<Self>) -> WakerRef<'_> {
        // Set woken to true when enter block_on, ensure outer future
        // be polled for the first time when enter loop
        me.shared.woken.store(true, Release);
        waker_ref(me)
    }

    // reset woken to false and return original value
    pub(crate) fn reset_woken(&self) -> bool {
        self.shared.woken.swap(false, AcqRel)
    }

    pub(crate) fn num_alive_tasks(&self) -> usize {
        self.shared.owned.num_alive_tasks()
    }

    pub(crate) fn injection_queue_depth(&self) -> usize {
        self.shared.inject.len()
    }

    /// Returns the metrics for the given worker; the current-thread
    /// scheduler only has worker `0`.
    pub(crate) fn worker_metrics(&self, worker: usize) -> &WorkerMetrics {
        assert_eq!(0, worker);
        &self.shared.worker_metrics
    }
}
// Metrics accessors only available with `tokio_unstable` metrics enabled.
cfg_unstable_metrics! {
    impl Handle {
        // Scheduler-wide (as opposed to per-worker) metrics.
        pub(crate) fn scheduler_metrics(&self) -> &SchedulerMetrics {
            &self.shared.scheduler_metrics
        }

        // Depth of the (single) worker's local run queue.
        pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> usize {
            self.worker_metrics(worker).queue_depth()
        }

        pub(crate) fn num_blocking_threads(&self) -> usize {
            self.blocking_spawner.num_threads()
        }

        pub(crate) fn num_idle_blocking_threads(&self) -> usize {
            self.blocking_spawner.num_idle_threads()
        }

        pub(crate) fn blocking_queue_depth(&self) -> usize {
            self.blocking_spawner.queue_depth()
        }

        cfg_64bit_metrics! {
            // Total number of tasks ever spawned on this runtime.
            pub(crate) fn spawned_tasks_count(&self) -> u64 {
                self.shared.owned.spawned_tasks_count()
            }
        }
    }
}
use std::num::NonZeroU64;
impl Handle {
    /// Returns the unique id of this scheduler's owned-task collection.
    pub(crate) fn owned_id(&self) -> NonZeroU64 {
        let owned = &self.shared.owned;
        owned.id
    }
}
impl fmt::Debug for Handle {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Internals are intentionally elided from the debug output.
        let mut dbg = f.debug_struct("current_thread::Handle { ... }");
        dbg.finish()
    }
}
// ===== impl Shared =====
impl Schedule for Arc<Handle> {
    /// Removes a completed task from the owned set.
    fn release(&self, task: &Task<Self>) -> Option<Task<Self>> {
        self.shared.owned.remove(task)
    }

    /// Schedules a task: onto the local queue when called from inside this
    /// runtime, otherwise onto the injection queue (waking the driver).
    fn schedule(&self, task: task::Notified<Self>) {
        use scheduler::Context::CurrentThread;

        context::with_scheduler(|maybe_cx| match maybe_cx {
            Some(CurrentThread(cx)) if Arc::ptr_eq(self, &cx.handle) => {
                let mut core = cx.core.borrow_mut();

                // If `None`, the runtime is shutting down, so there is no need
                // to schedule the task.
                if let Some(core) = core.as_mut() {
                    core.push_task(self, task);
                }
            }
            _ => {
                // Track that a task was scheduled from **outside** of the runtime.
                self.shared.scheduler_metrics.inc_remote_schedule_count();

                // Schedule the task
                self.shared.inject.push(task);
                self.driver.unpark();
            }
        });
    }

    fn hooks(&self) -> TaskHarnessScheduleHooks {
        TaskHarnessScheduleHooks {
            task_terminate_callback: self.task_hooks.task_terminate_callback.clone(),
        }
    }

    cfg_unstable! {
        fn unhandled_panic(&self) {
            use crate::runtime::UnhandledPanic;

            match self.shared.config.unhandled_panic {
                UnhandledPanic::Ignore => {
                    // Do nothing
                }
                UnhandledPanic::ShutdownRuntime => {
                    use scheduler::Context::CurrentThread;

                    // This hook is only called from within the runtime, so
                    // `context::with_scheduler` should match with `&self`, i.e.
                    // there is no opportunity for a nested scheduler to be
                    // called.
                    context::with_scheduler(|maybe_cx| match maybe_cx {
                        Some(CurrentThread(cx)) if Arc::ptr_eq(self, &cx.handle) => {
                            let mut core = cx.core.borrow_mut();

                            // If `None`, the runtime is shutting down, so there is no need to signal shutdown
                            if let Some(core) = core.as_mut() {
                                core.unhandled_panic = true;
                                self.shared.owned.close_and_shutdown_all(0);
                            }
                        }
                        _ => unreachable!("runtime core not set in CURRENT thread-local"),
                    })
                }
            }
        }
    }
}
impl Wake for Handle {
    fn wake(arc_self: Arc<Self>) {
        // Ownership of the `Arc` is not needed; reuse the by-ref path.
        Wake::wake_by_ref(&arc_self);
    }

    /// Wake by reference
    fn wake_by_ref(arc_self: &Arc<Self>) {
        // The `Release` store pairs with the `AcqRel` swap in
        // `reset_woken`; it must happen before unparking the driver.
        arc_self.shared.woken.store(true, Release);
        arc_self.driver.unpark();
    }
}
// ===== CoreGuard =====
/// Used to ensure we always place the `Core` value back into its slot in
/// `CurrentThread`, even if the future panics.
struct CoreGuard<'a> {
    /// Scheduler context holding the core while the guard is alive.
    context: scheduler::Context,

    /// Scheduler to return the core to when the guard is dropped.
    scheduler: &'a CurrentThread,
}
impl CoreGuard<'_> {
    /// Runs the scheduler loop, driving `future` to completion.
    ///
    /// Returns the future's output, or panics if a spawned task panicked
    /// and the runtime is configured to shut down on unhandled panics.
    #[track_caller]
    fn block_on<F: Future>(self, future: F) -> F::Output {
        let ret = self.enter(|mut core, context| {
            let waker = Handle::waker_ref(&context.handle);
            let mut cx = std::task::Context::from_waker(&waker);

            pin!(future);

            core.metrics.start_processing_scheduled_tasks();

            'outer: loop {
                let handle = &context.handle;

                // Only poll the root future when a wake-up was observed.
                if handle.reset_woken() {
                    let (c, res) = context.enter(core, || {
                        crate::task::coop::budget(|| future.as_mut().poll(&mut cx))
                    });

                    core = c;

                    if let Ready(v) = res {
                        return (core, Some(v));
                    }
                }

                for _ in 0..handle.shared.config.event_interval {
                    // Make sure we didn't hit an unhandled_panic
                    if core.unhandled_panic {
                        return (core, None);
                    }

                    core.tick();

                    let entry = core.next_task(handle);

                    let task = match entry {
                        Some(entry) => entry,
                        None => {
                            core.metrics.end_processing_scheduled_tasks();

                            // With deferred wakers pending, new work may
                            // appear immediately, so only poll the driver
                            // instead of fully parking.
                            core = if !context.defer.is_empty() {
                                context.park_yield(core, handle)
                            } else {
                                context.park(core, handle)
                            };

                            core.metrics.start_processing_scheduled_tasks();

                            // Try polling the `block_on` future next
                            continue 'outer;
                        }
                    };

                    let task = context.handle.shared.owned.assert_owner(task);

                    #[cfg(tokio_unstable)]
                    let task_meta = task.task_meta();

                    let (c, ()) = context.run_task(core, || {
                        #[cfg(tokio_unstable)]
                        context.handle.task_hooks.poll_start_callback(&task_meta);

                        task.run();

                        #[cfg(tokio_unstable)]
                        context.handle.task_hooks.poll_stop_callback(&task_meta);
                    });

                    core = c;
                }

                core.metrics.end_processing_scheduled_tasks();

                // Yield to the driver, this drives the timer and pulls any
                // pending I/O events.
                core = context.park_yield(core, handle);

                core.metrics.start_processing_scheduled_tasks();
            }
        });

        match ret {
            Some(ret) => ret,
            None => {
                // `block_on` panicked.
                panic!("a spawned task panicked and the runtime is configured to shut down on unhandled panic");
            }
        }
    }

    /// Enters the scheduler context. This sets the queue and other necessary
    /// scheduler state in the thread-local.
    fn enter<F, R>(self, f: F) -> R
    where
        F: FnOnce(Box<Core>, &Context) -> (Box<Core>, R),
    {
        let context = self.context.expect_current_thread();

        // Remove `core` from `context` to pass into the closure.
        let core = context.core.borrow_mut().take().expect("core missing");

        // Call the closure and place `core` back
        let (core, ret) = context::set_scheduler(&self.context, || f(core, context));
        *context.core.borrow_mut() = Some(core);

        ret
    }
}
impl Drop for CoreGuard<'_> {
    fn drop(&mut self) {
        let context = self.context.expect_current_thread();

        // Pull the core out of the context (it is `None` when the guard
        // was consumed normally).
        let maybe_core = context.core.borrow_mut().take();

        if let Some(core) = maybe_core {
            // Replace old scheduler back into the state to allow
            // other threads to pick it up and drive it.
            self.scheduler.core.set(core);

            // Wake up other possible threads that could steal the driver.
            self.scheduler.notify.notify_one();
        }
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/multi_thread/idle.rs | tokio/src/runtime/scheduler/multi_thread/idle.rs | //! Coordinates idling workers
use crate::loom::sync::atomic::AtomicUsize;
use crate::runtime::scheduler::multi_thread::Shared;
use std::fmt;
use std::sync::atomic::Ordering::{self, SeqCst};
/// Coordinates parked ("idle") and searching workers for the
/// multi-thread scheduler.
pub(super) struct Idle {
    /// Tracks both the number of searching workers and the number of unparked
    /// workers.
    ///
    /// Used as a fast-path to avoid acquiring the lock when needed.
    state: AtomicUsize,

    /// Total number of workers.
    num_workers: usize,
}
/// Data synchronized by the scheduler mutex
pub(super) struct Synced {
    /// Sleeping workers
    ///
    /// Worker indices: pushed when a worker parks, popped when picking a
    /// worker to notify.
    sleepers: Vec<usize>,
}
/// Bit offset of the "unparked workers" counter within `State`.
const UNPARK_SHIFT: usize = 16;
/// Mask selecting the unparked-worker count (the upper bits).
const UNPARK_MASK: usize = !SEARCH_MASK;
/// Mask selecting the searching-worker count (the low 16 bits).
const SEARCH_MASK: usize = (1 << UNPARK_SHIFT) - 1;
/// Packed snapshot of `Idle::state`: searching workers in the low bits,
/// unparked workers in the high bits.
#[derive(Copy, Clone)]
struct State(usize);
impl Idle {
    /// Creates the idle tracker plus its mutex-protected half.
    pub(super) fn new(num_workers: usize) -> (Idle, Synced) {
        let init = State::new(num_workers);

        let idle = Idle {
            state: AtomicUsize::new(init.into()),
            num_workers,
        };

        let synced = Synced {
            sleepers: Vec::with_capacity(num_workers),
        };

        (idle, synced)
    }

    /// If there are no workers actively searching, returns the index of a
    /// worker currently sleeping.
    pub(super) fn worker_to_notify(&self, shared: &Shared) -> Option<usize> {
        // If at least one worker is spinning, work being notified will
        // eventually be found. A searching thread will find **some** work and
        // notify another worker, eventually leading to our work being found.
        //
        // For this to happen, this load must happen before the thread
        // transitioning `num_searching` to zero. Acquire / Release does not
        // provide sufficient guarantees, so this load is done with `SeqCst` and
        // will pair with the `fetch_sub(1)` when transitioning out of
        // searching.
        if !self.notify_should_wakeup() {
            return None;
        }

        // Acquire the lock
        let mut lock = shared.synced.lock();

        // Check again, now that the lock is acquired
        if !self.notify_should_wakeup() {
            return None;
        }

        // A worker should be woken up, atomically increment the number of
        // searching workers as well as the number of unparked workers.
        State::unpark_one(&self.state, 1);

        // Get the worker to unpark
        let ret = lock.idle.sleepers.pop();
        debug_assert!(ret.is_some());

        ret
    }

    /// Returns `true` if the worker needs to do a final check for submitted
    /// work.
    pub(super) fn transition_worker_to_parked(
        &self,
        shared: &Shared,
        worker: usize,
        is_searching: bool,
    ) -> bool {
        // Acquire the lock
        let mut lock = shared.synced.lock();

        // Decrement the number of unparked threads
        let ret = State::dec_num_unparked(&self.state, is_searching);

        // Track the sleeping worker
        lock.idle.sleepers.push(worker);

        ret
    }

    /// Attempts to move this worker into the "searching" set; refuses when
    /// half or more of the workers are already searching.
    pub(super) fn transition_worker_to_searching(&self) -> bool {
        let state = State::load(&self.state, SeqCst);
        if 2 * state.num_searching() >= self.num_workers {
            return false;
        }

        // It is possible for this routine to allow more than 50% of the workers
        // to search. That is OK. Limiting searchers is only an optimization to
        // prevent too much contention.
        State::inc_num_searching(&self.state, SeqCst);
        true
    }

    /// A lightweight transition from searching -> running.
    ///
    /// Returns `true` if this is the final searching worker. The caller
    /// **must** notify a new worker.
    pub(super) fn transition_worker_from_searching(&self) -> bool {
        State::dec_num_searching(&self.state)
    }

    /// Unpark a specific worker. This happens if tasks are submitted from
    /// within the worker's park routine.
    ///
    /// Returns `true` if the worker was parked before calling the method.
    pub(super) fn unpark_worker_by_id(&self, shared: &Shared, worker_id: usize) -> bool {
        let mut lock = shared.synced.lock();
        let sleepers = &mut lock.idle.sleepers;

        for index in 0..sleepers.len() {
            if sleepers[index] == worker_id {
                sleepers.swap_remove(index);

                // Update the state accordingly while the lock is held.
                State::unpark_one(&self.state, 0);

                return true;
            }
        }

        false
    }

    /// Returns `true` if `worker_id` is contained in the sleep set.
    pub(super) fn is_parked(&self, shared: &Shared, worker_id: usize) -> bool {
        let lock = shared.synced.lock();
        lock.idle.sleepers.contains(&worker_id)
    }

    fn notify_should_wakeup(&self) -> bool {
        // `fetch_add(0)` is a read-modify-write: it provides the `SeqCst`
        // pairing described in `worker_to_notify` that a plain load would
        // not. Do not "simplify" this to `load`.
        let state = State(self.state.fetch_add(0, SeqCst));
        state.num_searching() == 0 && state.num_unparked() < self.num_workers
    }
}
impl State {
    fn new(num_workers: usize) -> State {
        // All workers start in the unparked state
        let ret = State(num_workers << UNPARK_SHIFT);
        debug_assert_eq!(num_workers, ret.num_unparked());
        debug_assert_eq!(0, ret.num_searching());
        ret
    }

    fn load(cell: &AtomicUsize, ordering: Ordering) -> State {
        State(cell.load(ordering))
    }

    /// Atomically increments the unparked count and, when `num_searching`
    /// is 1, the searching count as well — a single packed add.
    fn unpark_one(cell: &AtomicUsize, num_searching: usize) {
        cell.fetch_add(num_searching | (1 << UNPARK_SHIFT), SeqCst);
    }

    fn inc_num_searching(cell: &AtomicUsize, ordering: Ordering) {
        cell.fetch_add(1, ordering);
    }

    /// Returns `true` if this is the final searching worker
    fn dec_num_searching(cell: &AtomicUsize) -> bool {
        let state = State(cell.fetch_sub(1, SeqCst));
        state.num_searching() == 1
    }

    /// Track a sleeping worker
    ///
    /// Returns `true` if this is the final searching worker.
    fn dec_num_unparked(cell: &AtomicUsize, is_searching: bool) -> bool {
        // Packed subtraction: always one unparked worker, plus one
        // searching worker if this worker was searching.
        let mut dec = 1 << UNPARK_SHIFT;

        if is_searching {
            dec += 1;
        }

        let prev = State(cell.fetch_sub(dec, SeqCst));
        is_searching && prev.num_searching() == 1
    }

    /// Number of workers currently searching
    fn num_searching(self) -> usize {
        self.0 & SEARCH_MASK
    }

    /// Number of workers currently unparked
    fn num_unparked(self) -> usize {
        (self.0 & UNPARK_MASK) >> UNPARK_SHIFT
    }
}
impl From<usize> for State {
fn from(src: usize) -> State {
State(src)
}
}
impl From<State> for usize {
fn from(src: State) -> usize {
src.0
}
}
impl fmt::Debug for State {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Decode the packed counters for a readable representation.
        let mut dbg = fmt.debug_struct("worker::State");
        dbg.field("num_unparked", &self.num_unparked());
        dbg.field("num_searching", &self.num_searching());
        dbg.finish()
    }
}
#[test]
fn test_state() {
    // The two bit fields must partition the usize exactly: disjoint and
    // jointly covering every bit.
    assert_eq!(UNPARK_MASK & SEARCH_MASK, 0);
    assert_eq!(!(UNPARK_MASK | SEARCH_MASK), 0);

    // A fresh state reports every worker unparked and none searching.
    let state = State::new(10);
    assert_eq!(state.num_unparked(), 10);
    assert_eq!(state.num_searching(), 0);
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/multi_thread/stats.rs | tokio/src/runtime/scheduler/multi_thread/stats.rs | use crate::runtime::{Config, MetricsBatch, WorkerMetrics};
use std::time::{Duration, Instant};
/// Per-worker statistics. This is used for both tuning the scheduler and
/// reporting runtime-level metrics/stats.
pub(crate) struct Stats {
    /// The metrics batch used to report runtime-level metrics/stats to the
    /// user.
    batch: MetricsBatch,

    /// Instant at which work last resumed (continued after park).
    ///
    /// This duplicates the value stored in `MetricsBatch`. We will unify
    /// `Stats` and `MetricsBatch` when we stabilize metrics.
    processing_scheduled_tasks_started_at: Instant,

    /// Number of tasks polled in the batch of scheduled tasks
    tasks_polled_in_batch: usize,

    /// Exponentially-weighted moving average of time spent polling scheduled a
    /// task.
    ///
    /// Tracked in nanoseconds, stored as a `f64` since that is what we use with
    /// the EWMA calculations
    task_poll_time_ewma: f64,
}
/// How to weigh each individual poll time, value is plucked from thin air.
const TASK_POLL_TIME_EWMA_ALPHA: f64 = 0.1;

/// Ideally, we wouldn't go above this, value is plucked from thin air.
const TARGET_GLOBAL_QUEUE_INTERVAL: f64 = Duration::from_micros(200).as_nanos() as f64;

/// Max value for the global queue interval. This is roughly 2x the previous
/// default (61).
const MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL: u32 = 127;

/// This is the previous default
const TARGET_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL: u32 = 61;
impl Stats {
    pub(crate) fn new(worker_metrics: &WorkerMetrics) -> Stats {
        // Seed the value with what we hope to see.
        let task_poll_time_ewma =
            TARGET_GLOBAL_QUEUE_INTERVAL / TARGET_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL as f64;

        Stats {
            batch: MetricsBatch::new(worker_metrics),
            processing_scheduled_tasks_started_at: Instant::now(),
            tasks_polled_in_batch: 0,
            task_poll_time_ewma,
        }
    }

    /// Derives the global queue interval from the observed mean poll time,
    /// clamped to `[2, MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL]`.
    pub(crate) fn tuned_global_queue_interval(&self, config: &Config) -> u32 {
        // If an interval is explicitly set, don't tune.
        if let Some(configured) = config.global_queue_interval {
            return configured;
        }

        // As of Rust 1.45, casts from f64 -> u32 are saturating, which is fine here.
        let tasks_per_interval = (TARGET_GLOBAL_QUEUE_INTERVAL / self.task_poll_time_ewma) as u32;

        // If we are using self-tuning, we don't want to return less than 2 as that would result in the
        // global queue always getting checked first.
        tasks_per_interval.clamp(2, MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL)
    }

    pub(crate) fn submit(&mut self, to: &WorkerMetrics) {
        self.batch.submit(to, self.task_poll_time_ewma as u64);
    }

    pub(crate) fn about_to_park(&mut self) {
        self.batch.about_to_park();
    }

    pub(crate) fn unparked(&mut self) {
        self.batch.unparked();
    }

    pub(crate) fn inc_local_schedule_count(&mut self) {
        self.batch.inc_local_schedule_count();
    }

    /// Marks the start of a batch of scheduled-task processing, resetting
    /// the per-batch counters.
    pub(crate) fn start_processing_scheduled_tasks(&mut self) {
        self.batch.start_processing_scheduled_tasks();

        self.processing_scheduled_tasks_started_at = Instant::now();
        self.tasks_polled_in_batch = 0;
    }

    /// Marks the end of a batch and folds the batch's mean poll time into
    /// the EWMA.
    pub(crate) fn end_processing_scheduled_tasks(&mut self) {
        self.batch.end_processing_scheduled_tasks();

        // Update the EWMA task poll time
        if self.tasks_polled_in_batch > 0 {
            let now = Instant::now();

            // If we "overflow" this conversion, we have bigger problems than
            // slightly off stats.
            let elapsed = (now - self.processing_scheduled_tasks_started_at).as_nanos() as f64;
            let num_polls = self.tasks_polled_in_batch as f64;

            // Calculate the mean poll duration for a single task in the batch
            let mean_poll_duration = elapsed / num_polls;

            // Compute the alpha weighted by the number of tasks polled this batch.
            let weighted_alpha = 1.0 - (1.0 - TASK_POLL_TIME_EWMA_ALPHA).powf(num_polls);

            // Now compute the new weighted average task poll time.
            self.task_poll_time_ewma = weighted_alpha * mean_poll_duration
                + (1.0 - weighted_alpha) * self.task_poll_time_ewma;
        }
    }

    pub(crate) fn start_poll(&mut self) {
        self.batch.start_poll();

        self.tasks_polled_in_batch += 1;
    }

    pub(crate) fn end_poll(&mut self) {
        self.batch.end_poll();
    }

    pub(crate) fn incr_steal_count(&mut self, by: u16) {
        self.batch.incr_steal_count(by);
    }

    pub(crate) fn incr_steal_operations(&mut self) {
        self.batch.incr_steal_operations();
    }

    pub(crate) fn incr_overflow_count(&mut self) {
        self.batch.incr_overflow_count();
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/multi_thread/park.rs | tokio/src/runtime/scheduler/multi_thread/park.rs | //! Parks the runtime.
//!
//! A combination of the various resource driver park handles.
use crate::loom::sync::atomic::AtomicUsize;
use crate::loom::sync::{Arc, Condvar, Mutex};
use crate::runtime::driver::{self, Driver};
use crate::util::TryLock;
use std::sync::atomic::Ordering::SeqCst;
use std::time::{Duration, Instant};
#[cfg(loom)]
use crate::runtime::park::CURRENT_THREAD_PARK_COUNT;
/// Parks the thread driving the runtime; cheap-to-clone handle over `Inner`.
pub(crate) struct Parker {
    inner: Arc<Inner>,
}
/// Handle used to wake a thread parked via the paired `Parker`.
pub(crate) struct Unparker {
    inner: Arc<Inner>,
}
/// Park state shared between a `Parker` and its `Unparker`s.
struct Inner {
    /// Avoids entering the park if possible
    ///
    /// One of `EMPTY`, `PARKED_CONDVAR`, `PARKED_DRIVER`, `NOTIFIED`.
    state: AtomicUsize,

    /// Used to coordinate access to the driver / `condvar`
    mutex: Mutex<()>,

    /// `Condvar` to block on if the driver is unavailable.
    condvar: Condvar,

    /// Resource (I/O, time, ...) driver
    shared: Arc<Shared>,
}
/// No thread is parked and no notification is pending.
const EMPTY: usize = 0;
/// A thread is parked on the condvar.
const PARKED_CONDVAR: usize = 1;
/// A thread is parked on the resource driver.
const PARKED_DRIVER: usize = 2;
/// An unpark notification is pending.
const NOTIFIED: usize = 3;
/// Shared across multiple Parker handles
struct Shared {
    /// Shared driver. Only one thread at a time can use this
    driver: TryLock<Driver>,
}
impl Parker {
    /// Creates a parker that owns the resource driver.
    pub(crate) fn new(driver: Driver) -> Parker {
        Parker {
            inner: Arc::new(Inner {
                state: AtomicUsize::new(EMPTY),
                mutex: Mutex::new(()),
                condvar: Condvar::new(),
                shared: Arc::new(Shared {
                    driver: TryLock::new(driver),
                }),
            }),
        }
    }

    /// Returns a handle that can wake this parker from another thread.
    pub(crate) fn unpark(&self) -> Unparker {
        Unparker {
            inner: self.inner.clone(),
        }
    }

    /// Parks the current thread until unparked.
    pub(crate) fn park(&mut self, handle: &driver::Handle) {
        self.inner.park(handle);
    }

    /// Parks the current thread for up to `duration`.
    ///
    /// This function tries to acquire the driver lock. If it succeeds, it
    /// parks using the driver. Otherwise, it fails back to using a condvar,
    /// unless the duration is zero, in which case it returns immediately.
    pub(crate) fn park_timeout(&mut self, handle: &driver::Handle, duration: Duration) {
        if let Some(mut driver) = self.inner.shared.driver.try_lock() {
            self.inner.park_driver(&mut driver, handle, Some(duration));
        } else if !duration.is_zero() {
            self.inner.park_condvar(Some(duration));
        } else {
            // https://github.com/tokio-rs/tokio/issues/6536
            // Hacky, but it's just for loom tests. The counter gets incremented during
            // `park_timeout`, but we still have to increment the counter if we can't acquire the
            // lock.
            #[cfg(loom)]
            CURRENT_THREAD_PARK_COUNT.with(|count| count.fetch_add(1, SeqCst));
        }
    }

    /// Shuts down the resource driver if it can be acquired.
    pub(crate) fn shutdown(&mut self, handle: &driver::Handle) {
        self.inner.shutdown(handle);
    }
}
impl Clone for Parker {
    fn clone(&self) -> Parker {
        Parker {
            inner: Arc::new(Inner {
                // Each clone carries its own park state: a thread parks on
                // its own `Inner`, so state/mutex/condvar are fresh...
                state: AtomicUsize::new(EMPTY),
                mutex: Mutex::new(()),
                condvar: Condvar::new(),
                // ...while the resource driver is shared by all clones.
                shared: self.inner.shared.clone(),
            }),
        }
    }
}
impl Unparker {
pub(crate) fn unpark(&self, driver: &driver::Handle) {
self.inner.unpark(driver);
}
}
impl Inner {
/// Parks the current thread until unparked, either on the shared driver
/// (when its lock can be acquired) or on the condvar.
fn park(&self, handle: &driver::Handle) {
    // If we were previously notified then we consume this notification and
    // return quickly.
    if self
        .state
        .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
        .is_ok()
    {
        return;
    }

    // Only one thread can drive the driver at a time; everyone else
    // waits on the condvar.
    if let Some(mut driver) = self.shared.driver.try_lock() {
        self.park_driver(&mut driver, handle, None);
    } else {
        self.park_condvar(None);
    }
}
/// Parks the current thread using a condvar for up to `duration`.
///
/// If `duration` is `None`, parks indefinitely until notified.
///
/// # Panics
///
/// Panics if `duration` is `Some` and the duration is zero.
// NOTE(review): no explicit zero-duration panic is visible in this body,
// and `Parker::park_timeout` never passes a zero duration here — confirm
// the `# Panics` claim or enforce it with a debug assertion.
fn park_condvar(&self, duration: Option<Duration>) {
    // Otherwise we need to coordinate going to sleep
    let mut m = self.mutex.lock();

    match self
        .state
        .compare_exchange(EMPTY, PARKED_CONDVAR, SeqCst, SeqCst)
    {
        Ok(_) => {}
        Err(NOTIFIED) => {
            // We must read here, even though we know it will be `NOTIFIED`.
            // This is because `unpark` may have been called again since we read
            // `NOTIFIED` in the `compare_exchange` above. We must perform an
            // acquire operation that synchronizes with that `unpark` to observe
            // any writes it made before the call to unpark. To do that we must
            // read from the write it made to `state`.
            let old = self.state.swap(EMPTY, SeqCst);
            debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
            return;
        }
        Err(actual) => panic!("inconsistent park state; actual = {actual}"),
    }

    let timeout_at = duration.map(|d| {
        Instant::now()
            .checked_add(d)
            // best effort to avoid overflow and still provide a usable timeout
            .unwrap_or(Instant::now() + Duration::from_secs(1))
    });

    loop {
        let is_timeout;
        (m, is_timeout) = match timeout_at {
            Some(timeout_at) => {
                let dur = timeout_at.saturating_duration_since(Instant::now());
                if !dur.is_zero() {
                    // Ideally, we would use `condvar.wait_timeout_until` here, but it is not available
                    // in `loom`. So we manually compute the timeout.
                    let (m, res) = self.condvar.wait_timeout(m, dur).unwrap();
                    (m, res.timed_out())
                } else {
                    (m, true)
                }
            }
            None => (self.condvar.wait(m).unwrap(), false),
        };

        if is_timeout {
            match self.state.swap(EMPTY, SeqCst) {
                PARKED_CONDVAR => return, // timed out, and no notification received
                NOTIFIED => return,       // notification and timeout happened concurrently
                actual @ (PARKED_DRIVER | EMPTY) => {
                    panic!("inconsistent park_timeout state, actual = {actual}")
                }
                invalid => panic!("invalid park_timeout state, actual = {invalid}"),
            }
        } else if self
            .state
            .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
            .is_ok()
        {
            // got a notification
            return;
        }

        // spurious wakeup, go back to sleep
    }
}
/// Parks on the resource driver, optionally with a timeout.
///
/// The caller holds the `TryLock` on the shared driver.
fn park_driver(
    &self,
    driver: &mut Driver,
    handle: &driver::Handle,
    duration: Option<Duration>,
) {
    if duration.as_ref().is_some_and(Duration::is_zero) {
        // zero duration doesn't actually park the thread, it just
        // polls the I/O events, timers, etc.
        driver.park_timeout(handle, Duration::ZERO);
        return;
    }

    match self
        .state
        .compare_exchange(EMPTY, PARKED_DRIVER, SeqCst, SeqCst)
    {
        Ok(_) => {}
        Err(NOTIFIED) => {
            // We must read here, even though we know it will be `NOTIFIED`.
            // This is because `unpark` may have been called again since we read
            // `NOTIFIED` in the `compare_exchange` above. We must perform an
            // acquire operation that synchronizes with that `unpark` to observe
            // any writes it made before the call to unpark. To do that we must
            // read from the write it made to `state`.
            let old = self.state.swap(EMPTY, SeqCst);
            debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
            return;
        }
        Err(actual) => panic!("inconsistent park state; actual = {actual}"),
    }

    if let Some(duration) = duration {
        debug_assert_ne!(duration, Duration::ZERO);
        driver.park_timeout(handle, duration);
    } else {
        driver.park(handle);
    }

    // Leave the parked state regardless of whether a notification arrived.
    match self.state.swap(EMPTY, SeqCst) {
        NOTIFIED => {}      // got a notification, hurray!
        PARKED_DRIVER => {} // no notification, alas
        n => panic!("inconsistent park_timeout state: {n}"),
    }
}
fn unpark(&self, driver: &driver::Handle) {
// To ensure the unparked thread will observe any writes we made before
// this call, we must perform a release operation that `park` can
// synchronize with. To do that we must write `NOTIFIED` even if `state`
// is already `NOTIFIED`. That is why this must be a swap rather than a
// compare-and-swap that returns if it reads `NOTIFIED` on failure.
match self.state.swap(NOTIFIED, SeqCst) {
EMPTY => {} // no one was waiting
NOTIFIED => {} // already unparked
PARKED_CONDVAR => self.unpark_condvar(),
PARKED_DRIVER => driver.unpark(),
actual => panic!("inconsistent state in unpark; actual = {actual}"),
}
}
fn unpark_condvar(&self) {
// There is a period between when the parked thread sets `state` to
// `PARKED` (or last checked `state` in the case of a spurious wake
// up) and when it actually waits on `cvar`. If we were to notify
// during this period it would be ignored and then when the parked
// thread went to sleep it would never wake up. Fortunately, it has
// `lock` locked at this stage so we can acquire `lock` to wait until
// it is ready to receive the notification.
//
// Releasing `lock` before the call to `notify_one` means that when the
// parked thread wakes it doesn't get woken only to have to wait for us
// to release `lock`.
drop(self.mutex.lock());
self.condvar.notify_one();
}
fn shutdown(&self, handle: &driver::Handle) {
if let Some(mut driver) = self.shared.driver.try_lock() {
driver.shutdown(handle);
}
self.condvar.notify_all();
}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/multi_thread/overflow.rs | tokio/src/runtime/scheduler/multi_thread/overflow.rs | use crate::runtime::task;
#[cfg(test)]
use std::cell::RefCell;
/// A sink that accepts tasks one at a time or in batches.
///
/// NOTE(review): judging by the module name, implementors serve as the
/// destination for tasks that overflow a worker's local run queue —
/// confirm at the call sites.
pub(crate) trait Overflow<T: 'static> {
    /// Pushes a single task into the sink.
    fn push(&self, task: task::Notified<T>);

    /// Pushes every task yielded by `iter` into the sink.
    fn push_batch<I>(&self, iter: I)
    where
        I: Iterator<Item = task::Notified<T>>;
}
/// Test-only implementation backed by a plain vector, letting unit tests
/// capture overflowed tasks for inspection.
#[cfg(test)]
impl<T: 'static> Overflow<T> for RefCell<Vec<task::Notified<T>>> {
    fn push(&self, task: task::Notified<T>) {
        let mut queue = self.borrow_mut();
        queue.push(task);
    }

    fn push_batch<I>(&self, iter: I)
    where
        I: Iterator<Item = task::Notified<T>>,
    {
        let mut queue = self.borrow_mut();
        for task in iter {
            queue.push(task);
        }
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/multi_thread/trace.rs | tokio/src/runtime/scheduler/multi_thread/trace.rs | use crate::loom::sync::atomic::{AtomicBool, Ordering};
use crate::loom::sync::{Barrier, Mutex};
use crate::runtime::dump::Dump;
use crate::runtime::scheduler::multi_thread::Handle;
use crate::sync::notify::Notify;
/// Tracing status of the worker.
pub(super) struct TraceStatus {
    /// Whether a trace (runtime dump) is currently being requested; set by
    /// `start_trace_request` and cleared by `end_trace_request`.
    pub(super) trace_requested: AtomicBool,
    /// Rendezvous point sized to the number of remotes; presumably workers
    /// wait here before capturing the trace — confirm at the wait sites.
    pub(super) trace_start: Barrier,
    /// Companion rendezvous point, presumably reached after capturing —
    /// confirm at the wait sites.
    pub(super) trace_end: Barrier,
    /// Signaled by `stash_result` once a dump has been stored.
    pub(super) result_ready: Notify,
    /// Slot holding the captured dump until `take_result` removes it.
    pub(super) trace_result: Mutex<Option<Dump>>,
}
impl TraceStatus {
pub(super) fn new(remotes_len: usize) -> Self {
Self {
trace_requested: AtomicBool::new(false),
trace_start: Barrier::new(remotes_len),
trace_end: Barrier::new(remotes_len),
result_ready: Notify::new(),
trace_result: Mutex::new(None),
}
}
pub(super) fn trace_requested(&self) -> bool {
self.trace_requested.load(Ordering::Relaxed)
}
pub(super) async fn start_trace_request(&self, handle: &Handle) {
while self
.trace_requested
.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
handle.notify_all();
crate::task::yield_now().await;
}
}
pub(super) fn stash_result(&self, dump: Dump) {
let _ = self.trace_result.lock().insert(dump);
self.result_ready.notify_one();
}
pub(super) fn take_result(&self) -> Option<Dump> {
self.trace_result.lock().take()
}
pub(super) async fn end_trace_request(&self, handle: &Handle) {
while self
.trace_requested
.compare_exchange(true, false, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
handle.notify_all();
crate::task::yield_now().await;
}
}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/multi_thread/counters.rs | tokio/src/runtime/scheduler/multi_thread/counters.rs | #[cfg(tokio_internal_mt_counters)]
mod imp {
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::Relaxed;
static NUM_MAINTENANCE: AtomicUsize = AtomicUsize::new(0);
static NUM_NOTIFY_LOCAL: AtomicUsize = AtomicUsize::new(0);
static NUM_UNPARKS_LOCAL: AtomicUsize = AtomicUsize::new(0);
static NUM_LIFO_SCHEDULES: AtomicUsize = AtomicUsize::new(0);
static NUM_LIFO_CAPPED: AtomicUsize = AtomicUsize::new(0);
impl Drop for super::Counters {
fn drop(&mut self) {
let notifies_local = NUM_NOTIFY_LOCAL.load(Relaxed);
let unparks_local = NUM_UNPARKS_LOCAL.load(Relaxed);
let maintenance = NUM_MAINTENANCE.load(Relaxed);
let lifo_scheds = NUM_LIFO_SCHEDULES.load(Relaxed);
let lifo_capped = NUM_LIFO_CAPPED.load(Relaxed);
println!("---");
println!("notifies (local): {}", notifies_local);
println!(" unparks (local): {}", unparks_local);
println!(" maintenance: {}", maintenance);
println!(" LIFO schedules: {}", lifo_scheds);
println!(" LIFO capped: {}", lifo_capped);
}
}
pub(crate) fn inc_num_inc_notify_local() {
NUM_NOTIFY_LOCAL.fetch_add(1, Relaxed);
}
pub(crate) fn inc_num_unparks_local() {
NUM_UNPARKS_LOCAL.fetch_add(1, Relaxed);
}
pub(crate) fn inc_num_maintenance() {
NUM_MAINTENANCE.fetch_add(1, Relaxed);
}
pub(crate) fn inc_lifo_schedules() {
NUM_LIFO_SCHEDULES.fetch_add(1, Relaxed);
}
pub(crate) fn inc_lifo_capped() {
NUM_LIFO_CAPPED.fetch_add(1, Relaxed);
}
}
#[cfg(not(tokio_internal_mt_counters))]
mod imp {
    //! No-op counter stubs used when the `tokio_internal_mt_counters` cfg
    //! is not enabled, so call sites compile unconditionally.
    pub(crate) fn inc_num_inc_notify_local() {}
    pub(crate) fn inc_num_unparks_local() {}
    pub(crate) fn inc_num_maintenance() {}
    pub(crate) fn inc_lifo_schedules() {}
    pub(crate) fn inc_lifo_capped() {}
}
/// Zero-sized type whose `Drop` impl — provided by the cfg-gated `imp`
/// module when `tokio_internal_mt_counters` is enabled — prints the
/// accumulated counters; otherwise dropping it does nothing.
#[derive(Debug)]
pub(crate) struct Counters;
pub(super) use imp::*;
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.