repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/multi_thread/worker.rs
tokio/src/runtime/scheduler/multi_thread/worker.rs
//! A scheduler is initialized with a fixed number of workers. Each worker is //! driven by a thread. Each worker has a "core" which contains data such as the //! run queue and other state. When `block_in_place` is called, the worker's //! "core" is handed off to a new thread allowing the scheduler to continue to //! make progress while the originating thread blocks. //! //! # Shutdown //! //! Shutting down the runtime involves the following steps: //! //! 1. The Shared::close method is called. This closes the inject queue and //! `OwnedTasks` instance and wakes up all worker threads. //! //! 2. Each worker thread observes the close signal next time it runs //! Core::maintenance by checking whether the inject queue is closed. //! The `Core::is_shutdown` flag is set to true. //! //! 3. The worker thread calls `pre_shutdown` in parallel. Here, the worker //! will keep removing tasks from `OwnedTasks` until it is empty. No new //! tasks can be pushed to the `OwnedTasks` during or after this step as it //! was closed in step 1. //! //! 5. The workers call Shared::shutdown to enter the single-threaded phase of //! shutdown. These calls will push their core to `Shared::shutdown_cores`, //! and the last thread to push its core will finish the shutdown procedure. //! //! 6. The local run queue of each core is emptied, then the inject queue is //! emptied. //! //! At this point, shutdown has completed. It is not possible for any of the //! collections to contain any tasks at this point, as each collection was //! closed first, then emptied afterwards. //! //! ## Spawns during shutdown //! //! When spawning tasks during shutdown, there are two cases: //! //! * The spawner observes the `OwnedTasks` being open, and the inject queue is //! closed. //! * The spawner observes the `OwnedTasks` being closed and doesn't check the //! inject queue. //! //! The first case can only happen if the `OwnedTasks::bind` call happens before //! or during step 1 of shutdown. 
In this case, the runtime will clean up the //! task in step 3 of shutdown. //! //! In the latter case, the task was not spawned and the task is immediately //! cancelled by the spawner. //! //! The correctness of shutdown requires both the inject queue and `OwnedTasks` //! collection to have a closed bit. With a close bit on only the inject queue, //! spawning could run in to a situation where a task is successfully bound long //! after the runtime has shut down. With a close bit on only the `OwnedTasks`, //! the first spawning situation could result in the notification being pushed //! to the inject queue after step 6 of shutdown, which would leave a task in //! the inject queue indefinitely. This would be a ref-count cycle and a memory //! leak. use crate::loom::sync::{Arc, Mutex}; use crate::runtime; use crate::runtime::scheduler::multi_thread::{ idle, queue, Counters, Handle, Idle, Overflow, Parker, Stats, TraceStatus, Unparker, }; use crate::runtime::scheduler::{inject, Defer, Lock}; use crate::runtime::task::OwnedTasks; use crate::runtime::{ blocking, driver, scheduler, task, Config, SchedulerMetrics, TimerFlavor, WorkerMetrics, }; use crate::runtime::{context, TaskHooks}; use crate::task::coop; use crate::util::atomic_cell::AtomicCell; use crate::util::rand::{FastRand, RngSeedGenerator}; use std::cell::RefCell; use std::task::Waker; use std::thread; use std::time::Duration; mod metrics; cfg_taskdump! { mod taskdump; } cfg_not_taskdump! { mod taskdump_mock; } #[cfg(all(tokio_unstable, feature = "time"))] use crate::loom::sync::atomic::AtomicBool; #[cfg(all(tokio_unstable, feature = "time"))] use crate::runtime::time_alt; #[cfg(all(tokio_unstable, feature = "time"))] use crate::runtime::scheduler::util; /// A scheduler worker pub(super) struct Worker { /// Reference to scheduler's handle handle: Arc<Handle>, /// Index holding this worker's remote state index: usize, /// Used to hand-off a worker's core to another thread. 
core: AtomicCell<Core>, } /// Core data struct Core { /// Used to schedule bookkeeping tasks every so often. tick: u32, /// When a task is scheduled from a worker, it is stored in this slot. The /// worker will check this slot for a task **before** checking the run /// queue. This effectively results in the **last** scheduled task to be run /// next (LIFO). This is an optimization for improving locality which /// benefits message passing patterns and helps to reduce latency. lifo_slot: Option<Notified>, /// When `true`, locally scheduled tasks go to the LIFO slot. When `false`, /// they go to the back of the `run_queue`. lifo_enabled: bool, /// The worker-local run queue. run_queue: queue::Local<Arc<Handle>>, #[cfg(all(tokio_unstable, feature = "time"))] time_context: time_alt::LocalContext, /// True if the worker is currently searching for more work. Searching /// involves attempting to steal from other workers. is_searching: bool, /// True if the scheduler is being shutdown is_shutdown: bool, /// True if the scheduler is being traced is_traced: bool, /// Parker /// /// Stored in an `Option` as the parker is added / removed to make the /// borrow checker happy. park: Option<Parker>, /// Per-worker runtime stats stats: Stats, /// How often to check the global queue global_queue_interval: u32, /// Fast random number generator. rand: FastRand, } /// State shared across all workers pub(crate) struct Shared { /// Per-worker remote state. All other workers have access to this and is /// how they communicate between each other. remotes: Box<[Remote]>, /// Global task queue used for: /// 1. Submit work to the scheduler while **not** currently on a worker thread. /// 2. Submit work to the scheduler when a worker run queue is saturated pub(super) inject: inject::Shared<Arc<Handle>>, /// Coordinates idle workers idle: Idle, /// Collection of all active tasks spawned onto this executor. 
pub(crate) owned: OwnedTasks<Arc<Handle>>, /// Data synchronized by the scheduler mutex pub(super) synced: Mutex<Synced>, /// Cores that have observed the shutdown signal /// /// The core is **not** placed back in the worker to avoid it from being /// stolen by a thread that was spawned as part of `block_in_place`. #[allow(clippy::vec_box)] // we're moving an already-boxed value shutdown_cores: Mutex<Vec<Box<Core>>>, /// The number of cores that have observed the trace signal. pub(super) trace_status: TraceStatus, /// Scheduler configuration options config: Config, /// Collects metrics from the runtime. pub(super) scheduler_metrics: SchedulerMetrics, pub(super) worker_metrics: Box<[WorkerMetrics]>, /// Only held to trigger some code on drop. This is used to get internal /// runtime metrics that can be useful when doing performance /// investigations. This does nothing (empty struct, no drop impl) unless /// the `tokio_internal_mt_counters` `cfg` flag is set. _counters: Counters, } /// Data synchronized by the scheduler mutex pub(crate) struct Synced { /// Synchronized state for `Idle`. pub(super) idle: idle::Synced, /// Synchronized state for `Inject`. pub(crate) inject: inject::Synced, #[cfg(all(tokio_unstable, feature = "time"))] /// Timers pending to be registered. /// This is used to register a timer but the [`Core`] /// is not available in the current thread. inject_timers: Vec<time_alt::EntryHandle>, } /// Used to communicate with a worker from other threads. struct Remote { /// Steals tasks from this worker. pub(super) steal: queue::Steal<Arc<Handle>>, /// Unparks the associated worker thread unpark: Unparker, } /// Thread-local context pub(crate) struct Context { /// Worker worker: Arc<Worker>, /// Core data core: RefCell<Option<Box<Core>>>, /// Tasks to wake after resource drivers are polled. This is mostly to /// handle yielded tasks. 
pub(crate) defer: Defer, } /// Starts the workers pub(crate) struct Launch(Vec<Arc<Worker>>); /// Running a task may consume the core. If the core is still available when /// running the task completes, it is returned. Otherwise, the worker will need /// to stop processing. type RunResult = Result<Box<Core>, ()>; /// A notified task handle type Notified = task::Notified<Arc<Handle>>; /// Value picked out of thin-air. Running the LIFO slot a handful of times /// seems sufficient to benefit from locality. More than 3 times probably is /// over-weighting. The value can be tuned in the future with data that shows /// improvements. const MAX_LIFO_POLLS_PER_TICK: usize = 3; pub(super) fn create( size: usize, park: Parker, driver_handle: driver::Handle, blocking_spawner: blocking::Spawner, seed_generator: RngSeedGenerator, config: Config, timer_flavor: TimerFlavor, ) -> (Arc<Handle>, Launch) { let mut cores = Vec::with_capacity(size); let mut remotes = Vec::with_capacity(size); let mut worker_metrics = Vec::with_capacity(size); // Create the local queues for _ in 0..size { let (steal, run_queue) = queue::local(); let park = park.clone(); let unpark = park.unpark(); let metrics = WorkerMetrics::from_config(&config); let stats = Stats::new(&metrics); cores.push(Box::new(Core { tick: 0, lifo_slot: None, lifo_enabled: !config.disable_lifo_slot, run_queue, #[cfg(all(tokio_unstable, feature = "time"))] time_context: time_alt::LocalContext::new(), is_searching: false, is_shutdown: false, is_traced: false, park: Some(park), global_queue_interval: stats.tuned_global_queue_interval(&config), stats, rand: FastRand::from_seed(config.seed_generator.next_seed()), })); remotes.push(Remote { steal, unpark }); worker_metrics.push(metrics); } let (idle, idle_synced) = Idle::new(size); let (inject, inject_synced) = inject::Shared::new(); let remotes_len = remotes.len(); let handle = Arc::new(Handle { task_hooks: TaskHooks::from_config(&config), shared: Shared { remotes: 
remotes.into_boxed_slice(), inject, idle, owned: OwnedTasks::new(size), synced: Mutex::new(Synced { idle: idle_synced, inject: inject_synced, #[cfg(all(tokio_unstable, feature = "time"))] inject_timers: Vec::new(), }), shutdown_cores: Mutex::new(vec![]), trace_status: TraceStatus::new(remotes_len), config, scheduler_metrics: SchedulerMetrics::new(), worker_metrics: worker_metrics.into_boxed_slice(), _counters: Counters, }, driver: driver_handle, blocking_spawner, seed_generator, timer_flavor, #[cfg(all(tokio_unstable, feature = "time"))] is_shutdown: AtomicBool::new(false), }); let mut launch = Launch(vec![]); for (index, core) in cores.drain(..).enumerate() { launch.0.push(Arc::new(Worker { handle: handle.clone(), index, core: AtomicCell::new(Some(core)), })); } (handle, launch) } #[track_caller] pub(crate) fn block_in_place<F, R>(f: F) -> R where F: FnOnce() -> R, { // Try to steal the worker core back struct Reset { take_core: bool, budget: coop::Budget, } impl Drop for Reset { fn drop(&mut self) { with_current(|maybe_cx| { if let Some(cx) = maybe_cx { if self.take_core { let core = cx.worker.core.take(); if core.is_some() { cx.worker.handle.shared.worker_metrics[cx.worker.index] .set_thread_id(thread::current().id()); } let mut cx_core = cx.core.borrow_mut(); assert!(cx_core.is_none()); *cx_core = core; } // Reset the task budget as we are re-entering the // runtime. coop::set(self.budget); } }); } } let mut had_entered = false; let mut take_core = false; let setup_result = with_current(|maybe_cx| { match ( crate::runtime::context::current_enter_context(), maybe_cx.is_some(), ) { (context::EnterRuntime::Entered { .. }, true) => { // We are on a thread pool runtime thread, so we just need to // set up blocking. had_entered = true; } ( context::EnterRuntime::Entered { allow_block_in_place, }, false, ) => { // We are on an executor, but _not_ on the thread pool. 
That is // _only_ okay if we are in a thread pool runtime's block_on // method: if allow_block_in_place { had_entered = true; return Ok(()); } else { // This probably means we are on the current_thread runtime or in a // LocalSet, where it is _not_ okay to block. return Err( "can call blocking only when running on the multi-threaded runtime", ); } } (context::EnterRuntime::NotEntered, true) => { // This is a nested call to block_in_place (we already exited). // All the necessary setup has already been done. return Ok(()); } (context::EnterRuntime::NotEntered, false) => { // We are outside of the tokio runtime, so blocking is fine. // We can also skip all of the thread pool blocking setup steps. return Ok(()); } } let cx = maybe_cx.expect("no .is_some() == false cases above should lead here"); // Get the worker core. If none is set, then blocking is fine! let mut core = match cx.core.borrow_mut().take() { Some(core) => core, None => return Ok(()), }; // If we heavily call `spawn_blocking`, there might be no available thread to // run this core. Except for the task in the lifo_slot, all tasks can be // stolen, so we move the task out of the lifo_slot to the run_queue. if let Some(task) = core.lifo_slot.take() { core.run_queue .push_back_or_overflow(task, &*cx.worker.handle, &mut core.stats); } // We are taking the core from the context and sending it to another // thread. take_core = true; // The parker should be set here assert!(core.park.is_some()); // In order to block, the core must be sent to another thread for // execution. // // First, move the core back into the worker's shared core slot. cx.worker.core.set(core); // Next, clone the worker handle and send it to a new thread for // processing. // // Once the blocking task is done executing, we will attempt to // steal the core back. 
let worker = cx.worker.clone(); runtime::spawn_blocking(move || run(worker)); Ok(()) }); if let Err(panic_message) = setup_result { panic!("{}", panic_message); } if had_entered { // Unset the current task's budget. Blocking sections are not // constrained by task budgets. let _reset = Reset { take_core, budget: coop::stop(), }; crate::runtime::context::exit_runtime(f) } else { f() } } impl Launch { pub(crate) fn launch(mut self) { for worker in self.0.drain(..) { runtime::spawn_blocking(move || run(worker)); } } } fn run(worker: Arc<Worker>) { #[allow(dead_code)] struct AbortOnPanic; impl Drop for AbortOnPanic { fn drop(&mut self) { if std::thread::panicking() { eprintln!("worker thread panicking; aborting process"); std::process::abort(); } } } // Catching panics on worker threads in tests is quite tricky. Instead, when // debug assertions are enabled, we just abort the process. #[cfg(debug_assertions)] let _abort_on_panic = AbortOnPanic; // Acquire a core. If this fails, then another thread is running this // worker and there is nothing further to do. let core = match worker.core.take() { Some(core) => core, None => return, }; worker.handle.shared.worker_metrics[worker.index].set_thread_id(thread::current().id()); let handle = scheduler::Handle::MultiThread(worker.handle.clone()); crate::runtime::context::enter_runtime(&handle, true, |_| { // Set the worker context. let cx = scheduler::Context::MultiThread(Context { worker, core: RefCell::new(None), defer: Defer::new(), }); context::set_scheduler(&cx, || { let cx = cx.expect_multi_thread(); // This should always be an error. It only returns a `Result` to support // using `?` to short circuit. assert!(cx.run(core).is_err()); // Check if there are any deferred tasks to notify. This can happen when // the worker core is lost due to `block_in_place()` being called from // within the task. 
cx.defer.wake(); }); }); } impl Context { fn run(&self, mut core: Box<Core>) -> RunResult { // Reset `lifo_enabled` here in case the core was previously stolen from // a task that had the LIFO slot disabled. self.reset_lifo_enabled(&mut core); // Start as "processing" tasks as polling tasks from the local queue // will be one of the first things we do. core.stats.start_processing_scheduled_tasks(); while !core.is_shutdown { self.assert_lifo_enabled_is_correct(&core); if core.is_traced { core = self.worker.handle.trace_core(core); } // Increment the tick core.tick(); // Run maintenance, if needed core = self.maintenance(core); // First, check work available to the current worker. if let Some(task) = core.next_task(&self.worker) { core = self.run_task(task, core)?; continue; } // We consumed all work in the queues and will start searching for work. core.stats.end_processing_scheduled_tasks(); // There is no more **local** work to process, try to steal work // from other workers. if let Some(task) = core.steal_work(&self.worker) { // Found work, switch back to processing core.stats.start_processing_scheduled_tasks(); core = self.run_task(task, core)?; } else { // Wait for work core = if !self.defer.is_empty() { self.park_yield(core) } else { self.park(core) }; core.stats.start_processing_scheduled_tasks(); } } #[cfg(all(tokio_unstable, feature = "time"))] { match self.worker.handle.timer_flavor { TimerFlavor::Traditional => {} TimerFlavor::Alternative => { util::time_alt::shutdown_local_timers( &mut core.time_context.wheel, &mut core.time_context.canc_rx, self.worker.handle.take_remote_timers(), &self.worker.handle.driver, ); } } } core.pre_shutdown(&self.worker); // Signal shutdown self.worker.handle.shutdown_core(core); Err(()) } fn run_task(&self, task: Notified, mut core: Box<Core>) -> RunResult { #[cfg(tokio_unstable)] let task_meta = task.task_meta(); let task = self.worker.handle.shared.owned.assert_owner(task); // Make sure the worker is not in the 
**searching** state. This enables // another idle worker to try to steal work. core.transition_from_searching(&self.worker); self.assert_lifo_enabled_is_correct(&core); // Measure the poll start time. Note that we may end up polling other // tasks under this measurement. In this case, the tasks came from the // LIFO slot and are considered part of the current task for scheduling // purposes. These tasks inherent the "parent"'s limits. core.stats.start_poll(); // Make the core available to the runtime context *self.core.borrow_mut() = Some(core); // Run the task coop::budget(|| { // Unlike the poll time above, poll start callback is attached to the task id, // so it is tightly associated with the actual poll invocation. #[cfg(tokio_unstable)] self.worker .handle .task_hooks .poll_start_callback(&task_meta); task.run(); #[cfg(tokio_unstable)] self.worker.handle.task_hooks.poll_stop_callback(&task_meta); let mut lifo_polls = 0; // As long as there is budget remaining and a task exists in the // `lifo_slot`, then keep running. loop { // Check if we still have the core. If not, the core was stolen // by another worker. let mut core = match self.core.borrow_mut().take() { Some(core) => core, None => { // In this case, we cannot call `reset_lifo_enabled()` // because the core was stolen. The stealer will handle // that at the top of `Context::run` return Err(()); } }; // Check for a task in the LIFO slot let task = match core.lifo_slot.take() { Some(task) => task, None => { self.reset_lifo_enabled(&mut core); core.stats.end_poll(); return Ok(core); } }; if !coop::has_budget_remaining() { core.stats.end_poll(); // Not enough budget left to run the LIFO task, push it to // the back of the queue and return. core.run_queue.push_back_or_overflow( task, &*self.worker.handle, &mut core.stats, ); // If we hit this point, the LIFO slot should be enabled. // There is no need to reset it. 
debug_assert!(core.lifo_enabled); return Ok(core); } // Track that we are about to run a task from the LIFO slot. lifo_polls += 1; super::counters::inc_lifo_schedules(); // Disable the LIFO slot if we reach our limit // // In ping-ping style workloads where task A notifies task B, // which notifies task A again, continuously prioritizing the // LIFO slot can cause starvation as these two tasks will // repeatedly schedule the other. To mitigate this, we limit the // number of times the LIFO slot is prioritized. if lifo_polls >= MAX_LIFO_POLLS_PER_TICK { core.lifo_enabled = false; super::counters::inc_lifo_capped(); } // Run the LIFO task, then loop *self.core.borrow_mut() = Some(core); let task = self.worker.handle.shared.owned.assert_owner(task); #[cfg(tokio_unstable)] let task_meta = task.task_meta(); #[cfg(tokio_unstable)] self.worker .handle .task_hooks .poll_start_callback(&task_meta); task.run(); #[cfg(tokio_unstable)] self.worker.handle.task_hooks.poll_stop_callback(&task_meta); } }) } fn reset_lifo_enabled(&self, core: &mut Core) { core.lifo_enabled = !self.worker.handle.shared.config.disable_lifo_slot; } fn assert_lifo_enabled_is_correct(&self, core: &Core) { debug_assert_eq!( core.lifo_enabled, !self.worker.handle.shared.config.disable_lifo_slot ); } fn maintenance(&self, mut core: Box<Core>) -> Box<Core> { if core.tick % self.worker.handle.shared.config.event_interval == 0 { super::counters::inc_num_maintenance(); core.stats.end_processing_scheduled_tasks(); // Call `park` with a 0 timeout. This enables the I/O driver, timer, ... // to run without actually putting the thread to sleep. core = self.park_yield(core); // Run regularly scheduled maintenance core.maintenance(&self.worker); core.stats.start_processing_scheduled_tasks(); } core } /// Parks the worker thread while waiting for tasks to execute. /// /// This function checks if indeed there's no more work left to be done before parking. 
/// Also important to notice that, before parking, the worker thread will try to take /// ownership of the Driver (IO/Time) and dispatch any events that might have fired. /// Whenever a worker thread executes the Driver loop, all waken tasks are scheduled /// in its own local queue until the queue saturates (ntasks > `LOCAL_QUEUE_CAPACITY`). /// When the local queue is saturated, the overflow tasks are added to the injection queue /// from where other workers can pick them up. /// Also, we rely on the workstealing algorithm to spread the tasks amongst workers /// after all the IOs get dispatched fn park(&self, mut core: Box<Core>) -> Box<Core> { if let Some(f) = &self.worker.handle.shared.config.before_park { f(); } if core.transition_to_parked(&self.worker) { while !core.is_shutdown && !core.is_traced { core.stats.about_to_park(); core.stats .submit(&self.worker.handle.shared.worker_metrics[self.worker.index]); core = self.park_internal(core, None); core.stats.unparked(); // Run regularly scheduled maintenance core.maintenance(&self.worker); if core.transition_from_parked(&self.worker) { break; } } } if let Some(f) = &self.worker.handle.shared.config.after_unpark { f(); } core } fn park_yield(&self, core: Box<Core>) -> Box<Core> { self.park_internal(core, Some(Duration::from_millis(0))) } fn park_internal(&self, mut core: Box<Core>, duration: Option<Duration>) -> Box<Core> { self.assert_lifo_enabled_is_correct(&core); // Take the parker out of core let mut park = core.park.take().expect("park missing"); // Store `core` in context *self.core.borrow_mut() = Some(core); #[cfg(feature = "time")] let (duration, auto_advance_duration) = match self.worker.handle.timer_flavor { TimerFlavor::Traditional => (duration, None::<Duration>), #[cfg(tokio_unstable)] TimerFlavor::Alternative => { // Must happens after taking out the parker, as the `Handle::schedule_local` // will delay the notify if the parker taken out. 
// // See comments in `Handle::schedule_local` for more details. let MaintainLocalTimer { park_duration: duration, auto_advance_duration, } = self.maintain_local_timers_before_parking(duration); (duration, auto_advance_duration) } }; // Park thread if let Some(timeout) = duration { park.park_timeout(&self.worker.handle.driver, timeout); } else { park.park(&self.worker.handle.driver); } self.defer.wake(); #[cfg(feature = "time")] match self.worker.handle.timer_flavor { TimerFlavor::Traditional => { // suppress unused variable warning let _ = auto_advance_duration; } #[cfg(tokio_unstable)] TimerFlavor::Alternative => { // Must happens before placing back the parker, as the `Handle::schedule_local` // will delay the notify if the parker is still in `core`. // // See comments in `Handle::schedule_local` for more details. self.maintain_local_timers_after_parking(auto_advance_duration); } } // Remove `core` from context core = self.core.borrow_mut().take().expect("core missing"); // Place `park` back in `core` core.park = Some(park); if core.should_notify_others() { self.worker.handle.notify_parked_local(); } core } pub(crate) fn defer(&self, waker: &Waker) { if self.core.borrow().is_none() { // If there is no core, then the worker is currently in a block_in_place. In this case, // we cannot use the defer queue as we aren't really in the current runtime. waker.wake_by_ref(); } else { self.defer.defer(waker); } } #[cfg(all(tokio_unstable, feature = "time"))] /// Maintain local timers before parking the resource driver. /// /// * Remove cancelled timers from the local timer wheel. /// * Register remote timers to the local timer wheel. /// * Adjust the park duration based on /// * the next timer expiration time. /// * whether auto-advancing is required (feature = "test-util"). 
/// /// # Returns /// /// `(Box<Core>, park_duration, auto_advance_duration)` fn maintain_local_timers_before_parking( &self, park_duration: Option<Duration>, ) -> MaintainLocalTimer { let handle = &self.worker.handle; let mut wake_queue = time_alt::WakeQueue::new(); let (should_yield, next_timer) = with_current(|maybe_cx| { let cx = maybe_cx.expect("function should be called when core is present"); assert_eq!( Arc::as_ptr(&cx.worker.handle), Arc::as_ptr(&self.worker.handle), "function should be called on the exact same worker" ); let mut maybe_core = cx.core.borrow_mut(); let core = maybe_core.as_mut().expect("core missing"); let time_cx = &mut core.time_context; util::time_alt::process_registration_queue( &mut time_cx.registration_queue, &mut time_cx.wheel, &time_cx.canc_tx, &mut wake_queue, ); util::time_alt::insert_inject_timers( &mut time_cx.wheel, &time_cx.canc_tx, handle.take_remote_timers(), &mut wake_queue, ); util::time_alt::remove_cancelled_timers(&mut time_cx.wheel, &mut time_cx.canc_rx); let should_yield = !wake_queue.is_empty();
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
true
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/multi_thread/trace_mock.rs
tokio/src/runtime/scheduler/multi_thread/trace_mock.rs
pub(super) struct TraceStatus {} impl TraceStatus { pub(super) fn new(_: usize) -> Self { Self {} } pub(super) fn trace_requested(&self) -> bool { false } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/multi_thread/mod.rs
tokio/src/runtime/scheduler/multi_thread/mod.rs
//! Multi-threaded runtime mod counters; use counters::Counters; mod handle; pub(crate) use handle::Handle; mod overflow; pub(crate) use overflow::Overflow; mod idle; use self::idle::Idle; mod stats; pub(crate) use stats::Stats; mod park; pub(crate) use park::{Parker, Unparker}; pub(crate) mod queue; mod worker; pub(crate) use worker::{Context, Launch, Shared}; cfg_taskdump! { mod trace; use trace::TraceStatus; pub(crate) use worker::Synced; } cfg_not_taskdump! { mod trace_mock; use trace_mock::TraceStatus; } pub(crate) use worker::block_in_place; use crate::loom::sync::Arc; use crate::runtime::{ blocking, driver::{self, Driver}, scheduler, Config, TimerFlavor, }; use crate::util::RngSeedGenerator; use std::fmt; use std::future::Future; /// Work-stealing based thread pool for executing futures. pub(crate) struct MultiThread; // ===== impl MultiThread ===== impl MultiThread { pub(crate) fn new( size: usize, driver: Driver, driver_handle: driver::Handle, blocking_spawner: blocking::Spawner, seed_generator: RngSeedGenerator, config: Config, timer_flavor: TimerFlavor, ) -> (MultiThread, Arc<Handle>, Launch) { let parker = Parker::new(driver); let (handle, launch) = worker::create( size, parker, driver_handle, blocking_spawner, seed_generator, config, timer_flavor, ); (MultiThread, handle, launch) } /// Blocks the current thread waiting for the future to complete. /// /// The future will execute on the current thread, but all spawned tasks /// will be executed on the thread pool. 
pub(crate) fn block_on<F>(&self, handle: &scheduler::Handle, future: F) -> F::Output where F: Future, { crate::runtime::context::enter_runtime(handle, true, |blocking| { blocking.block_on(future).expect("failed to park thread") }) } pub(crate) fn shutdown(&mut self, handle: &scheduler::Handle) { match handle { scheduler::Handle::MultiThread(handle) => handle.shutdown(), _ => panic!("expected MultiThread scheduler"), } } } impl fmt::Debug for MultiThread { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("MultiThread").finish() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/multi_thread/queue.rs
tokio/src/runtime/scheduler/multi_thread/queue.rs
//! Run-queue structures to support a work-stealing scheduler use crate::loom::cell::UnsafeCell; use crate::loom::sync::Arc; use crate::runtime::scheduler::multi_thread::{Overflow, Stats}; use crate::runtime::task; use std::mem::{self, MaybeUninit}; use std::ptr; use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; // Use wider integers when possible to increase ABA resilience. // // See issue #5041: <https://github.com/tokio-rs/tokio/issues/5041>. cfg_has_atomic_u64! { type UnsignedShort = u32; type UnsignedLong = u64; type AtomicUnsignedShort = crate::loom::sync::atomic::AtomicU32; type AtomicUnsignedLong = crate::loom::sync::atomic::AtomicU64; } cfg_not_has_atomic_u64! { type UnsignedShort = u16; type UnsignedLong = u32; type AtomicUnsignedShort = crate::loom::sync::atomic::AtomicU16; type AtomicUnsignedLong = crate::loom::sync::atomic::AtomicU32; } /// Producer handle. May only be used from a single thread. pub(crate) struct Local<T: 'static> { inner: Arc<Inner<T>>, } /// Consumer handle. May be used from many threads. pub(crate) struct Steal<T: 'static>(Arc<Inner<T>>); pub(crate) struct Inner<T: 'static> { /// Concurrently updated by many threads. /// /// Contains two `UnsignedShort` values. The `LSB` byte is the "real" head of /// the queue. The `UnsignedShort` in the `MSB` is set by a stealer in process /// of stealing values. It represents the first value being stolen in the /// batch. The `UnsignedShort` indices are intentionally wider than strictly /// required for buffer indexing in order to provide ABA mitigation and make /// it possible to distinguish between full and empty buffers. /// /// When both `UnsignedShort` values are the same, there is no active /// stealer. /// /// Tracking an in-progress stealer prevents a wrapping scenario. head: AtomicUnsignedLong, /// Only updated by producer thread but read by many threads. 
tail: AtomicUnsignedShort, /// Elements buffer: Box<[UnsafeCell<MaybeUninit<task::Notified<T>>>; LOCAL_QUEUE_CAPACITY]>, } unsafe impl<T> Send for Inner<T> {} unsafe impl<T> Sync for Inner<T> {} #[cfg(not(loom))] const LOCAL_QUEUE_CAPACITY: usize = 256; // Shrink the size of the local queue when using loom. This shouldn't impact // logic, but allows loom to test more edge cases in a reasonable a mount of // time. #[cfg(loom)] const LOCAL_QUEUE_CAPACITY: usize = 4; const MASK: usize = LOCAL_QUEUE_CAPACITY - 1; // Constructing the fixed size array directly is very awkward. The only way to // do it is to repeat `UnsafeCell::new(MaybeUninit::uninit())` 256 times, as // the contents are not Copy. The trick with defining a const doesn't work for // generic types. fn make_fixed_size<T>(buffer: Box<[T]>) -> Box<[T; LOCAL_QUEUE_CAPACITY]> { assert_eq!(buffer.len(), LOCAL_QUEUE_CAPACITY); // safety: We check that the length is correct. unsafe { Box::from_raw(Box::into_raw(buffer).cast()) } } /// Create a new local run-queue pub(crate) fn local<T: 'static>() -> (Steal<T>, Local<T>) { let mut buffer = Vec::with_capacity(LOCAL_QUEUE_CAPACITY); for _ in 0..LOCAL_QUEUE_CAPACITY { buffer.push(UnsafeCell::new(MaybeUninit::uninit())); } let inner = Arc::new(Inner { head: AtomicUnsignedLong::new(0), tail: AtomicUnsignedShort::new(0), buffer: make_fixed_size(buffer.into_boxed_slice()), }); let local = Local { inner: inner.clone(), }; let remote = Steal(inner); (remote, local) } impl<T> Local<T> { /// Returns the number of entries in the queue pub(crate) fn len(&self) -> usize { let (_, head) = unpack(self.inner.head.load(Acquire)); // safety: this is the **only** thread that updates this cell. let tail = unsafe { self.inner.tail.unsync_load() }; len(head, tail) } /// How many tasks can be pushed into the queue pub(crate) fn remaining_slots(&self) -> usize { let (steal, _) = unpack(self.inner.head.load(Acquire)); // safety: this is the **only** thread that updates this cell. 
let tail = unsafe { self.inner.tail.unsync_load() }; LOCAL_QUEUE_CAPACITY - len(steal, tail) } pub(crate) fn max_capacity(&self) -> usize { LOCAL_QUEUE_CAPACITY } /// Returns false if there are any entries in the queue /// /// Separate to `is_stealable` so that refactors of `is_stealable` to "protect" /// some tasks from stealing won't affect this pub(crate) fn has_tasks(&self) -> bool { self.len() != 0 } /// Pushes a batch of tasks to the back of the queue. All tasks must fit in /// the local queue. /// /// # Panics /// /// The method panics if there is not enough capacity to fit in the queue. pub(crate) fn push_back(&mut self, tasks: impl ExactSizeIterator<Item = task::Notified<T>>) { let len = tasks.len(); assert!(len <= LOCAL_QUEUE_CAPACITY); if len == 0 { // Nothing to do return; } let head = self.inner.head.load(Acquire); let (steal, _) = unpack(head); // safety: this is the **only** thread that updates this cell. let mut tail = unsafe { self.inner.tail.unsync_load() }; if tail.wrapping_sub(steal) <= (LOCAL_QUEUE_CAPACITY - len) as UnsignedShort { // Yes, this if condition is structured a bit weird (first block // does nothing, second returns an error). It is this way to match // `push_back_or_overflow`. } else { panic!() } for task in tasks { let idx = tail as usize & MASK; self.inner.buffer[idx].with_mut(|ptr| { // Write the task to the slot // // Safety: There is only one producer and the above `if` // condition ensures we don't touch a cell if there is a // value, thus no consumer. unsafe { ptr::write((*ptr).as_mut_ptr(), task); } }); tail = tail.wrapping_add(1); } self.inner.tail.store(tail, Release); } /// Pushes a task to the back of the local queue, if there is not enough /// capacity in the queue, this triggers the overflow operation. /// /// When the queue overflows, half of the current contents of the queue is /// moved to the given Injection queue. This frees up capacity for more /// tasks to be pushed into the local queue. 
pub(crate) fn push_back_or_overflow<O: Overflow<T>>( &mut self, mut task: task::Notified<T>, overflow: &O, stats: &mut Stats, ) { let tail = loop { let head = self.inner.head.load(Acquire); let (steal, real) = unpack(head); // safety: this is the **only** thread that updates this cell. let tail = unsafe { self.inner.tail.unsync_load() }; if tail.wrapping_sub(steal) < LOCAL_QUEUE_CAPACITY as UnsignedShort { // There is capacity for the task break tail; } else if steal != real { // Concurrently stealing, this will free up capacity, so only // push the task onto the inject queue overflow.push(task); return; } else { // Push the current task and half of the queue into the // inject queue. match self.push_overflow(task, real, tail, overflow, stats) { Ok(_) => return, // Lost the race, try again Err(v) => { task = v; } } } }; self.push_back_finish(task, tail); } // Second half of `push_back` fn push_back_finish(&self, task: task::Notified<T>, tail: UnsignedShort) { // Map the position to a slot index. let idx = tail as usize & MASK; self.inner.buffer[idx].with_mut(|ptr| { // Write the task to the slot // // Safety: There is only one producer and the above `if` // condition ensures we don't touch a cell if there is a // value, thus no consumer. unsafe { ptr::write((*ptr).as_mut_ptr(), task); } }); // Make the task available. Synchronizes with a load in // `steal_into2`. self.inner.tail.store(tail.wrapping_add(1), Release); } /// Moves a batch of tasks into the inject queue. /// /// This will temporarily make some of the tasks unavailable to stealers. /// Once `push_overflow` is done, a notification is sent out, so if other /// workers "missed" some of the tasks during a steal, they will get /// another opportunity. #[inline(never)] fn push_overflow<O: Overflow<T>>( &mut self, task: task::Notified<T>, head: UnsignedShort, tail: UnsignedShort, overflow: &O, stats: &mut Stats, ) -> Result<(), task::Notified<T>> { /// How many elements are we taking from the local queue. 
/// /// This is one less than the number of tasks pushed to the inject /// queue as we are also inserting the `task` argument. const NUM_TASKS_TAKEN: UnsignedShort = (LOCAL_QUEUE_CAPACITY / 2) as UnsignedShort; assert_eq!( tail.wrapping_sub(head) as usize, LOCAL_QUEUE_CAPACITY, "queue is not full; tail = {tail}; head = {head}" ); let prev = pack(head, head); // Claim a bunch of tasks // // We are claiming the tasks **before** reading them out of the buffer. // This is safe because only the **current** thread is able to push new // tasks. // // There isn't really any need for memory ordering... Relaxed would // work. This is because all tasks are pushed into the queue from the // current thread (or memory has been acquired if the local queue handle // moved). if self .inner .head .compare_exchange( prev, pack( head.wrapping_add(NUM_TASKS_TAKEN), head.wrapping_add(NUM_TASKS_TAKEN), ), Release, Relaxed, ) .is_err() { // We failed to claim the tasks, losing the race. Return out of // this function and try the full `push` routine again. The queue // may not be full anymore. return Err(task); } /// An iterator that takes elements out of the run queue. struct BatchTaskIter<'a, T: 'static> { buffer: &'a [UnsafeCell<MaybeUninit<task::Notified<T>>>; LOCAL_QUEUE_CAPACITY], head: UnsignedLong, i: UnsignedLong, } impl<'a, T: 'static> Iterator for BatchTaskIter<'a, T> { type Item = task::Notified<T>; #[inline] fn next(&mut self) -> Option<task::Notified<T>> { if self.i == UnsignedLong::from(NUM_TASKS_TAKEN) { None } else { let i_idx = self.i.wrapping_add(self.head) as usize & MASK; let slot = &self.buffer[i_idx]; // safety: Our CAS from before has assumed exclusive ownership // of the task pointers in this range. let task = slot.with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) }); self.i += 1; Some(task) } } } // safety: The CAS above ensures that no consumer will look at these // values again, and we are the only producer. 
let batch_iter = BatchTaskIter { buffer: &self.inner.buffer, head: head as UnsignedLong, i: 0, }; overflow.push_batch(batch_iter.chain(std::iter::once(task))); // Add 1 to factor in the task currently being scheduled. stats.incr_overflow_count(); Ok(()) } /// Pops a task from the local queue. pub(crate) fn pop(&mut self) -> Option<task::Notified<T>> { let mut head = self.inner.head.load(Acquire); let idx = loop { let (steal, real) = unpack(head); // safety: this is the **only** thread that updates this cell. let tail = unsafe { self.inner.tail.unsync_load() }; if real == tail { // queue is empty return None; } let next_real = real.wrapping_add(1); // If `steal == real` there are no concurrent stealers. Both `steal` // and `real` are updated. let next = if steal == real { pack(next_real, next_real) } else { assert_ne!(steal, next_real); pack(steal, next_real) }; // Attempt to claim a task. let res = self .inner .head .compare_exchange(head, next, AcqRel, Acquire); match res { Ok(_) => break real as usize & MASK, Err(actual) => head = actual, } }; Some(self.inner.buffer[idx].with(|ptr| unsafe { ptr::read(ptr).assume_init() })) } } impl<T> Steal<T> { /// Returns the number of entries in the queue pub(crate) fn len(&self) -> usize { let (_, head) = unpack(self.0.head.load(Acquire)); let tail = self.0.tail.load(Acquire); len(head, tail) } /// Return true if the queue is empty, /// false if there are any entries in the queue pub(crate) fn is_empty(&self) -> bool { self.len() == 0 } /// Steals half the tasks from self and place them into `dst`. pub(crate) fn steal_into( &self, dst: &mut Local<T>, dst_stats: &mut Stats, ) -> Option<task::Notified<T>> { // Safety: the caller is the only thread that mutates `dst.tail` and // holds a mutable reference. let dst_tail = unsafe { dst.inner.tail.unsync_load() }; // To the caller, `dst` may **look** empty but still have values // contained in the buffer. 
If another thread is concurrently stealing // from `dst` there may not be enough capacity to steal. let (steal, _) = unpack(dst.inner.head.load(Acquire)); if dst_tail.wrapping_sub(steal) > LOCAL_QUEUE_CAPACITY as UnsignedShort / 2 { // we *could* try to steal less here, but for simplicity, we're just // going to abort. return None; } // Steal the tasks into `dst`'s buffer. This does not yet expose the // tasks in `dst`. let mut n = self.steal_into2(dst, dst_tail); if n == 0 { // No tasks were stolen return None; } dst_stats.incr_steal_count(n as u16); dst_stats.incr_steal_operations(); // We are returning a task here n -= 1; let ret_pos = dst_tail.wrapping_add(n); let ret_idx = ret_pos as usize & MASK; // safety: the value was written as part of `steal_into2` and not // exposed to stealers, so no other thread can access it. let ret = dst.inner.buffer[ret_idx].with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) }); if n == 0 { // The `dst` queue is empty, but a single task was stolen return Some(ret); } // Make the stolen items available to consumers dst.inner.tail.store(dst_tail.wrapping_add(n), Release); Some(ret) } // Steal tasks from `self`, placing them into `dst`. Returns the number of // tasks that were stolen. fn steal_into2(&self, dst: &mut Local<T>, dst_tail: UnsignedShort) -> UnsignedShort { let mut prev_packed = self.0.head.load(Acquire); let mut next_packed; let n = loop { let (src_head_steal, src_head_real) = unpack(prev_packed); let src_tail = self.0.tail.load(Acquire); // If these two do not match, another thread is concurrently // stealing from the queue. if src_head_steal != src_head_real { return 0; } // Number of available tasks to steal let n = src_tail.wrapping_sub(src_head_real); let n = n - n / 2; if n == 0 { // No tasks available to steal return 0; } // Update the real head index to acquire the tasks. 
let steal_to = src_head_real.wrapping_add(n); assert_ne!(src_head_steal, steal_to); next_packed = pack(src_head_steal, steal_to); // Claim all those tasks. This is done by incrementing the "real" // head but not the steal. By doing this, no other thread is able to // steal from this queue until the current thread completes. let res = self .0 .head .compare_exchange(prev_packed, next_packed, AcqRel, Acquire); match res { Ok(_) => break n, Err(actual) => prev_packed = actual, } }; assert!( n <= LOCAL_QUEUE_CAPACITY as UnsignedShort / 2, "actual = {n}" ); let (first, _) = unpack(next_packed); // Take all the tasks for i in 0..n { // Compute the positions let src_pos = first.wrapping_add(i); let dst_pos = dst_tail.wrapping_add(i); // Map to slots let src_idx = src_pos as usize & MASK; let dst_idx = dst_pos as usize & MASK; // Read the task // // safety: We acquired the task with the atomic exchange above. let task = self.0.buffer[src_idx].with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) }); // Write the task to the new slot // // safety: `dst` queue is empty and we are the only producer to // this queue. dst.inner.buffer[dst_idx] .with_mut(|ptr| unsafe { ptr::write((*ptr).as_mut_ptr(), task) }); } let mut prev_packed = next_packed; // Update `src_head_steal` to match `src_head_real` signalling that the // stealing routine is complete. loop { let head = unpack(prev_packed).1; next_packed = pack(head, head); let res = self .0 .head .compare_exchange(prev_packed, next_packed, AcqRel, Acquire); match res { Ok(_) => return n, Err(actual) => { let (actual_steal, actual_real) = unpack(actual); assert_ne!(actual_steal, actual_real); prev_packed = actual; } } } } } impl<T> Clone for Steal<T> { fn clone(&self) -> Steal<T> { Steal(self.0.clone()) } } impl<T> Drop for Local<T> { fn drop(&mut self) { if !std::thread::panicking() { assert!(self.pop().is_none(), "queue not empty"); } } } /// Calculate the length of the queue using the head and tail. 
/// The `head` can be the `steal` or `real` head. fn len(head: UnsignedShort, tail: UnsignedShort) -> usize { tail.wrapping_sub(head) as usize } /// Split the head value into the real head and the index a stealer is working /// on. fn unpack(n: UnsignedLong) -> (UnsignedShort, UnsignedShort) { let real = n & UnsignedShort::MAX as UnsignedLong; let steal = n >> (mem::size_of::<UnsignedShort>() * 8); (steal as UnsignedShort, real as UnsignedShort) } /// Join the two head values fn pack(steal: UnsignedShort, real: UnsignedShort) -> UnsignedLong { (real as UnsignedLong) | ((steal as UnsignedLong) << (mem::size_of::<UnsignedShort>() * 8)) } #[test] fn test_local_queue_capacity() { assert!(LOCAL_QUEUE_CAPACITY - 1 <= u8::MAX as usize); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/multi_thread/handle.rs
tokio/src/runtime/scheduler/multi_thread/handle.rs
use crate::future::Future; use crate::loom::sync::Arc; use crate::runtime::scheduler::multi_thread::worker; use crate::runtime::task::{Notified, Task, TaskHarnessScheduleHooks}; use crate::runtime::{ blocking, driver, task::{self, JoinHandle, SpawnLocation}, TaskHooks, TaskMeta, TimerFlavor, }; use crate::util::RngSeedGenerator; use std::fmt; use std::num::NonZeroU64; mod metrics; cfg_taskdump! { mod taskdump; } #[cfg(all(tokio_unstable, feature = "time"))] use crate::loom::sync::atomic::{AtomicBool, Ordering::SeqCst}; /// Handle to the multi thread scheduler pub(crate) struct Handle { /// Task spawner pub(super) shared: worker::Shared, /// Resource driver handles pub(crate) driver: driver::Handle, /// Blocking pool spawner pub(crate) blocking_spawner: blocking::Spawner, /// Current random number generator seed pub(crate) seed_generator: RngSeedGenerator, /// User-supplied hooks to invoke for things pub(crate) task_hooks: TaskHooks, #[cfg_attr(not(feature = "time"), allow(dead_code))] /// Timer flavor used by the runtime pub(crate) timer_flavor: TimerFlavor, #[cfg(all(tokio_unstable, feature = "time"))] /// Indicates that the runtime is shutting down. 
pub(crate) is_shutdown: AtomicBool, } impl Handle { /// Spawns a future onto the thread pool pub(crate) fn spawn<F>( me: &Arc<Self>, future: F, id: task::Id, spawned_at: SpawnLocation, ) -> JoinHandle<F::Output> where F: crate::future::Future + Send + 'static, F::Output: Send + 'static, { Self::bind_new_task(me, future, id, spawned_at) } #[cfg(all(tokio_unstable, feature = "time"))] pub(crate) fn is_shutdown(&self) -> bool { self.is_shutdown .load(crate::loom::sync::atomic::Ordering::SeqCst) } pub(crate) fn shutdown(&self) { self.close(); #[cfg(all(tokio_unstable, feature = "time"))] self.is_shutdown.store(true, SeqCst); } #[track_caller] pub(super) fn bind_new_task<T>( me: &Arc<Self>, future: T, id: task::Id, spawned_at: SpawnLocation, ) -> JoinHandle<T::Output> where T: Future + Send + 'static, T::Output: Send + 'static, { let (handle, notified) = me.shared.owned.bind(future, me.clone(), id, spawned_at); me.task_hooks.spawn(&TaskMeta { id, spawned_at, _phantom: Default::default(), }); me.schedule_option_task_without_yield(notified); handle } } impl task::Schedule for Arc<Handle> { fn release(&self, task: &Task<Self>) -> Option<Task<Self>> { self.shared.owned.remove(task) } fn schedule(&self, task: Notified<Self>) { self.schedule_task(task, false); } fn hooks(&self) -> TaskHarnessScheduleHooks { TaskHarnessScheduleHooks { task_terminate_callback: self.task_hooks.task_terminate_callback.clone(), } } fn yield_now(&self, task: Notified<Self>) { self.schedule_task(task, true); } } impl Handle { pub(crate) fn owned_id(&self) -> NonZeroU64 { self.shared.owned.id } } impl fmt::Debug for Handle { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("multi_thread::Handle { ... }").finish() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/multi_thread/worker/taskdump_mock.rs
tokio/src/runtime/scheduler/multi_thread/worker/taskdump_mock.rs
use super::{Core, Handle}; impl Handle { pub(super) fn trace_core(&self, core: Box<Core>) -> Box<Core> { core } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/multi_thread/worker/taskdump.rs
tokio/src/runtime/scheduler/multi_thread/worker/taskdump.rs
use super::{Core, Handle, Shared}; use crate::loom::sync::Arc; use crate::runtime::scheduler::multi_thread::Stats; use crate::runtime::task::trace::trace_multi_thread; use crate::runtime::{dump, WorkerMetrics}; use std::time::Duration; impl Handle { pub(super) fn trace_core(&self, mut core: Box<Core>) -> Box<Core> { core.is_traced = false; if core.is_shutdown { return core; } // wait for other workers, or timeout without tracing let timeout = Duration::from_millis(250); // a _very_ generous timeout let barrier = if let Some(barrier) = self.shared.trace_status.trace_start.wait_timeout(timeout) { barrier } else { // don't attempt to trace return core; }; if !barrier.is_leader() { // wait for leader to finish tracing self.shared.trace_status.trace_end.wait(); return core; } // trace let owned = &self.shared.owned; let mut local = self.shared.steal_all(); let synced = &self.shared.synced; let injection = &self.shared.inject; // safety: `trace_multi_thread` is invoked with the same `synced` that `injection` // was created with. let traces = unsafe { trace_multi_thread(owned, &mut local, synced, injection) } .into_iter() .map(|(id, trace)| dump::Task::new(id, trace)) .collect(); let result = dump::Dump::new(traces); // stash the result self.shared.trace_status.stash_result(result); // allow other workers to proceed self.shared.trace_status.trace_end.wait(); core } } impl Shared { /// Steal all tasks from remotes into a single local queue. pub(super) fn steal_all(&self) -> super::queue::Local<Arc<Handle>> { let (_steal, mut local) = super::queue::local(); let worker_metrics = WorkerMetrics::new(); let mut stats = Stats::new(&worker_metrics); for remote in self.remotes.iter() { let steal = &remote.steal; while !steal.is_empty() { if let Some(task) = steal.steal_into(&mut local, &mut stats) { local.push_back([task].into_iter()); } } } local } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/multi_thread/worker/metrics.rs
tokio/src/runtime/scheduler/multi_thread/worker/metrics.rs
use super::Shared; impl Shared { pub(crate) fn injection_queue_depth(&self) -> usize { self.inject.len() } } cfg_unstable_metrics! { impl Shared { pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> usize { self.remotes[worker].steal.len() } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/multi_thread/handle/taskdump.rs
tokio/src/runtime/scheduler/multi_thread/handle/taskdump.rs
use super::Handle; use crate::runtime::Dump; impl Handle { pub(crate) async fn dump(&self) -> Dump { let trace_status = &self.shared.trace_status; // If a dump is in progress, block. trace_status.start_trace_request(self).await; let result = loop { if let Some(result) = trace_status.take_result() { break result; } else { self.notify_all(); trace_status.result_ready.notified().await; } }; // Allow other queued dumps to proceed. trace_status.end_trace_request(self).await; result } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/scheduler/multi_thread/handle/metrics.rs
tokio/src/runtime/scheduler/multi_thread/handle/metrics.rs
use super::Handle; use crate::runtime::WorkerMetrics; cfg_unstable_metrics! { use crate::runtime::SchedulerMetrics; } impl Handle { pub(crate) fn num_workers(&self) -> usize { self.shared.worker_metrics.len() } pub(crate) fn num_alive_tasks(&self) -> usize { self.shared.owned.num_alive_tasks() } pub(crate) fn injection_queue_depth(&self) -> usize { self.shared.injection_queue_depth() } pub(crate) fn worker_metrics(&self, worker: usize) -> &WorkerMetrics { &self.shared.worker_metrics[worker] } cfg_unstable_metrics! { cfg_64bit_metrics! { pub(crate) fn spawned_tasks_count(&self) -> u64 { self.shared.owned.spawned_tasks_count() } } pub(crate) fn num_blocking_threads(&self) -> usize { // workers are currently spawned using spawn_blocking self.blocking_spawner .num_threads() .saturating_sub(self.num_workers()) } pub(crate) fn num_idle_blocking_threads(&self) -> usize { self.blocking_spawner.num_idle_threads() } pub(crate) fn scheduler_metrics(&self) -> &SchedulerMetrics { &self.shared.scheduler_metrics } pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> usize { self.shared.worker_local_queue_depth(worker) } pub(crate) fn blocking_queue_depth(&self) -> usize { self.blocking_spawner.queue_depth() } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/blocking/shutdown.rs
tokio/src/runtime/blocking/shutdown.rs
//! A shutdown channel. //! //! Each worker holds the `Sender` half. When all the `Sender` halves are //! dropped, the `Receiver` receives a notification. use crate::loom::sync::Arc; use crate::sync::oneshot; use std::time::Duration; #[derive(Debug, Clone)] pub(super) struct Sender { _tx: Arc<oneshot::Sender<()>>, } #[derive(Debug)] pub(super) struct Receiver { rx: oneshot::Receiver<()>, } pub(super) fn channel() -> (Sender, Receiver) { let (tx, rx) = oneshot::channel(); let tx = Sender { _tx: Arc::new(tx) }; let rx = Receiver { rx }; (tx, rx) } impl Receiver { /// Blocks the current thread until all `Sender` handles drop. /// /// If `timeout` is `Some`, the thread is blocked for **at most** `timeout` /// duration. If `timeout` is `None`, then the thread is blocked until the /// shutdown signal is received. /// /// If the timeout has elapsed, it returns `false`, otherwise it returns `true`. pub(crate) fn wait(&mut self, timeout: Option<Duration>) -> bool { use crate::runtime::context::try_enter_blocking_region; if timeout == Some(Duration::from_nanos(0)) { return false; } let mut e = match try_enter_blocking_region() { Some(enter) => enter, _ => { if std::thread::panicking() { // Don't panic in a panic return false; } else { panic!( "Cannot drop a runtime in a context where blocking is not allowed. \ This happens when a runtime is dropped from within an asynchronous context." ); } } }; // The oneshot completes with an Err // // If blocking fails to wait, this indicates a problem parking the // current thread (usually, shutting down a runtime stored in a // thread-local). if let Some(timeout) = timeout { e.block_on_timeout(&mut self.rx, timeout).is_ok() } else { let _ = e.block_on(&mut self.rx); true } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/blocking/schedule.rs
tokio/src/runtime/blocking/schedule.rs
#[cfg(feature = "test-util")] use crate::runtime::scheduler; use crate::runtime::task::{self, Task, TaskHarnessScheduleHooks}; use crate::runtime::Handle; /// `task::Schedule` implementation that does nothing (except some bookkeeping /// in test-util builds). This is unique to the blocking scheduler as tasks /// scheduled are not really futures but blocking operations. /// /// We avoid storing the task by forgetting it in `bind` and re-materializing it /// in `release`. pub(crate) struct BlockingSchedule { #[cfg(feature = "test-util")] handle: Handle, hooks: TaskHarnessScheduleHooks, } impl BlockingSchedule { #[cfg_attr(not(feature = "test-util"), allow(unused_variables))] pub(crate) fn new(handle: &Handle) -> Self { #[cfg(feature = "test-util")] { match &handle.inner { scheduler::Handle::CurrentThread(handle) => { handle.driver.clock.inhibit_auto_advance(); } #[cfg(feature = "rt-multi-thread")] scheduler::Handle::MultiThread(_) => {} } } BlockingSchedule { #[cfg(feature = "test-util")] handle: handle.clone(), hooks: TaskHarnessScheduleHooks { task_terminate_callback: handle.inner.hooks().task_terminate_callback.clone(), }, } } } impl task::Schedule for BlockingSchedule { fn release(&self, _task: &Task<Self>) -> Option<Task<Self>> { #[cfg(feature = "test-util")] { match &self.handle.inner { scheduler::Handle::CurrentThread(handle) => { handle.driver.clock.allow_auto_advance(); handle.driver.unpark(); } #[cfg(feature = "rt-multi-thread")] scheduler::Handle::MultiThread(_) => {} } } None } fn schedule(&self, _task: task::Notified<Self>) { unreachable!(); } fn hooks(&self) -> TaskHarnessScheduleHooks { TaskHarnessScheduleHooks { task_terminate_callback: self.hooks.task_terminate_callback.clone(), } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/blocking/mod.rs
tokio/src/runtime/blocking/mod.rs
//! Abstracts out the APIs necessary to `Runtime` for integrating the blocking //! pool. When the `blocking` feature flag is **not** enabled, these APIs are //! shells. This isolates the complexity of dealing with conditional //! compilation. mod pool; pub(crate) use pool::{spawn_blocking, BlockingPool, Spawner}; cfg_fs! { pub(crate) use pool::spawn_mandatory_blocking; } cfg_trace! { pub(crate) use pool::Mandatory; } mod schedule; mod shutdown; mod task; pub(crate) use task::BlockingTask; use crate::runtime::Builder; pub(crate) fn create_blocking_pool(builder: &Builder, thread_cap: usize) -> BlockingPool { BlockingPool::new(builder, thread_cap) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/blocking/task.rs
tokio/src/runtime/blocking/task.rs
use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; /// Converts a function to a future that completes on poll. pub(crate) struct BlockingTask<T> { func: Option<T>, } impl<T> BlockingTask<T> { /// Initializes a new blocking task from the given function. pub(crate) fn new(func: T) -> BlockingTask<T> { BlockingTask { func: Some(func) } } } // The closure `F` is never pinned impl<T> Unpin for BlockingTask<T> {} impl<T, R> Future for BlockingTask<T> where T: FnOnce() -> R + Send + 'static, R: Send + 'static, { type Output = R; fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<R> { let me = &mut *self; let func = me .func .take() .expect("[internal exception] blocking task ran twice."); // This is a little subtle: // For convenience, we'd like _every_ call tokio ever makes to Task::poll() to be budgeted // using coop. However, the way things are currently modeled, even running a blocking task // currently goes through Task::poll(), and so is subject to budgeting. That isn't really // what we want; a blocking task may itself want to run tasks (it might be a Worker!), so // we want it to start without any budgeting. crate::task::coop::stop(); Poll::Ready(func()) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/blocking/pool.rs
tokio/src/runtime/blocking/pool.rs
//! Thread pool for blocking operations use crate::loom::sync::{Arc, Condvar, Mutex}; use crate::loom::thread; use crate::runtime::blocking::schedule::BlockingSchedule; use crate::runtime::blocking::{shutdown, BlockingTask}; use crate::runtime::builder::ThreadNameFn; use crate::runtime::task::{self, JoinHandle}; use crate::runtime::{Builder, Callback, Handle, BOX_FUTURE_THRESHOLD}; use crate::util::metric_atomics::MetricAtomicUsize; use crate::util::trace::{blocking_task, SpawnMeta}; use std::collections::{HashMap, VecDeque}; use std::fmt; use std::io; use std::sync::atomic::Ordering; use std::time::Duration; pub(crate) struct BlockingPool { spawner: Spawner, shutdown_rx: shutdown::Receiver, } #[derive(Clone)] pub(crate) struct Spawner { inner: Arc<Inner>, } #[derive(Default)] pub(crate) struct SpawnerMetrics { num_threads: MetricAtomicUsize, num_idle_threads: MetricAtomicUsize, queue_depth: MetricAtomicUsize, } impl SpawnerMetrics { fn num_threads(&self) -> usize { self.num_threads.load(Ordering::Relaxed) } fn num_idle_threads(&self) -> usize { self.num_idle_threads.load(Ordering::Relaxed) } cfg_unstable_metrics! { fn queue_depth(&self) -> usize { self.queue_depth.load(Ordering::Relaxed) } } fn inc_num_threads(&self) { self.num_threads.increment(); } fn dec_num_threads(&self) { self.num_threads.decrement(); } fn inc_num_idle_threads(&self) { self.num_idle_threads.increment(); } fn dec_num_idle_threads(&self) -> usize { self.num_idle_threads.decrement() } fn inc_queue_depth(&self) { self.queue_depth.increment(); } fn dec_queue_depth(&self) { self.queue_depth.decrement(); } } struct Inner { /// State shared between worker threads. shared: Mutex<Shared>, /// Pool threads wait on this. condvar: Condvar, /// Spawned threads use this name. thread_name: ThreadNameFn, /// Spawned thread stack size. stack_size: Option<usize>, /// Call after a thread starts. after_start: Option<Callback>, /// Call before a thread stops. 
before_stop: Option<Callback>, // Maximum number of threads. thread_cap: usize, // Customizable wait timeout. keep_alive: Duration, // Metrics about the pool. metrics: SpawnerMetrics, } struct Shared { queue: VecDeque<Task>, num_notify: u32, shutdown: bool, shutdown_tx: Option<shutdown::Sender>, /// Prior to shutdown, we clean up `JoinHandles` by having each timed-out /// thread join on the previous timed-out thread. This is not strictly /// necessary but helps avoid Valgrind false positives, see /// <https://github.com/tokio-rs/tokio/commit/646fbae76535e397ef79dbcaacb945d4c829f666> /// for more information. last_exiting_thread: Option<thread::JoinHandle<()>>, /// This holds the `JoinHandles` for all running threads; on shutdown, the thread /// calling shutdown handles joining on these. worker_threads: HashMap<usize, thread::JoinHandle<()>>, /// This is a counter used to iterate `worker_threads` in a consistent order (for loom's /// benefit). worker_thread_index: usize, } pub(crate) struct Task { task: task::UnownedTask<BlockingSchedule>, mandatory: Mandatory, } #[derive(PartialEq, Eq)] pub(crate) enum Mandatory { #[cfg_attr(not(feature = "fs"), allow(dead_code))] Mandatory, NonMandatory, } pub(crate) enum SpawnError { /// Pool is shutting down and the task was not scheduled ShuttingDown, /// There are no worker threads available to take the task /// and the OS failed to spawn a new one NoThreads(io::Error), } impl From<SpawnError> for io::Error { fn from(e: SpawnError) -> Self { match e { SpawnError::ShuttingDown => { io::Error::new(io::ErrorKind::Other, "blocking pool shutting down") } SpawnError::NoThreads(e) => e, } } } impl Task { pub(crate) fn new(task: task::UnownedTask<BlockingSchedule>, mandatory: Mandatory) -> Task { Task { task, mandatory } } fn run(self) { self.task.run(); } fn shutdown_or_run_if_mandatory(self) { match self.mandatory { Mandatory::NonMandatory => self.task.shutdown(), Mandatory::Mandatory => self.task.run(), } } } const KEEP_ALIVE: 
Duration = Duration::from_secs(10); /// Runs the provided function on an executor dedicated to blocking operations. /// Tasks will be scheduled as non-mandatory, meaning they may not get executed /// in case of runtime shutdown. #[track_caller] #[cfg_attr(target_os = "wasi", allow(dead_code))] pub(crate) fn spawn_blocking<F, R>(func: F) -> JoinHandle<R> where F: FnOnce() -> R + Send + 'static, R: Send + 'static, { let rt = Handle::current(); rt.spawn_blocking(func) } cfg_fs! { #[cfg_attr(any( all(loom, not(test)), // the function is covered by loom tests test ), allow(dead_code))] /// Runs the provided function on an executor dedicated to blocking /// operations. Tasks will be scheduled as mandatory, meaning they are /// guaranteed to run unless a shutdown is already taking place. In case a /// shutdown is already taking place, `None` will be returned. pub(crate) fn spawn_mandatory_blocking<F, R>(func: F) -> Option<JoinHandle<R>> where F: FnOnce() -> R + Send + 'static, R: Send + 'static, { let rt = Handle::current(); rt.inner.blocking_spawner().spawn_mandatory_blocking(&rt, func) } } // ===== impl BlockingPool ===== impl BlockingPool { pub(crate) fn new(builder: &Builder, thread_cap: usize) -> BlockingPool { let (shutdown_tx, shutdown_rx) = shutdown::channel(); let keep_alive = builder.keep_alive.unwrap_or(KEEP_ALIVE); BlockingPool { spawner: Spawner { inner: Arc::new(Inner { shared: Mutex::new(Shared { queue: VecDeque::new(), num_notify: 0, shutdown: false, shutdown_tx: Some(shutdown_tx), last_exiting_thread: None, worker_threads: HashMap::new(), worker_thread_index: 0, }), condvar: Condvar::new(), thread_name: builder.thread_name.clone(), stack_size: builder.thread_stack_size, after_start: builder.after_start.clone(), before_stop: builder.before_stop.clone(), thread_cap, keep_alive, metrics: SpawnerMetrics::default(), }), }, shutdown_rx, } } pub(crate) fn spawner(&self) -> &Spawner { &self.spawner } pub(crate) fn shutdown(&mut self, timeout: Option<Duration>) { 
let mut shared = self.spawner.inner.shared.lock(); // The function can be called multiple times. First, by explicitly // calling `shutdown` then by the drop handler calling `shutdown`. This // prevents shutting down twice. if shared.shutdown { return; } shared.shutdown = true; shared.shutdown_tx = None; self.spawner.inner.condvar.notify_all(); let last_exited_thread = std::mem::take(&mut shared.last_exiting_thread); let workers = std::mem::take(&mut shared.worker_threads); drop(shared); if self.shutdown_rx.wait(timeout) { let _ = last_exited_thread.map(thread::JoinHandle::join); // Loom requires that execution be deterministic, so sort by thread ID before joining. // (HashMaps use a randomly-seeded hash function, so the order is nondeterministic) #[cfg(loom)] let workers: Vec<(usize, thread::JoinHandle<()>)> = { let mut workers: Vec<_> = workers.into_iter().collect(); workers.sort_by_key(|(id, _)| *id); workers }; for (_id, handle) in workers { let _ = handle.join(); } } } } impl Drop for BlockingPool { fn drop(&mut self) { self.shutdown(None); } } impl fmt::Debug for BlockingPool { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("BlockingPool").finish() } } // ===== impl Spawner ===== impl Spawner { #[track_caller] pub(crate) fn spawn_blocking<F, R>(&self, rt: &Handle, func: F) -> JoinHandle<R> where F: FnOnce() -> R + Send + 'static, R: Send + 'static, { let fn_size = std::mem::size_of::<F>(); let (join_handle, spawn_result) = if fn_size > BOX_FUTURE_THRESHOLD { self.spawn_blocking_inner( Box::new(func), Mandatory::NonMandatory, SpawnMeta::new_unnamed(fn_size), rt, ) } else { self.spawn_blocking_inner( func, Mandatory::NonMandatory, SpawnMeta::new_unnamed(fn_size), rt, ) }; match spawn_result { Ok(()) => join_handle, // Compat: do not panic here, return the join_handle even though it will never resolve Err(SpawnError::ShuttingDown) => join_handle, Err(SpawnError::NoThreads(e)) => { panic!("OS can't spawn worker thread: {e}") } } } 
cfg_fs! { #[track_caller] #[cfg_attr(any( all(loom, not(test)), // the function is covered by loom tests test ), allow(dead_code))] pub(crate) fn spawn_mandatory_blocking<F, R>(&self, rt: &Handle, func: F) -> Option<JoinHandle<R>> where F: FnOnce() -> R + Send + 'static, R: Send + 'static, { let fn_size = std::mem::size_of::<F>(); let (join_handle, spawn_result) = if fn_size > BOX_FUTURE_THRESHOLD { self.spawn_blocking_inner( Box::new(func), Mandatory::Mandatory, SpawnMeta::new_unnamed(fn_size), rt, ) } else { self.spawn_blocking_inner( func, Mandatory::Mandatory, SpawnMeta::new_unnamed(fn_size), rt, ) }; if spawn_result.is_ok() { Some(join_handle) } else { None } } } #[track_caller] pub(crate) fn spawn_blocking_inner<F, R>( &self, func: F, is_mandatory: Mandatory, spawn_meta: SpawnMeta<'_>, rt: &Handle, ) -> (JoinHandle<R>, Result<(), SpawnError>) where F: FnOnce() -> R + Send + 'static, R: Send + 'static, { let id = task::Id::next(); let fut = blocking_task::<F, BlockingTask<F>>(BlockingTask::new(func), spawn_meta, id.as_u64()); let (task, handle) = task::unowned( fut, BlockingSchedule::new(rt), id, task::SpawnLocation::capture(), ); let spawned = self.spawn_task(Task::new(task, is_mandatory), rt); (handle, spawned) } fn spawn_task(&self, task: Task, rt: &Handle) -> Result<(), SpawnError> { let mut shared = self.inner.shared.lock(); if shared.shutdown { // Shutdown the task: it's fine to shutdown this task (even if // mandatory) because it was scheduled after the shutdown of the // runtime began. task.task.shutdown(); // no need to even push this task; it would never get picked up return Err(SpawnError::ShuttingDown); } shared.queue.push_back(task); self.inner.metrics.inc_queue_depth(); if self.inner.metrics.num_idle_threads() == 0 { // No threads are able to process the task. 
if self.inner.metrics.num_threads() == self.inner.thread_cap { // At max number of threads } else { assert!(shared.shutdown_tx.is_some()); let shutdown_tx = shared.shutdown_tx.clone(); if let Some(shutdown_tx) = shutdown_tx { let id = shared.worker_thread_index; match self.spawn_thread(shutdown_tx, rt, id) { Ok(handle) => { self.inner.metrics.inc_num_threads(); shared.worker_thread_index += 1; shared.worker_threads.insert(id, handle); } Err(ref e) if is_temporary_os_thread_error(e) && self.inner.metrics.num_threads() > 0 => { // OS temporarily failed to spawn a new thread. // The task will be picked up eventually by a currently // busy thread. } Err(e) => { // The OS refused to spawn the thread and there is no thread // to pick up the task that has just been pushed to the queue. return Err(SpawnError::NoThreads(e)); } } } } } else { // Notify an idle worker thread. The notification counter // is used to count the needed amount of notifications // exactly. Thread libraries may generate spurious // wakeups, this counter is used to keep us in a // consistent state. self.inner.metrics.dec_num_idle_threads(); shared.num_notify += 1; self.inner.condvar.notify_one(); } Ok(()) } fn spawn_thread( &self, shutdown_tx: shutdown::Sender, rt: &Handle, id: usize, ) -> io::Result<thread::JoinHandle<()>> { let mut builder = thread::Builder::new().name((self.inner.thread_name)()); if let Some(stack_size) = self.inner.stack_size { builder = builder.stack_size(stack_size); } let rt = rt.clone(); builder.spawn(move || { // Only the reference should be moved into the closure let _enter = rt.enter(); rt.inner.blocking_spawner().inner.run(id); drop(shutdown_tx); }) } } cfg_unstable_metrics! 
{ impl Spawner { pub(crate) fn num_threads(&self) -> usize { self.inner.metrics.num_threads() } pub(crate) fn num_idle_threads(&self) -> usize { self.inner.metrics.num_idle_threads() } pub(crate) fn queue_depth(&self) -> usize { self.inner.metrics.queue_depth() } } } // Tells whether the error when spawning a thread is temporary. #[inline] fn is_temporary_os_thread_error(error: &io::Error) -> bool { matches!(error.kind(), io::ErrorKind::WouldBlock) } impl Inner { fn run(&self, worker_thread_id: usize) { if let Some(f) = &self.after_start { f(); } let mut shared = self.shared.lock(); let mut join_on_thread = None; 'main: loop { // BUSY while let Some(task) = shared.queue.pop_front() { self.metrics.dec_queue_depth(); drop(shared); task.run(); shared = self.shared.lock(); } // IDLE self.metrics.inc_num_idle_threads(); while !shared.shutdown { let lock_result = self.condvar.wait_timeout(shared, self.keep_alive).unwrap(); shared = lock_result.0; let timeout_result = lock_result.1; if shared.num_notify != 0 { // We have received a legitimate wakeup, // acknowledge it by decrementing the counter // and transition to the BUSY state. shared.num_notify -= 1; break; } // Even if the condvar "timed out", if the pool is entering the // shutdown phase, we want to perform the cleanup logic. if !shared.shutdown && timeout_result.timed_out() { // We'll join the prior timed-out thread's JoinHandle after dropping the lock. // This isn't done when shutting down, because the thread calling shutdown will // handle joining everything. let my_handle = shared.worker_threads.remove(&worker_thread_id); join_on_thread = std::mem::replace(&mut shared.last_exiting_thread, my_handle); break 'main; } // Spurious wakeup detected, go back to sleep. 
} if shared.shutdown { // Drain the queue while let Some(task) = shared.queue.pop_front() { self.metrics.dec_queue_depth(); drop(shared); task.shutdown_or_run_if_mandatory(); shared = self.shared.lock(); } // Work was produced, and we "took" it (by decrementing num_notify). // This means that num_idle was decremented once for our wakeup. // But, since we are exiting, we need to "undo" that, as we'll stay idle. self.metrics.inc_num_idle_threads(); // NOTE: Technically we should also do num_notify++ and notify again, // but since we're shutting down anyway, that won't be necessary. break; } } // Thread exit self.metrics.dec_num_threads(); // num_idle should now be tracked exactly, panic // with a descriptive message if it is not the // case. let prev_idle = self.metrics.dec_num_idle_threads(); assert!( prev_idle >= self.metrics.num_idle_threads(), "num_idle_threads underflowed on thread exit" ); if shared.shutdown && self.metrics.num_threads() == 0 { self.condvar.notify_one(); } drop(shared); if let Some(f) = &self.before_stop { f(); } if let Some(handle) = join_on_thread { let _ = handle.join(); } } } impl fmt::Debug for Spawner { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("blocking::Spawner").finish() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/local_runtime/runtime.rs
tokio/src/runtime/local_runtime/runtime.rs
#![allow(irrefutable_let_patterns)] use crate::runtime::blocking::BlockingPool; use crate::runtime::scheduler::CurrentThread; use crate::runtime::{context, Builder, EnterGuard, Handle, BOX_FUTURE_THRESHOLD}; use crate::task::JoinHandle; use crate::util::trace::SpawnMeta; use std::future::Future; use std::marker::PhantomData; use std::mem; use std::time::Duration; /// A local Tokio runtime. /// /// This runtime is capable of driving tasks which are not `Send + Sync` without the use of a /// `LocalSet`, and thus supports `spawn_local` without the need for a `LocalSet` context. /// /// This runtime cannot be moved between threads or driven from different threads. /// /// This runtime is incompatible with `LocalSet`. You should not attempt to drive a `LocalSet` within a /// `LocalRuntime`. /// /// Currently, this runtime supports one flavor, which is internally identical to `current_thread`, /// save for the aforementioned differences related to `spawn_local`. /// /// For more general information on how to use runtimes, see the [module] docs. /// /// [runtime]: crate::runtime::Runtime /// [module]: crate::runtime #[derive(Debug)] #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))] pub struct LocalRuntime { /// Task scheduler scheduler: LocalRuntimeScheduler, /// Handle to runtime, also contains driver handles handle: Handle, /// Blocking pool handle, used to signal shutdown blocking_pool: BlockingPool, /// Marker used to make this !Send and !Sync. _phantom: PhantomData<*mut u8>, } /// The runtime scheduler is always a `current_thread` scheduler right now. #[derive(Debug)] pub(crate) enum LocalRuntimeScheduler { /// Execute all tasks on the current-thread. 
CurrentThread(CurrentThread), } impl LocalRuntime { pub(crate) fn from_parts( scheduler: LocalRuntimeScheduler, handle: Handle, blocking_pool: BlockingPool, ) -> LocalRuntime { LocalRuntime { scheduler, handle, blocking_pool, _phantom: Default::default(), } } /// Creates a new local runtime instance with default configuration values. /// /// This results in the scheduler, I/O driver, and time driver being /// initialized. /// /// When a more complex configuration is necessary, the [runtime builder] may be used. /// /// See [module level][mod] documentation for more details. /// /// # Examples /// /// Creating a new `LocalRuntime` with default configuration values. /// /// ``` /// use tokio::runtime::LocalRuntime; /// /// let rt = LocalRuntime::new() /// .unwrap(); /// /// // Use the runtime... /// ``` /// /// [mod]: crate::runtime /// [runtime builder]: crate::runtime::Builder pub fn new() -> std::io::Result<LocalRuntime> { Builder::new_current_thread() .enable_all() .build_local(Default::default()) } /// Returns a handle to the runtime's spawner. /// /// The returned handle can be used to spawn tasks that run on this runtime, and can /// be cloned to allow moving the `Handle` to other threads. /// /// As the handle can be sent to other threads, it can only be used to spawn tasks that are `Send`. /// /// Calling [`Handle::block_on`] on a handle to a `LocalRuntime` is error-prone. /// Refer to the documentation of [`Handle::block_on`] for more. /// /// # Examples /// /// ``` /// use tokio::runtime::LocalRuntime; /// /// let rt = LocalRuntime::new() /// .unwrap(); /// /// let handle = rt.handle(); /// /// // Use the handle... /// ``` pub fn handle(&self) -> &Handle { &self.handle } /// Spawns a task on the runtime. /// /// This is analogous to the [`spawn`] method on the standard [`Runtime`], but works even if the task is not thread-safe. 
/// /// [`spawn`]: crate::runtime::Runtime::spawn /// [`Runtime`]: crate::runtime::Runtime /// /// # Examples /// /// ``` /// use tokio::runtime::LocalRuntime; /// /// # fn dox() { /// // Create the runtime /// let rt = LocalRuntime::new().unwrap(); /// /// // Spawn a future onto the runtime /// rt.spawn_local(async { /// println!("now running on a worker thread"); /// }); /// # } /// ``` #[track_caller] pub fn spawn_local<F>(&self, future: F) -> JoinHandle<F::Output> where F: Future + 'static, F::Output: 'static, { let fut_size = std::mem::size_of::<F>(); let meta = SpawnMeta::new_unnamed(fut_size); // safety: spawn_local can only be called from `LocalRuntime`, which this is unsafe { if std::mem::size_of::<F>() > BOX_FUTURE_THRESHOLD { self.handle.spawn_local_named(Box::pin(future), meta) } else { self.handle.spawn_local_named(future, meta) } } } /// Runs the provided function on a thread from a dedicated blocking thread pool. /// /// This function _will_ be run on another thread. /// /// See the [documentation in the non-local runtime][Runtime] for more /// information. /// /// [Runtime]: crate::runtime::Runtime::spawn_blocking /// /// # Examples /// /// ``` /// use tokio::runtime::LocalRuntime; /// /// # fn dox() { /// // Create the runtime /// let rt = LocalRuntime::new().unwrap(); /// /// // Spawn a blocking function onto the runtime /// rt.spawn_blocking(|| { /// println!("now running on a worker thread"); /// }); /// # } /// ``` #[track_caller] pub fn spawn_blocking<F, R>(&self, func: F) -> JoinHandle<R> where F: FnOnce() -> R + Send + 'static, R: Send + 'static, { self.handle.spawn_blocking(func) } /// Runs a future to completion on the Tokio runtime. This is the /// runtime's entry point. /// /// See the documentation for [the equivalent method on Runtime][Runtime] /// for more information. 
/// /// [Runtime]: crate::runtime::Runtime::block_on /// /// # Examples /// /// ```no_run /// use tokio::runtime::LocalRuntime; /// /// // Create the runtime /// let rt = LocalRuntime::new().unwrap(); /// /// // Execute the future, blocking the current thread until completion /// rt.block_on(async { /// println!("hello"); /// }); /// ``` #[track_caller] pub fn block_on<F: Future>(&self, future: F) -> F::Output { let fut_size = mem::size_of::<F>(); let meta = SpawnMeta::new_unnamed(fut_size); if std::mem::size_of::<F>() > BOX_FUTURE_THRESHOLD { self.block_on_inner(Box::pin(future), meta) } else { self.block_on_inner(future, meta) } } #[track_caller] fn block_on_inner<F: Future>(&self, future: F, _meta: SpawnMeta<'_>) -> F::Output { #[cfg(all( tokio_unstable, feature = "taskdump", feature = "rt", target_os = "linux", any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64") ))] let future = crate::runtime::task::trace::Trace::root(future); #[cfg(all(tokio_unstable, feature = "tracing"))] let future = crate::util::trace::task( future, "block_on", _meta, crate::runtime::task::Id::next().as_u64(), ); let _enter = self.enter(); if let LocalRuntimeScheduler::CurrentThread(exec) = &self.scheduler { exec.block_on(&self.handle.inner, future) } else { unreachable!("LocalRuntime only supports current_thread") } } /// Enters the runtime context. /// /// This allows you to construct types that must have an executor /// available on creation such as [`Sleep`] or [`TcpStream`]. It will /// also allow you to call methods such as [`tokio::spawn`]. /// /// If this is a handle to a [`LocalRuntime`], and this function is being invoked from the same /// thread that the runtime was created on, you will also be able to call /// [`tokio::task::spawn_local`]. 
/// /// [`Sleep`]: struct@crate::time::Sleep /// [`TcpStream`]: struct@crate::net::TcpStream /// [`tokio::spawn`]: fn@crate::spawn /// [`LocalRuntime`]: struct@crate::runtime::LocalRuntime /// [`tokio::task::spawn_local`]: fn@crate::task::spawn_local /// /// # Example /// /// ``` /// use tokio::runtime::LocalRuntime; /// use tokio::task::JoinHandle; /// /// fn function_that_spawns(msg: String) -> JoinHandle<()> { /// // Had we not used `rt.enter` below, this would panic. /// tokio::spawn(async move { /// println!("{}", msg); /// }) /// } /// /// fn main() { /// let rt = LocalRuntime::new().unwrap(); /// /// let s = "Hello World!".to_string(); /// /// // By entering the context, we tie `tokio::spawn` to this executor. /// let _guard = rt.enter(); /// let handle = function_that_spawns(s); /// /// // Wait for the task before we end the test. /// rt.block_on(handle).unwrap(); /// } /// ``` pub fn enter(&self) -> EnterGuard<'_> { self.handle.enter() } /// Shuts down the runtime, waiting for at most `duration` for all spawned /// work to stop. /// /// Note that `spawn_blocking` tasks, and only `spawn_blocking` tasks, can get left behind if /// the timeout expires. /// /// See the [struct level documentation](LocalRuntime#shutdown) for more details. /// /// # Examples /// /// ``` /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::runtime::LocalRuntime; /// use tokio::task; /// /// use std::thread; /// use std::time::Duration; /// /// fn main() { /// let runtime = LocalRuntime::new().unwrap(); /// /// runtime.block_on(async move { /// task::spawn_blocking(move || { /// thread::sleep(Duration::from_secs(10_000)); /// }); /// }); /// /// runtime.shutdown_timeout(Duration::from_millis(100)); /// } /// # } /// ``` pub fn shutdown_timeout(mut self, duration: Duration) { // Wakeup and shutdown all the worker threads self.handle.inner.shutdown(); self.blocking_pool.shutdown(Some(duration)); } /// Shuts down the runtime, without waiting for any spawned work to stop. 
/// /// This can be useful if you want to drop a runtime from within another runtime. /// Normally, dropping a runtime will block indefinitely for spawned blocking tasks /// to complete, which would normally not be permitted within an asynchronous context. /// By calling `shutdown_background()`, you can drop the runtime from such a context. /// /// Note however, that because we do not wait for any blocking tasks to complete, this /// may result in a resource leak (in that any blocking tasks are still running until they /// return. No other tasks will leak. /// /// See the [struct level documentation](LocalRuntime#shutdown) for more details. /// /// This function is equivalent to calling `shutdown_timeout(Duration::from_nanos(0))`. /// /// ``` /// use tokio::runtime::LocalRuntime; /// /// fn main() { /// let runtime = LocalRuntime::new().unwrap(); /// /// runtime.block_on(async move { /// let inner_runtime = LocalRuntime::new().unwrap(); /// // ... /// inner_runtime.shutdown_background(); /// }); /// } /// ``` pub fn shutdown_background(self) { self.shutdown_timeout(Duration::from_nanos(0)); } /// Returns a view that lets you get information about how the runtime /// is performing. pub fn metrics(&self) -> crate::runtime::RuntimeMetrics { self.handle.metrics() } } impl Drop for LocalRuntime { fn drop(&mut self) { if let LocalRuntimeScheduler::CurrentThread(current_thread) = &mut self.scheduler { // This ensures that tasks spawned on the current-thread // runtime are dropped inside the runtime's context. let _guard = context::try_set_current(&self.handle.inner); current_thread.shutdown(&self.handle.inner); } else { unreachable!("LocalRuntime only supports current-thread") } } } impl std::panic::UnwindSafe for LocalRuntime {} impl std::panic::RefUnwindSafe for LocalRuntime {}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/local_runtime/options.rs
tokio/src/runtime/local_runtime/options.rs
use std::marker::PhantomData; /// [`LocalRuntime`]-only config options /// /// Currently, there are no such options, but in the future, things like `!Send + !Sync` hooks may /// be added. /// /// Use `LocalOptions::default()` to create the default set of options. This type is used with /// [`Builder::build_local`]. /// /// [`Builder::build_local`]: crate::runtime::Builder::build_local /// [`LocalRuntime`]: crate::runtime::LocalRuntime #[derive(Default, Debug)] #[non_exhaustive] pub struct LocalOptions { /// Marker used to make this !Send and !Sync. _phantom: PhantomData<*mut u8>, }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/local_runtime/mod.rs
tokio/src/runtime/local_runtime/mod.rs
mod runtime; mod options; pub use options::LocalOptions; pub use runtime::LocalRuntime; pub(super) use runtime::LocalRuntimeScheduler;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/signal/mod.rs
tokio/src/runtime/signal/mod.rs
#![cfg_attr(not(feature = "rt"), allow(dead_code))] //! Signal driver use crate::runtime::{driver, io}; use crate::signal::registry::globals; use mio::net::UnixStream; use std::io::{self as std_io, Read}; use std::sync::{Arc, Weak}; use std::time::Duration; /// Responsible for registering wakeups when an OS signal is received, and /// subsequently dispatching notifications to any signal listeners as appropriate. /// /// Note: this driver relies on having an enabled IO driver in order to listen to /// pipe write wakeups. #[derive(Debug)] pub(crate) struct Driver { /// Thread parker. The `Driver` park implementation delegates to this. io: io::Driver, /// A pipe for receiving wake events from the signal handler receiver: UnixStream, /// Shared state. The driver keeps a strong ref and the handle keeps a weak /// ref. The weak ref is used to check if the driver is still active before /// trying to register a signal handler. inner: Arc<()>, } #[derive(Debug, Default)] pub(crate) struct Handle { /// Paired w/ the `Arc` above and is used to check if the driver is still /// around before attempting to register a signal handler. inner: Weak<()>, } // ===== impl Driver ===== impl Driver { /// Creates a new signal `Driver` instance that delegates wakeups to `park`. pub(crate) fn new(io: io::Driver, io_handle: &io::Handle) -> std_io::Result<Self> { use std::mem::ManuallyDrop; use std::os::unix::io::{AsRawFd, FromRawFd}; // NB: We give each driver a "fresh" receiver file descriptor to avoid // the issues described in alexcrichton/tokio-process#42. // // In the past we would reuse the actual receiver file descriptor and // swallow any errors around double registration of the same descriptor. // I'm not sure if the second (failed) registration simply doesn't end // up receiving wake up notifications, or there could be some race // condition when consuming readiness events, but having distinct // descriptors appears to mitigate this. 
// // Unfortunately we cannot just use a single global UnixStream instance // either, since we can't assume they will always be registered with the // exact same reactor. // // Mio 0.7 removed `try_clone()` as an API due to unexpected behavior // with registering dups with the same reactor. In this case, duping is // safe as each dup is registered with separate reactors **and** we // only expect at least one dup to receive the notification. // Manually drop as we don't actually own this instance of UnixStream. let receiver_fd = globals().receiver.as_raw_fd(); // safety: there is nothing unsafe about this, but the `from_raw_fd` fn is marked as unsafe. let original = ManuallyDrop::new(unsafe { std::os::unix::net::UnixStream::from_raw_fd(receiver_fd) }); let mut receiver = UnixStream::from_std(original.try_clone()?); io_handle.register_signal_receiver(&mut receiver)?; Ok(Self { io, receiver, inner: Arc::new(()), }) } /// Returns a handle to this event loop which can be sent across threads /// and can be used as a proxy to the event loop itself. pub(crate) fn handle(&self) -> Handle { Handle { inner: Arc::downgrade(&self.inner), } } pub(crate) fn park(&mut self, handle: &driver::Handle) { self.io.park(handle); self.process(); } pub(crate) fn park_timeout(&mut self, handle: &driver::Handle, duration: Duration) { self.io.park_timeout(handle, duration); self.process(); } pub(crate) fn shutdown(&mut self, handle: &driver::Handle) { self.io.shutdown(handle); } fn process(&mut self) { // If the signal pipe has not received a readiness event, then there is // nothing else to do. if !self.io.consume_signal_ready() { return; } // Drain the pipe completely so we can receive a new readiness event // if another signal has come in. 
let mut buf = [0; 128]; #[allow(clippy::unused_io_amount)] loop { match self.receiver.read(&mut buf) { Ok(0) => panic!("EOF on self-pipe"), Ok(_) => continue, // Keep reading Err(e) if e.kind() == std_io::ErrorKind::WouldBlock => break, Err(e) => panic!("Bad read on self-pipe: {e}"), } } // Broadcast any signals which were received globals().broadcast(); } } // ===== impl Handle ===== impl Handle { pub(crate) fn check_inner(&self) -> std_io::Result<()> { if self.inner.strong_count() > 0 { Ok(()) } else { Err(std_io::Error::new( std_io::ErrorKind::Other, "signal driver gone", )) } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/task/id.rs
tokio/src/runtime/task/id.rs
use crate::runtime::context; use std::{fmt, num::NonZeroU64}; /// An opaque ID that uniquely identifies a task relative to all other currently /// running tasks. /// /// A task's ID may be re-used for another task only once *both* of the /// following happen: /// 1. The task itself exits. /// 2. There is no active [`JoinHandle`] associated with this task. /// /// A [`JoinHandle`] is considered active in the following situations: /// - You are explicitly holding a [`JoinHandle`], [`AbortHandle`], or /// `tokio_util::task::AbortOnDropHandle`. /// - The task is being tracked by a [`JoinSet`] or `tokio_util::task::JoinMap`. /// /// # Notes /// /// - Task IDs are *not* sequential, and do not indicate the order in which /// tasks are spawned, what runtime a task is spawned on, or any other data. /// - The task ID of the currently running task can be obtained from inside the /// task via the [`task::try_id()`](crate::task::try_id()) and /// [`task::id()`](crate::task::id()) functions and from outside the task via /// the [`JoinHandle::id()`](crate::task::JoinHandle::id()) function. /// /// [`JoinHandle`]: crate::task::JoinHandle /// [`AbortHandle`]: crate::task::AbortHandle /// [`JoinSet`]: crate::task::JoinSet #[cfg_attr(docsrs, doc(cfg(all(feature = "rt"))))] #[derive(Clone, Copy, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)] pub struct Id(pub(crate) NonZeroU64); /// Returns the [`Id`] of the currently running task. /// /// # Panics /// /// This function panics if called from outside a task. Please note that calls /// to `block_on` do not have task IDs, so the method will panic if called from /// within a call to `block_on`. For a version of this function that doesn't /// panic, see [`task::try_id()`](crate::runtime::task::try_id()). 
/// /// [task ID]: crate::task::Id #[track_caller] pub fn id() -> Id { context::current_task_id().expect("Can't get a task id when not inside a task") } /// Returns the [`Id`] of the currently running task, or `None` if called outside /// of a task. /// /// This function is similar to [`task::id()`](crate::runtime::task::id()), except /// that it returns `None` rather than panicking if called outside of a task /// context. /// /// [task ID]: crate::task::Id #[track_caller] pub fn try_id() -> Option<Id> { context::current_task_id() } impl fmt::Display for Id { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } impl Id { pub(crate) fn next() -> Self { use crate::loom::sync::atomic::Ordering::Relaxed; use crate::loom::sync::atomic::StaticAtomicU64; #[cfg(all(test, loom))] crate::loom::lazy_static! { static ref NEXT_ID: StaticAtomicU64 = StaticAtomicU64::new(1); } #[cfg(not(all(test, loom)))] static NEXT_ID: StaticAtomicU64 = StaticAtomicU64::new(1); loop { let id = NEXT_ID.fetch_add(1, Relaxed); if let Some(id) = NonZeroU64::new(id) { return Self(id); } } } pub(crate) fn as_u64(&self) -> u64 { self.0.get() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/task/list.rs
tokio/src/runtime/task/list.rs
//! This module has containers for storing the tasks spawned on a scheduler. The //! `OwnedTasks` container is thread-safe but can only store tasks that //! implement Send. The `LocalOwnedTasks` container is not thread safe, but can //! store non-Send tasks. //! //! The collections can be closed to prevent adding new tasks during shutdown of //! the scheduler with the collection. use crate::future::Future; use crate::loom::cell::UnsafeCell; use crate::runtime::task::{JoinHandle, LocalNotified, Notified, Schedule, SpawnLocation, Task}; use crate::util::linked_list::{Link, LinkedList}; use crate::util::sharded_list; use crate::loom::sync::atomic::{AtomicBool, Ordering}; use std::marker::PhantomData; use std::num::NonZeroU64; // The id from the module below is used to verify whether a given task is stored // in this OwnedTasks, or some other task. The counter starts at one so we can // use `None` for tasks not owned by any list. // // The safety checks in this file can technically be violated if the counter is // overflown, but the checks are not supposed to ever fail unless there is a // bug in Tokio, so we accept that certain bugs would not be caught if the two // mixed up runtimes happen to have the same id. cfg_has_atomic_u64! { use std::sync::atomic::AtomicU64; static NEXT_OWNED_TASKS_ID: AtomicU64 = AtomicU64::new(1); fn get_next_id() -> NonZeroU64 { loop { let id = NEXT_OWNED_TASKS_ID.fetch_add(1, Ordering::Relaxed); if let Some(id) = NonZeroU64::new(id) { return id; } } } } cfg_not_has_atomic_u64! 
{ use std::sync::atomic::AtomicU32; static NEXT_OWNED_TASKS_ID: AtomicU32 = AtomicU32::new(1); fn get_next_id() -> NonZeroU64 { loop { let id = NEXT_OWNED_TASKS_ID.fetch_add(1, Ordering::Relaxed); if let Some(id) = NonZeroU64::new(u64::from(id)) { return id; } } } } pub(crate) struct OwnedTasks<S: 'static> { list: List<S>, pub(crate) id: NonZeroU64, closed: AtomicBool, } type List<S> = sharded_list::ShardedList<Task<S>, <Task<S> as Link>::Target>; pub(crate) struct LocalOwnedTasks<S: 'static> { inner: UnsafeCell<OwnedTasksInner<S>>, pub(crate) id: NonZeroU64, _not_send_or_sync: PhantomData<*const ()>, } struct OwnedTasksInner<S: 'static> { list: LinkedList<Task<S>, <Task<S> as Link>::Target>, closed: bool, } impl<S: 'static> OwnedTasks<S> { pub(crate) fn new(num_cores: usize) -> Self { let shard_size = Self::gen_shared_list_size(num_cores); Self { list: List::new(shard_size), closed: AtomicBool::new(false), id: get_next_id(), } } /// Binds the provided task to this `OwnedTasks` instance. This fails if the /// `OwnedTasks` has been closed. pub(crate) fn bind<T>( &self, task: T, scheduler: S, id: super::Id, spawned_at: SpawnLocation, ) -> (JoinHandle<T::Output>, Option<Notified<S>>) where S: Schedule, T: Future + Send + 'static, T::Output: Send + 'static, { let (task, notified, join) = super::new_task(task, scheduler, id, spawned_at); let notified = unsafe { self.bind_inner(task, notified) }; (join, notified) } /// Bind a task that isn't safe to transfer across thread boundaries. 
    ///
    /// # Safety
    ///
    /// Only use this in `LocalRuntime` where the task cannot move.
    pub(crate) unsafe fn bind_local<T>(
        &self,
        task: T,
        scheduler: S,
        id: super::Id,
        spawned_at: SpawnLocation,
    ) -> (JoinHandle<T::Output>, Option<Notified<S>>)
    where
        S: Schedule,
        T: Future + 'static,
        T::Output: 'static,
    {
        let (task, notified, join) = super::new_task(task, scheduler, id, spawned_at);
        let notified = unsafe { self.bind_inner(task, notified) };
        (join, notified)
    }

    /// The part of `bind` that's the same for every type of future.
    unsafe fn bind_inner(&self, task: Task<S>, notified: Notified<S>) -> Option<Notified<S>>
    where
        S: Schedule,
    {
        unsafe {
            // safety: We just created the task, so we have exclusive access
            // to the field.
            task.header().set_owner_id(self.id);
        }

        let shard = self.list.lock_shard(&task);
        // Check the closed flag in the lock for ensuring that all tasks
        // will shut down after the OwnedTasks has been closed.
        if self.closed.load(Ordering::Acquire) {
            drop(shard);
            task.shutdown();
            return None;
        }
        shard.push(task);
        Some(notified)
    }

    /// Asserts that the given task is owned by this `OwnedTasks` and converts
    /// it to a `LocalNotified`, giving the thread permission to poll this task.
    #[inline]
    pub(crate) fn assert_owner(&self, task: Notified<S>) -> LocalNotified<S> {
        debug_assert_eq!(task.header().get_owner_id(), Some(self.id));
        // safety: All tasks bound to this OwnedTasks are Send, so it is safe
        // to poll it on this thread no matter what thread we are on.
        LocalNotified {
            task: task.0,
            _not_send: PhantomData,
        }
    }

    /// Shuts down all tasks in the collection. This call also closes the
    /// collection, preventing new items from being added.
    ///
    /// The parameter start determines which shard this method will start at.
    /// Using different values for each worker thread reduces contention.
    pub(crate) fn close_and_shutdown_all(&self, start: usize)
    where
        S: Schedule,
    {
        self.closed.store(true, Ordering::Release);
        for i in start..self.get_shard_size() + start {
            loop {
                let task = self.list.pop_back(i);
                match task {
                    Some(task) => {
                        task.shutdown();
                    }
                    None => break,
                }
            }
        }
    }

    #[inline]
    pub(crate) fn get_shard_size(&self) -> usize {
        self.list.shard_size()
    }

    pub(crate) fn num_alive_tasks(&self) -> usize {
        self.list.len()
    }

    cfg_unstable_metrics! {
        cfg_64bit_metrics! {
            pub(crate) fn spawned_tasks_count(&self) -> u64 {
                self.list.added()
            }
        }
    }

    pub(crate) fn remove(&self, task: &Task<S>) -> Option<Task<S>> {
        // If the task's owner ID is `None` then it is not part of any list and
        // doesn't need removing.
        let task_id = task.header().get_owner_id()?;

        assert_eq!(task_id, self.id);

        // safety: We just checked that the provided task is not in some other
        // linked list.
        unsafe { self.list.remove(task.header_ptr()) }
    }

    pub(crate) fn is_empty(&self) -> bool {
        self.list.is_empty()
    }

    /// Generates the size of the sharded list based on the number of worker threads.
    ///
    /// The sharded lock design can effectively alleviate
    /// lock contention performance problems caused by high concurrency.
    ///
    /// However, as the number of shards increases, the memory continuity between
    /// nodes in the intrusive linked list will diminish. Furthermore,
    /// the construction time of the sharded list will also increase with a higher number of shards.
    ///
    /// Due to the above reasons, we set a maximum value for the shared list size,
    /// denoted as `MAX_SHARED_LIST_SIZE`.
    fn gen_shared_list_size(num_cores: usize) -> usize {
        const MAX_SHARED_LIST_SIZE: usize = 1 << 16;
        usize::min(MAX_SHARED_LIST_SIZE, num_cores.next_power_of_two() * 4)
    }
}

cfg_taskdump! {
    impl<S: 'static> OwnedTasks<S> {
        /// Locks the tasks, and calls `f` on an iterator over them.
        pub(crate) fn for_each<F>(&self, f: F)
        where
            F: FnMut(&Task<S>),
        {
            self.list.for_each(f);
        }
    }
}

impl<S: 'static> LocalOwnedTasks<S> {
    pub(crate) fn new() -> Self {
        Self {
            inner: UnsafeCell::new(OwnedTasksInner {
                list: LinkedList::new(),
                closed: false,
            }),
            id: get_next_id(),
            _not_send_or_sync: PhantomData,
        }
    }

    pub(crate) fn bind<T>(
        &self,
        task: T,
        scheduler: S,
        id: super::Id,
        spawned_at: SpawnLocation,
    ) -> (JoinHandle<T::Output>, Option<Notified<S>>)
    where
        S: Schedule,
        T: Future + 'static,
        T::Output: 'static,
    {
        let (task, notified, join) = super::new_task(task, scheduler, id, spawned_at);

        unsafe {
            // safety: We just created the task, so we have exclusive access
            // to the field.
            task.header().set_owner_id(self.id);
        }

        if self.is_closed() {
            drop(notified);
            task.shutdown();
            (join, None)
        } else {
            self.with_inner(|inner| {
                inner.list.push_front(task);
            });
            (join, Some(notified))
        }
    }

    /// Shuts down all tasks in the collection. This call also closes the
    /// collection, preventing new items from being added.
    pub(crate) fn close_and_shutdown_all(&self)
    where
        S: Schedule,
    {
        self.with_inner(|inner| inner.closed = true);

        while let Some(task) = self.with_inner(|inner| inner.list.pop_back()) {
            task.shutdown();
        }
    }

    pub(crate) fn remove(&self, task: &Task<S>) -> Option<Task<S>> {
        // If the task's owner ID is `None` then it is not part of any list and
        // doesn't need removing.
        let task_id = task.header().get_owner_id()?;

        assert_eq!(task_id, self.id);

        self.with_inner(|inner|
            // safety: We just checked that the provided task is not in some
            // other linked list.
            unsafe { inner.list.remove(task.header_ptr()) })
    }

    /// Asserts that the given task is owned by this `LocalOwnedTasks` and
    /// converts it to a `LocalNotified`, giving the thread permission to poll
    /// this task.
    #[inline]
    pub(crate) fn assert_owner(&self, task: Notified<S>) -> LocalNotified<S> {
        assert_eq!(task.header().get_owner_id(), Some(self.id));

        // safety: The task was bound to this LocalOwnedTasks, and the
        // LocalOwnedTasks is not Send or Sync, so we are on the right thread
        // for polling this task.
        LocalNotified {
            task: task.0,
            _not_send: PhantomData,
        }
    }

    #[inline]
    fn with_inner<F, T>(&self, f: F) -> T
    where
        F: FnOnce(&mut OwnedTasksInner<S>) -> T,
    {
        // safety: This type is not Sync, so concurrent calls of this method
        // can't happen. Furthermore, all uses of this method in this file make
        // sure that they don't call `with_inner` recursively.
        self.inner.with_mut(|ptr| unsafe { f(&mut *ptr) })
    }

    pub(crate) fn is_closed(&self) -> bool {
        self.with_inner(|inner| inner.closed)
    }

    pub(crate) fn is_empty(&self) -> bool {
        self.with_inner(|inner| inner.list.is_empty())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // This test may run in parallel with other tests, so we only test that ids
    // come in increasing order.
    #[test]
    fn test_id_not_broken() {
        let mut last_id = get_next_id();

        for _ in 0..1000 {
            let next_id = get_next_id();
            assert!(last_id < next_id);
            last_id = next_id;
        }
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/task/state.rs
tokio/src/runtime/task/state.rs
use crate::loom::sync::atomic::AtomicUsize;

use std::fmt;
use std::sync::atomic::Ordering::{AcqRel, Acquire, Release};

pub(super) struct State {
    val: AtomicUsize,
}

/// Current state value.
#[derive(Copy, Clone)]
pub(super) struct Snapshot(usize);

/// Result of a CAS-loop update: `Ok(new)` on success, `Err(current)` if the
/// update closure declined to produce a new state.
type UpdateResult = Result<Snapshot, Snapshot>;

/// The task is currently being run.
const RUNNING: usize = 0b0001;

/// The task is complete.
///
/// Once this bit is set, it is never unset.
const COMPLETE: usize = 0b0010;

/// Extracts the task's lifecycle value from the state.
const LIFECYCLE_MASK: usize = 0b11;

/// Flag tracking if the task has been pushed into a run queue.
const NOTIFIED: usize = 0b100;

/// The join handle is still around.
const JOIN_INTEREST: usize = 0b1_000;

/// A join handle waker has been set.
const JOIN_WAKER: usize = 0b10_000;

/// The task has been forcibly cancelled.
const CANCELLED: usize = 0b100_000;

/// All bits.
const STATE_MASK: usize = LIFECYCLE_MASK | NOTIFIED | JOIN_INTEREST | JOIN_WAKER | CANCELLED;

/// Bits used by the ref count portion of the state.
const REF_COUNT_MASK: usize = !STATE_MASK;

/// Number of positions to shift the ref count (the number of state bits).
const REF_COUNT_SHIFT: usize = REF_COUNT_MASK.count_zeros() as usize;

/// One ref count.
const REF_ONE: usize = 1 << REF_COUNT_SHIFT;

/// State a task is initialized with.
///
/// A task is initialized with three references:
///
/// * A reference that will be stored in an `OwnedTasks` or `LocalOwnedTasks`.
/// * A reference that will be sent to the scheduler as an ordinary notification.
/// * A reference for the `JoinHandle`.
///
/// As the task starts with a `JoinHandle`, `JOIN_INTEREST` is set.
/// As the task starts with a `Notified`, `NOTIFIED` is set.
const INITIAL_STATE: usize = (REF_ONE * 3) | JOIN_INTEREST | NOTIFIED;

#[must_use]
pub(super) enum TransitionToRunning {
    /// The `RUNNING` bit was set and the task is not cancelled.
    Success,
    /// The `RUNNING` bit was set, but the `CANCELLED` bit is also set.
    Cancelled,
    /// The task was not idle; a ref-count was consumed but others remain.
    Failed,
    /// The task was not idle and the consumed ref-count was the last one.
    Dealloc,
}

#[must_use]
pub(super) enum TransitionToIdle {
    /// The `RUNNING` bit was cleared and the `Notified` ref-count consumed.
    Ok,
    /// The `RUNNING` bit was cleared, but the task was notified while
    /// running; a new ref-count was created for the notification.
    OkNotified,
    /// The `RUNNING` bit was cleared and the last ref-count was consumed.
    OkDealloc,
    /// The task has been flagged as cancelled; the state was left unchanged.
    Cancelled,
}

#[must_use]
pub(super) enum TransitionToNotifiedByVal {
    /// No notification needs to be submitted; a ref-count was consumed.
    DoNothing,
    /// A new ref-count was created for a `Notified` that should be submitted.
    Submit,
    /// The consumed ref-count was the last one.
    Dealloc,
}

#[must_use]
pub(crate) enum TransitionToNotifiedByRef {
    /// No notification needs to be submitted.
    DoNothing,
    /// A new ref-count was created for a `Notified` that should be submitted.
    Submit,
}

/// Tells the `JoinHandle` what it must drop after unsetting `JOIN_INTEREST`.
#[must_use]
pub(super) struct TransitionToJoinHandleDrop {
    pub(super) drop_waker: bool,
    pub(super) drop_output: bool,
}

/// All transitions are performed via RMW operations. This establishes an
/// unambiguous modification order.
impl State {
    /// Returns a task's initial state.
    pub(super) fn new() -> State {
        // The raw task returned by this method has a ref-count of three. See
        // the comment on INITIAL_STATE for more.
        State {
            val: AtomicUsize::new(INITIAL_STATE),
        }
    }

    /// Loads the current state, establishes `Acquire` ordering.
    pub(super) fn load(&self) -> Snapshot {
        Snapshot(self.val.load(Acquire))
    }

    /// Attempts to transition the lifecycle to `Running`. This sets the
    /// notified bit to false so notifications during the poll can be detected.
    pub(super) fn transition_to_running(&self) -> TransitionToRunning {
        self.fetch_update_action(|mut next| {
            let action;
            assert!(next.is_notified());

            if !next.is_idle() {
                // This happens if the task is either currently running or if it
                // has already completed, e.g. if it was cancelled during
                // shutdown. Consume the ref-count and return.
                next.ref_dec();
                if next.ref_count() == 0 {
                    action = TransitionToRunning::Dealloc;
                } else {
                    action = TransitionToRunning::Failed;
                }
            } else {
                // We are able to lock the RUNNING bit.
                next.set_running();
                next.unset_notified();

                if next.is_cancelled() {
                    action = TransitionToRunning::Cancelled;
                } else {
                    action = TransitionToRunning::Success;
                }
            }
            (action, Some(next))
        })
    }

    /// Transitions the task from `Running` -> `Idle`.
    ///
    /// The transition to `Idle` fails if the task has been flagged to be
    /// cancelled.
    pub(super) fn transition_to_idle(&self) -> TransitionToIdle {
        self.fetch_update_action(|curr| {
            assert!(curr.is_running());

            if curr.is_cancelled() {
                return (TransitionToIdle::Cancelled, None);
            }

            let mut next = curr;
            let action;
            next.unset_running();

            if !next.is_notified() {
                // Polling the future consumes the ref-count of the Notified.
                next.ref_dec();
                if next.ref_count() == 0 {
                    action = TransitionToIdle::OkDealloc;
                } else {
                    action = TransitionToIdle::Ok;
                }
            } else {
                // The caller will schedule a new notification, so we create a
                // new ref-count for the notification. Our own ref-count is kept
                // for now, and the caller will drop it shortly.
                next.ref_inc();
                action = TransitionToIdle::OkNotified;
            }

            (action, Some(next))
        })
    }

    /// Transitions the task from `Running` -> `Complete`.
    pub(super) fn transition_to_complete(&self) -> Snapshot {
        const DELTA: usize = RUNNING | COMPLETE;

        let prev = Snapshot(self.val.fetch_xor(DELTA, AcqRel));
        assert!(prev.is_running());
        assert!(!prev.is_complete());

        Snapshot(prev.0 ^ DELTA)
    }

    /// Transitions from `Complete` -> `Terminal`, decrementing the reference
    /// count the specified number of times.
    ///
    /// Returns true if the task should be deallocated.
    pub(super) fn transition_to_terminal(&self, count: usize) -> bool {
        let prev = Snapshot(self.val.fetch_sub(count * REF_ONE, AcqRel));
        assert!(
            prev.ref_count() >= count,
            "current: {}, sub: {}",
            prev.ref_count(),
            count
        );
        prev.ref_count() == count
    }

    /// Transitions the state to `NOTIFIED`.
    ///
    /// If no task needs to be submitted, a ref-count is consumed.
    ///
    /// If a task needs to be submitted, the ref-count is incremented for the
    /// new Notified.
    pub(super) fn transition_to_notified_by_val(&self) -> TransitionToNotifiedByVal {
        self.fetch_update_action(|mut snapshot| {
            let action;

            if snapshot.is_running() {
                // If the task is running, we mark it as notified, but we should
                // not submit anything as the thread currently running the
                // future is responsible for that.
                snapshot.set_notified();
                snapshot.ref_dec();

                // The thread that set the running bit also holds a ref-count.
                assert!(snapshot.ref_count() > 0);

                action = TransitionToNotifiedByVal::DoNothing;
            } else if snapshot.is_complete() || snapshot.is_notified() {
                // We do not need to submit any notifications, but we have to
                // decrement the ref-count.
                snapshot.ref_dec();

                if snapshot.ref_count() == 0 {
                    action = TransitionToNotifiedByVal::Dealloc;
                } else {
                    action = TransitionToNotifiedByVal::DoNothing;
                }
            } else {
                // We create a new notified that we can submit. The caller
                // retains ownership of the ref-count they passed in.
                snapshot.set_notified();
                snapshot.ref_inc();
                action = TransitionToNotifiedByVal::Submit;
            }

            (action, Some(snapshot))
        })
    }

    /// Transitions the state to `NOTIFIED`.
    pub(super) fn transition_to_notified_by_ref(&self) -> TransitionToNotifiedByRef {
        self.fetch_update_action(|mut snapshot| {
            if snapshot.is_complete() {
                // The complete state is final
                (TransitionToNotifiedByRef::DoNothing, None)
            } else if snapshot.is_notified() {
                // Even though we have nothing to do in this branch,
                // wake_by_ref() should synchronize-with the task starting execution,
                // therefore we must use a Release store (with the same value),
                // to pair with the Acquire in transition_to_running.
                (TransitionToNotifiedByRef::DoNothing, Some(snapshot))
            } else if snapshot.is_running() {
                // If the task is running, we mark it as notified, but we should
                // not submit as the thread currently running the future is
                // responsible for that.
                snapshot.set_notified();
                (TransitionToNotifiedByRef::DoNothing, Some(snapshot))
            } else {
                // The task is idle and not notified. We should submit a
                // notification.
                snapshot.set_notified();
                snapshot.ref_inc();
                (TransitionToNotifiedByRef::Submit, Some(snapshot))
            }
        })
    }

    /// Transitions the state to `NOTIFIED`, unconditionally increasing the ref
    /// count.
    ///
    /// Returns `true` if the notified bit was transitioned from `0` to `1`;
    /// otherwise `false`.
    #[cfg(all(
        tokio_unstable,
        feature = "taskdump",
        feature = "rt",
        target_os = "linux",
        any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64")
    ))]
    pub(super) fn transition_to_notified_for_tracing(&self) -> bool {
        self.fetch_update_action(|mut snapshot| {
            if snapshot.is_notified() {
                (false, None)
            } else {
                snapshot.set_notified();
                snapshot.ref_inc();
                (true, Some(snapshot))
            }
        })
    }

    /// Sets the cancelled bit and transitions the state to `NOTIFIED` if idle.
    ///
    /// Returns `true` if the task needs to be submitted to the pool for
    /// execution.
    pub(super) fn transition_to_notified_and_cancel(&self) -> bool {
        self.fetch_update_action(|mut snapshot| {
            if snapshot.is_cancelled() || snapshot.is_complete() {
                // Aborts to completed or cancelled tasks are no-ops.
                (false, None)
            } else if snapshot.is_running() {
                // If the task is running, we mark it as cancelled. The thread
                // running the task will notice the cancelled bit when it
                // stops polling and it will kill the task.
                //
                // The set_notified() call is not strictly necessary but it will
                // in some cases let a wake_by_ref call return without having
                // to perform a compare_exchange.
                snapshot.set_notified();
                snapshot.set_cancelled();
                (false, Some(snapshot))
            } else {
                // The task is idle. We set the cancelled and notified bits and
                // submit a notification if the notified bit was not already
                // set.
                snapshot.set_cancelled();
                if !snapshot.is_notified() {
                    snapshot.set_notified();
                    snapshot.ref_inc();
                    (true, Some(snapshot))
                } else {
                    (false, Some(snapshot))
                }
            }
        })
    }

    /// Sets the `CANCELLED` bit and attempts to transition to `Running`.
    ///
    /// Returns `true` if the transition to `Running` succeeded.
    pub(super) fn transition_to_shutdown(&self) -> bool {
        let mut prev = Snapshot(0);

        let _ = self.fetch_update(|mut snapshot| {
            prev = snapshot;

            if snapshot.is_idle() {
                snapshot.set_running();
            }

            // If the task was not idle, the thread currently running the task
            // will notice the cancelled bit and cancel it once the poll
            // completes.
            snapshot.set_cancelled();
            Some(snapshot)
        });

        prev.is_idle()
    }

    /// Optimistically tries to swap the state assuming the join handle is
    /// __immediately__ dropped on spawn.
    pub(super) fn drop_join_handle_fast(&self) -> Result<(), ()> {
        use std::sync::atomic::Ordering::Relaxed;

        // Relaxed is acceptable as if this function is called and succeeds,
        // then nothing has been done w/ the join handle.
        //
        // The moment the join handle is used (polled), the `JOIN_WAKER` flag is
        // set, at which point the CAS will fail.
        //
        // Given this, there is no risk if this operation is reordered.
        self.val
            .compare_exchange_weak(
                INITIAL_STATE,
                (INITIAL_STATE - REF_ONE) & !JOIN_INTEREST,
                Release,
                Relaxed,
            )
            .map(|_| ())
            .map_err(|_| ())
    }

    /// Unsets the `JOIN_INTEREST` flag. If `COMPLETE` is not set, the `JOIN_WAKER`
    /// flag is also unset.
    /// The returned `TransitionToJoinHandleDrop` indicates whether the `JoinHandle` should drop
    /// the output of the future or the join waker after the transition.
    pub(super) fn transition_to_join_handle_dropped(&self) -> TransitionToJoinHandleDrop {
        self.fetch_update_action(|mut snapshot| {
            assert!(snapshot.is_join_interested());

            let mut transition = TransitionToJoinHandleDrop {
                drop_waker: false,
                drop_output: false,
            };

            snapshot.unset_join_interested();

            if !snapshot.is_complete() {
                // If `COMPLETE` is unset we also unset `JOIN_WAKER` to give the
                // `JoinHandle` exclusive access to the waker following rule 6 in task/mod.rs.
                // The `JoinHandle` will drop the waker if it has exclusive access
                // to drop it.
                snapshot.unset_join_waker();
            } else {
                // If `COMPLETE` is set the task is completed so the `JoinHandle` is responsible
                // for dropping the output.
                transition.drop_output = true;
            }

            if !snapshot.is_join_waker_set() {
                // If the `JOIN_WAKER` bit is unset, the `JoinHandle` has exclusive
                // access to the join waker and should drop it following this
                // transition. This might happen in two situations:
                // 1. The task is not completed and we just unset `JOIN_WAKER` above
                //    in this function.
                // 2. The task is completed. In that case the `JOIN_WAKER` bit was
                //    already unset by the runtime during completion.
                transition.drop_waker = true;
            }

            (transition, Some(snapshot))
        })
    }

    /// Sets the `JOIN_WAKER` bit.
    ///
    /// Returns `Ok` if the bit is set, `Err` otherwise. This operation fails if
    /// the task has completed.
    pub(super) fn set_join_waker(&self) -> UpdateResult {
        self.fetch_update(|curr| {
            assert!(curr.is_join_interested());
            assert!(!curr.is_join_waker_set());

            if curr.is_complete() {
                return None;
            }

            let mut next = curr;
            next.set_join_waker();

            Some(next)
        })
    }

    /// Unsets the `JOIN_WAKER` bit.
    ///
    /// Returns `Ok` if the bit has been unset, `Err` otherwise. This operation
    /// fails if the task has completed.
    pub(super) fn unset_waker(&self) -> UpdateResult {
        self.fetch_update(|curr| {
            assert!(curr.is_join_interested());

            if curr.is_complete() {
                return None;
            }

            // If the task is completed, this bit may have been unset by
            // `unset_waker_after_complete`.
            assert!(curr.is_join_waker_set());

            let mut next = curr;
            next.unset_join_waker();

            Some(next)
        })
    }

    /// Unsets the `JOIN_WAKER` bit unconditionally after task completion.
    ///
    /// This operation requires the task to be completed.
    pub(super) fn unset_waker_after_complete(&self) -> Snapshot {
        let prev = Snapshot(self.val.fetch_and(!JOIN_WAKER, AcqRel));
        assert!(prev.is_complete());
        assert!(prev.is_join_waker_set());
        Snapshot(prev.0 & !JOIN_WAKER)
    }

    pub(super) fn ref_inc(&self) {
        use std::process;
        use std::sync::atomic::Ordering::Relaxed;

        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
        let prev = self.val.fetch_add(REF_ONE, Relaxed);

        // If the reference count overflowed, abort.
        if prev > isize::MAX as usize {
            process::abort();
        }
    }

    /// Returns `true` if the task should be released.
    pub(super) fn ref_dec(&self) -> bool {
        let prev = Snapshot(self.val.fetch_sub(REF_ONE, AcqRel));
        assert!(prev.ref_count() >= 1);
        prev.ref_count() == 1
    }

    /// Returns `true` if the task should be released.
    pub(super) fn ref_dec_twice(&self) -> bool {
        let prev = Snapshot(self.val.fetch_sub(2 * REF_ONE, AcqRel));
        assert!(prev.ref_count() >= 2);
        prev.ref_count() == 2
    }

    // Runs `f` in a CAS loop. `f` returns the caller-visible output plus the
    // new state to store; returning `None` for the state aborts the loop
    // without writing.
    fn fetch_update_action<F, T>(&self, mut f: F) -> T
    where
        F: FnMut(Snapshot) -> (T, Option<Snapshot>),
    {
        let mut curr = self.load();

        loop {
            let (output, next) = f(curr);
            let next = match next {
                Some(next) => next,
                None => return output,
            };

            let res = self.val.compare_exchange(curr.0, next.0, AcqRel, Acquire);

            match res {
                Ok(_) => return output,
                Err(actual) => curr = Snapshot(actual),
            }
        }
    }

    // Runs `f` in a CAS loop. Returns `Ok(new)` once the exchange succeeds, or
    // `Err(current)` if `f` declines to produce a new state.
    fn fetch_update<F>(&self, mut f: F) -> Result<Snapshot, Snapshot>
    where
        F: FnMut(Snapshot) -> Option<Snapshot>,
    {
        let mut curr = self.load();

        loop {
            let next = match f(curr) {
                Some(next) => next,
                None => return Err(curr),
            };

            let res = self.val.compare_exchange(curr.0, next.0, AcqRel, Acquire);

            match res {
                Ok(_) => return Ok(next),
                Err(actual) => curr = Snapshot(actual),
            }
        }
    }
}

// ===== impl Snapshot =====

impl Snapshot {
    /// Returns `true` if the task is in an idle state.
    pub(super) fn is_idle(self) -> bool {
        self.0 & (RUNNING | COMPLETE) == 0
    }

    /// Returns `true` if the task has been flagged as notified.
    pub(super) fn is_notified(self) -> bool {
        self.0 & NOTIFIED == NOTIFIED
    }

    fn unset_notified(&mut self) {
        self.0 &= !NOTIFIED;
    }

    fn set_notified(&mut self) {
        self.0 |= NOTIFIED;
    }

    pub(super) fn is_running(self) -> bool {
        self.0 & RUNNING == RUNNING
    }

    fn set_running(&mut self) {
        self.0 |= RUNNING;
    }

    fn unset_running(&mut self) {
        self.0 &= !RUNNING;
    }

    pub(super) fn is_cancelled(self) -> bool {
        self.0 & CANCELLED == CANCELLED
    }

    fn set_cancelled(&mut self) {
        self.0 |= CANCELLED;
    }

    /// Returns `true` if the task's future has completed execution.
    pub(super) fn is_complete(self) -> bool {
        self.0 & COMPLETE == COMPLETE
    }

    pub(super) fn is_join_interested(self) -> bool {
        self.0 & JOIN_INTEREST == JOIN_INTEREST
    }

    fn unset_join_interested(&mut self) {
        self.0 &= !JOIN_INTEREST;
    }

    pub(super) fn is_join_waker_set(self) -> bool {
        self.0 & JOIN_WAKER == JOIN_WAKER
    }

    fn set_join_waker(&mut self) {
        self.0 |= JOIN_WAKER;
    }

    fn unset_join_waker(&mut self) {
        self.0 &= !JOIN_WAKER;
    }

    pub(super) fn ref_count(self) -> usize {
        (self.0 & REF_COUNT_MASK) >> REF_COUNT_SHIFT
    }

    fn ref_inc(&mut self) {
        assert!(self.0 <= isize::MAX as usize);
        self.0 += REF_ONE;
    }

    pub(super) fn ref_dec(&mut self) {
        assert!(self.ref_count() > 0);
        self.0 -= REF_ONE;
    }
}

impl fmt::Debug for State {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let snapshot = self.load();
        snapshot.fmt(fmt)
    }
}

impl fmt::Debug for Snapshot {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("Snapshot")
            .field("is_running", &self.is_running())
            .field("is_complete", &self.is_complete())
            .field("is_notified", &self.is_notified())
            .field("is_cancelled", &self.is_cancelled())
            .field("is_join_interested", &self.is_join_interested())
            .field("is_join_waker_set", &self.is_join_waker_set())
            .field("ref_count", &self.ref_count())
            .finish()
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/task/core.rs
tokio/src/runtime/task/core.rs
//! Core task module. //! //! # Safety //! //! The functions in this module are private to the `task` module. All of them //! should be considered `unsafe` to use, but are not marked as such since it //! would be too noisy. //! //! Make sure to consult the relevant safety section of each function before //! use. // It doesn't make sense to enforce `unsafe_op_in_unsafe_fn` for this module because // // * This module is doing the low-level task management that requires tons of unsafe // operations. // * Excessive `unsafe {}` blocks hurt readability significantly. // TODO: replace with `#[expect(unsafe_op_in_unsafe_fn)]` after bumpping // the MSRV to 1.81.0. #![allow(unsafe_op_in_unsafe_fn)] use crate::future::Future; use crate::loom::cell::UnsafeCell; use crate::runtime::context; use crate::runtime::task::raw::{self, Vtable}; use crate::runtime::task::state::State; use crate::runtime::task::{Id, Schedule, TaskHarnessScheduleHooks}; use crate::util::linked_list; use std::num::NonZeroU64; #[cfg(tokio_unstable)] use std::panic::Location; use std::pin::Pin; use std::ptr::NonNull; use std::task::{Context, Poll, Waker}; /// The task cell. Contains the components of the task. /// /// It is critical for `Header` to be the first field as the task structure will /// be referenced by both *mut Cell and *mut Header. /// /// Any changes to the layout of this struct _must_ also be reflected in the /// `const` fns in raw.rs. /// // # This struct should be cache padded to avoid false sharing. The cache padding rules are copied // from crossbeam-utils/src/cache_padded.rs // // Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache // lines at a time, so we have to align to 128 bytes rather than 64. 
// // Sources: // - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf // - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107 // // ARM's big.LITTLE architecture has asymmetric cores and "big" cores have 128-byte cache line size. // // Sources: // - https://www.mono-project.com/news/2016/09/12/arm64-icache/ // // powerpc64 has 128-byte cache line size. // // Sources: // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9 #[cfg_attr( any( target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64", ), repr(align(128)) )] // arm, mips, mips64, sparc, and hexagon have 32-byte cache line size. // // Sources: // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7 // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7 // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7 // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9 // - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L17 // - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/hexagon/include/asm/cache.h#L12 #[cfg_attr( any( target_arch = "arm", target_arch = "mips", target_arch = "mips64", target_arch = "sparc", target_arch = "hexagon", ), repr(align(32)) )] // m68k has 16-byte cache line size. // // Sources: // - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/m68k/include/asm/cache.h#L9 #[cfg_attr(target_arch = "m68k", repr(align(16)))] // s390x has 256-byte cache line size. 
// // Sources: // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7 // - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/s390/include/asm/cache.h#L13 #[cfg_attr(target_arch = "s390x", repr(align(256)))] // x86, riscv, wasm, and sparc64 have 64-byte cache line size. // // Sources: // - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9 // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7 // - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L19 // - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/riscv/include/asm/cache.h#L10 // // All others are assumed to have 64-byte cache line size. #[cfg_attr( not(any( target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64", target_arch = "arm", target_arch = "mips", target_arch = "mips64", target_arch = "sparc", target_arch = "hexagon", target_arch = "m68k", target_arch = "s390x", )), repr(align(64)) )] #[repr(C)] pub(super) struct Cell<T: Future, S> { /// Hot task state data pub(super) header: Header, /// Either the future or output, depending on the execution stage. pub(super) core: Core<T, S>, /// Cold data pub(super) trailer: Trailer, } pub(super) struct CoreStage<T: Future> { stage: UnsafeCell<Stage<T>>, } /// The core of the task. /// /// Holds the future or output, depending on the stage of execution. /// /// Any changes to the layout of this struct _must_ also be reflected in the /// `const` fns in raw.rs. #[repr(C)] pub(super) struct Core<T: Future, S> { /// Scheduler used to drive this future. pub(super) scheduler: S, /// The task's ID, used for populating `JoinError`s. pub(super) task_id: Id, /// The source code location where the task was spawned. 
/// /// This is used for populating the `TaskMeta` passed to the task runtime /// hooks. #[cfg(tokio_unstable)] pub(super) spawned_at: &'static Location<'static>, /// Either the future or the output. pub(super) stage: CoreStage<T>, } /// Crate public as this is also needed by the pool. #[repr(C)] pub(crate) struct Header { /// Task state. pub(super) state: State, /// Pointer to next task, used with the injection queue. pub(super) queue_next: UnsafeCell<Option<NonNull<Header>>>, /// Table of function pointers for executing actions on the task. pub(super) vtable: &'static Vtable, /// This integer contains the id of the `OwnedTasks` or `LocalOwnedTasks` /// that this task is stored in. If the task is not in any list, should be /// the id of the list that it was previously in, or `None` if it has never /// been in any list. /// /// Once a task has been bound to a list, it can never be bound to another /// list, even if removed from the first list. /// /// The id is not unset when removed from a list because we want to be able /// to read the id without synchronization, even if it is concurrently being /// removed from the list. pub(super) owner_id: UnsafeCell<Option<NonZeroU64>>, /// The tracing ID for this instrumented task. #[cfg(all(tokio_unstable, feature = "tracing"))] pub(super) tracing_id: Option<tracing::Id>, } unsafe impl Send for Header {} unsafe impl Sync for Header {} /// Cold data is stored after the future. Data is considered cold if it is only /// used during creation or shutdown of the task. pub(super) struct Trailer { /// Pointers for the linked list in the `OwnedTasks` that owns this task. pub(super) owned: linked_list::Pointers<Header>, /// Consumer task waiting on completion of this task. pub(super) waker: UnsafeCell<Option<Waker>>, /// Optional hooks needed in the harness. #[cfg_attr(not(tokio_unstable), allow(dead_code))] //TODO: remove when hooks are stabilized pub(super) hooks: TaskHarnessScheduleHooks, } generate_addr_of_methods! 
{
    impl<> Trailer {
        pub(super) unsafe fn addr_of_owned(self: NonNull<Self>) -> NonNull<linked_list::Pointers<Header>> {
            &self.owned
        }
    }
}

/// Either the future or the output.
#[repr(C)] // https://github.com/rust-lang/miri/issues/3780
pub(super) enum Stage<T: Future> {
    Running(T),
    Finished(super::Result<T::Output>),
    Consumed,
}

impl<T: Future, S: Schedule> Cell<T, S> {
    /// Allocates a new task cell, containing the header, trailer, and core
    /// structures.
    pub(super) fn new(
        future: T,
        scheduler: S,
        state: State,
        task_id: Id,
        #[cfg(tokio_unstable)] spawned_at: &'static Location<'static>,
    ) -> Box<Cell<T, S>> {
        // Separated into a non-generic function to reduce LLVM codegen
        fn new_header(
            state: State,
            vtable: &'static Vtable,
            #[cfg(all(tokio_unstable, feature = "tracing"))] tracing_id: Option<tracing::Id>,
        ) -> Header {
            Header {
                state,
                queue_next: UnsafeCell::new(None),
                vtable,
                owner_id: UnsafeCell::new(None),
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                tracing_id,
            }
        }

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let tracing_id = future.id();
        let vtable = raw::vtable::<T, S>();
        let result = Box::new(Cell {
            trailer: Trailer::new(scheduler.hooks()),
            header: new_header(
                state,
                vtable,
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                tracing_id,
            ),
            core: Core {
                scheduler,
                stage: CoreStage {
                    stage: UnsafeCell::new(Stage::Running(future)),
                },
                task_id,
                #[cfg(tokio_unstable)]
                spawned_at,
            },
        });

        #[cfg(debug_assertions)]
        {
            // Verify that the vtable's precomputed field offsets agree with the
            // actual addresses of the fields in this allocation.
            // Using a separate function for this code avoids instantiating it separately for every `T`.
            unsafe fn check<S>(
                header: &Header,
                trailer: &Trailer,
                scheduler: &S,
                task_id: &Id,
                #[cfg(tokio_unstable)] spawn_location: &&'static Location<'static>,
            ) {
                let trailer_addr = trailer as *const Trailer as usize;
                let trailer_ptr = unsafe { Header::get_trailer(NonNull::from(header)) };
                assert_eq!(trailer_addr, trailer_ptr.as_ptr() as usize);

                let scheduler_addr = scheduler as *const S as usize;
                let scheduler_ptr = unsafe { Header::get_scheduler::<S>(NonNull::from(header)) };
                assert_eq!(scheduler_addr, scheduler_ptr.as_ptr() as usize);

                let id_addr = task_id as *const Id as usize;
                let id_ptr = unsafe { Header::get_id_ptr(NonNull::from(header)) };
                assert_eq!(id_addr, id_ptr.as_ptr() as usize);

                #[cfg(tokio_unstable)]
                {
                    let spawn_location_addr =
                        spawn_location as *const &'static Location<'static> as usize;
                    let spawn_location_ptr =
                        unsafe { Header::get_spawn_location_ptr(NonNull::from(header)) };
                    assert_eq!(spawn_location_addr, spawn_location_ptr.as_ptr() as usize);
                }
            }

            unsafe {
                check(
                    &result.header,
                    &result.trailer,
                    &result.core.scheduler,
                    &result.core.task_id,
                    #[cfg(tokio_unstable)]
                    &result.core.spawned_at,
                );
            }
        }

        result
    }
}

impl<T: Future> CoreStage<T> {
    /// Runs `f` with a raw mutable pointer to the stored `Stage`.
    ///
    /// Callers must uphold the mutual-exclusion rules documented on
    /// `Core::poll` before dereferencing the pointer.
    pub(super) fn with_mut<R>(&self, f: impl FnOnce(*mut Stage<T>) -> R) -> R {
        self.stage.with_mut(f)
    }
}

/// Set and clear the task id in the context when the future is executed or
/// dropped, or when the output produced by the future is dropped.
pub(crate) struct TaskIdGuard {
    parent_task_id: Option<Id>,
}

impl TaskIdGuard {
    fn enter(id: Id) -> Self {
        TaskIdGuard {
            parent_task_id: context::set_current_task_id(Some(id)),
        }
    }
}

impl Drop for TaskIdGuard {
    fn drop(&mut self) {
        context::set_current_task_id(self.parent_task_id);
    }
}

impl<T: Future, S: Schedule> Core<T, S> {
    /// Polls the future.
    ///
    /// # Safety
    ///
    /// The caller must ensure it is safe to mutate the `state` field. This
    /// requires ensuring mutual exclusion between any concurrent thread that
    /// might modify the future or output field.
    ///
    /// The mutual exclusion is implemented by `Harness` and the `Lifecycle`
    /// component of the task state.
    ///
    /// `self` must also be pinned. This is handled by storing the task on the
    /// heap.
    pub(super) fn poll(&self, mut cx: Context<'_>) -> Poll<T::Output> {
        let res = {
            self.stage.stage.with_mut(|ptr| {
                // Safety: The caller ensures mutual exclusion to the field.
                let future = match unsafe { &mut *ptr } {
                    Stage::Running(future) => future,
                    _ => unreachable!("unexpected stage"),
                };

                // Safety: The caller ensures the future is pinned.
                let future = unsafe { Pin::new_unchecked(future) };

                let _guard = TaskIdGuard::enter(self.task_id);
                future.poll(&mut cx)
            })
        };

        if res.is_ready() {
            self.drop_future_or_output();
        }

        res
    }

    /// Drops the future.
    ///
    /// # Safety
    ///
    /// The caller must ensure it is safe to mutate the `stage` field.
    pub(super) fn drop_future_or_output(&self) {
        // Safety: the caller ensures mutual exclusion to the field.
        unsafe {
            self.set_stage(Stage::Consumed);
        }
    }

    /// Stores the task output.
    ///
    /// # Safety
    ///
    /// The caller must ensure it is safe to mutate the `stage` field.
    pub(super) fn store_output(&self, output: super::Result<T::Output>) {
        // Safety: the caller ensures mutual exclusion to the field.
        unsafe {
            self.set_stage(Stage::Finished(output));
        }
    }

    /// Takes the task output.
    ///
    /// # Safety
    ///
    /// The caller must ensure it is safe to mutate the `stage` field.
    pub(super) fn take_output(&self) -> super::Result<T::Output> {
        use std::mem;

        self.stage.stage.with_mut(|ptr| {
            // Safety: the caller ensures mutual exclusion to the field.
            match mem::replace(unsafe { &mut *ptr }, Stage::Consumed) {
                Stage::Finished(output) => output,
                _ => panic!("JoinHandle polled after completion"),
            }
        })
    }

    unsafe fn set_stage(&self, stage: Stage<T>) {
        let _guard = TaskIdGuard::enter(self.task_id);
        self.stage.stage.with_mut(|ptr| *ptr = stage);
    }
}

impl Header {
    // Safety: the caller must have exclusive access to the `queue_next`
    // field (per the "Fields in the task" rules in the module docs, that is
    // the holder of the `Notified` reference).
    pub(super) unsafe fn set_next(&self, next: Option<NonNull<Header>>) {
        self.queue_next.with_mut(|ptr| *ptr = next);
    }

    // safety: The caller must guarantee exclusive access to this field, and
    // must ensure that the id is either `None` or the id of the OwnedTasks
    // containing this task.
    pub(super) unsafe fn set_owner_id(&self, owner: NonZeroU64) {
        self.owner_id.with_mut(|ptr| *ptr = Some(owner));
    }

    pub(super) fn get_owner_id(&self) -> Option<NonZeroU64> {
        // safety: If there are concurrent writes, then that write has violated
        // the safety requirements on `set_owner_id`.
        unsafe { self.owner_id.with(|ptr| *ptr) }
    }

    /// Gets a pointer to the `Trailer` of the task containing this `Header`.
    ///
    /// # Safety
    ///
    /// The provided raw pointer must point at the header of a task.
    pub(super) unsafe fn get_trailer(me: NonNull<Header>) -> NonNull<Trailer> {
        let offset = me.as_ref().vtable.trailer_offset;
        let trailer = me.as_ptr().cast::<u8>().add(offset).cast::<Trailer>();
        NonNull::new_unchecked(trailer)
    }

    /// Gets a pointer to the scheduler of the task containing this `Header`.
    ///
    /// # Safety
    ///
    /// The provided raw pointer must point at the header of a task.
    ///
    /// The generic type S must be set to the correct scheduler type for this
    /// task.
    pub(super) unsafe fn get_scheduler<S>(me: NonNull<Header>) -> NonNull<S> {
        let offset = me.as_ref().vtable.scheduler_offset;
        let scheduler = me.as_ptr().cast::<u8>().add(offset).cast::<S>();
        NonNull::new_unchecked(scheduler)
    }

    /// Gets a pointer to the id of the task containing this `Header`.
    ///
    /// # Safety
    ///
    /// The provided raw pointer must point at the header of a task.
    pub(super) unsafe fn get_id_ptr(me: NonNull<Header>) -> NonNull<Id> {
        let offset = me.as_ref().vtable.id_offset;
        let id = me.as_ptr().cast::<u8>().add(offset).cast::<Id>();
        NonNull::new_unchecked(id)
    }

    /// Gets the id of the task containing this `Header`.
    ///
    /// # Safety
    ///
    /// The provided raw pointer must point at the header of a task.
    pub(super) unsafe fn get_id(me: NonNull<Header>) -> Id {
        let ptr = Header::get_id_ptr(me).as_ptr();
        *ptr
    }

    /// Gets a pointer to the source code location where the task containing
    /// this `Header` was spawned.
    ///
    /// # Safety
    ///
    /// The provided raw pointer must point at the header of a task.
    #[cfg(tokio_unstable)]
    pub(super) unsafe fn get_spawn_location_ptr(
        me: NonNull<Header>,
    ) -> NonNull<&'static Location<'static>> {
        let offset = me.as_ref().vtable.spawn_location_offset;
        let spawned_at = me
            .as_ptr()
            .cast::<u8>()
            .add(offset)
            .cast::<&'static Location<'static>>();
        NonNull::new_unchecked(spawned_at)
    }

    /// Gets the source code location where the task containing
    /// this `Header` was spawned.
    ///
    /// # Safety
    ///
    /// The provided raw pointer must point at the header of a task.
    #[cfg(tokio_unstable)]
    pub(super) unsafe fn get_spawn_location(me: NonNull<Header>) -> &'static Location<'static> {
        let ptr = Header::get_spawn_location_ptr(me).as_ptr();
        *ptr
    }

    /// Gets the tracing id of the task containing this `Header`.
    ///
    /// # Safety
    ///
    /// The provided raw pointer must point at the header of a task.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    pub(super) unsafe fn get_tracing_id(me: &NonNull<Header>) -> Option<&tracing::Id> {
        me.as_ref().tracing_id.as_ref()
    }
}

impl Trailer {
    fn new(hooks: TaskHarnessScheduleHooks) -> Self {
        Trailer {
            waker: UnsafeCell::new(None),
            owned: linked_list::Pointers::new(),
            hooks,
        }
    }

    // Safety: the caller must hold write access to the waker field per the
    // `JOIN_WAKER` protocol described in the module docs.
    pub(super) unsafe fn set_waker(&self, waker: Option<Waker>) {
        self.waker.with_mut(|ptr| {
            *ptr = waker;
        });
    }

    // Safety: the caller must hold access to the waker field per the
    // `JOIN_WAKER` protocol, and a waker must have been stored previously
    // (this unwraps the field).
    pub(super) unsafe fn will_wake(&self, waker: &Waker) -> bool {
        self.waker
            .with(|ptr| (*ptr).as_ref().unwrap().will_wake(waker))
    }

    pub(super) fn wake_join(&self) {
        self.waker.with(|ptr| match unsafe { &*ptr } {
            Some(waker) => waker.wake_by_ref(),
            None => panic!("waker missing"),
        });
    }
}

// `Header` is read on every wake/poll; keep it within 8 words so it stays
// within a typical cache line.
#[test]
#[cfg(not(loom))]
fn header_lte_cache_line() {
    assert!(std::mem::size_of::<Header>() <= 8 * std::mem::size_of::<*const ()>());
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/task/error.rs
tokio/src/runtime/task/error.rs
use std::any::Any;
use std::fmt;
use std::io;

use super::Id;
use crate::util::SyncWrapper;

cfg_rt! {
    /// Task failed to execute to completion.
    pub struct JoinError {
        repr: Repr,
        id: Id,
    }
}

/// The cause of the join failure: the task was either cancelled or panicked.
enum Repr {
    Cancelled,
    Panic(SyncWrapper<Box<dyn Any + Send + 'static>>),
}

impl JoinError {
    pub(crate) fn cancelled(id: Id) -> JoinError {
        JoinError {
            repr: Repr::Cancelled,
            id,
        }
    }

    pub(crate) fn panic(id: Id, err: Box<dyn Any + Send + 'static>) -> JoinError {
        JoinError {
            repr: Repr::Panic(SyncWrapper::new(err)),
            id,
        }
    }

    /// Returns true if the error was caused by the task being cancelled.
    ///
    /// See [the module level docs] for more information on cancellation.
    ///
    /// [the module level docs]: crate::task#cancellation
    pub fn is_cancelled(&self) -> bool {
        matches!(&self.repr, Repr::Cancelled)
    }

    /// Returns true if the error was caused by the task panicking.
    ///
    /// # Examples
    ///
    /// ```
    /// # #[cfg(not(target_family = "wasm"))]
    /// # {
    /// use std::panic;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let err = tokio::spawn(async {
    ///         panic!("boom");
    ///     }).await.unwrap_err();
    ///
    ///     assert!(err.is_panic());
    /// }
    /// # }
    /// ```
    pub fn is_panic(&self) -> bool {
        matches!(&self.repr, Repr::Panic(_))
    }

    /// Consumes the join error, returning the object with which the task panicked.
    ///
    /// # Panics
    ///
    /// `into_panic()` panics if the `JoinError` does not represent the underlying
    /// task terminating with a panic. Use `is_panic` to check the error reason
    /// or `try_into_panic` for a variant that does not panic.
    ///
    /// # Examples
    ///
    /// ```should_panic,ignore-wasm
    /// use std::panic;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let err = tokio::spawn(async {
    ///         panic!("boom");
    ///     }).await.unwrap_err();
    ///
    ///     if err.is_panic() {
    ///         // Resume the panic on the main task
    ///         panic::resume_unwind(err.into_panic());
    ///     }
    /// }
    /// ```
    #[track_caller]
    pub fn into_panic(self) -> Box<dyn Any + Send + 'static> {
        self.try_into_panic()
            .expect("`JoinError` reason is not a panic.")
    }

    /// Consumes the join error, returning the object with which the task
    /// panicked if the task terminated due to a panic. Otherwise, `self` is
    /// returned.
    ///
    /// # Examples
    ///
    /// ```should_panic,ignore-wasm
    /// use std::panic;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let err = tokio::spawn(async {
    ///         panic!("boom");
    ///     }).await.unwrap_err();
    ///
    ///     if let Ok(reason) = err.try_into_panic() {
    ///         // Resume the panic on the main task
    ///         panic::resume_unwind(reason);
    ///     }
    /// }
    /// ```
    pub fn try_into_panic(self) -> Result<Box<dyn Any + Send + 'static>, JoinError> {
        match self.repr {
            Repr::Panic(p) => Ok(p.into_inner()),
            _ => Err(self),
        }
    }

    /// Returns a [task ID] that identifies the task which errored relative to
    /// other currently spawned tasks.
    ///
    /// [task ID]: crate::task::Id
    pub fn id(&self) -> Id {
        self.id
    }
}

impl fmt::Display for JoinError {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        match &self.repr {
            Repr::Cancelled => write!(fmt, "task {} was cancelled", self.id),
            Repr::Panic(p) => match panic_payload_as_str(p) {
                Some(panic_str) => {
                    write!(
                        fmt,
                        "task {} panicked with message {:?}",
                        self.id, panic_str
                    )
                }
                None => {
                    write!(fmt, "task {} panicked", self.id)
                }
            },
        }
    }
}

impl fmt::Debug for JoinError {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        match &self.repr {
            Repr::Cancelled => write!(fmt, "JoinError::Cancelled({:?})", self.id),
            Repr::Panic(p) => match panic_payload_as_str(p) {
                Some(panic_str) => {
                    write!(fmt, "JoinError::Panic({:?}, {:?}, ...)", self.id, panic_str)
                }
                None => write!(fmt, "JoinError::Panic({:?}, ...)", self.id),
            },
        }
    }
}

impl std::error::Error for JoinError {}

impl From<JoinError> for io::Error {
    fn from(src: JoinError) -> io::Error {
        io::Error::new(
            io::ErrorKind::Other,
            match src.repr {
                Repr::Cancelled => "task was cancelled",
                Repr::Panic(_) => "task panicked",
            },
        )
    }
}

fn panic_payload_as_str(payload: &SyncWrapper<Box<dyn Any + Send>>) -> Option<&str> {
    // Panic payloads are almost always `String` (if invoked with formatting arguments)
    // or `&'static str` (if invoked with a string literal).
    //
    // Non-string panic payloads have niche use-cases,
    // so we don't really need to worry about those.
    if let Some(s) = payload.downcast_ref_sync::<String>() {
        return Some(s);
    }

    if let Some(s) = payload.downcast_ref_sync::<&'static str>() {
        return Some(s);
    }

    None
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/task/raw.rs
tokio/src/runtime/task/raw.rs
// It doesn't make sense to enforce `unsafe_op_in_unsafe_fn` for this module because
//
// * This module is doing the low-level task management that requires tons of unsafe
//   operations.
// * Excessive `unsafe {}` blocks hurt readability significantly.
// TODO: replace with `#[expect(unsafe_op_in_unsafe_fn)]` after bumping
// the MSRV to 1.81.0.
#![allow(unsafe_op_in_unsafe_fn)]

use crate::future::Future;
use crate::runtime::task::core::{Core, Trailer};
use crate::runtime::task::{Cell, Harness, Header, Id, Schedule, State};

#[cfg(tokio_unstable)]
use std::panic::Location;
use std::ptr::NonNull;
use std::task::{Poll, Waker};

/// Raw task handle
#[derive(Clone)]
pub(crate) struct RawTask {
    ptr: NonNull<Header>,
}

pub(super) struct Vtable {
    /// Polls the future.
    pub(super) poll: unsafe fn(NonNull<Header>),

    /// Schedules the task for execution on the runtime.
    pub(super) schedule: unsafe fn(NonNull<Header>),

    /// Deallocates the memory.
    pub(super) dealloc: unsafe fn(NonNull<Header>),

    /// Reads the task output, if complete.
    pub(super) try_read_output: unsafe fn(NonNull<Header>, *mut (), &Waker),

    /// The join handle has been dropped.
    pub(super) drop_join_handle_slow: unsafe fn(NonNull<Header>),

    /// An abort handle has been dropped.
    pub(super) drop_abort_handle: unsafe fn(NonNull<Header>),

    /// Scheduler is being shutdown.
    pub(super) shutdown: unsafe fn(NonNull<Header>),

    /// The number of bytes that the `trailer` field is offset from the header.
    pub(super) trailer_offset: usize,

    /// The number of bytes that the `scheduler` field is offset from the header.
    pub(super) scheduler_offset: usize,

    /// The number of bytes that the `id` field is offset from the header.
    pub(super) id_offset: usize,

    /// The number of bytes that the `spawned_at` field is offset from the header.
    #[cfg(tokio_unstable)]
    pub(super) spawn_location_offset: usize,
}

/// Get the vtable for the requested `T` and `S` generics.
pub(super) fn vtable<T: Future, S: Schedule>() -> &'static Vtable {
    &Vtable {
        poll: poll::<T, S>,
        schedule: schedule::<S>,
        dealloc: dealloc::<T, S>,
        try_read_output: try_read_output::<T, S>,
        drop_join_handle_slow: drop_join_handle_slow::<T, S>,
        drop_abort_handle: drop_abort_handle::<T, S>,
        shutdown: shutdown::<T, S>,
        trailer_offset: OffsetHelper::<T, S>::TRAILER_OFFSET,
        scheduler_offset: OffsetHelper::<T, S>::SCHEDULER_OFFSET,
        id_offset: OffsetHelper::<T, S>::ID_OFFSET,
        #[cfg(tokio_unstable)]
        spawn_location_offset: OffsetHelper::<T, S>::SPAWN_LOCATION_OFFSET,
    }
}

/// Calling `get_trailer_offset` directly in vtable doesn't work because it
/// prevents the vtable from being promoted to a static reference.
///
/// See this thread for more info:
/// <https://users.rust-lang.org/t/custom-vtables-with-integers/78508>
struct OffsetHelper<T, S>(T, S);

impl<T: Future, S: Schedule> OffsetHelper<T, S> {
    // Pass `size_of`/`align_of` as arguments rather than calling them directly
    // inside `get_trailer_offset` because trait bounds on generic parameters
    // of const fn are unstable on our MSRV.
    const TRAILER_OFFSET: usize = get_trailer_offset(
        std::mem::size_of::<Header>(),
        std::mem::size_of::<Core<T, S>>(),
        std::mem::align_of::<Core<T, S>>(),
        std::mem::align_of::<Trailer>(),
    );

    // The `scheduler` is the first field of `Core`, so it has the same
    // offset as `Core`.
    const SCHEDULER_OFFSET: usize = get_core_offset(
        std::mem::size_of::<Header>(),
        std::mem::align_of::<Core<T, S>>(),
    );

    const ID_OFFSET: usize = get_id_offset(
        std::mem::size_of::<Header>(),
        std::mem::align_of::<Core<T, S>>(),
        std::mem::size_of::<S>(),
        std::mem::align_of::<Id>(),
    );

    #[cfg(tokio_unstable)]
    const SPAWN_LOCATION_OFFSET: usize = get_spawn_location_offset(
        std::mem::size_of::<Header>(),
        std::mem::align_of::<Core<T, S>>(),
        std::mem::size_of::<S>(),
        std::mem::align_of::<Id>(),
        std::mem::size_of::<Id>(),
        std::mem::align_of::<&'static Location<'static>>(),
    );
}

/// Compute the offset of the `Trailer` field in `Cell<T, S>` using the
/// `#[repr(C)]` algorithm.
///
/// Pseudo-code for the `#[repr(C)]` algorithm can be found here:
/// <https://doc.rust-lang.org/reference/type-layout.html#reprc-structs>
const fn get_trailer_offset(
    header_size: usize,
    core_size: usize,
    core_align: usize,
    trailer_align: usize,
) -> usize {
    let mut offset = header_size;

    let core_misalign = offset % core_align;
    if core_misalign > 0 {
        offset += core_align - core_misalign;
    }
    offset += core_size;

    let trailer_misalign = offset % trailer_align;
    if trailer_misalign > 0 {
        offset += trailer_align - trailer_misalign;
    }

    offset
}

/// Compute the offset of the `Core<T, S>` field in `Cell<T, S>` using the
/// `#[repr(C)]` algorithm.
///
/// Pseudo-code for the `#[repr(C)]` algorithm can be found here:
/// <https://doc.rust-lang.org/reference/type-layout.html#reprc-structs>
const fn get_core_offset(header_size: usize, core_align: usize) -> usize {
    let mut offset = header_size;

    let core_misalign = offset % core_align;
    if core_misalign > 0 {
        offset += core_align - core_misalign;
    }

    offset
}

/// Compute the offset of the `Id` field in `Cell<T, S>` using the
/// `#[repr(C)]` algorithm.
///
/// Pseudo-code for the `#[repr(C)]` algorithm can be found here:
/// <https://doc.rust-lang.org/reference/type-layout.html#reprc-structs>
const fn get_id_offset(
    header_size: usize,
    core_align: usize,
    scheduler_size: usize,
    id_align: usize,
) -> usize {
    let mut offset = get_core_offset(header_size, core_align);
    offset += scheduler_size;

    let id_misalign = offset % id_align;
    if id_misalign > 0 {
        offset += id_align - id_misalign;
    }

    offset
}

/// Compute the offset of the `&'static Location<'static>` field in `Cell<T, S>`
/// using the `#[repr(C)]` algorithm.
///
/// Pseudo-code for the `#[repr(C)]` algorithm can be found here:
/// <https://doc.rust-lang.org/reference/type-layout.html#reprc-structs>
#[cfg(tokio_unstable)]
const fn get_spawn_location_offset(
    header_size: usize,
    core_align: usize,
    scheduler_size: usize,
    id_align: usize,
    id_size: usize,
    spawn_location_align: usize,
) -> usize {
    let mut offset = get_id_offset(header_size, core_align, scheduler_size, id_align);
    offset += id_size;

    let spawn_location_misalign = offset % spawn_location_align;
    if spawn_location_misalign > 0 {
        offset += spawn_location_align - spawn_location_misalign;
    }

    offset
}

impl RawTask {
    pub(super) fn new<T, S>(
        task: T,
        scheduler: S,
        id: Id,
        _spawned_at: super::SpawnLocation,
    ) -> RawTask
    where
        T: Future,
        S: Schedule,
    {
        let ptr = Box::into_raw(Cell::<_, S>::new(
            task,
            scheduler,
            State::new(),
            id,
            #[cfg(tokio_unstable)]
            _spawned_at.0,
        ));
        let ptr = unsafe { NonNull::new_unchecked(ptr.cast()) };

        RawTask { ptr }
    }

    /// # Safety
    ///
    /// `ptr` must be a valid pointer to a [`Header`].
    pub(super) unsafe fn from_raw(ptr: NonNull<Header>) -> RawTask {
        RawTask { ptr }
    }

    pub(super) fn header_ptr(&self) -> NonNull<Header> {
        self.ptr
    }

    pub(super) fn trailer_ptr(&self) -> NonNull<Trailer> {
        unsafe { Header::get_trailer(self.ptr) }
    }

    /// Returns a reference to the task's header.
    pub(super) fn header(&self) -> &Header {
        unsafe { self.ptr.as_ref() }
    }

    /// Returns a reference to the task's trailer.
    pub(super) fn trailer(&self) -> &Trailer {
        unsafe { &*self.trailer_ptr().as_ptr() }
    }

    /// Returns a reference to the task's state.
    pub(super) fn state(&self) -> &State {
        &self.header().state
    }

    /// Safety: mutual exclusion is required to call this function.
    pub(crate) fn poll(self) {
        let vtable = self.header().vtable;
        unsafe { (vtable.poll)(self.ptr) }
    }

    pub(super) fn schedule(self) {
        let vtable = self.header().vtable;
        unsafe { (vtable.schedule)(self.ptr) }
    }

    pub(super) fn dealloc(self) {
        let vtable = self.header().vtable;
        unsafe {
            (vtable.dealloc)(self.ptr);
        }
    }

    /// Safety: `dst` must be a `*mut Poll<super::Result<T::Output>>` where `T`
    /// is the future stored by the task.
    pub(super) unsafe fn try_read_output<O>(self, dst: *mut Poll<super::Result<O>>, waker: &Waker) {
        let vtable = self.header().vtable;
        (vtable.try_read_output)(self.ptr, dst as *mut _, waker);
    }

    pub(super) fn drop_join_handle_slow(self) {
        let vtable = self.header().vtable;
        unsafe { (vtable.drop_join_handle_slow)(self.ptr) }
    }

    pub(super) fn drop_abort_handle(self) {
        let vtable = self.header().vtable;
        unsafe { (vtable.drop_abort_handle)(self.ptr) }
    }

    pub(super) fn shutdown(self) {
        let vtable = self.header().vtable;
        unsafe { (vtable.shutdown)(self.ptr) }
    }

    /// Increment the task's reference count.
    ///
    /// Currently, this is used only when creating an `AbortHandle`.
    pub(super) fn ref_inc(self) {
        self.header().state.ref_inc();
    }

    /// Get the queue-next pointer
    ///
    /// This is for usage by the injection queue
    ///
    /// Safety: make sure only one queue uses this and access is synchronized.
    pub(crate) unsafe fn get_queue_next(self) -> Option<RawTask> {
        self.header()
            .queue_next
            .with(|ptr| *ptr)
            .map(|p| RawTask::from_raw(p))
    }

    /// Sets the queue-next pointer
    ///
    /// This is for usage by the injection queue
    ///
    /// Safety: make sure only one queue uses this and access is synchronized.
    pub(crate) unsafe fn set_queue_next(self, val: Option<RawTask>) {
        self.header().set_next(val.map(|task| task.ptr));
    }
}

// `RawTask` is just a pointer wrapper; copying it does not adjust the task's
// reference count (`ref_inc` must be called explicitly when a new counted
// reference is created).
impl Copy for RawTask {}

unsafe fn poll<T: Future, S: Schedule>(ptr: NonNull<Header>) {
    let harness = Harness::<T, S>::from_raw(ptr);
    harness.poll();
}

unsafe fn schedule<S: Schedule>(ptr: NonNull<Header>) {
    use crate::runtime::task::{Notified, Task};

    let scheduler = Header::get_scheduler::<S>(ptr);
    scheduler
        .as_ref()
        .schedule(Notified(Task::from_raw(ptr.cast())));
}

unsafe fn dealloc<T: Future, S: Schedule>(ptr: NonNull<Header>) {
    let harness = Harness::<T, S>::from_raw(ptr);
    harness.dealloc();
}

unsafe fn try_read_output<T: Future, S: Schedule>(
    ptr: NonNull<Header>,
    dst: *mut (),
    waker: &Waker,
) {
    let out = &mut *(dst as *mut Poll<super::Result<T::Output>>);

    let harness = Harness::<T, S>::from_raw(ptr);
    harness.try_read_output(out, waker);
}

unsafe fn drop_join_handle_slow<T: Future, S: Schedule>(ptr: NonNull<Header>) {
    let harness = Harness::<T, S>::from_raw(ptr);
    harness.drop_join_handle_slow();
}

unsafe fn drop_abort_handle<T: Future, S: Schedule>(ptr: NonNull<Header>) {
    let harness = Harness::<T, S>::from_raw(ptr);
    harness.drop_reference();
}

unsafe fn shutdown<T: Future, S: Schedule>(ptr: NonNull<Header>) {
    let harness = Harness::<T, S>::from_raw(ptr);
    harness.shutdown();
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/task/mod.rs
tokio/src/runtime/task/mod.rs
//! The task module. //! //! The task module contains the code that manages spawned tasks and provides a //! safe API for the rest of the runtime to use. Each task in a runtime is //! stored in an `OwnedTasks` or `LocalOwnedTasks` object. //! //! # Task reference types //! //! A task is usually referenced by multiple handles, and there are several //! types of handles. //! //! * `OwnedTask` - tasks stored in an `OwnedTasks` or `LocalOwnedTasks` are of this //! reference type. //! //! * `JoinHandle` - each task has a `JoinHandle` that allows access to the output //! of the task. //! //! * `Waker` - every waker for a task has this reference type. There can be any //! number of waker references. //! //! * `Notified` - tracks whether the task is notified. //! //! * `Unowned` - this task reference type is used for tasks not stored in any //! runtime. Mainly used for blocking tasks, but also in tests. //! //! The task uses a reference count to keep track of how many active references //! exist. The `Unowned` reference type takes up two ref-counts. All other //! reference types take up a single ref-count. //! //! Besides the waker type, each task has at most one of each reference type. //! //! # State //! //! The task stores its state in an atomic `usize` with various bitfields for the //! necessary information. The state has the following bitfields: //! //! * `RUNNING` - Tracks whether the task is currently being polled or cancelled. //! This bit functions as a lock around the task. //! //! * `COMPLETE` - Is one once the future has fully completed and has been //! dropped. Never unset once set. Never set together with RUNNING. //! //! * `NOTIFIED` - Tracks whether a Notified object currently exists. //! //! * `CANCELLED` - Is set to one for tasks that should be cancelled as soon as //! possible. May take any value for completed tasks. //! //! * `JOIN_INTEREST` - Is set to one if there exists a `JoinHandle`. //! //! 
* `JOIN_WAKER` - Acts as an access control bit for the join handle waker. The //! protocol for its usage is described below. //! //! The rest of the bits are used for the ref-count. //! //! # Fields in the task //! //! The task has various fields. This section describes how and when it is safe //! to access a field. //! //! * The state field is accessed with atomic instructions. //! //! * The `OwnedTask` reference has exclusive access to the `owned` field. //! //! * The Notified reference has exclusive access to the `queue_next` field. //! //! * The `owner_id` field can be set as part of construction of the task, but //! is otherwise immutable and anyone can access the field immutably without //! synchronization. //! //! * If COMPLETE is one, then the `JoinHandle` has exclusive access to the //! stage field. If COMPLETE is zero, then the RUNNING bitfield functions as //! a lock for the stage field, and it can be accessed only by the thread //! that set RUNNING to one. //! //! * The waker field may be concurrently accessed by different threads: in one //! thread the runtime may complete a task and *read* the waker field to //! invoke the waker, and in another thread the task's `JoinHandle` may be //! polled, and if the task hasn't yet completed, the `JoinHandle` may *write* //! a waker to the waker field. The `JOIN_WAKER` bit ensures safe access by //! multiple threads to the waker field using the following rules: //! //! 1. `JOIN_WAKER` is initialized to zero. //! //! 2. If `JOIN_WAKER` is zero, then the `JoinHandle` has exclusive (mutable) //! access to the waker field. //! //! 3. If `JOIN_WAKER` is one, then the `JoinHandle` has shared (read-only) //! access to the waker field. //! //! 4. If `JOIN_WAKER` is one and COMPLETE is one, then the runtime has shared //! (read-only) access to the waker field. //! //! 5. If the `JoinHandle` needs to write to the waker field, then the //! `JoinHandle` needs to (i) successfully set `JOIN_WAKER` to zero if it is //! 
not already zero to gain exclusive access to the waker field per rule //! 2, (ii) write a waker, and (iii) successfully set `JOIN_WAKER` to one. //! If the `JoinHandle` unsets `JOIN_WAKER` in the process of being dropped //! to clear the waker field, only steps (i) and (ii) are relevant. //! //! 6. The `JoinHandle` can change `JOIN_WAKER` only if COMPLETE is zero (i.e. //! the task hasn't yet completed). The runtime can change `JOIN_WAKER` only //! if COMPLETE is one. //! //! 7. If `JOIN_INTEREST` is zero and COMPLETE is one, then the runtime has //! exclusive (mutable) access to the waker field. This might happen if the //! `JoinHandle` gets dropped right after the task completes and the runtime //! sets the `COMPLETE` bit. In this case the runtime needs the mutable access //! to the waker field to drop it. //! //! Rule 6 implies that the steps (i) or (iii) of rule 5 may fail due to a //! race. If step (i) fails, then the attempt to write a waker is aborted. If //! step (iii) fails because COMPLETE is set to one by another thread after //! step (i), then the waker field is cleared. Once COMPLETE is one (i.e. //! task has completed), the `JoinHandle` will not modify `JOIN_WAKER`. After the //! runtime sets COMPLETE to one, it invokes the waker if there is one so in this //! case when a task completes the `JOIN_WAKER` bit implicates to the runtime //! whether it should invoke the waker or not. After the runtime is done with //! using the waker during task completion, it unsets the `JOIN_WAKER` bit to give //! the `JoinHandle` exclusive access again so that it is able to drop the waker //! at a later point. //! //! All other fields are immutable and can be accessed immutably without //! synchronization by anyone. //! //! # Safety //! //! This section goes through various situations and explains why the API is //! safe in that situation. //! //! ## Polling or dropping the future //! //! Any mutable access to the future happens after obtaining a lock by modifying //! 
the RUNNING field, so exclusive access is ensured. //! //! When the task completes, exclusive access to the output is transferred to //! the `JoinHandle`. If the `JoinHandle` is already dropped when the transition to //! complete happens, the thread performing that transition retains exclusive //! access to the output and should immediately drop it. //! //! ## Non-Send futures //! //! If a future is not Send, then it is bound to a `LocalOwnedTasks`. The future //! will only ever be polled or dropped given a `LocalNotified` or inside a call //! to `LocalOwnedTasks::shutdown_all`. In either case, it is guaranteed that the //! future is on the right thread. //! //! If the task is never removed from the `LocalOwnedTasks`, then it is leaked, so //! there is no risk that the task is dropped on some other thread when the last //! ref-count drops. //! //! ## Non-Send output //! //! When a task completes, the output is placed in the stage of the task. Then, //! a transition that sets COMPLETE to true is performed, and the value of //! `JOIN_INTEREST` when this transition happens is read. //! //! If `JOIN_INTEREST` is zero when the transition to COMPLETE happens, then the //! output is immediately dropped. //! //! If `JOIN_INTEREST` is one when the transition to COMPLETE happens, then the //! `JoinHandle` is responsible for cleaning up the output. If the output is not //! Send, then this happens: //! //! 1. The output is created on the thread that the future was polled on. Since //! only non-Send futures can have non-Send output, the future was polled on //! the thread that the future was spawned from. //! 2. Since `JoinHandle<Output>` is not Send if Output is not Send, the //! `JoinHandle` is also on the thread that the future was spawned from. //! 3. Thus, the `JoinHandle` will not move the output across threads when it //! takes or drops the output. //! //! ## Recursive poll/shutdown //! //! Calling poll from inside a shutdown call or vice-versa is not prevented by //! 
the API exposed by the task module, so this has to be safe. In either case, //! the lock in the RUNNING bitfield makes the inner call return immediately. If //! the inner call is a `shutdown` call, then the CANCELLED bit is set, and the //! poll call will notice it when the poll finishes, and the task is cancelled //! at that point. mod core; use self::core::Cell; use self::core::Header; mod error; pub use self::error::JoinError; mod harness; use self::harness::Harness; mod id; #[cfg_attr(not(tokio_unstable), allow(unreachable_pub, unused_imports))] pub use id::{id, try_id, Id}; #[cfg(feature = "rt")] mod abort; mod join; #[cfg(feature = "rt")] pub use self::abort::AbortHandle; pub use self::join::JoinHandle; mod list; pub(crate) use self::list::{LocalOwnedTasks, OwnedTasks}; mod raw; pub(crate) use self::raw::RawTask; mod state; use self::state::State; mod waker; pub(crate) use self::spawn_location::SpawnLocation; cfg_taskdump! { pub(crate) mod trace; } use crate::future::Future; use crate::util::linked_list; use crate::util::sharded_list; use crate::runtime::TaskCallback; use std::marker::PhantomData; use std::panic::Location; use std::ptr::NonNull; use std::{fmt, mem}; /// An owned handle to the task, tracked by ref count. #[repr(transparent)] pub(crate) struct Task<S: 'static> { raw: RawTask, _p: PhantomData<S>, } unsafe impl<S> Send for Task<S> {} unsafe impl<S> Sync for Task<S> {} /// A task was notified. #[repr(transparent)] pub(crate) struct Notified<S: 'static>(Task<S>); impl<S> Notified<S> { #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] #[inline] pub(crate) fn task_meta<'meta>(&self) -> crate::runtime::TaskMeta<'meta> { self.0.task_meta() } } // safety: This type cannot be used to touch the task without first verifying // that the value is on a thread where it is safe to poll the task. 
unsafe impl<S: Schedule> Send for Notified<S> {} unsafe impl<S: Schedule> Sync for Notified<S> {} /// A non-Send variant of Notified with the invariant that it is on a thread /// where it is safe to poll it. #[repr(transparent)] pub(crate) struct LocalNotified<S: 'static> { task: Task<S>, _not_send: PhantomData<*const ()>, } impl<S> LocalNotified<S> { #[cfg(tokio_unstable)] #[inline] pub(crate) fn task_meta<'meta>(&self) -> crate::runtime::TaskMeta<'meta> { self.task.task_meta() } } /// A task that is not owned by any `OwnedTasks`. Used for blocking tasks. /// This type holds two ref-counts. pub(crate) struct UnownedTask<S: 'static> { raw: RawTask, _p: PhantomData<S>, } // safety: This type can only be created given a Send task. unsafe impl<S> Send for UnownedTask<S> {} unsafe impl<S> Sync for UnownedTask<S> {} /// Task result sent back. pub(crate) type Result<T> = std::result::Result<T, JoinError>; /// Hooks for scheduling tasks which are needed in the task harness. #[derive(Clone)] pub(crate) struct TaskHarnessScheduleHooks { pub(crate) task_terminate_callback: Option<TaskCallback>, } pub(crate) trait Schedule: Sync + Sized + 'static { /// The task has completed work and is ready to be released. The scheduler /// should release it immediately and return it. The task module will batch /// the ref-dec with setting other options. /// /// If the scheduler has already released the task, then None is returned. fn release(&self, task: &Task<Self>) -> Option<Task<Self>>; /// Schedule the task fn schedule(&self, task: Notified<Self>); fn hooks(&self) -> TaskHarnessScheduleHooks; /// Schedule the task to run in the near future, yielding the thread to /// other tasks. fn yield_now(&self, task: Notified<Self>) { self.schedule(task); } /// Polling the task resulted in a panic. Should the runtime shutdown? fn unhandled_panic(&self) { // By default, do nothing. This maintains the 1.0 behavior. } } cfg_rt! { /// This is the constructor for a new task. 
Three references to the task are /// created. The first task reference is usually put into an `OwnedTasks` /// immediately. The Notified is sent to the scheduler as an ordinary /// notification. fn new_task<T, S>( task: T, scheduler: S, id: Id, spawned_at: SpawnLocation, ) -> (Task<S>, Notified<S>, JoinHandle<T::Output>) where S: Schedule, T: Future + 'static, T::Output: 'static, { let raw = RawTask::new::<T, S>( task, scheduler, id, spawned_at, ); let task = Task { raw, _p: PhantomData, }; let notified = Notified(Task { raw, _p: PhantomData, }); let join = JoinHandle::new(raw); (task, notified, join) } /// Creates a new task with an associated join handle. This method is used /// only when the task is not going to be stored in an `OwnedTasks` list. /// /// Currently only blocking tasks use this method. pub(crate) fn unowned<T, S>( task: T, scheduler: S, id: Id, spawned_at: SpawnLocation, ) -> (UnownedTask<S>, JoinHandle<T::Output>) where S: Schedule, T: Send + Future + 'static, T::Output: Send + 'static, { let (task, notified, join) = new_task( task, scheduler, id, spawned_at, ); // This transfers the ref-count of task and notified into an UnownedTask. // This is valid because an UnownedTask holds two ref-counts. let unowned = UnownedTask { raw: task.raw, _p: PhantomData, }; std::mem::forget(task); std::mem::forget(notified); (unowned, join) } } impl<S: 'static> Task<S> { unsafe fn new(raw: RawTask) -> Task<S> { Task { raw, _p: PhantomData, } } /// # Safety /// /// `ptr` must be a valid pointer to a [`Header`]. 
unsafe fn from_raw(ptr: NonNull<Header>) -> Task<S> { unsafe { Task::new(RawTask::from_raw(ptr)) } } #[cfg(all( tokio_unstable, feature = "taskdump", feature = "rt", target_os = "linux", any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64") ))] pub(super) fn as_raw(&self) -> RawTask { self.raw } fn header(&self) -> &Header { self.raw.header() } fn header_ptr(&self) -> NonNull<Header> { self.raw.header_ptr() } /// Returns a [task ID] that uniquely identifies this task relative to other /// currently spawned tasks. /// /// [task ID]: crate::task::Id #[cfg(tokio_unstable)] pub(crate) fn id(&self) -> crate::task::Id { // Safety: The header pointer is valid. unsafe { Header::get_id(self.raw.header_ptr()) } } #[cfg(tokio_unstable)] pub(crate) fn spawned_at(&self) -> &'static Location<'static> { // Safety: The header pointer is valid. unsafe { Header::get_spawn_location(self.raw.header_ptr()) } } // Explicit `'task` and `'meta` lifetimes are necessary here, as otherwise, // the compiler infers the lifetimes to be the same, and considers the task // to be borrowed for the lifetime of the returned `TaskMeta`. #[cfg(tokio_unstable)] pub(crate) fn task_meta<'meta>(&self) -> crate::runtime::TaskMeta<'meta> { crate::runtime::TaskMeta { id: self.id(), spawned_at: self.spawned_at().into(), _phantom: PhantomData, } } cfg_taskdump! { /// Notify the task for task dumping. /// /// Returns `None` if the task has already been notified. pub(super) fn notify_for_tracing(&self) -> Option<Notified<S>> { if self.as_raw().state().transition_to_notified_for_tracing() { // SAFETY: `transition_to_notified_for_tracing` increments the // refcount. 
Some(unsafe { Notified(Task::new(self.raw)) }) } else { None } } } } impl<S: 'static> Notified<S> { fn header(&self) -> &Header { self.0.header() } #[cfg(tokio_unstable)] #[allow(dead_code)] pub(crate) fn task_id(&self) -> crate::task::Id { self.0.id() } } impl<S: 'static> Notified<S> { /// # Safety /// /// [`RawTask::ptr`] must be a valid pointer to a [`Header`]. pub(crate) unsafe fn from_raw(ptr: RawTask) -> Notified<S> { Notified(unsafe { Task::new(ptr) }) } } impl<S: 'static> Notified<S> { pub(crate) fn into_raw(self) -> RawTask { let raw = self.0.raw; mem::forget(self); raw } } impl<S: Schedule> Task<S> { /// Preemptively cancels the task as part of the shutdown process. pub(crate) fn shutdown(self) { let raw = self.raw; mem::forget(self); raw.shutdown(); } } impl<S: Schedule> LocalNotified<S> { /// Runs the task. pub(crate) fn run(self) { let raw = self.task.raw; mem::forget(self); raw.poll(); } } impl<S: Schedule> UnownedTask<S> { // Used in test of the inject queue. #[cfg(test)] #[cfg_attr(target_family = "wasm", allow(dead_code))] pub(super) fn into_notified(self) -> Notified<S> { Notified(self.into_task()) } fn into_task(self) -> Task<S> { // Convert into a task. let task = Task { raw: self.raw, _p: PhantomData, }; mem::forget(self); // Drop a ref-count since an UnownedTask holds two. task.header().state.ref_dec(); task } pub(crate) fn run(self) { let raw = self.raw; mem::forget(self); // Transfer one ref-count to a Task object. let task = Task::<S> { raw, _p: PhantomData, }; // Use the other ref-count to poll the task. 
raw.poll(); // Decrement our extra ref-count drop(task); } pub(crate) fn shutdown(self) { self.into_task().shutdown(); } } impl<S: 'static> Drop for Task<S> { fn drop(&mut self) { // Decrement the ref count if self.header().state.ref_dec() { // Deallocate if this is the final ref count self.raw.dealloc(); } } } impl<S: 'static> Drop for UnownedTask<S> { fn drop(&mut self) { // Decrement the ref count if self.raw.header().state.ref_dec_twice() { // Deallocate if this is the final ref count self.raw.dealloc(); } } } impl<S> fmt::Debug for Task<S> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "Task({:p})", self.header()) } } impl<S> fmt::Debug for Notified<S> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "task::Notified({:p})", self.0.header()) } } /// # Safety /// /// Tasks are pinned. unsafe impl<S> linked_list::Link for Task<S> { type Handle = Task<S>; type Target = Header; fn as_raw(handle: &Task<S>) -> NonNull<Header> { handle.raw.header_ptr() } unsafe fn from_raw(ptr: NonNull<Header>) -> Task<S> { unsafe { Task::from_raw(ptr) } } unsafe fn pointers(target: NonNull<Header>) -> NonNull<linked_list::Pointers<Header>> { unsafe { self::core::Trailer::addr_of_owned(Header::get_trailer(target)) } } } /// # Safety /// /// The id of a task is never changed after creation of the task, so the return value of /// `get_shard_id` will not change. (The cast may throw away the upper 32 bits of the task id, but /// the shard id still won't change from call to call.) unsafe impl<S> sharded_list::ShardedListItem for Task<S> { unsafe fn get_shard_id(target: NonNull<Self::Target>) -> usize { // SAFETY: The caller guarantees that `target` points at a valid task. let task_id = unsafe { Header::get_id(target) }; task_id.0.get() as usize } } /// Wrapper around [`std::panic::Location`] that's conditionally compiled out /// when `tokio_unstable` is not enabled. 
#[cfg(tokio_unstable)] mod spawn_location { use std::panic::Location; #[derive(Copy, Clone)] pub(crate) struct SpawnLocation(pub &'static Location<'static>); impl From<&'static Location<'static>> for SpawnLocation { fn from(location: &'static Location<'static>) -> Self { Self(location) } } } #[cfg(not(tokio_unstable))] mod spawn_location { use std::panic::Location; #[derive(Copy, Clone)] pub(crate) struct SpawnLocation(); impl From<&'static Location<'static>> for SpawnLocation { fn from(_: &'static Location<'static>) -> Self { Self() } } #[cfg(test)] #[test] fn spawn_location_is_zero_sized() { assert_eq!(std::mem::size_of::<SpawnLocation>(), 0); } } impl SpawnLocation { #[track_caller] #[inline] pub(crate) fn capture() -> Self { Self::from(Location::caller()) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/task/join.rs
tokio/src/runtime/task/join.rs
use crate::runtime::task::{AbortHandle, Header, RawTask}; use std::fmt; use std::future::Future; use std::marker::PhantomData; use std::panic::{RefUnwindSafe, UnwindSafe}; use std::pin::Pin; use std::task::{ready, Context, Poll, Waker}; cfg_rt! { /// An owned permission to join on a task (await its termination). /// /// This can be thought of as the equivalent of [`std::thread::JoinHandle`] /// for a Tokio task rather than a thread. Note that the background task /// associated with this `JoinHandle` started running immediately when you /// called spawn, even if you have not yet awaited the `JoinHandle`. /// /// A `JoinHandle` *detaches* the associated task when it is dropped, which /// means that there is no longer any handle to the task, and no way to `join` /// on it. /// /// This `struct` is created by the [`task::spawn`] and [`task::spawn_blocking`] /// functions. /// /// It is guaranteed that the destructor of the spawned task has finished /// before task completion is observed via `JoinHandle` `await`, /// [`JoinHandle::is_finished`] or [`AbortHandle::is_finished`]. /// /// # Cancel safety /// /// The `&mut JoinHandle<T>` type is cancel safe. If it is used as the event /// in a `tokio::select!` statement and some other branch completes first, /// then it is guaranteed that the output of the task is not lost. /// /// If a `JoinHandle` is dropped, then the task continues running in the /// background and its return value is lost. 
/// /// # Examples /// /// Creation from [`task::spawn`]: /// /// ``` /// use tokio::task; /// /// # async fn doc() { /// let join_handle: task::JoinHandle<_> = task::spawn(async { /// // some work here /// }); /// # } /// ``` /// /// Creation from [`task::spawn_blocking`]: /// /// ``` /// use tokio::task; /// /// # async fn doc() { /// let join_handle: task::JoinHandle<_> = task::spawn_blocking(|| { /// // some blocking work here /// }); /// # } /// ``` /// /// The generic parameter `T` in `JoinHandle<T>` is the return type of the spawned task. /// If the return value is an `i32`, the join handle has type `JoinHandle<i32>`: /// /// ``` /// use tokio::task; /// /// # async fn doc() { /// let join_handle: task::JoinHandle<i32> = task::spawn(async { /// 5 + 3 /// }); /// # } /// /// ``` /// /// If the task does not have a return value, the join handle has type `JoinHandle<()>`: /// /// ``` /// use tokio::task; /// /// # async fn doc() { /// let join_handle: task::JoinHandle<()> = task::spawn(async { /// println!("I return nothing."); /// }); /// # } /// ``` /// /// Note that `handle.await` doesn't give you the return type directly. It is wrapped in a /// `Result` because panics in the spawned task are caught by Tokio. 
The `?` operator has /// to be double chained to extract the returned value: /// /// ``` /// use tokio::task; /// use std::io; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let join_handle: task::JoinHandle<Result<i32, io::Error>> = tokio::spawn(async { /// Ok(5 + 3) /// }); /// /// let result = join_handle.await??; /// assert_eq!(result, 8); /// Ok(()) /// # } /// ``` /// /// If the task panics, the error is a [`JoinError`] that contains the panic: /// /// ``` /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::task; /// use std::io; /// use std::panic; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let join_handle: task::JoinHandle<Result<i32, io::Error>> = tokio::spawn(async { /// panic!("boom"); /// }); /// /// let err = join_handle.await.unwrap_err(); /// assert!(err.is_panic()); /// Ok(()) /// } /// # } /// ``` /// Child being detached and outliving its parent: /// /// ```no_run /// use tokio::task; /// use tokio::time; /// use std::time::Duration; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let original_task = task::spawn(async { /// let _detached_task = task::spawn(async { /// // Here we sleep to make sure that the first task returns before. /// time::sleep(Duration::from_millis(10)).await; /// // This will be called, even though the JoinHandle is dropped. /// println!("♫ Still alive ♫"); /// }); /// }); /// /// original_task.await.expect("The task being joined has panicked"); /// println!("Original task is joined."); /// /// // We make sure that the new task has time to run, before the main /// // task returns. 
/// /// time::sleep(Duration::from_millis(1000)).await; /// # } /// ``` /// /// [`task::spawn`]: crate::task::spawn() /// [`task::spawn_blocking`]: crate::task::spawn_blocking /// [`std::thread::JoinHandle`]: std::thread::JoinHandle /// [`JoinError`]: crate::task::JoinError pub struct JoinHandle<T> { raw: RawTask, _p: PhantomData<T>, } } unsafe impl<T: Send> Send for JoinHandle<T> {} unsafe impl<T: Send> Sync for JoinHandle<T> {} impl<T> UnwindSafe for JoinHandle<T> {} impl<T> RefUnwindSafe for JoinHandle<T> {} impl<T> JoinHandle<T> { pub(super) fn new(raw: RawTask) -> JoinHandle<T> { JoinHandle { raw, _p: PhantomData, } } /// Abort the task associated with the handle. /// /// Awaiting a cancelled task might complete as usual if the task was /// already completed at the time it was cancelled, but most likely it /// will fail with a [cancelled] `JoinError`. /// /// Be aware that tasks spawned using [`spawn_blocking`] cannot be aborted /// because they are not async. If you call `abort` on a `spawn_blocking` /// task, then this *will not have any effect*, and the task will continue /// running normally. The exception is if the task has not started running /// yet; in that case, calling `abort` may prevent the task from starting. /// /// See also [the module level docs] for more information on cancellation. 
/// /// ```rust /// use tokio::time; /// /// # #[tokio::main(flavor = "current_thread", start_paused = true)] /// # async fn main() { /// let mut handles = Vec::new(); /// /// handles.push(tokio::spawn(async { /// time::sleep(time::Duration::from_secs(10)).await; /// true /// })); /// /// handles.push(tokio::spawn(async { /// time::sleep(time::Duration::from_secs(10)).await; /// false /// })); /// /// for handle in &handles { /// handle.abort(); /// } /// /// for handle in handles { /// assert!(handle.await.unwrap_err().is_cancelled()); /// } /// # } /// ``` /// /// [cancelled]: method@super::error::JoinError::is_cancelled /// [the module level docs]: crate::task#cancellation /// [`spawn_blocking`]: crate::task::spawn_blocking pub fn abort(&self) { self.raw.remote_abort(); } /// Checks if the task associated with this `JoinHandle` has finished. /// /// Please note that this method can return `false` even if [`abort`] has been /// called on the task. This is because the cancellation process may take /// some time, and this method does not return `true` until it has /// completed. /// /// ```rust /// use tokio::time; /// /// # #[tokio::main(flavor = "current_thread", start_paused = true)] /// # async fn main() { /// let handle1 = tokio::spawn(async { /// // do some stuff here /// }); /// let handle2 = tokio::spawn(async { /// // do some other stuff here /// time::sleep(time::Duration::from_secs(10)).await; /// }); /// // Wait for the task to finish /// handle2.abort(); /// time::sleep(time::Duration::from_secs(1)).await; /// assert!(handle1.is_finished()); /// assert!(handle2.is_finished()); /// # } /// ``` /// [`abort`]: method@JoinHandle::abort pub fn is_finished(&self) -> bool { let state = self.raw.header().state.load(); state.is_complete() } /// Set the waker that is notified when the task completes. pub(crate) fn set_join_waker(&mut self, waker: &Waker) { if self.raw.try_set_join_waker(waker) { // In this case the task has already completed. 
We wake the waker immediately. waker.wake_by_ref(); } } /// Returns a new `AbortHandle` that can be used to remotely abort this task. /// /// Awaiting a task cancelled by the `AbortHandle` might complete as usual if the task was /// already completed at the time it was cancelled, but most likely it /// will fail with a [cancelled] `JoinError`. /// /// ```rust /// use tokio::{time, task}; /// /// # #[tokio::main(flavor = "current_thread", start_paused = true)] /// # async fn main() { /// let mut handles = Vec::new(); /// /// handles.push(tokio::spawn(async { /// time::sleep(time::Duration::from_secs(10)).await; /// true /// })); /// /// handles.push(tokio::spawn(async { /// time::sleep(time::Duration::from_secs(10)).await; /// false /// })); /// /// let abort_handles: Vec<task::AbortHandle> = handles.iter().map(|h| h.abort_handle()).collect(); /// /// for handle in abort_handles { /// handle.abort(); /// } /// /// for handle in handles { /// assert!(handle.await.unwrap_err().is_cancelled()); /// } /// # } /// ``` /// [cancelled]: method@super::error::JoinError::is_cancelled #[must_use = "abort handles do nothing unless `.abort` is called"] pub fn abort_handle(&self) -> AbortHandle { self.raw.ref_inc(); AbortHandle::new(self.raw) } /// Returns a [task ID] that uniquely identifies this task relative to other /// currently spawned tasks. /// /// [task ID]: crate::task::Id pub fn id(&self) -> super::Id { // Safety: The header pointer is valid. unsafe { Header::get_id(self.raw.header_ptr()) } } } impl<T> Unpin for JoinHandle<T> {} impl<T> Future for JoinHandle<T> { type Output = super::Result<T>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { ready!(crate::trace::trace_leaf(cx)); let mut ret = Poll::Pending; // Keep track of task budget let coop = ready!(crate::task::coop::poll_proceed(cx)); // Try to read the task output. If the task is not yet complete, the // waker is stored and is notified once the task does complete. 
// // The function must go via the vtable, which requires erasing generic // types. To do this, the function "return" is placed on the stack // **before** calling the function and is passed into the function using // `*mut ()`. // // Safety: // // The type of `T` must match the task's output type. unsafe { self.raw.try_read_output(&mut ret, cx.waker()); } if ret.is_ready() { coop.made_progress(); } ret } } impl<T> Drop for JoinHandle<T> { fn drop(&mut self) { if self.raw.state().drop_join_handle_fast().is_ok() { return; } self.raw.drop_join_handle_slow(); } } impl<T> fmt::Debug for JoinHandle<T> where T: fmt::Debug, { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { // Safety: The header pointer is valid. let id_ptr = unsafe { Header::get_id_ptr(self.raw.header_ptr()) }; let id = unsafe { id_ptr.as_ref() }; fmt.debug_struct("JoinHandle").field("id", id).finish() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/task/waker.rs
tokio/src/runtime/task/waker.rs
use crate::runtime::task::{Header, RawTask, Schedule}; use std::marker::PhantomData; use std::mem::ManuallyDrop; use std::ops; use std::ptr::NonNull; use std::task::{RawWaker, RawWakerVTable, Waker}; pub(super) struct WakerRef<'a, S: 'static> { waker: ManuallyDrop<Waker>, _p: PhantomData<(&'a Header, S)>, } /// Returns a `WakerRef` which avoids having to preemptively increase the /// refcount if there is no need to do so. pub(super) fn waker_ref<S>(header: &NonNull<Header>) -> WakerRef<'_, S> where S: Schedule, { // `Waker::will_wake` uses the VTABLE pointer as part of the check. This // means that `will_wake` will always return false when using the current // task's waker. (discussion at rust-lang/rust#66281). // // To fix this, we use a single vtable. Since we pass in a reference at this // point and not an *owned* waker, we must ensure that `drop` is never // called on this waker instance. This is done by wrapping it with // `ManuallyDrop` and then never calling drop. let waker = unsafe { ManuallyDrop::new(Waker::from_raw(raw_waker(*header))) }; WakerRef { waker, _p: PhantomData, } } impl<S> ops::Deref for WakerRef<'_, S> { type Target = Waker; fn deref(&self) -> &Waker { &self.waker } } cfg_trace! { /// # Safety /// /// `$header` must be a valid pointer to a [`Header`]. macro_rules! trace { ($header:expr, $op:expr) => { if let Some(id) = Header::get_tracing_id(&$header) { tracing::trace!( target: "tokio::task::waker", op = $op, task.id = id.into_u64(), ); } } } } cfg_not_trace! { macro_rules! trace { ($header:expr, $op:expr) => { // noop let _ = &$header; } } } unsafe fn clone_waker(ptr: *const ()) -> RawWaker { // Safety: `ptr` was created from a `Header` pointer in function `waker_ref`. 
let header = unsafe { NonNull::new_unchecked(ptr as *mut Header) }; #[cfg_attr(not(all(tokio_unstable, feature = "tracing")), allow(unused_unsafe))] unsafe { trace!(header, "waker.clone"); } unsafe { header.as_ref() }.state.ref_inc(); raw_waker(header) } unsafe fn drop_waker(ptr: *const ()) { // Safety: `ptr` was created from a `Header` pointer in function `waker_ref`. let ptr = unsafe { NonNull::new_unchecked(ptr as *mut Header) }; // TODO; replace to #[expect(unused_unsafe)] after bumping MSRV to 1.81.0. #[cfg_attr(not(all(tokio_unstable, feature = "tracing")), allow(unused_unsafe))] unsafe { trace!(ptr, "waker.drop"); } let raw = unsafe { RawTask::from_raw(ptr) }; raw.drop_reference(); } unsafe fn wake_by_val(ptr: *const ()) { // Safety: `ptr` was created from a `Header` pointer in function `waker_ref`. let ptr = unsafe { NonNull::new_unchecked(ptr as *mut Header) }; // TODO; replace to #[expect(unused_unsafe)] after bumping MSRV to 1.81.0. #[cfg_attr(not(all(tokio_unstable, feature = "tracing")), allow(unused_unsafe))] unsafe { trace!(ptr, "waker.wake"); } let raw = unsafe { RawTask::from_raw(ptr) }; raw.wake_by_val(); } // Wake without consuming the waker unsafe fn wake_by_ref(ptr: *const ()) { // Safety: `ptr` was created from a `Header` pointer in function `waker_ref`. let ptr = unsafe { NonNull::new_unchecked(ptr as *mut Header) }; // TODO; replace to #[expect(unused_unsafe)] after bumping MSRV to 1.81.0. #[cfg_attr(not(all(tokio_unstable, feature = "tracing")), allow(unused_unsafe))] unsafe { trace!(ptr, "waker.wake_by_ref"); } let raw = unsafe { RawTask::from_raw(ptr) }; raw.wake_by_ref(); } static WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(clone_waker, wake_by_val, wake_by_ref, drop_waker); fn raw_waker(header: NonNull<Header>) -> RawWaker { let ptr = header.as_ptr() as *const (); RawWaker::new(ptr, &WAKER_VTABLE) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/task/abort.rs
tokio/src/runtime/task/abort.rs
use crate::runtime::task::{Header, RawTask}; use std::fmt; use std::panic::{RefUnwindSafe, UnwindSafe}; /// An owned permission to abort a spawned task, without awaiting its completion. /// /// Unlike a [`JoinHandle`], an `AbortHandle` does *not* represent the /// permission to await the task's completion, only to terminate it. /// /// The task may be aborted by calling the [`AbortHandle::abort`] method. /// Dropping an `AbortHandle` releases the permission to terminate the task /// --- it does *not* abort the task. /// /// Be aware that tasks spawned using [`spawn_blocking`] cannot be aborted /// because they are not async. If you call `abort` on a `spawn_blocking` task, /// then this *will not have any effect*, and the task will continue running /// normally. The exception is if the task has not started running yet; in that /// case, calling `abort` may prevent the task from starting. /// /// [`JoinHandle`]: crate::task::JoinHandle /// [`spawn_blocking`]: crate::task::spawn_blocking #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] pub struct AbortHandle { raw: RawTask, } impl AbortHandle { pub(super) fn new(raw: RawTask) -> Self { Self { raw } } /// Abort the task associated with the handle. /// /// Awaiting a cancelled task might complete as usual if the task was /// already completed at the time it was cancelled, but most likely it /// will fail with a [cancelled] `JoinError`. /// /// If the task was already cancelled, such as by [`JoinHandle::abort`], /// this method will do nothing. /// /// Be aware that tasks spawned using [`spawn_blocking`] cannot be aborted /// because they are not async. If you call `abort` on a `spawn_blocking` /// task, then this *will not have any effect*, and the task will continue /// running normally. The exception is if the task has not started running /// yet; in that case, calling `abort` may prevent the task from starting. /// /// See also [the module level docs] for more information on cancellation. 
/// /// [cancelled]: method@super::error::JoinError::is_cancelled /// [`JoinHandle::abort`]: method@super::JoinHandle::abort /// [the module level docs]: crate::task#cancellation /// [`spawn_blocking`]: crate::task::spawn_blocking pub fn abort(&self) { self.raw.remote_abort(); } /// Checks if the task associated with this `AbortHandle` has finished. /// /// Please note that this method can return `false` even if `abort` has been /// called on the task. This is because the cancellation process may take /// some time, and this method does not return `true` until it has /// completed. pub fn is_finished(&self) -> bool { let state = self.raw.state().load(); state.is_complete() } /// Returns a [task ID] that uniquely identifies this task relative to other /// currently spawned tasks. /// /// [task ID]: crate::task::Id pub fn id(&self) -> super::Id { // Safety: The header pointer is valid. unsafe { Header::get_id(self.raw.header_ptr()) } } } unsafe impl Send for AbortHandle {} unsafe impl Sync for AbortHandle {} impl UnwindSafe for AbortHandle {} impl RefUnwindSafe for AbortHandle {} impl fmt::Debug for AbortHandle { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { // Safety: The header pointer is valid. let id_ptr = unsafe { Header::get_id_ptr(self.raw.header_ptr()) }; let id = unsafe { id_ptr.as_ref() }; fmt.debug_struct("AbortHandle").field("id", id).finish() } } impl Drop for AbortHandle { fn drop(&mut self) { self.raw.drop_abort_handle(); } } impl Clone for AbortHandle { /// Returns a cloned `AbortHandle` that can be used to remotely abort this task. fn clone(&self) -> Self { self.raw.ref_inc(); Self::new(self.raw) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/task/harness.rs
tokio/src/runtime/task/harness.rs
use crate::future::Future; use crate::runtime::task::core::{Cell, Core, Header, Trailer}; use crate::runtime::task::state::{Snapshot, State}; use crate::runtime::task::waker::waker_ref; use crate::runtime::task::{Id, JoinError, Notified, RawTask, Schedule, Task}; #[cfg(tokio_unstable)] use crate::runtime::TaskMeta; use std::any::Any; use std::mem; use std::mem::ManuallyDrop; use std::panic; use std::ptr::NonNull; use std::task::{Context, Poll, Waker}; /// Typed raw task handle. pub(super) struct Harness<T: Future, S: 'static> { cell: NonNull<Cell<T, S>>, } impl<T, S> Harness<T, S> where T: Future, S: 'static, { pub(super) unsafe fn from_raw(ptr: NonNull<Header>) -> Harness<T, S> { Harness { cell: ptr.cast::<Cell<T, S>>(), } } fn header_ptr(&self) -> NonNull<Header> { self.cell.cast() } fn header(&self) -> &Header { unsafe { &*self.header_ptr().as_ptr() } } fn state(&self) -> &State { &self.header().state } fn trailer(&self) -> &Trailer { unsafe { &self.cell.as_ref().trailer } } fn core(&self) -> &Core<T, S> { unsafe { &self.cell.as_ref().core } } } /// Task operations that can be implemented without being generic over the /// scheduler or task. Only one version of these methods should exist in the /// final binary. impl RawTask { pub(super) fn drop_reference(self) { if self.state().ref_dec() { self.dealloc(); } } /// This call consumes a ref-count and notifies the task. This will create a /// new Notified and submit it if necessary. /// /// The caller does not need to hold a ref-count besides the one that was /// passed to this call. pub(super) fn wake_by_val(&self) { use super::state::TransitionToNotifiedByVal; match self.state().transition_to_notified_by_val() { TransitionToNotifiedByVal::Submit => { // The caller has given us a ref-count, and the transition has // created a new ref-count, so we now hold two. We turn the new // ref-count Notified and pass it to the call to `schedule`. 
// // The old ref-count is retained for now to ensure that the task // is not dropped during the call to `schedule` if the call // drops the task it was given. self.schedule(); // Now that we have completed the call to schedule, we can // release our ref-count. self.drop_reference(); } TransitionToNotifiedByVal::Dealloc => { self.dealloc(); } TransitionToNotifiedByVal::DoNothing => {} } } /// This call notifies the task. It will not consume any ref-counts, but the /// caller should hold a ref-count. This will create a new Notified and /// submit it if necessary. pub(super) fn wake_by_ref(&self) { use super::state::TransitionToNotifiedByRef; match self.state().transition_to_notified_by_ref() { TransitionToNotifiedByRef::Submit => { // The transition above incremented the ref-count for a new task // and the caller also holds a ref-count. The caller's ref-count // ensures that the task is not destroyed even if the new task // is dropped before `schedule` returns. self.schedule(); } TransitionToNotifiedByRef::DoNothing => {} } } /// Remotely aborts the task. /// /// The caller should hold a ref-count, but we do not consume it. /// /// This is similar to `shutdown` except that it asks the runtime to perform /// the shutdown. This is necessary to avoid the shutdown happening in the /// wrong thread for non-Send tasks. pub(super) fn remote_abort(&self) { if self.state().transition_to_notified_and_cancel() { // The transition has created a new ref-count, which we turn into // a Notified and pass to the task. // // Since the caller holds a ref-count, the task cannot be destroyed // before the call to `schedule` returns even if the call drops the // `Notified` internally. self.schedule(); } } /// Try to set the waker notified when the task is complete. Returns true if /// the task has already completed. If this call returns false, then the /// waker will not be notified. 
pub(super) fn try_set_join_waker(&self, waker: &Waker) -> bool { can_read_output(self.header(), self.trailer(), waker) } } impl<T, S> Harness<T, S> where T: Future, S: Schedule, { pub(super) fn drop_reference(self) { if self.state().ref_dec() { self.dealloc(); } } /// Polls the inner future. A ref-count is consumed. /// /// All necessary state checks and transitions are performed. /// Panics raised while polling the future are handled. pub(super) fn poll(self) { // We pass our ref-count to `poll_inner`. match self.poll_inner() { PollFuture::Notified => { // The `poll_inner` call has given us two ref-counts back. // We give one of them to a new task and call `yield_now`. self.core() .scheduler .yield_now(Notified(self.get_new_task())); // The remaining ref-count is now dropped. We kept the extra // ref-count until now to ensure that even if the `yield_now` // call drops the provided task, the task isn't deallocated // before after `yield_now` returns. self.drop_reference(); } PollFuture::Complete => { self.complete(); } PollFuture::Dealloc => { self.dealloc(); } PollFuture::Done => (), } } /// Polls the task and cancel it if necessary. This takes ownership of a /// ref-count. /// /// If the return value is Notified, the caller is given ownership of two /// ref-counts. /// /// If the return value is Complete, the caller is given ownership of a /// single ref-count, which should be passed on to `complete`. /// /// If the return value is `Dealloc`, then this call consumed the last /// ref-count and the caller should call `dealloc`. /// /// Otherwise the ref-count is consumed and the caller should not access /// `self` again. 
fn poll_inner(&self) -> PollFuture { use super::state::{TransitionToIdle, TransitionToRunning}; match self.state().transition_to_running() { TransitionToRunning::Success => { // Separated to reduce LLVM codegen fn transition_result_to_poll_future(result: TransitionToIdle) -> PollFuture { match result { TransitionToIdle::Ok => PollFuture::Done, TransitionToIdle::OkNotified => PollFuture::Notified, TransitionToIdle::OkDealloc => PollFuture::Dealloc, TransitionToIdle::Cancelled => PollFuture::Complete, } } let header_ptr = self.header_ptr(); let waker_ref = waker_ref::<S>(&header_ptr); let cx = Context::from_waker(&waker_ref); let res = poll_future(self.core(), cx); if res == Poll::Ready(()) { // The future completed. Move on to complete the task. return PollFuture::Complete; } let transition_res = self.state().transition_to_idle(); if let TransitionToIdle::Cancelled = transition_res { // The transition to idle failed because the task was // cancelled during the poll. cancel_task(self.core()); } transition_result_to_poll_future(transition_res) } TransitionToRunning::Cancelled => { cancel_task(self.core()); PollFuture::Complete } TransitionToRunning::Failed => PollFuture::Done, TransitionToRunning::Dealloc => PollFuture::Dealloc, } } /// Forcibly shuts down the task. /// /// Attempt to transition to `Running` in order to forcibly shutdown the /// task. If the task is currently running or in a state of completion, then /// there is nothing further to do. When the task completes running, it will /// notice the `CANCELLED` bit and finalize the task. pub(super) fn shutdown(self) { if !self.state().transition_to_shutdown() { // The task is concurrently running. No further work needed. self.drop_reference(); return; } // By transitioning the lifecycle to `Running`, we have permission to // drop the future. 
cancel_task(self.core()); self.complete(); } pub(super) fn dealloc(self) { // Observe that we expect to have mutable access to these objects // because we are going to drop them. This only matters when running // under loom. self.trailer().waker.with_mut(|_| ()); self.core().stage.with_mut(|_| ()); // Safety: The caller of this method just transitioned our ref-count to // zero, so it is our responsibility to release the allocation. // // We don't hold any references into the allocation at this point, but // it is possible for another thread to still hold a `&State` into the // allocation if that other thread has decremented its last ref-count, // but has not yet returned from the relevant method on `State`. // // However, the `State` type consists of just an `AtomicUsize`, and an // `AtomicUsize` wraps the entirety of its contents in an `UnsafeCell`. // As explained in the documentation for `UnsafeCell`, such references // are allowed to be dangling after their last use, even if the // reference has not yet gone out of scope. unsafe { drop(Box::from_raw(self.cell.as_ptr())); } } // ===== join handle ===== /// Read the task output into `dst`. pub(super) fn try_read_output(self, dst: &mut Poll<super::Result<T::Output>>, waker: &Waker) { if can_read_output(self.header(), self.trailer(), waker) { *dst = Poll::Ready(self.core().take_output()); } } pub(super) fn drop_join_handle_slow(self) { // Try to unset `JOIN_INTEREST` and `JOIN_WAKER`. This must be done as a first step in // case the task concurrently completed. let transition = self.state().transition_to_join_handle_dropped(); if transition.drop_output { // It is our responsibility to drop the output. This is critical as // the task output may not be `Send` and as such must remain with // the scheduler or `JoinHandle`. i.e. if the output remains in the // task structure until the task is deallocated, it may be dropped // by a Waker on any arbitrary thread. 
// // Panics are delivered to the user via the `JoinHandle`. Given that // they are dropping the `JoinHandle`, we assume they are not // interested in the panic and swallow it. let _ = panic::catch_unwind(panic::AssertUnwindSafe(|| { self.core().drop_future_or_output(); })); } if transition.drop_waker { // If the JOIN_WAKER flag is unset at this point, the task is either // already terminal or not complete so the `JoinHandle` is responsible // for dropping the waker. // Safety: // If the JOIN_WAKER bit is not set the join handle has exclusive // access to the waker as per rule 2 in task/mod.rs. // This can only be the case at this point in two scenarios: // 1. The task completed and the runtime unset `JOIN_WAKER` flag // after accessing the waker during task completion. So the // `JoinHandle` is the only one to access the join waker here. // 2. The task is not completed so the `JoinHandle` was able to unset // `JOIN_WAKER` bit itself to get mutable access to the waker. // The runtime will not access the waker when this flag is unset. unsafe { self.trailer().set_waker(None) }; } // Drop the `JoinHandle` reference, possibly deallocating the task self.drop_reference(); } // ====== internal ====== /// Completes the task. This method assumes that the state is RUNNING. fn complete(self) { // The future has completed and its output has been written to the task // stage. We transition from running to complete. let snapshot = self.state().transition_to_complete(); // We catch panics here in case dropping the future or waking the // JoinHandle panics. let _ = panic::catch_unwind(panic::AssertUnwindSafe(|| { if !snapshot.is_join_interested() { // The `JoinHandle` is not interested in the output of // this task. It is our responsibility to drop the // output. The join waker was already dropped by the // `JoinHandle` before. self.core().drop_future_or_output(); } else if snapshot.is_join_waker_set() { // Notify the waker. 
Reading the waker field is safe per rule 4 // in task/mod.rs, since the JOIN_WAKER bit is set and the call // to transition_to_complete() above set the COMPLETE bit. self.trailer().wake_join(); // Inform the `JoinHandle` that we are done waking the waker by // unsetting the `JOIN_WAKER` bit. If the `JoinHandle` has // already been dropped and `JOIN_INTEREST` is unset, then we must // drop the waker ourselves. if !self .state() .unset_waker_after_complete() .is_join_interested() { // SAFETY: We have COMPLETE=1 and JOIN_INTEREST=0, so // we have exclusive access to the waker. unsafe { self.trailer().set_waker(None) }; } } })); // We catch panics here in case invoking a hook panics. // // We call this in a separate block so that it runs after the task appears to have // completed and will still run if the destructor panics. #[cfg(tokio_unstable)] if let Some(f) = self.trailer().hooks.task_terminate_callback.as_ref() { let _ = panic::catch_unwind(panic::AssertUnwindSafe(|| { f(&TaskMeta { id: self.core().task_id, spawned_at: self.core().spawned_at.into(), _phantom: Default::default(), }) })); } // The task has completed execution and will no longer be scheduled. let num_release = self.release(); if self.state().transition_to_terminal(num_release) { self.dealloc(); } } /// Releases the task from the scheduler. Returns the number of ref-counts /// that should be decremented. fn release(&self) -> usize { // We don't actually increment the ref-count here, but the new task is // never destroyed, so that's ok. let me = ManuallyDrop::new(self.get_new_task()); if let Some(task) = self.core().scheduler.release(&me) { mem::forget(task); 2 } else { 1 } } /// Creates a new task that holds its own ref-count. /// /// # Safety /// /// Any use of `self` after this call must ensure that a ref-count to the /// task holds the task alive until after the use of `self`. 
Passing the /// returned Task to any method on `self` is unsound if dropping the Task /// could drop `self` before the call on `self` returned. fn get_new_task(&self) -> Task<S> { // safety: The header is at the beginning of the cell, so this cast is // safe. unsafe { Task::from_raw(self.cell.cast()) } } } fn can_read_output(header: &Header, trailer: &Trailer, waker: &Waker) -> bool { // Load a snapshot of the current task state let snapshot = header.state.load(); debug_assert!(snapshot.is_join_interested()); if !snapshot.is_complete() { // If the task is not complete, try storing the provided waker in the // task's waker field. let res = if snapshot.is_join_waker_set() { // If JOIN_WAKER is set, then JoinHandle has previously stored a // waker in the waker field per step (iii) of rule 5 in task/mod.rs. // Optimization: if the stored waker and the provided waker wake the // same task, then return without touching the waker field. (Reading // the waker field below is safe per rule 3 in task/mod.rs.) if unsafe { trailer.will_wake(waker) } { return false; } // Otherwise swap the stored waker with the provided waker by // following the rule 5 in task/mod.rs. header .state .unset_waker() .and_then(|snapshot| set_join_waker(header, trailer, waker.clone(), snapshot)) } else { // If JOIN_WAKER is unset, then JoinHandle has mutable access to the // waker field per rule 2 in task/mod.rs; therefore, skip step (i) // of rule 5 and try to store the provided waker in the waker field. set_join_waker(header, trailer, waker.clone(), snapshot) }; match res { Ok(_) => return false, Err(snapshot) => { assert!(snapshot.is_complete()); } } } true } fn set_join_waker( header: &Header, trailer: &Trailer, waker: Waker, snapshot: Snapshot, ) -> Result<Snapshot, Snapshot> { assert!(snapshot.is_join_interested()); assert!(!snapshot.is_join_waker_set()); // Safety: Only the `JoinHandle` may set the `waker` field. When // `JOIN_INTEREST` is **not** set, nothing else will touch the field. 
unsafe { trailer.set_waker(Some(waker)); } // Update the `JoinWaker` state accordingly let res = header.state.set_join_waker(); // If the state could not be updated, then clear the join waker if res.is_err() { unsafe { trailer.set_waker(None); } } res } enum PollFuture { Complete, Notified, Done, Dealloc, } /// Cancels the task and store the appropriate error in the stage field. fn cancel_task<T: Future, S: Schedule>(core: &Core<T, S>) { // Drop the future from a panic guard. let res = panic::catch_unwind(panic::AssertUnwindSafe(|| { core.drop_future_or_output(); })); core.store_output(Err(panic_result_to_join_error(core.task_id, res))); } fn panic_result_to_join_error( task_id: Id, res: Result<(), Box<dyn Any + Send + 'static>>, ) -> JoinError { match res { Ok(()) => JoinError::cancelled(task_id), Err(panic) => JoinError::panic(task_id, panic), } } /// Polls the future. If the future completes, the output is written to the /// stage field. fn poll_future<T: Future, S: Schedule>(core: &Core<T, S>, cx: Context<'_>) -> Poll<()> { // Poll the future. let output = panic::catch_unwind(panic::AssertUnwindSafe(|| { struct Guard<'a, T: Future, S: Schedule> { core: &'a Core<T, S>, } impl<'a, T: Future, S: Schedule> Drop for Guard<'a, T, S> { fn drop(&mut self) { // If the future panics on poll, we drop it inside the panic // guard. self.core.drop_future_or_output(); } } let guard = Guard { core }; let res = guard.core.poll(cx); mem::forget(guard); res })); // Prepare output for being placed in the core stage. let output = match output { Ok(Poll::Pending) => return Poll::Pending, Ok(Poll::Ready(output)) => Ok(output), Err(panic) => Err(panic_to_error(&core.scheduler, core.task_id, panic)), }; // Catch and ignore panics if the future panics on drop. 
let res = panic::catch_unwind(panic::AssertUnwindSafe(|| { core.store_output(output); })); if res.is_err() { core.scheduler.unhandled_panic(); } Poll::Ready(()) } #[cold] fn panic_to_error<S: Schedule>( scheduler: &S, task_id: Id, panic: Box<dyn Any + Send + 'static>, ) -> JoinError { scheduler.unhandled_panic(); JoinError::panic(task_id, panic) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/task/trace/tree.rs
tokio/src/runtime/task/trace/tree.rs
use std::collections::{hash_map::DefaultHasher, HashMap, HashSet}; use std::fmt; use std::hash::{Hash, Hasher}; use super::{Backtrace, Symbol, SymbolTrace, Trace}; /// An adjacency list representation of an execution tree. /// /// This tree provides a convenient intermediate representation for formatting /// [`Trace`] as a tree. pub(super) struct Tree { /// The roots of the trees. /// /// There should only be one root, but the code is robust to multiple roots. roots: HashSet<Symbol>, /// The adjacency list of symbols in the execution tree(s). edges: HashMap<Symbol, HashSet<Symbol>>, } impl Tree { /// Constructs a [`Tree`] from [`Trace`] pub(super) fn from_trace(trace: Trace) -> Self { let mut roots: HashSet<Symbol> = HashSet::default(); let mut edges: HashMap<Symbol, HashSet<Symbol>> = HashMap::default(); for trace in trace.backtraces { let trace = to_symboltrace(trace); if let Some(first) = trace.first() { roots.insert(first.to_owned()); } let mut trace = trace.into_iter().peekable(); while let Some(frame) = trace.next() { let subframes = edges.entry(frame).or_default(); if let Some(subframe) = trace.peek() { subframes.insert(subframe.clone()); } } } Tree { roots, edges } } /// Produces the sub-symbols of a given symbol. fn consequences(&self, frame: &Symbol) -> Option<impl ExactSizeIterator<Item = &Symbol>> { Some(self.edges.get(frame)?.iter()) } /// Format this [`Tree`] as a textual tree. 
fn display<W: fmt::Write>( &self, f: &mut W, root: &Symbol, is_last: bool, prefix: &str, ) -> fmt::Result { let root_fmt = format!("{root}"); let current; let next; if is_last { current = format!("{prefix}└╼\u{a0}{root_fmt}"); next = format!("{prefix}\u{a0}\u{a0}\u{a0}"); } else { current = format!("{prefix}├╼\u{a0}{root_fmt}"); next = format!("{prefix}│\u{a0}\u{a0}"); } write!(f, "{}", { let mut current = current.chars(); current.next().unwrap(); current.next().unwrap(); &current.as_str() })?; if let Some(consequences) = self.consequences(root) { let len = consequences.len(); for (i, consequence) in consequences.enumerate() { let is_last = i == len - 1; writeln!(f)?; self.display(f, consequence, is_last, &next)?; } } Ok(()) } } impl fmt::Display for Tree { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for root in &self.roots { self.display(f, root, true, " ")?; } Ok(()) } } /// Resolve a sequence of [`backtrace::BacktraceFrame`]s into a sequence of /// [`Symbol`]s. fn to_symboltrace(backtrace: Backtrace) -> SymbolTrace { // Resolve the backtrace frames to symbols. let backtrace: Backtrace = { let mut backtrace = backtrace::Backtrace::from(backtrace); backtrace.resolve(); backtrace.into() }; // Accumulate the symbols in descending order into `symboltrace`. let mut symboltrace: SymbolTrace = vec![]; let mut state = DefaultHasher::new(); for frame in backtrace.into_iter().rev() { for symbol in frame.symbols().iter().rev() { let symbol = Symbol { symbol: symbol.clone(), parent_hash: state.finish(), }; symbol.hash(&mut state); symboltrace.push(symbol); } } symboltrace }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/task/trace/mod.rs
tokio/src/runtime/task/trace/mod.rs
use crate::loom::sync::Arc;
use crate::runtime::context;
use crate::runtime::scheduler::{self, current_thread, Inject};
use crate::task::Id;

use backtrace::BacktraceFrame;
use std::cell::Cell;
use std::collections::VecDeque;
use std::ffi::c_void;
use std::fmt;
use std::future::Future;
use std::pin::Pin;
use std::ptr::{self, NonNull};
use std::task::{self, Poll};

mod symbol;
mod tree;

use symbol::Symbol;
use tree::Tree;

use super::{Notified, OwnedTasks, Schedule};

type Backtrace = Vec<BacktraceFrame>;
type SymbolTrace = Vec<Symbol>;

/// The ambient backtracing context.
pub(crate) struct Context {
    /// The address of [`Trace::root`] establishes an upper unwinding bound on
    /// the backtraces in `Trace`.
    active_frame: Cell<Option<NonNull<Frame>>>,
    /// The place to stash backtraces.
    collector: Cell<Option<Trace>>,
}

/// A [`Frame`] in an intrusive, doubly-linked tree of [`Frame`]s.
struct Frame {
    /// The location associated with this frame.
    inner_addr: *const c_void,

    /// The parent frame, if any.
    parent: Option<NonNull<Frame>>,
}

/// An tree execution trace.
///
/// Traces are captured with [`Trace::capture`], rooted with [`Trace::root`]
/// and leaved with [`trace_leaf`].
#[derive(Clone, Debug)]
pub(crate) struct Trace {
    // The linear backtraces that comprise this trace. These linear traces can
    // be re-knitted into a tree.
    backtraces: Vec<Backtrace>,
}

pin_project_lite::pin_project! {
    #[derive(Debug, Clone)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    /// A future wrapper that roots traces (captured with [`Trace::capture`]).
    pub struct Root<T> {
        #[pin]
        future: T,
    }
}

const FAIL_NO_THREAD_LOCAL: &str = "The Tokio thread-local has been destroyed \
                                    as part of shutting down the current \
                                    thread, so collecting a taskdump is not \
                                    possible.";

impl Context {
    /// Constructs an empty tracing context (no active frame, no collector).
    pub(crate) const fn new() -> Self {
        Context {
            active_frame: Cell::new(None),
            collector: Cell::new(None),
        }
    }

    /// SAFETY: Callers of this function must ensure that trace frames always
    /// form a valid linked list.
    unsafe fn try_with_current<F, R>(f: F) -> Option<R>
    where
        F: FnOnce(&Self) -> R,
    {
        // Returns `None` if the thread-local has already been destroyed.
        unsafe { crate::runtime::context::with_trace(f) }
    }

    /// SAFETY: Callers of this function must ensure that trace frames always
    /// form a valid linked list.
    unsafe fn with_current_frame<F, R>(f: F) -> R
    where
        F: FnOnce(&Cell<Option<NonNull<Frame>>>) -> R,
    {
        unsafe {
            Self::try_with_current(|context| f(&context.active_frame)).expect(FAIL_NO_THREAD_LOCAL)
        }
    }

    /// Runs `f` with access to the current thread's trace collector cell.
    fn with_current_collector<F, R>(f: F) -> R
    where
        F: FnOnce(&Cell<Option<Trace>>) -> R,
    {
        // SAFETY: This call can only access the collector field, so it cannot
        // break the trace frame linked list.
        unsafe {
            Self::try_with_current(|context| f(&context.collector)).expect(FAIL_NO_THREAD_LOCAL)
        }
    }

    /// Produces `true` if the current task is being traced; otherwise false.
    pub(crate) fn is_tracing() -> bool {
        Self::with_current_collector(|maybe_collector| {
            // `Cell` has no way to peek at the contents, so take the value out
            // and put it straight back.
            let collector = maybe_collector.take();
            let result = collector.is_some();
            maybe_collector.set(collector);
            result
        })
    }
}

impl Trace {
    /// Invokes `f`, returning both its result and the collection of backtraces
    /// captured at each sub-invocation of [`trace_leaf`].
    #[inline(never)]
    pub(crate) fn capture<F, R>(f: F) -> (R, Trace)
    where
        F: FnOnce() -> R,
    {
        let collector = Trace { backtraces: vec![] };

        // Install a fresh collector for the duration of `f`, remembering any
        // previously-installed one so nested captures compose.
        let previous = Context::with_current_collector(|current| current.replace(Some(collector)));

        let result = f();

        // Swap the previous collector back in; the one we installed above is
        // necessarily still present, hence the `unwrap`.
        let collector =
            Context::with_current_collector(|current| current.replace(previous)).unwrap();

        (result, collector)
    }

    /// The root of a trace.
    #[inline(never)]
    pub(crate) fn root<F>(future: F) -> Root<F> {
        Root { future }
    }

    /// The raw linear backtraces collected so far.
    pub(crate) fn backtraces(&self) -> &[Backtrace] {
        &self.backtraces
    }
}

/// If this is a sub-invocation of [`Trace::capture`], capture a backtrace.
///
/// The captured backtrace will be returned by [`Trace::capture`].
///
/// Invoking this function does nothing when it is not a sub-invocation
/// [`Trace::capture`].
// This function is marked `#[inline(never)]` to ensure that it gets a distinct `Frame` in the
// backtrace, below which frames should not be included in the backtrace (since they reflect the
// internal implementation details of this crate).
#[inline(never)]
pub(crate) fn trace_leaf(cx: &mut task::Context<'_>) -> Poll<()> {
    // Safety: We don't manipulate the current context's active frame.
    let did_trace = unsafe {
        Context::try_with_current(|context_cell| {
            if let Some(mut collector) = context_cell.collector.take() {
                let mut frames = vec![];
                let mut above_leaf = false;

                if let Some(active_frame) = context_cell.active_frame.get() {
                    let active_frame = active_frame.as_ref();

                    // Walk the stack, keeping only frames strictly between
                    // this function (`trace_leaf`) and the active `Root`.
                    backtrace::trace(|frame| {
                        let below_root = !ptr::eq(frame.symbol_address(), active_frame.inner_addr);

                        // only capture frames above `Trace::leaf` and below
                        // `Trace::root`.
                        if above_leaf && below_root {
                            frames.push(frame.to_owned().into());
                        }

                        if ptr::eq(frame.symbol_address(), trace_leaf as *const _) {
                            above_leaf = true;
                        }

                        // only continue unwinding if we're below `Trace::root`
                        below_root
                    });
                }

                collector.backtraces.push(frames);
                context_cell.collector.set(Some(collector));
                true
            } else {
                false
            }
        })
        .unwrap_or(false)
    };

    if did_trace {
        // Use the same logic that `yield_now` uses to send out wakeups after
        // the task yields.
        context::with_scheduler(|scheduler| {
            if let Some(scheduler) = scheduler {
                match scheduler {
                    scheduler::Context::CurrentThread(s) => s.defer.defer(cx.waker()),
                    #[cfg(feature = "rt-multi-thread")]
                    scheduler::Context::MultiThread(s) => s.defer.defer(cx.waker()),
                }
            }
        });

        Poll::Pending
    } else {
        Poll::Ready(())
    }
}

impl fmt::Display for Trace {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Re-knit the linear backtraces into a tree for display.
        Tree::from_trace(self.clone()).fmt(f)
    }
}

/// Ad-hoc scope guard: runs `f` when the returned value is dropped,
/// including during unwinding.
fn defer<F: FnOnce() -> R, R>(f: F) -> impl Drop {
    use std::mem::ManuallyDrop;

    struct Defer<F: FnOnce() -> R, R>(ManuallyDrop<F>);

    impl<F: FnOnce() -> R, R> Drop for Defer<F, R> {
        #[inline(always)]
        fn drop(&mut self) {
            // SAFETY: `self.0` is taken exactly once, here in `drop`.
            unsafe {
                ManuallyDrop::take(&mut self.0)();
            }
        }
    }

    Defer(ManuallyDrop::new(f))
}

impl<T: Future> Future for Root<T> {
    type Output = T::Output;

    #[inline(never)]
    fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
        // SAFETY: The context's current frame is restored to its original state
        // before `frame` is dropped.
        unsafe {
            let mut frame = Frame {
                inner_addr: Self::poll as *const c_void,
                parent: None,
            };

            // Push this frame onto the thread-local frame list for the
            // duration of the inner poll...
            Context::with_current_frame(|current| {
                frame.parent = current.take();
                current.set(Some(NonNull::from(&frame)));
            });

            // ...and pop it again on the way out, even if the inner poll
            // panics (the guard runs on unwind).
            let _restore = defer(|| {
                Context::with_current_frame(|current| {
                    current.set(frame.parent);
                });
            });

            let this = self.project();
            this.future.poll(cx)
        }
    }
}

/// Trace and poll all tasks of the `current_thread` runtime.
pub(in crate::runtime) fn trace_current_thread(
    owned: &OwnedTasks<Arc<current_thread::Handle>>,
    local: &mut VecDeque<Notified<Arc<current_thread::Handle>>>,
    injection: &Inject<Arc<current_thread::Handle>>,
) -> Vec<(Id, Trace)> {
    // clear the local and injection queues

    let mut dequeued = Vec::new();

    while let Some(task) = local.pop_back() {
        dequeued.push(task);
    }

    while let Some(task) = injection.pop() {
        dequeued.push(task);
    }

    // precondition: We have drained the tasks from the injection queue.
    trace_owned(owned, dequeued)
}

cfg_rt_multi_thread! {
    use crate::loom::sync::Mutex;
    use crate::runtime::scheduler::multi_thread;
    use crate::runtime::scheduler::multi_thread::Synced;
    use crate::runtime::scheduler::inject::Shared;

    /// Trace and poll all tasks of the `current_thread` runtime.
    ///
    /// ## Safety
    ///
    /// Must be called with the same `synced` that `injection` was created with.
    pub(in crate::runtime) unsafe fn trace_multi_thread(
        owned: &OwnedTasks<Arc<multi_thread::Handle>>,
        local: &mut multi_thread::queue::Local<Arc<multi_thread::Handle>>,
        synced: &Mutex<Synced>,
        injection: &Shared<Arc<multi_thread::Handle>>,
    ) -> Vec<(Id, Trace)> {
        let mut dequeued = Vec::new();

        // clear the local queue
        while let Some(notified) = local.pop() {
            dequeued.push(notified);
        }

        // clear the injection queue
        let mut synced = synced.lock();
        // Safety: exactly the same safety requirements as `trace_multi_thread` function.
        while let Some(notified) = unsafe { injection.pop(&mut synced.inject) } {
            dequeued.push(notified);
        }

        drop(synced);

        // precondition: we have drained the tasks from the local and injection
        // queues.
        trace_owned(owned, dequeued)
    }
}

/// Trace the `OwnedTasks`.
///
/// # Preconditions
///
/// This helper presumes exclusive access to each task. The tasks must not exist
/// in any other queue.
fn trace_owned<S: Schedule>(owned: &OwnedTasks<S>, dequeued: Vec<Notified<S>>) -> Vec<(Id, Trace)> {
    let mut tasks = dequeued;
    // Notify and trace all un-notified tasks. The dequeued tasks are already
    // notified and so do not need to be re-notified.
    owned.for_each(|task| {
        // Notify the task (and thus make it poll-able) and stash it. This fails
        // if the task is already notified. In these cases, we skip tracing the
        // task.
        if let Some(notified) = task.notify_for_tracing() {
            tasks.push(notified);
        }
        // We do not poll tasks here, since we hold a lock on `owned` and the
        // task may complete and need to remove itself from `owned`. Polling
        // such a task here would result in a deadlock.
    });

    // Now that the lock on `owned` is released, run each task under a trace
    // capture and pair the resulting trace with the task's id.
    tasks
        .into_iter()
        .map(|task| {
            let local_notified = owned.assert_owner(task);
            let id = local_notified.task.id();
            let ((), trace) = Trace::capture(|| local_notified.run());
            (id, trace)
        })
        .collect()
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/task/trace/symbol.rs
tokio/src/runtime/task/trace/symbol.rs
use backtrace::BacktraceSymbol;

use std::fmt;
use std::hash::{Hash, Hasher};
use std::ptr;

/// A symbol in a backtrace.
///
/// This wrapper type serves two purposes. The first is that it provides a
/// representation of a symbol that can be inserted into hashmaps and hashsets;
/// the [`backtrace`] crate does not define [`Hash`], [`PartialEq`], or [`Eq`]
/// on [`BacktraceSymbol`], and recommends that users define their own wrapper
/// which implements these traits.
///
/// Second, this wrapper includes a `parent_hash` field that uniquely
/// identifies this symbol's position in its trace. Otherwise, e.g., our code
/// would not be able to distinguish between recursive calls of a function at
/// different depths.
#[derive(Clone)]
pub(super) struct Symbol {
    pub(super) symbol: BacktraceSymbol,
    pub(super) parent_hash: u64,
}

impl Hash for Symbol {
    // NOTE: the hashed fields (and their order) must stay consistent with the
    // `PartialEq` impl below, and with the running-hash chaining performed in
    // `tree::to_symboltrace`.
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        if let Some(name) = self.symbol.name() {
            name.as_bytes().hash(state);
        }

        if let Some(addr) = self.symbol.addr() {
            ptr::hash(addr, state);
        }

        self.symbol.filename().hash(state);
        self.symbol.lineno().hash(state);
        self.symbol.colno().hash(state);
        self.parent_hash.hash(state);
    }
}

impl PartialEq for Symbol {
    // Two symbols are equal when every observable attribute matches,
    // *including* `parent_hash` — so the same function appearing at two
    // different positions in a trace compares unequal.
    fn eq(&self, other: &Self) -> bool {
        (self.parent_hash == other.parent_hash)
            && match (self.symbol.name(), other.symbol.name()) {
                (None, None) => true,
                (Some(lhs_name), Some(rhs_name)) => lhs_name.as_bytes() == rhs_name.as_bytes(),
                _ => false,
            }
            && match (self.symbol.addr(), other.symbol.addr()) {
                (None, None) => true,
                (Some(lhs_addr), Some(rhs_addr)) => ptr::eq(lhs_addr, rhs_addr),
                _ => false,
            }
            && (self.symbol.filename() == other.symbol.filename())
            && (self.symbol.lineno() == other.symbol.lineno())
            && (self.symbol.colno() == other.symbol.colno())
    }
}

impl Eq for Symbol {}

impl fmt::Display for Symbol {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(name) = self.symbol.name() {
            let name = name.to_string();
            // Drop the last `::`-separated segment of the demangled name.
            // NOTE(review): presumably this strips the trailing hash suffix
            // (e.g. `::h0123456789abcdef`) that rustc appends — confirm.
            let name = if let Some((name, _)) = name.rsplit_once("::") {
                name
            } else {
                &name
            };
            fmt::Display::fmt(&name, f)?;
        }

        if let Some(filename) = self.symbol.filename() {
            f.write_str(" at ")?;
            filename.to_string_lossy().fmt(f)?;
            // Line/column are only appended when the preceding component is
            // available (no column without a line, no line without a file).
            if let Some(lineno) = self.symbol.lineno() {
                f.write_str(":")?;
                fmt::Display::fmt(&lineno, f)?;
                if let Some(colno) = self.symbol.colno() {
                    f.write_str(":")?;
                    fmt::Display::fmt(&colno, f)?;
                }
            }
        }

        Ok(())
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/io/scheduled_io.rs
tokio/src/runtime/io/scheduled_io.rs
use crate::io::interest::Interest;
use crate::io::ready::Ready;
use crate::loom::sync::atomic::AtomicUsize;
use crate::loom::sync::Mutex;
use crate::runtime::io::{Direction, ReadyEvent, Tick};
use crate::util::bit;
use crate::util::linked_list::{self, LinkedList};
use crate::util::WakeList;

use std::cell::UnsafeCell;
use std::future::Future;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::ptr::NonNull;
use std::sync::atomic::Ordering::{AcqRel, Acquire};
use std::task::{Context, Poll, Waker};

/// Stored in the I/O driver resource slab.
#[derive(Debug)]
// # This struct should be cache padded to avoid false sharing. The cache padding rules are copied
// from crossbeam-utils/src/cache_padded.rs
//
// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache
// lines at a time, so we have to align to 128 bytes rather than 64.
//
// Sources:
// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
//
// ARM's big.LITTLE architecture has asymmetric cores and "big" cores have 128-byte cache line size.
//
// Sources:
// - https://www.mono-project.com/news/2016/09/12/arm64-icache/
//
// powerpc64 has 128-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9
#[cfg_attr(
    any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "powerpc64",
    ),
    repr(align(128))
)]
// arm, mips, mips64, sparc, and hexagon have 32-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9
// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L17
// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/hexagon/include/asm/cache.h#L12
#[cfg_attr(
    any(
        target_arch = "arm",
        target_arch = "mips",
        target_arch = "mips64",
        target_arch = "sparc",
        target_arch = "hexagon",
    ),
    repr(align(32))
)]
// m68k has 16-byte cache line size.
//
// Sources:
// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/m68k/include/asm/cache.h#L9
#[cfg_attr(target_arch = "m68k", repr(align(16)))]
// s390x has 256-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7
// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/s390/include/asm/cache.h#L13
#[cfg_attr(target_arch = "s390x", repr(align(256)))]
// x86, riscv, wasm, and sparc64 have 64-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7
// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L19
// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/riscv/include/asm/cache.h#L10
//
// All others are assumed to have 64-byte cache line size.
#[cfg_attr(
    not(any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "powerpc64",
        target_arch = "arm",
        target_arch = "mips",
        target_arch = "mips64",
        target_arch = "sparc",
        target_arch = "hexagon",
        target_arch = "m68k",
        target_arch = "s390x",
    )),
    repr(align(64))
)]
pub(crate) struct ScheduledIo {
    /// Intrusive pointers for membership in the I/O driver's registration list.
    pub(super) linked_list_pointers: UnsafeCell<linked_list::Pointers<Self>>,

    /// Packs the resource's readiness and I/O driver latest tick.
    readiness: AtomicUsize,

    /// Waiters blocked on this resource, guarded by a mutex.
    waiters: Mutex<Waiters>,
}

type WaitList = LinkedList<Waiter, <Waiter as linked_list::Link>::Target>;

#[derive(Debug, Default)]
struct Waiters {
    /// List of all current waiters.
    list: WaitList,

    /// Waker used for `AsyncRead`.
    reader: Option<Waker>,

    /// Waker used for `AsyncWrite`.
    writer: Option<Waker>,
}

#[derive(Debug)]
struct Waiter {
    /// Intrusive linked-list pointers; the node lives inside the `Readiness`
    /// future and is linked into `Waiters::list`.
    pointers: linked_list::Pointers<Waiter>,

    /// The waker for this task.
    waker: Option<Waker>,

    /// The interest this waiter is waiting on.
    interest: Interest,

    // Set to `true` (under the waiters lock) when this waiter has been woken.
    is_ready: bool,

    /// Should never be `Unpin`.
    _p: PhantomPinned,
}

generate_addr_of_methods! {
    impl<> Waiter {
        unsafe fn addr_of_pointers(self: NonNull<Self>) -> NonNull<linked_list::Pointers<Waiter>> {
            &self.pointers
        }
    }
}

/// Future returned by `readiness()`.
struct Readiness<'a> {
    scheduled_io: &'a ScheduledIo,

    state: State,

    /// Entry in the waiter `LinkedList`.
    waiter: UnsafeCell<Waiter>,
}

// State machine for the `Readiness` future: Init -> Waiting -> Done.
enum State {
    Init,
    Waiting,
    Done,
}

// The `ScheduledIo::readiness` (`AtomicUsize`) is packed full of goodness.
//
// | shutdown | driver tick | readiness |
// |----------+-------------+-----------|
// |  1 bit   |  15 bits    | 16 bits   |

const READINESS: bit::Pack = bit::Pack::least_significant(16);

const TICK: bit::Pack = READINESS.then(15);

const SHUTDOWN: bit::Pack = TICK.then(1);

// ===== impl ScheduledIo =====

impl Default for ScheduledIo {
    fn default() -> ScheduledIo {
        ScheduledIo {
            linked_list_pointers: UnsafeCell::new(linked_list::Pointers::new()),
            readiness: AtomicUsize::new(0),
            waiters: Mutex::new(Waiters::default()),
        }
    }
}

impl ScheduledIo {
    /// The `mio::Token` the driver uses to route events back to this resource;
    /// encodes this `ScheduledIo`'s address via the pointer-expose domain.
    pub(crate) fn token(&self) -> mio::Token {
        mio::Token(super::EXPOSE_IO.expose_provenance(self))
    }

    /// Invoked when the IO driver is shut down; forces this `ScheduledIo` into a
    /// permanently shutdown state.
    pub(super) fn shutdown(&self) {
        let mask = SHUTDOWN.pack(1, 0);
        self.readiness.fetch_or(mask, AcqRel);
        self.wake(Ready::ALL);
    }

    /// Sets the readiness on this `ScheduledIo` by invoking the given closure on
    /// the current value, returning the previous readiness value.
    ///
    /// # Arguments
    /// - `tick`: whether setting the tick or trying to clear readiness for a
    ///   specific tick.
    /// - `f`: a closure returning a new readiness value given the previous
    ///   readiness.
    pub(super) fn set_readiness(&self, tick_op: Tick, f: impl Fn(Ready) -> Ready) {
        // `fetch_update` retries until the CAS succeeds (or the closure
        // returns `None`, which aborts the update).
        let _ = self.readiness.fetch_update(AcqRel, Acquire, |curr| {
            // If the io driver is shut down, then you are only allowed to clear readiness.
            debug_assert!(SHUTDOWN.unpack(curr) == 0 || matches!(tick_op, Tick::Clear(_)));

            const MAX_TICK: usize = TICK.max_value() + 1;
            let tick = TICK.unpack(curr);

            let new_tick = match tick_op {
                // Trying to clear readiness with an old event!
                Tick::Clear(t) if tick as u8 != t => return None,
                Tick::Clear(t) => t as usize,
                Tick::Set => tick.wrapping_add(1) % MAX_TICK,
            };
            let ready = Ready::from_usize(READINESS.unpack(curr));
            Some(TICK.pack(new_tick, f(ready).as_usize()))
        });
    }

    /// Notifies all pending waiters that have registered interest in `ready`.
    ///
    /// There may be many waiters to notify. Waking the pending task **must** be
    /// done from outside of the lock otherwise there is a potential for a
    /// deadlock.
    ///
    /// A stack array of wakers is created and filled with wakers to notify, the
    /// lock is released, and the wakers are notified. Because there may be more
    /// than 32 wakers to notify, if the stack array fills up, the lock is
    /// released, the array is cleared, and the iteration continues.
    pub(super) fn wake(&self, ready: Ready) {
        let mut wakers = WakeList::new();

        let mut waiters = self.waiters.lock();

        // check for AsyncRead slot
        if ready.is_readable() {
            if let Some(waker) = waiters.reader.take() {
                wakers.push(waker);
            }
        }

        // check for AsyncWrite slot
        if ready.is_writable() {
            if let Some(waker) = waiters.writer.take() {
                wakers.push(waker);
            }
        }

        'outer: loop {
            let mut iter = waiters.list.drain_filter(|w| ready.satisfies(w.interest));

            while wakers.can_push() {
                match iter.next() {
                    Some(waiter) => {
                        // SAFETY: the waiter was just removed from the list
                        // and the lock is held, so we have exclusive access.
                        let waiter = unsafe { &mut *waiter.as_ptr() };

                        if let Some(waker) = waiter.waker.take() {
                            waiter.is_ready = true;
                            wakers.push(waker);
                        }
                    }
                    None => {
                        break 'outer;
                    }
                }
            }

            // The stack array of wakers is full: release the lock, flush the
            // wakeups, then re-acquire and continue draining.
            drop(waiters);

            wakers.wake_all();

            // Acquire the lock again.
            waiters = self.waiters.lock();
        }

        // Release the lock before notifying
        drop(waiters);

        wakers.wake_all();
    }

    /// Snapshot the current readiness (masked by `interest`) plus tick and
    /// shutdown flag, without registering any waker.
    pub(super) fn ready_event(&self, interest: Interest) -> ReadyEvent {
        let curr = self.readiness.load(Acquire);

        ReadyEvent {
            tick: TICK.unpack(curr) as u8,
            ready: interest.mask() & Ready::from_usize(READINESS.unpack(curr)),
            is_shutdown: SHUTDOWN.unpack(curr) != 0,
        }
    }

    /// Polls for readiness events in a given direction.
    ///
    /// These are to support `AsyncRead` and `AsyncWrite` polling methods,
    /// which cannot use the `async fn` version. This uses reserved reader
    /// and writer slots.
    pub(super) fn poll_readiness(
        &self,
        cx: &mut Context<'_>,
        direction: Direction,
    ) -> Poll<ReadyEvent> {
        let curr = self.readiness.load(Acquire);

        let ready = direction.mask() & Ready::from_usize(READINESS.unpack(curr));
        let is_shutdown = SHUTDOWN.unpack(curr) != 0;

        if ready.is_empty() && !is_shutdown {
            // Update the task info
            let mut waiters = self.waiters.lock();
            let waker = match direction {
                Direction::Read => &mut waiters.reader,
                Direction::Write => &mut waiters.writer,
            };

            // Avoid cloning the waker if one is already stored that matches the
            // current task.
            match waker {
                Some(waker) => waker.clone_from(cx.waker()),
                None => *waker = Some(cx.waker().clone()),
            }

            // Try again, in case the readiness was changed while we were
            // taking the waiters lock
            let curr = self.readiness.load(Acquire);
            let ready = direction.mask() & Ready::from_usize(READINESS.unpack(curr));
            let is_shutdown = SHUTDOWN.unpack(curr) != 0;
            if is_shutdown {
                Poll::Ready(ReadyEvent {
                    tick: TICK.unpack(curr) as u8,
                    ready: direction.mask(),
                    is_shutdown,
                })
            } else if ready.is_empty() {
                Poll::Pending
            } else {
                Poll::Ready(ReadyEvent {
                    tick: TICK.unpack(curr) as u8,
                    ready,
                    is_shutdown,
                })
            }
        } else {
            Poll::Ready(ReadyEvent {
                tick: TICK.unpack(curr) as u8,
                ready,
                is_shutdown,
            })
        }
    }

    pub(crate) fn clear_readiness(&self, event: ReadyEvent) {
        // This consumes the current readiness state **except** for closed
        // states. Closed states are excluded because they are final states.
        let mask_no_closed = event.ready - Ready::READ_CLOSED - Ready::WRITE_CLOSED;
        self.set_readiness(Tick::Clear(event.tick), |curr| curr - mask_no_closed);
    }

    /// Drops the reserved reader/writer wakers without waking them.
    pub(crate) fn clear_wakers(&self) {
        let mut waiters = self.waiters.lock();
        waiters.reader.take();
        waiters.writer.take();
    }
}

impl Drop for ScheduledIo {
    fn drop(&mut self) {
        // Wake everything so no waiter is left parked on a dead resource.
        self.wake(Ready::ALL);
    }
}

unsafe impl Send for ScheduledIo {}
unsafe impl Sync for ScheduledIo {}

impl ScheduledIo {
    /// An async version of `poll_readiness` which uses a linked list of wakers.
    pub(crate) async fn readiness(&self, interest: Interest) -> ReadyEvent {
        self.readiness_fut(interest).await
    }

    // This is in a separate function so that the borrow checker doesn't think
    // we are borrowing the `UnsafeCell` possibly over await boundaries.
    //
    // Go figure.
    fn readiness_fut(&self, interest: Interest) -> Readiness<'_> {
        Readiness {
            scheduled_io: self,
            state: State::Init,
            waiter: UnsafeCell::new(Waiter {
                pointers: linked_list::Pointers::new(),
                waker: None,
                is_ready: false,
                interest,
                _p: PhantomPinned,
            }),
        }
    }
}

unsafe impl linked_list::Link for Waiter {
    type Handle = NonNull<Waiter>;
    type Target = Waiter;

    fn as_raw(handle: &NonNull<Waiter>) -> NonNull<Waiter> {
        *handle
    }

    unsafe fn from_raw(ptr: NonNull<Waiter>) -> NonNull<Waiter> {
        ptr
    }

    unsafe fn pointers(target: NonNull<Waiter>) -> NonNull<linked_list::Pointers<Waiter>> {
        unsafe { Waiter::addr_of_pointers(target) }
    }
}

// ===== impl Readiness =====

impl Future for Readiness<'_> {
    type Output = ReadyEvent;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        use std::sync::atomic::Ordering::SeqCst;

        let (scheduled_io, state, waiter) = {
            // Safety: `Self` is `!Unpin`
            //
            // While we could use `pin_project!` to remove
            // this unsafe block, there are already unsafe blocks here,
            // so it wouldn't significantly ease the mental burden
            // and would actually complicate the code.
            // That's why we didn't use it.
            let me = unsafe { self.get_unchecked_mut() };
            (me.scheduled_io, &mut me.state, &me.waiter)
        };

        loop {
            match *state {
                State::Init => {
                    // Optimistically check existing readiness
                    let curr = scheduled_io.readiness.load(SeqCst);
                    let is_shutdown = SHUTDOWN.unpack(curr) != 0;

                    // Safety: `waiter.interest` never changes
                    let interest = unsafe { (*waiter.get()).interest };
                    let ready = Ready::from_usize(READINESS.unpack(curr)).intersection(interest);

                    if !ready.is_empty() || is_shutdown {
                        // Currently ready!
                        let tick = TICK.unpack(curr) as u8;
                        *state = State::Done;
                        return Poll::Ready(ReadyEvent {
                            tick,
                            ready,
                            is_shutdown,
                        });
                    }

                    // Wasn't ready, take the lock (and check again while locked).
                    let mut waiters = scheduled_io.waiters.lock();

                    let curr = scheduled_io.readiness.load(SeqCst);
                    let mut ready = Ready::from_usize(READINESS.unpack(curr));
                    let is_shutdown = SHUTDOWN.unpack(curr) != 0;

                    if is_shutdown {
                        ready = Ready::ALL;
                    }

                    let ready = ready.intersection(interest);

                    if !ready.is_empty() || is_shutdown {
                        // Currently ready!
                        let tick = TICK.unpack(curr) as u8;
                        *state = State::Done;
                        return Poll::Ready(ReadyEvent {
                            tick,
                            ready,
                            is_shutdown,
                        });
                    }

                    // Not ready even after locked, insert into list...

                    // Safety: Since the `waiter` is not in the intrusive list yet,
                    // we have exclusive access to it. The Mutex ensures
                    // that this modification is visible to other threads that
                    // acquire the same Mutex.
                    let waker = unsafe { &mut (*waiter.get()).waker };

                    let old = waker.replace(cx.waker().clone());
                    debug_assert!(old.is_none(), "waker should be None at the first poll");

                    // Insert the waiter into the linked list
                    //
                    // safety: pointers from `UnsafeCell` are never null.
                    waiters
                        .list
                        .push_front(unsafe { NonNull::new_unchecked(waiter.get()) });
                    *state = State::Waiting;
                }
                State::Waiting => {
                    // Currently in the "Waiting" state, implying the caller has
                    // a waiter stored in the waiter list (guarded by
                    // `notify.waiters`). In order to access the waker fields,
                    // we must hold the lock.

                    let waiters = scheduled_io.waiters.lock();

                    // Safety: With the lock held, we have exclusive access to
                    // the waiter. In other words, `ScheduledIo::wake()`
                    // cannot access the waiter concurrently.
                    let w = unsafe { &mut *waiter.get() };

                    if w.is_ready {
                        // Our waker has been notified.
                        *state = State::Done;
                    } else {
                        // Update the waker, if necessary.
                        w.waker.as_mut().unwrap().clone_from(cx.waker());
                        return Poll::Pending;
                    }

                    // Explicit drop of the lock to indicate the scope that the
                    // lock is held. Because holding the lock is required to
                    // ensure safe access to fields not held within the lock, it
                    // is helpful to visualize the scope of the critical
                    // section.
                    drop(waiters);
                }
                State::Done => {
                    let curr = scheduled_io.readiness.load(Acquire);
                    let is_shutdown = SHUTDOWN.unpack(curr) != 0;

                    // The returned tick might be newer than the event
                    // which notified our waker. This is ok because the future
                    // still didn't return `Poll::Ready`.
                    let tick = TICK.unpack(curr) as u8;

                    // Safety: We don't need to acquire the lock here because
                    // 1. `State::Done`` means `waiter` is no longer shared,
                    //    this means no concurrent access to `waiter` can happen
                    //    at this point.
                    // 2. `waiter.interest` is never changed, this means
                    //    no side effects need to be synchronized by the lock.
                    let interest = unsafe { (*waiter.get()).interest };

                    // The readiness state could have been cleared in the meantime,
                    // but we allow the returned ready set to be empty.
                    let ready = Ready::from_usize(READINESS.unpack(curr)).intersection(interest);

                    return Poll::Ready(ReadyEvent {
                        tick,
                        ready,
                        is_shutdown,
                    });
                }
            }
        }
    }
}

impl Drop for Readiness<'_> {
    fn drop(&mut self) {
        // Unlink our waiter node before the future's storage is freed;
        // otherwise the intrusive list would hold a dangling pointer.
        let mut waiters = self.scheduled_io.waiters.lock();

        // Safety: `waiter` is only ever stored in `waiters`
        unsafe {
            waiters
                .list
                .remove(NonNull::new_unchecked(self.waiter.get()))
        };
    }
}

unsafe impl Send for Readiness<'_> {}
unsafe impl Sync for Readiness<'_> {}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/io/registration_set.rs
tokio/src/runtime/io/registration_set.rs
use crate::loom::sync::atomic::AtomicUsize;
use crate::runtime::io::ScheduledIo;
use crate::util::linked_list::{self, LinkedList};

use std::io;
use std::ptr::NonNull;
use std::sync::atomic::Ordering::{Acquire, Release};
use std::sync::Arc;

// Kind of arbitrary, but buffering 16 `ScheduledIo`s doesn't seem like much
const NOTIFY_AFTER: usize = 16;

pub(super) struct RegistrationSet {
    /// Lock-free mirror of `Synced::pending_release.len()`, so callers can
    /// check `needs_release` without taking the lock.
    num_pending_release: AtomicUsize,
}

pub(super) struct Synced {
    // True when the I/O driver shutdown. At this point, no more registrations
    // should be added to the set.
    is_shutdown: bool,

    // List of all registrations tracked by the set
    registrations: LinkedList<Arc<ScheduledIo>, ScheduledIo>,

    // Registrations that are pending drop. When a `Registration` is dropped, it
    // stores its `ScheduledIo` in this list. The I/O driver is responsible for
    // dropping it. This ensures the `ScheduledIo` is not freed while it can
    // still be included in an I/O event.
    pending_release: Vec<Arc<ScheduledIo>>,
}

impl RegistrationSet {
    /// Creates an empty set along with its lock-protected state.
    pub(super) fn new() -> (RegistrationSet, Synced) {
        let set = RegistrationSet {
            num_pending_release: AtomicUsize::new(0),
        };

        let synced = Synced {
            is_shutdown: false,
            registrations: LinkedList::new(),
            pending_release: Vec::with_capacity(NOTIFY_AFTER),
        };

        (set, synced)
    }

    pub(super) fn is_shutdown(&self, synced: &Synced) -> bool {
        synced.is_shutdown
    }

    /// Returns `true` if there are registrations that need to be released
    pub(super) fn needs_release(&self) -> bool {
        self.num_pending_release.load(Acquire) != 0
    }

    /// Allocates a new `ScheduledIo` and tracks it in the registration list.
    ///
    /// Fails once the driver has shut down.
    pub(super) fn allocate(&self, synced: &mut Synced) -> io::Result<Arc<ScheduledIo>> {
        if synced.is_shutdown {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR,
            ));
        }

        let ret = Arc::new(ScheduledIo::default());

        // Push a ref into the list of all resources.
        synced.registrations.push_front(ret.clone());

        Ok(ret)
    }

    // Returns `true` if the caller should unblock the I/O driver to purge
    // registrations pending release.
    pub(super) fn deregister(&self, synced: &mut Synced, registration: &Arc<ScheduledIo>) -> bool {
        // The actual removal happens later in `release`, on the driver thread;
        // here the registration is only queued.
        synced.pending_release.push(registration.clone());

        let len = synced.pending_release.len();
        self.num_pending_release.store(len, Release);

        // Ask the caller to wake the driver once the buffer fills up.
        len == NOTIFY_AFTER
    }

    /// Marks the set shut down and hands every tracked `ScheduledIo` back to
    /// the caller for disposal. Idempotent: subsequent calls return empty.
    pub(super) fn shutdown(&self, synced: &mut Synced) -> Vec<Arc<ScheduledIo>> {
        if synced.is_shutdown {
            return vec![];
        }

        synced.is_shutdown = true;
        synced.pending_release.clear();

        // Building a vec of all outstanding I/O handles could be expensive, but
        // this is the shutdown operation. In theory, shutdowns should be
        // "clean" with no outstanding I/O resources. Even if it is slow, we
        // aren't optimizing for shutdown.
        let mut ret = vec![];

        while let Some(io) = synced.registrations.pop_back() {
            ret.push(io);
        }

        ret
    }

    /// Removes every queued-for-release registration from the set and resets
    /// the pending counter. Called from the I/O driver.
    pub(super) fn release(&self, synced: &mut Synced) {
        let pending = std::mem::take(&mut synced.pending_release);

        for io in pending {
            // safety: the registration is part of our list
            unsafe { self.remove(synced, &io) }
        }

        self.num_pending_release.store(0, Release);
    }

    // This function is marked as unsafe, because the caller must make sure that
    // `io` is part of the registration set.
    pub(super) unsafe fn remove(&self, synced: &mut Synced, io: &Arc<ScheduledIo>) {
        // SAFETY: Pointers into an Arc are never null.
        let io = unsafe { NonNull::new_unchecked(Arc::as_ptr(io).cast_mut()) };

        // Invalidate the token that was handed to mio for this resource.
        super::EXPOSE_IO.unexpose_provenance(io.as_ptr());

        // SAFETY: the caller guarantees that `io` is part of this list.
        let _ = unsafe { synced.registrations.remove(io) };
    }
}

// Safety: `Arc` pins the inner data
unsafe impl linked_list::Link for Arc<ScheduledIo> {
    type Handle = Arc<ScheduledIo>;
    type Target = ScheduledIo;

    fn as_raw(handle: &Self::Handle) -> NonNull<ScheduledIo> {
        // safety: Arc::as_ptr never returns null
        unsafe { NonNull::new_unchecked(Arc::as_ptr(handle) as *mut _) }
    }

    unsafe fn from_raw(ptr: NonNull<Self::Target>) -> Arc<ScheduledIo> {
        // safety: the linked list currently owns a ref count
        unsafe { Arc::from_raw(ptr.as_ptr() as *const _) }
    }

    unsafe fn pointers(
        target: NonNull<Self::Target>,
    ) -> NonNull<linked_list::Pointers<ScheduledIo>> {
        // safety: `target.as_ref().linked_list_pointers` is a `UnsafeCell` that
        // always returns a non-null pointer.
        unsafe { NonNull::new_unchecked(target.as_ref().linked_list_pointers.get()) }
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/io/mod.rs
tokio/src/runtime/io/mod.rs
#![cfg_attr( not(all(feature = "rt", feature = "net", feature = "io-uring", tokio_unstable)), allow(dead_code) )] mod driver; use driver::{Direction, Tick}; pub(crate) use driver::{Driver, Handle, ReadyEvent}; mod registration; pub(crate) use registration::Registration; mod registration_set; use registration_set::RegistrationSet; mod scheduled_io; use scheduled_io::ScheduledIo; mod metrics; use metrics::IoDriverMetrics; use crate::util::ptr_expose::PtrExposeDomain; static EXPOSE_IO: PtrExposeDomain<ScheduledIo> = PtrExposeDomain::new();
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/io/metrics.rs
tokio/src/runtime/io/metrics.rs
//! This file contains mocks of the metrics types used in the I/O driver. //! //! The reason these mocks don't live in `src/runtime/mock.rs` is because //! these need to be available in the case when `net` is enabled but //! `rt` is not. cfg_not_rt_and_metrics_and_net! { #[derive(Default)] pub(crate) struct IoDriverMetrics {} impl IoDriverMetrics { pub(crate) fn incr_fd_count(&self) {} pub(crate) fn dec_fd_count(&self) {} pub(crate) fn incr_ready_count_by(&self, _amt: u64) {} } } cfg_net! { cfg_rt! { cfg_unstable_metrics! { pub(crate) use crate::runtime::IoDriverMetrics; } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/io/registration.rs
tokio/src/runtime/io/registration.rs
#![cfg_attr(not(feature = "net"), allow(dead_code))] use crate::io::interest::Interest; use crate::runtime::io::{Direction, Handle, ReadyEvent, ScheduledIo}; use crate::runtime::scheduler; use mio::event::Source; use std::io; use std::sync::Arc; use std::task::{ready, Context, Poll}; cfg_io_driver! { /// Associates an I/O resource with the reactor instance that drives it. /// /// A registration represents an I/O resource registered with a Reactor such /// that it will receive task notifications on readiness. This is the lowest /// level API for integrating with a reactor. /// /// The association between an I/O resource is made by calling /// [`new_with_interest_and_handle`]. /// Once the association is established, it remains established until the /// registration instance is dropped. /// /// A registration instance represents two separate readiness streams. One /// for the read readiness and one for write readiness. These streams are /// independent and can be consumed from separate tasks. /// /// **Note**: while `Registration` is `Sync`, the caller must ensure that /// there are at most two tasks that use a registration instance /// concurrently. One task for [`poll_read_ready`] and one task for /// [`poll_write_ready`]. While violating this requirement is "safe" from a /// Rust memory safety point of view, it will result in unexpected behavior /// in the form of lost notifications and tasks hanging. /// /// ## Platform-specific events /// /// `Registration` also allows receiving platform-specific `mio::Ready` /// events. These events are included as part of the read readiness event /// stream. The write readiness event stream is only for `Ready::writable()` /// events. /// /// [`new_with_interest_and_handle`]: method@Self::new_with_interest_and_handle /// [`poll_read_ready`]: method@Self::poll_read_ready` /// [`poll_write_ready`]: method@Self::poll_write_ready` #[derive(Debug)] pub(crate) struct Registration { /// Handle to the associated runtime. 
/// /// TODO: this can probably be moved into `ScheduledIo`. handle: scheduler::Handle, /// Reference to state stored by the driver. shared: Arc<ScheduledIo>, } } unsafe impl Send for Registration {} unsafe impl Sync for Registration {} // ===== impl Registration ===== impl Registration { /// Registers the I/O resource with the reactor for the provided handle, for /// a specific `Interest`. This does not add `hup` or `error` so if you are /// interested in those states, you will need to add them to the readiness /// state passed to this function. /// /// # Return /// /// - `Ok` if the registration happened successfully /// - `Err` if an error was encountered during registration #[track_caller] pub(crate) fn new_with_interest_and_handle( io: &mut impl Source, interest: Interest, handle: scheduler::Handle, ) -> io::Result<Registration> { let shared = handle.driver().io().add_source(io, interest)?; Ok(Registration { handle, shared }) } /// Deregisters the I/O resource from the reactor it is associated with. /// /// This function must be called before the I/O resource associated with the /// registration is dropped. /// /// Note that deregistering does not guarantee that the I/O resource can be /// registered with a different reactor. Some I/O resource types can only be /// associated with a single reactor instance for their lifetime. /// /// # Return /// /// If the deregistration was successful, `Ok` is returned. Any calls to /// `Reactor::turn` that happen after a successful call to `deregister` will /// no longer result in notifications getting sent for this registration. /// /// `Err` is returned if an error is encountered. pub(crate) fn deregister(&mut self, io: &mut impl Source) -> io::Result<()> { self.handle().deregister_source(&self.shared, io) } pub(crate) fn clear_readiness(&self, event: ReadyEvent) { self.shared.clear_readiness(event); } // Uses the poll path, requiring the caller to ensure mutual exclusion for // correctness. 
Only the last task to call this function is notified. pub(crate) fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> { self.poll_ready(cx, Direction::Read) } // Uses the poll path, requiring the caller to ensure mutual exclusion for // correctness. Only the last task to call this function is notified. pub(crate) fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> { self.poll_ready(cx, Direction::Write) } // Uses the poll path, requiring the caller to ensure mutual exclusion for // correctness. Only the last task to call this function is notified. #[cfg(not(target_os = "wasi"))] pub(crate) fn poll_read_io<R>( &self, cx: &mut Context<'_>, f: impl FnMut() -> io::Result<R>, ) -> Poll<io::Result<R>> { self.poll_io(cx, Direction::Read, f) } // Uses the poll path, requiring the caller to ensure mutual exclusion for // correctness. Only the last task to call this function is notified. pub(crate) fn poll_write_io<R>( &self, cx: &mut Context<'_>, f: impl FnMut() -> io::Result<R>, ) -> Poll<io::Result<R>> { self.poll_io(cx, Direction::Write, f) } /// Polls for events on the I/O resource's `direction` readiness stream. /// /// If called with a task context, notify the task when a new event is /// received. 
fn poll_ready( &self, cx: &mut Context<'_>, direction: Direction, ) -> Poll<io::Result<ReadyEvent>> { ready!(crate::trace::trace_leaf(cx)); // Keep track of task budget let coop = ready!(crate::task::coop::poll_proceed(cx)); let ev = ready!(self.shared.poll_readiness(cx, direction)); if ev.is_shutdown { return Poll::Ready(Err(gone())); } coop.made_progress(); Poll::Ready(Ok(ev)) } fn poll_io<R>( &self, cx: &mut Context<'_>, direction: Direction, mut f: impl FnMut() -> io::Result<R>, ) -> Poll<io::Result<R>> { loop { let ev = ready!(self.poll_ready(cx, direction))?; match f() { Ok(ret) => { return Poll::Ready(Ok(ret)); } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { self.clear_readiness(ev); } Err(e) => return Poll::Ready(Err(e)), } } } pub(crate) fn try_io<R>( &self, interest: Interest, f: impl FnOnce() -> io::Result<R>, ) -> io::Result<R> { let ev = self.shared.ready_event(interest); // Don't attempt the operation if the resource is not ready. if ev.ready.is_empty() { return Err(io::ErrorKind::WouldBlock.into()); } match f() { Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { self.clear_readiness(ev); Err(io::ErrorKind::WouldBlock.into()) } res => res, } } pub(crate) async fn readiness(&self, interest: Interest) -> io::Result<ReadyEvent> { let ev = self.shared.readiness(interest).await; if ev.is_shutdown { return Err(gone()); } Ok(ev) } pub(crate) async fn async_io<R>( &self, interest: Interest, mut f: impl FnMut() -> io::Result<R>, ) -> io::Result<R> { loop { let event = self.readiness(interest).await?; let coop = std::future::poll_fn(crate::task::coop::poll_proceed).await; match f() { Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { self.clear_readiness(event); } x => { coop.made_progress(); return x; } } } } fn handle(&self) -> &Handle { self.handle.driver().io() } } impl Drop for Registration { fn drop(&mut self) { // It is possible for a cycle to be created between wakers stored in // `ScheduledIo` instances and `Arc<driver::Inner>`. 
To break this // cycle, wakers are cleared. This is an imperfect solution as it is // possible to store a `Registration` in a waker. In this case, the // cycle would remain. // // See tokio-rs/tokio#3481 for more details. self.shared.clear_wakers(); } } fn gone() -> io::Error { io::Error::new( io::ErrorKind::Other, crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR, ) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/io/driver.rs
tokio/src/runtime/io/driver.rs
// Signal handling cfg_signal_internal_and_unix! { mod signal; } cfg_io_uring! { mod uring; use uring::UringContext; use crate::loom::sync::atomic::AtomicUsize; } use crate::io::interest::Interest; use crate::io::ready::Ready; use crate::loom::sync::Mutex; use crate::runtime::driver; use crate::runtime::io::registration_set; use crate::runtime::io::{IoDriverMetrics, RegistrationSet, ScheduledIo}; use mio::event::Source; use std::fmt; use std::io; use std::sync::Arc; use std::time::Duration; /// I/O driver, backed by Mio. pub(crate) struct Driver { /// True when an event with the signal token is received signal_ready: bool, /// Reuse the `mio::Events` value across calls to poll. events: mio::Events, /// The system event queue. poll: mio::Poll, } /// A reference to an I/O driver. pub(crate) struct Handle { /// Registers I/O resources. registry: mio::Registry, /// Tracks all registrations registrations: RegistrationSet, /// State that should be synchronized synced: Mutex<registration_set::Synced>, /// Used to wake up the reactor from a call to `turn`. /// Not supported on `Wasi` due to lack of threading support. 
#[cfg(not(target_os = "wasi"))] waker: mio::Waker, pub(crate) metrics: IoDriverMetrics, #[cfg(all( tokio_unstable, feature = "io-uring", feature = "rt", feature = "fs", target_os = "linux", ))] pub(crate) uring_context: Mutex<UringContext>, #[cfg(all( tokio_unstable, feature = "io-uring", feature = "rt", feature = "fs", target_os = "linux", ))] pub(crate) uring_state: AtomicUsize, } #[derive(Debug)] pub(crate) struct ReadyEvent { pub(super) tick: u8, pub(crate) ready: Ready, pub(super) is_shutdown: bool, } cfg_net_unix!( impl ReadyEvent { pub(crate) fn with_ready(&self, ready: Ready) -> Self { Self { ready, tick: self.tick, is_shutdown: self.is_shutdown, } } } ); #[derive(Debug, Eq, PartialEq, Clone, Copy)] pub(super) enum Direction { Read, Write, } pub(super) enum Tick { Set, Clear(u8), } const TOKEN_WAKEUP: mio::Token = mio::Token(0); const TOKEN_SIGNAL: mio::Token = mio::Token(1); fn _assert_kinds() { fn _assert<T: Send + Sync>() {} _assert::<Handle>(); } // ===== impl Driver ===== impl Driver { /// Creates a new event loop, returning any error that happened during the /// creation. 
pub(crate) fn new(nevents: usize) -> io::Result<(Driver, Handle)> { let poll = mio::Poll::new()?; #[cfg(not(target_os = "wasi"))] let waker = mio::Waker::new(poll.registry(), TOKEN_WAKEUP)?; let registry = poll.registry().try_clone()?; let driver = Driver { signal_ready: false, events: mio::Events::with_capacity(nevents), poll, }; let (registrations, synced) = RegistrationSet::new(); let handle = Handle { registry, registrations, synced: Mutex::new(synced), #[cfg(not(target_os = "wasi"))] waker, metrics: IoDriverMetrics::default(), #[cfg(all( tokio_unstable, feature = "io-uring", feature = "rt", feature = "fs", target_os = "linux", ))] uring_context: Mutex::new(UringContext::new()), #[cfg(all( tokio_unstable, feature = "io-uring", feature = "rt", feature = "fs", target_os = "linux", ))] uring_state: AtomicUsize::new(0), }; Ok((driver, handle)) } pub(crate) fn park(&mut self, rt_handle: &driver::Handle) { let handle = rt_handle.io(); self.turn(handle, None); } pub(crate) fn park_timeout(&mut self, rt_handle: &driver::Handle, duration: Duration) { let handle = rt_handle.io(); self.turn(handle, Some(duration)); } pub(crate) fn shutdown(&mut self, rt_handle: &driver::Handle) { let handle = rt_handle.io(); let ios = handle.registrations.shutdown(&mut handle.synced.lock()); // `shutdown()` must be called without holding the lock. for io in ios { io.shutdown(); } } fn turn(&mut self, handle: &Handle, max_wait: Option<Duration>) { debug_assert!(!handle.registrations.is_shutdown(&handle.synced.lock())); handle.release_pending_registrations(); let events = &mut self.events; // Block waiting for an event to happen, peeling out how many events // happened. 
match self.poll.poll(events, max_wait) { Ok(()) => {} Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} #[cfg(target_os = "wasi")] Err(e) if e.kind() == io::ErrorKind::InvalidInput => { // In case of wasm32_wasi this error happens, when trying to poll without subscriptions // just return from the park, as there would be nothing, which wakes us up. } Err(e) => panic!("unexpected error when polling the I/O driver: {e:?}"), } // Process all the events that came in, dispatching appropriately let mut ready_count = 0; for event in events.iter() { let token = event.token(); if token == TOKEN_WAKEUP { // Nothing to do, the event is used to unblock the I/O driver } else if token == TOKEN_SIGNAL { self.signal_ready = true; } else { let ready = Ready::from_mio(event); let ptr = super::EXPOSE_IO.from_exposed_addr(token.0); // Safety: we ensure that the pointers used as tokens are not freed // until they are both deregistered from mio **and** we know the I/O // driver is not concurrently polling. The I/O driver holds ownership of // an `Arc<ScheduledIo>` so we can safely cast this to a ref. let io: &ScheduledIo = unsafe { &*ptr }; io.set_readiness(Tick::Set, |curr| curr | ready); io.wake(ready); ready_count += 1; } } #[cfg(all( tokio_unstable, feature = "io-uring", feature = "rt", feature = "fs", target_os = "linux", ))] { let mut guard = handle.get_uring().lock(); let ctx = &mut *guard; ctx.dispatch_completions(); } handle.metrics.incr_ready_count_by(ready_count); } } impl fmt::Debug for Driver { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Driver") } } impl Handle { /// Forces a reactor blocked in a call to `turn` to wakeup, or otherwise /// makes the next call to `turn` return immediately. /// /// This method is intended to be used in situations where a notification /// needs to otherwise be sent to the main reactor. 
If the reactor is /// currently blocked inside of `turn` then it will wake up and soon return /// after this method has been called. If the reactor is not currently /// blocked in `turn`, then the next call to `turn` will not block and /// return immediately. pub(crate) fn unpark(&self) { #[cfg(not(target_os = "wasi"))] self.waker.wake().expect("failed to wake I/O driver"); } /// Registers an I/O resource with the reactor for a given `mio::Ready` state. /// /// The registration token is returned. pub(super) fn add_source( &self, source: &mut impl mio::event::Source, interest: Interest, ) -> io::Result<Arc<ScheduledIo>> { let scheduled_io = self.registrations.allocate(&mut self.synced.lock())?; let token = scheduled_io.token(); // we should remove the `scheduled_io` from the `registrations` set if registering // the `source` with the OS fails. Otherwise it will leak the `scheduled_io`. if let Err(e) = self.registry.register(source, token, interest.to_mio()) { // safety: `scheduled_io` is part of the `registrations` set. unsafe { self.registrations .remove(&mut self.synced.lock(), &scheduled_io) }; return Err(e); } // TODO: move this logic to `RegistrationSet` and use a `CountedLinkedList` self.metrics.incr_fd_count(); Ok(scheduled_io) } /// Deregisters an I/O resource from the reactor. 
pub(super) fn deregister_source( &self, registration: &Arc<ScheduledIo>, source: &mut impl Source, ) -> io::Result<()> { // Deregister the source with the OS poller **first** self.registry.deregister(source)?; if self .registrations .deregister(&mut self.synced.lock(), registration) { self.unpark(); } self.metrics.dec_fd_count(); Ok(()) } fn release_pending_registrations(&self) { if self.registrations.needs_release() { self.registrations.release(&mut self.synced.lock()); } } } impl fmt::Debug for Handle { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Handle") } } impl Direction { pub(super) fn mask(self) -> Ready { match self { Direction::Read => Ready::READABLE | Ready::READ_CLOSED, Direction::Write => Ready::WRITABLE | Ready::WRITE_CLOSED, } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/io/driver/signal.rs
tokio/src/runtime/io/driver/signal.rs
use super::{Driver, Handle, TOKEN_SIGNAL}; use std::io; impl Handle { pub(crate) fn register_signal_receiver( &self, receiver: &mut mio::net::UnixStream, ) -> io::Result<()> { self.registry .register(receiver, TOKEN_SIGNAL, mio::Interest::READABLE)?; Ok(()) } } impl Driver { pub(crate) fn consume_signal_ready(&mut self) -> bool { let ret = self.signal_ready; self.signal_ready = false; ret } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/io/driver/uring.rs
tokio/src/runtime/io/driver/uring.rs
use io_uring::{squeue::Entry, IoUring, Probe}; use mio::unix::SourceFd; use slab::Slab; use crate::loom::sync::atomic::Ordering; use crate::runtime::driver::op::{Cancellable, Lifecycle}; use crate::{io::Interest, loom::sync::Mutex}; use super::{Handle, TOKEN_WAKEUP}; use std::os::fd::{AsRawFd, RawFd}; use std::{io, mem, task::Waker}; const DEFAULT_RING_SIZE: u32 = 256; #[repr(usize)] #[derive(Debug, PartialEq, Eq, Copy, Clone)] enum State { Uninitialized = 0, Initialized = 1, Unsupported = 2, } impl State { fn as_usize(&self) -> usize { *self as usize } fn from_usize(value: usize) -> Self { match value { 0 => State::Uninitialized, 1 => State::Initialized, 2 => State::Unsupported, _ => unreachable!("invalid Uring state: {}", value), } } } pub(crate) struct UringContext { pub(crate) uring: Option<io_uring::IoUring>, pub(crate) probe: io_uring::Probe, pub(crate) ops: slab::Slab<Lifecycle>, } impl UringContext { pub(crate) fn new() -> Self { Self { ops: Slab::new(), uring: None, probe: Probe::new(), } } pub(crate) fn ring(&self) -> &io_uring::IoUring { self.uring.as_ref().expect("io_uring not initialized") } pub(crate) fn ring_mut(&mut self) -> &mut io_uring::IoUring { self.uring.as_mut().expect("io_uring not initialized") } pub(crate) fn is_opcode_supported(&self, opcode: u8) -> bool { self.probe.is_supported(opcode) } /// Perform `io_uring_setup` system call, and Returns true if this /// actually initialized the io_uring. /// /// If the machine doesn't support io_uring, then this will return an /// `ENOSYS` error. pub(crate) fn try_init(&mut self) -> io::Result<bool> { if self.uring.is_some() { // Already initialized. return Ok(false); } let uring = IoUring::new(DEFAULT_RING_SIZE)?; match uring.submitter().register_probe(&mut self.probe) { Ok(_) => {} Err(e) if e.raw_os_error() == Some(libc::EINVAL) => { // The kernel does not support IORING_REGISTER_PROBE. 
return Err(io::Error::from_raw_os_error(libc::ENOSYS)); } Err(e) => return Err(e), } self.uring.replace(uring); Ok(true) } pub(crate) fn dispatch_completions(&mut self) { let ops = &mut self.ops; let Some(mut uring) = self.uring.take() else { // Uring is not initialized yet. return; }; let cq = uring.completion(); for cqe in cq { let idx = cqe.user_data() as usize; match ops.get_mut(idx) { Some(Lifecycle::Waiting(waker)) => { waker.wake_by_ref(); *ops.get_mut(idx).unwrap() = Lifecycle::Completed(cqe); } Some(Lifecycle::Cancelled(_)) => { // Op future was cancelled, so we discard the result. // We just remove the entry from the slab. ops.remove(idx); } Some(other) => { panic!("unexpected lifecycle for slot {idx}: {other:?}"); } None => { panic!("no op at index {idx}"); } } } self.uring.replace(uring); // `cq`'s drop gets called here, updating the latest head pointer } pub(crate) fn submit(&mut self) -> io::Result<()> { loop { // Errors from io_uring_enter: https://man7.org/linux/man-pages/man2/io_uring_enter.2.html#ERRORS match self.ring().submit() { Ok(_) => { return Ok(()); } // If the submission queue is full, we dispatch completions and try again. Err(ref e) if e.raw_os_error() == Some(libc::EBUSY) => { self.dispatch_completions(); } // For other errors, we currently return the error as is. Err(e) => { return Err(e); } } } } pub(crate) fn remove_op(&mut self, index: usize) -> Lifecycle { self.ops.remove(index) } } /// Drop the driver, cancelling any in-progress ops and waiting for them to terminate. impl Drop for UringContext { fn drop(&mut self) { if self.uring.is_none() { // Uring is not initialized or not supported. return; } // Make sure we flush the submission queue before dropping the driver. while !self.ring_mut().submission().is_empty() { self.submit().expect("Internal error when dropping driver"); } let mut ops = std::mem::take(&mut self.ops); // Remove all completed ops since we don't need to wait for them. 
ops.retain(|_, lifecycle| !matches!(lifecycle, Lifecycle::Completed(_))); while !ops.is_empty() { // Wait until at least one completion is available. self.ring_mut() .submit_and_wait(1) .expect("Internal error when dropping driver"); for cqe in self.ring_mut().completion() { let idx = cqe.user_data() as usize; ops.remove(idx); } } } } impl Handle { fn add_uring_source(&self, uringfd: RawFd) -> io::Result<()> { let mut source = SourceFd(&uringfd); self.registry .register(&mut source, TOKEN_WAKEUP, Interest::READABLE.to_mio()) } pub(crate) fn get_uring(&self) -> &Mutex<UringContext> { &self.uring_context } fn set_uring_state(&self, state: State) { self.uring_state.store(state.as_usize(), Ordering::Release); } /// Check if the io_uring context is initialized. If not, it will try to initialize it. /// Then, check if the provided opcode is supported. /// /// If both the context initialization succeeds and the opcode is supported, /// this returns `Ok(true)`. /// If either io_uring is unsupported or the opcode is unsupported, /// this returns `Ok(false)`. /// An error is returned if an io_uring syscall returns an unexpected error value. pub(crate) fn check_and_init(&self, opcode: u8) -> io::Result<bool> { match State::from_usize(self.uring_state.load(Ordering::Acquire)) { State::Uninitialized => match self.try_init_and_check_opcode(opcode) { Ok(opcode_supported) => { self.set_uring_state(State::Initialized); Ok(opcode_supported) } // If the system doesn't support io_uring, we set the state to Unsupported. Err(e) if e.raw_os_error() == Some(libc::ENOSYS) => { self.set_uring_state(State::Unsupported); Ok(false) } // If we get EPERM, io-uring syscalls may be blocked (for example, by seccomp). // In this case, we try to fall back to spawn_blocking for this and future operations. 
// See also: https://github.com/tokio-rs/tokio/issues/7691 Err(e) if e.raw_os_error() == Some(libc::EPERM) => { self.set_uring_state(State::Unsupported); Ok(false) } // For other system errors, we just return it. Err(e) => Err(e), }, State::Unsupported => Ok(false), State::Initialized => Ok(self.get_uring().lock().is_opcode_supported(opcode)), } } /// Initialize the io_uring context if it hasn't been initialized yet. /// Then, check whether the given opcode is supported. fn try_init_and_check_opcode(&self, opcode: u8) -> io::Result<bool> { let mut guard = self.get_uring().lock(); if guard.try_init()? { self.add_uring_source(guard.ring().as_raw_fd())?; } Ok(guard.is_opcode_supported(opcode)) } /// Register an operation with the io_uring. /// /// If this is the first io_uring operation, it will also initialize the io_uring context. /// If io_uring isn't supported, this function returns an `ENOSYS` error, so the caller can /// perform custom handling, such as falling back to an alternative mechanism. /// /// # Safety /// /// Callers must ensure that parameters of the entry (such as buffer) are valid and will /// be valid for the entire duration of the operation, otherwise it may cause memory problems. pub(crate) unsafe fn register_op(&self, entry: Entry, waker: Waker) -> io::Result<usize> { // Note: Maybe this check can be removed if upstream callers consistently use `check_and_init`. if !self.check_and_init(entry.get_opcode() as u8)? { return Err(io::Error::from_raw_os_error(libc::ENOSYS)); } // Uring is initialized. 
let mut guard = self.get_uring().lock(); let ctx = &mut *guard; let index = ctx.ops.insert(Lifecycle::Waiting(waker)); let entry = entry.user_data(index as u64); let submit_or_remove = |ctx: &mut UringContext| -> io::Result<()> { if let Err(e) = ctx.submit() { // Submission failed, remove the entry from the slab and return the error ctx.remove_op(index); return Err(e); } Ok(()) }; // SAFETY: entry is valid for the entire duration of the operation while unsafe { ctx.ring_mut().submission().push(&entry).is_err() } { // If the submission queue is full, flush it to the kernel submit_or_remove(ctx)?; } // Ensure that the completion queue is not full before submitting the entry. while ctx.ring_mut().completion().is_full() { ctx.dispatch_completions(); } // Note: For now, we submit the entry immediately without utilizing batching. submit_or_remove(ctx)?; Ok(index) } pub(crate) fn cancel_op<T: Cancellable>(&self, index: usize, data: Option<T>) { let mut guard = self.get_uring().lock(); let ctx = &mut *guard; let ops = &mut ctx.ops; let Some(lifecycle) = ops.get_mut(index) else { // The corresponding index doesn't exist anymore, so this Op is already complete. return; }; // This Op will be cancelled. Here, we don't remove the lifecycle from the slab to keep // uring data alive until the operation completes. let cancel_data = data.expect("Data should be present").cancel(); match mem::replace(lifecycle, Lifecycle::Cancelled(cancel_data)) { Lifecycle::Submitted | Lifecycle::Waiting(_) => (), // The driver saw the completion, but it was never polled. Lifecycle::Completed(_) => { // We can safely remove the entry from the slab, as it has already been completed. ops.remove(index); } prev => panic!("Unexpected state: {prev:?}"), }; } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/driver/op.rs
tokio/src/runtime/driver/op.rs
use crate::io::uring::open::Open; use crate::io::uring::read::Read; use crate::io::uring::write::Write; use crate::runtime::Handle; use io_uring::cqueue; use io_uring::squeue::Entry; use std::future::Future; use std::io::{self, Error}; use std::mem; use std::pin::Pin; use std::task::{Context, Poll, Waker}; // This field isn't accessed directly, but it holds cancellation data, // so `#[allow(dead_code)]` is needed. #[allow(dead_code)] #[derive(Debug)] pub(crate) enum CancelData { Open(Open), Write(Write), Read(Read), } #[derive(Debug)] pub(crate) enum Lifecycle { /// The operation has been submitted to uring and is currently in-flight Submitted, /// The submitter is waiting for the completion of the operation Waiting(Waker), /// The submitter no longer has interest in the operation result. The state /// must be passed to the driver and held until the operation completes. Cancelled( // This field isn't accessed directly, but it holds cancellation data, // so `#[allow(dead_code)]` is needed. #[allow(dead_code)] CancelData, ), /// The operation has completed with a single cqe result Completed(io_uring::cqueue::Entry), } pub(crate) enum State { Initialize(Option<Entry>), Polled(usize), Complete, } pub(crate) struct Op<T: Cancellable> { // Handle to the runtime handle: Handle, // State of this Op state: State, // Per operation data. data: Option<T>, } impl<T: Cancellable> Op<T> { /// # Safety /// /// Callers must ensure that parameters of the entry (such as buffer) are valid and will /// be valid for the entire duration of the operation, otherwise it may cause memory problems. pub(crate) unsafe fn new(entry: Entry, data: T) -> Self { let handle = Handle::current(); Self { handle, data: Some(data), state: State::Initialize(Some(entry)), } } pub(crate) fn take_data(&mut self) -> Option<T> { self.data.take() } } impl<T: Cancellable> Drop for Op<T> { fn drop(&mut self) { match self.state { // We've already dropped this Op. State::Complete => (), // We will cancel this Op. 
State::Polled(index) => { let data = self.take_data(); let handle = &mut self.handle; handle.inner.driver().io().cancel_op(index, data); } // This Op has not been polled yet. // We don't need to do anything here. State::Initialize(_) => (), } } } /// A single CQE result pub(crate) struct CqeResult { pub(crate) result: io::Result<u32>, } impl From<cqueue::Entry> for CqeResult { fn from(cqe: cqueue::Entry) -> Self { let res = cqe.result(); let result = if res >= 0 { Ok(res as u32) } else { Err(io::Error::from_raw_os_error(-res)) }; CqeResult { result } } } /// A trait that converts a CQE result into a usable value for each operation. pub(crate) trait Completable { type Output; fn complete(self, cqe: CqeResult) -> Self::Output; // This is used when you want to terminate an operation with an error. // // The `Op` type that implements this trait can return the passed error // upstream by embedding it in the `Output`. fn complete_with_error(self, error: Error) -> Self::Output; } /// Extracts the `CancelData` needed to safely cancel an in-flight io_uring operation. 
pub(crate) trait Cancellable { fn cancel(self) -> CancelData; } impl<T: Cancellable> Unpin for Op<T> {} impl<T: Cancellable + Completable + Send> Future for Op<T> { type Output = T::Output; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let this = self.get_mut(); let handle = &mut this.handle; let driver = handle.inner.driver().io(); match &mut this.state { State::Initialize(entry_opt) => { let entry = entry_opt.take().expect("Entry must be present"); let waker = cx.waker().clone(); // SAFETY: entry is valid for the entire duration of the operation match unsafe { driver.register_op(entry, waker) } { Ok(idx) => this.state = State::Polled(idx), Err(err) => { let data = this .take_data() .expect("Data must be present on Initialization"); this.state = State::Complete; return Poll::Ready(data.complete_with_error(err)); } }; Poll::Pending } State::Polled(idx) => { let mut ctx = driver.get_uring().lock(); let lifecycle = ctx.ops.get_mut(*idx).expect("Lifecycle must be present"); match mem::replace(lifecycle, Lifecycle::Submitted) { // Only replace the stored waker if it wouldn't wake the new one Lifecycle::Waiting(prev) if !prev.will_wake(cx.waker()) => { let waker = cx.waker().clone(); *lifecycle = Lifecycle::Waiting(waker); Poll::Pending } Lifecycle::Waiting(prev) => { *lifecycle = Lifecycle::Waiting(prev); Poll::Pending } Lifecycle::Completed(cqe) => { // Clean up and complete the future ctx.remove_op(*idx); this.state = State::Complete; drop(ctx); let data = this .take_data() .expect("Data must be present on completion"); Poll::Ready(data.complete(cqe.into())) } Lifecycle::Submitted => { unreachable!("Submitted lifecycle should never be seen here"); } Lifecycle::Cancelled(_) => { unreachable!("Cancelled lifecycle should never be seen here"); } } } State::Complete => { panic!("Future polled after completion"); } } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/context/blocking.rs
tokio/src/runtime/context/blocking.rs
use super::{EnterRuntime, CONTEXT}; use crate::loom::thread::AccessError; use crate::util::markers::NotSendOrSync; use std::marker::PhantomData; use std::time::Duration; /// Guard tracking that a caller has entered a blocking region. #[must_use] pub(crate) struct BlockingRegionGuard { _p: PhantomData<NotSendOrSync>, } pub(crate) struct DisallowBlockInPlaceGuard(bool); pub(crate) fn try_enter_blocking_region() -> Option<BlockingRegionGuard> { CONTEXT .try_with(|c| { if c.runtime.get().is_entered() { None } else { Some(BlockingRegionGuard::new()) } // If accessing the thread-local fails, the thread is terminating // and thread-locals are being destroyed. Because we don't know if // we are currently in a runtime or not, we default to being // permissive. }) .unwrap_or_else(|_| Some(BlockingRegionGuard::new())) } /// Disallows blocking in the current runtime context until the guard is dropped. pub(crate) fn disallow_block_in_place() -> DisallowBlockInPlaceGuard { let reset = CONTEXT.try_with(|c| { if let EnterRuntime::Entered { allow_block_in_place: true, } = c.runtime.get() { c.runtime.set(EnterRuntime::Entered { allow_block_in_place: false, }); true } else { false } }); DisallowBlockInPlaceGuard(reset.unwrap_or(false)) } impl BlockingRegionGuard { pub(super) fn new() -> BlockingRegionGuard { BlockingRegionGuard { _p: PhantomData } } /// Blocks the thread on the specified future, returning the value with /// which that future completes. pub(crate) fn block_on<F>(&mut self, f: F) -> Result<F::Output, AccessError> where F: std::future::Future, { use crate::runtime::park::CachedParkThread; let mut park = CachedParkThread::new(); park.block_on(f) } /// Blocks the thread on the specified future for **at most** `timeout` /// /// If the future completes before `timeout`, the result is returned. If /// `timeout` elapses, then `Err` is returned. 
pub(crate) fn block_on_timeout<F>(&mut self, f: F, timeout: Duration) -> Result<F::Output, ()> where F: std::future::Future, { use crate::runtime::park::CachedParkThread; use std::task::Context; use std::task::Poll::Ready; use std::time::Instant; let mut park = CachedParkThread::new(); let waker = park.waker().map_err(|_| ())?; let mut cx = Context::from_waker(&waker); pin!(f); let when = Instant::now() + timeout; loop { if let Ready(v) = crate::task::coop::budget(|| f.as_mut().poll(&mut cx)) { return Ok(v); } let now = Instant::now(); if now >= when { return Err(()); } park.park_timeout(when - now); } } } impl Drop for DisallowBlockInPlaceGuard { fn drop(&mut self) { if self.0 { // XXX: Do we want some kind of assertion here, or is "best effort" okay? CONTEXT.with(|c| { if let EnterRuntime::Entered { allow_block_in_place: false, } = c.runtime.get() { c.runtime.set(EnterRuntime::Entered { allow_block_in_place: true, }); } }); } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/context/scoped.rs
tokio/src/runtime/context/scoped.rs
use std::cell::Cell; use std::ptr; /// Scoped thread-local storage pub(super) struct Scoped<T> { pub(super) inner: Cell<*const T>, } impl<T> Scoped<T> { pub(super) const fn new() -> Scoped<T> { Scoped { inner: Cell::new(ptr::null()), } } /// Inserts a value into the scoped cell for the duration of the closure pub(super) fn set<F, R>(&self, t: &T, f: F) -> R where F: FnOnce() -> R, { struct Reset<'a, T> { cell: &'a Cell<*const T>, prev: *const T, } impl<T> Drop for Reset<'_, T> { fn drop(&mut self) { self.cell.set(self.prev); } } let prev = self.inner.get(); self.inner.set(t as *const _); let _reset = Reset { cell: &self.inner, prev, }; f() } /// Gets the value out of the scoped cell; pub(super) fn with<F, R>(&self, f: F) -> R where F: FnOnce(Option<&T>) -> R, { let val = self.inner.get(); if val.is_null() { f(None) } else { unsafe { f(Some(&*val)) } } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/context/runtime_mt.rs
tokio/src/runtime/context/runtime_mt.rs
use super::{EnterRuntime, CONTEXT}; /// Returns true if in a runtime context. pub(crate) fn current_enter_context() -> EnterRuntime { CONTEXT.with(|c| c.runtime.get()) } /// Forces the current "entered" state to be cleared while the closure /// is executed. pub(crate) fn exit_runtime<F: FnOnce() -> R, R>(f: F) -> R { // Reset in case the closure panics struct Reset(EnterRuntime); impl Drop for Reset { fn drop(&mut self) { CONTEXT.with(|c| { assert!( !c.runtime.get().is_entered(), "closure claimed permanent executor" ); c.runtime.set(self.0); }); } } let was = CONTEXT.with(|c| { let e = c.runtime.get(); assert!(e.is_entered(), "asked to exit when not entered"); c.runtime.set(EnterRuntime::NotEntered); e }); let _reset = Reset(was); // dropping _reset after f() will reset ENTERED f() }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/context/runtime.rs
tokio/src/runtime/context/runtime.rs
use super::{BlockingRegionGuard, SetCurrentGuard, CONTEXT}; use crate::runtime::scheduler; use crate::util::rand::{FastRand, RngSeed}; use std::fmt; #[derive(Debug, Clone, Copy)] #[must_use] pub(crate) enum EnterRuntime { /// Currently in a runtime context. #[cfg_attr(not(feature = "rt"), allow(dead_code))] Entered { allow_block_in_place: bool }, /// Not in a runtime context **or** a blocking region. NotEntered, } /// Guard tracking that a caller has entered a runtime context. #[must_use] pub(crate) struct EnterRuntimeGuard { /// Tracks that the current thread has entered a blocking function call. pub(crate) blocking: BlockingRegionGuard, #[allow(dead_code)] // Only tracking the guard. pub(crate) handle: SetCurrentGuard, // Tracks the previous random number generator seed old_seed: RngSeed, } /// Marks the current thread as being within the dynamic extent of an /// executor. #[track_caller] pub(crate) fn enter_runtime<F, R>(handle: &scheduler::Handle, allow_block_in_place: bool, f: F) -> R where F: FnOnce(&mut BlockingRegionGuard) -> R, { let maybe_guard = CONTEXT.with(|c| { if c.runtime.get().is_entered() { None } else { // Set the entered flag c.runtime.set(EnterRuntime::Entered { allow_block_in_place, }); // Generate a new seed let rng_seed = handle.seed_generator().next_seed(); // Swap the RNG seed let mut rng = c.rng.get().unwrap_or_else(FastRand::new); let old_seed = rng.replace_seed(rng_seed); c.rng.set(Some(rng)); Some(EnterRuntimeGuard { blocking: BlockingRegionGuard::new(), handle: c.set_current(handle), old_seed, }) } }); if let Some(mut guard) = maybe_guard { return f(&mut guard.blocking); } panic!( "Cannot start a runtime from within a runtime. This happens \ because a function (like `block_on`) attempted to block the \ current thread while the thread is being used to drive \ asynchronous tasks." 
); } impl fmt::Debug for EnterRuntimeGuard { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Enter").finish() } } impl Drop for EnterRuntimeGuard { fn drop(&mut self) { CONTEXT.with(|c| { assert!(c.runtime.get().is_entered()); c.runtime.set(EnterRuntime::NotEntered); // Replace the previous RNG seed let mut rng = c.rng.get().unwrap_or_else(FastRand::new); rng.replace_seed(self.old_seed.clone()); c.rng.set(Some(rng)); }); } } impl EnterRuntime { pub(crate) fn is_entered(self) -> bool { matches!(self, EnterRuntime::Entered { .. }) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/context/current.rs
tokio/src/runtime/context/current.rs
use super::{Context, CONTEXT}; use crate::runtime::{scheduler, TryCurrentError}; use crate::util::markers::SyncNotSend; use std::cell::{Cell, RefCell}; use std::marker::PhantomData; #[derive(Debug)] #[must_use] pub(crate) struct SetCurrentGuard { // The previous handle prev: Option<scheduler::Handle>, // The depth for this guard depth: usize, // Don't let the type move across threads. _p: PhantomData<SyncNotSend>, } pub(super) struct HandleCell { /// Current handle handle: RefCell<Option<scheduler::Handle>>, /// Tracks the number of nested calls to `try_set_current`. depth: Cell<usize>, } /// Sets this [`Handle`] as the current active [`Handle`]. /// /// [`Handle`]: crate::runtime::scheduler::Handle pub(crate) fn try_set_current(handle: &scheduler::Handle) -> Option<SetCurrentGuard> { CONTEXT.try_with(|ctx| ctx.set_current(handle)).ok() } pub(crate) fn with_current<F, R>(f: F) -> Result<R, TryCurrentError> where F: FnOnce(&scheduler::Handle) -> R, { match CONTEXT.try_with(|ctx| ctx.current.handle.borrow().as_ref().map(f)) { Ok(Some(ret)) => Ok(ret), Ok(None) => Err(TryCurrentError::new_no_context()), Err(_access_error) => Err(TryCurrentError::new_thread_local_destroyed()), } } impl Context { pub(super) fn set_current(&self, handle: &scheduler::Handle) -> SetCurrentGuard { let old_handle = self.current.handle.borrow_mut().replace(handle.clone()); let depth = self.current.depth.get(); assert!(depth != usize::MAX, "reached max `enter` depth"); let depth = depth + 1; self.current.depth.set(depth); SetCurrentGuard { prev: old_handle, depth, _p: PhantomData, } } } impl HandleCell { pub(super) const fn new() -> HandleCell { HandleCell { handle: RefCell::new(None), depth: Cell::new(0), } } } impl Drop for SetCurrentGuard { fn drop(&mut self) { CONTEXT.with(|ctx| { let depth = ctx.current.depth.get(); if depth != self.depth { if !std::thread::panicking() { panic!( "`EnterGuard` values dropped out of order. 
Guards returned by \ `tokio::runtime::Handle::enter()` must be dropped in the reverse \ order as they were acquired." ); } else { // Just return... this will leave handles in a wonky state though... return; } } *ctx.current.handle.borrow_mut() = self.prev.take(); ctx.current.depth.set(depth - 1); }); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time_alt/tests.rs
tokio/src/runtime/time_alt/tests.rs
use super::*; use crate::loom::thread; use futures_test::task::{new_count_waker, AwokenCount}; #[cfg(loom)] const NUM_ITEMS: usize = 16; #[cfg(not(loom))] const NUM_ITEMS: usize = 64; fn new_handle() -> (EntryHandle, AwokenCount) { let (waker, count) = new_count_waker(); (EntryHandle::new(0, waker), count) } fn model<F: Fn() + Send + Sync + 'static>(f: F) { #[cfg(loom)] loom::model(f); #[cfg(not(loom))] f(); } #[test] fn wake_up_in_the_same_thread() { model(|| { let mut counts = Vec::new(); let mut reg_queue = RegistrationQueue::new(); for _ in 0..NUM_ITEMS { let (hdl, count) = new_handle(); counts.push(count); unsafe { reg_queue.push_front(hdl); } } let mut wake_queue = WakeQueue::new(); for _ in 0..NUM_ITEMS { if let Some(hdl) = reg_queue.pop_front() { unsafe { wake_queue.push_front(hdl); } } } assert!(reg_queue.pop_front().is_none()); wake_queue.wake_all(); assert!(counts.into_iter().all(|c| c.get() == 1)); }); } #[test] fn cancel_in_the_same_thread() { model(|| { let mut counts = Vec::new(); let (cancel_tx, mut cancel_rx) = cancellation_queue::new(); let mut reg_queue = RegistrationQueue::new(); for _ in 0..NUM_ITEMS { let (hdl, count) = new_handle(); hdl.register_cancel_tx(cancel_tx.clone()); counts.push(count); unsafe { reg_queue.push_front(hdl.clone()); } hdl.cancel(); } // drain the registration queue while let Some(hdl) = reg_queue.pop_front() { drop(hdl); } let mut wake_queue = WakeQueue::new(); for hdl in cancel_rx.recv_all() { unsafe { wake_queue.push_front(hdl); } } wake_queue.wake_all(); assert!(counts.into_iter().all(|c| c.get() == 0)); }); } #[test] fn wake_up_in_the_different_thread() { model(|| { let mut counts = Vec::new(); let mut hdls = Vec::new(); let mut reg_queue = RegistrationQueue::new(); for _ in 0..NUM_ITEMS { let (hdl, count) = new_handle(); counts.push(count); hdls.push(hdl.clone()); unsafe { reg_queue.push_front(hdl); } } // wake up all handles in a different thread thread::spawn(move || { let mut wake_queue = WakeQueue::new(); for _ 
in 0..NUM_ITEMS { if let Some(hdl) = reg_queue.pop_front() { unsafe { wake_queue.push_front(hdl); } } } assert!(reg_queue.pop_front().is_none()); wake_queue.wake_all(); assert!(counts.into_iter().all(|c| c.get() == 1)); }) .join() .unwrap(); }); } #[test] fn cancel_in_the_different_thread() { model(|| { let mut counts = Vec::new(); let (cancel_tx, mut cancel_rx) = cancellation_queue::new(); let mut hdls = Vec::new(); let mut reg_queue = RegistrationQueue::new(); for _ in 0..NUM_ITEMS { let (hdl, count) = new_handle(); hdl.register_cancel_tx(cancel_tx.clone()); counts.push(count); hdls.push(hdl.clone()); unsafe { reg_queue.push_front(hdl); } } // this thread cancel all handles concurrently let jh = thread::spawn(move || { // cancel all handles for hdl in hdls { hdl.cancel(); } }); // cancellation queue concurrently while let Some(hdl) = reg_queue.pop_front() { drop(hdl); } let mut wake_queue = WakeQueue::new(); for hdl in cancel_rx.recv_all() { unsafe { wake_queue.push_front(hdl); } } wake_queue.wake_all(); assert!(counts.into_iter().all(|c| c.get() == 0)); jh.join().unwrap(); }) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time_alt/wake_queue.rs
tokio/src/runtime/time_alt/wake_queue.rs
use super::{Entry, EntryHandle, WakeQueueEntry}; use crate::util::linked_list; type EntryList = linked_list::LinkedList<WakeQueueEntry, Entry>; /// A queue of entries that need to be woken up. #[derive(Debug)] pub(crate) struct WakeQueue { list: EntryList, } impl Drop for WakeQueue { fn drop(&mut self) { // drain all entries without waking them up while let Some(hdl) = self.list.pop_front() { drop(hdl); } } } impl WakeQueue { pub(crate) fn new() -> Self { Self { list: EntryList::new(), } } pub(crate) fn is_empty(&self) -> bool { self.list.is_empty() } /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// - [`Entry::extra_pointers`] of `hdl` must not being used. pub(crate) unsafe fn push_front(&mut self, hdl: EntryHandle) { self.list.push_front(hdl); } /// Wakes all entries in the wake queue. pub(crate) fn wake_all(mut self) { while let Some(hdl) = self.list.pop_front() { hdl.wake(); } } } #[cfg(test)] mod tests;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time_alt/timer.rs
tokio/src/runtime/time_alt/timer.rs
use super::{EntryHandle, TempLocalContext}; use crate::runtime::scheduler::Handle as SchedulerHandle; use crate::time::Instant; use std::pin::Pin; use std::task::{Context, Poll}; #[cfg(any(feature = "rt", feature = "rt-multi-thread"))] use crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR; pub(crate) struct Timer { sched_handle: SchedulerHandle, /// The entry in the timing wheel. /// /// - `Some` if the timer is registered / pending / woken up / cancelling. /// - `None` if the timer is unregistered. entry: Option<EntryHandle>, /// The deadline for the timer. deadline: Instant, } impl std::fmt::Debug for Timer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Timer") .field("deadline", &self.deadline) .finish() } } impl Drop for Timer { fn drop(&mut self) { if let Some(entry) = self.entry.take() { entry.cancel(); } } } impl Timer { #[track_caller] pub(crate) fn new(sched_hdl: SchedulerHandle, deadline: Instant) -> Self { // Panic if the time driver is not enabled let _ = sched_hdl.driver().time(); Timer { sched_handle: sched_hdl, entry: None, deadline, } } pub(crate) fn deadline(&self) -> Instant { self.deadline } pub(crate) fn is_elapsed(&self) -> bool { self.entry.as_ref().is_some_and(|entry| entry.is_woken_up()) } fn register(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { let this = self.get_mut(); with_current_temp_local_context(&this.sched_handle, |maybe_time_cx| { let deadline = deadline_to_tick(&this.sched_handle, this.deadline); match maybe_time_cx { Some(TempLocalContext::Running { registration_queue: _, elapsed, }) if deadline <= elapsed => Poll::Ready(()), Some(TempLocalContext::Running { registration_queue, elapsed: _, }) => { let hdl = EntryHandle::new(deadline, cx.waker().clone()); this.entry = Some(hdl.clone()); unsafe { registration_queue.push_front(hdl); } Poll::Pending } #[cfg(feature = "rt-multi-thread")] Some(TempLocalContext::Shutdown) => panic!("{RUNTIME_SHUTTING_DOWN_ERROR}"), _ => { let hdl = 
EntryHandle::new(deadline, cx.waker().clone()); this.entry = Some(hdl.clone()); push_from_remote(&this.sched_handle, hdl); Poll::Pending } } }) } pub(crate) fn poll_elapsed(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { match self.entry.as_ref() { Some(entry) if entry.is_woken_up() => Poll::Ready(()), Some(entry) => { entry.register_waker(cx.waker().clone()); Poll::Pending } None => self.register(cx), } } pub(crate) fn scheduler_handle(&self) -> &SchedulerHandle { &self.sched_handle } #[cfg(all(tokio_unstable, feature = "tracing"))] pub(crate) fn driver(&self) -> &crate::runtime::time::Handle { self.sched_handle.driver().time() } #[cfg(all(tokio_unstable, feature = "tracing"))] pub(crate) fn clock(&self) -> &crate::time::Clock { self.sched_handle.driver().clock() } } fn with_current_temp_local_context<F, R>(hdl: &SchedulerHandle, f: F) -> R where F: FnOnce(Option<TempLocalContext<'_>>) -> R, { #[cfg(not(feature = "rt"))] { let (_, _) = (hdl, f); panic!("Tokio runtime is not enabled, cannot access the current wheel"); } #[cfg(feature = "rt")] { use crate::runtime::context; let is_same_rt = context::with_current(|cur_hdl| cur_hdl.is_same_runtime(hdl)).unwrap_or_default(); if !is_same_rt { // We don't want to create the timer in one runtime, // but register it in a different runtime's timer wheel. f(None) } else { context::with_scheduler(|maybe_cx| match maybe_cx { Some(cx) => cx.with_time_temp_local_context(f), None => f(None), }) } } } fn push_from_remote(sched_hdl: &SchedulerHandle, entry_hdl: EntryHandle) { #[cfg(not(feature = "rt"))] { let (_, _) = (sched_hdl, entry_hdl); panic!("Tokio runtime is not enabled, cannot access the current wheel"); } #[cfg(feature = "rt")] { assert!(!sched_hdl.is_shutdown(), "{RUNTIME_SHUTTING_DOWN_ERROR}"); sched_hdl.push_remote_timer(entry_hdl); } } fn deadline_to_tick(sched_hdl: &SchedulerHandle, deadline: Instant) -> u64 { let time_hdl = sched_hdl.driver().time(); time_hdl.time_source().deadline_to_tick(deadline) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time_alt/mod.rs
tokio/src/runtime/time_alt/mod.rs
pub(crate) mod context; pub(super) use context::{LocalContext, TempLocalContext}; pub(crate) mod cancellation_queue; mod entry; pub(crate) use entry::Handle as EntryHandle; use entry::{CancellationQueueEntry, RegistrationQueueEntry, WakeQueueEntry}; use entry::{Entry, EntryList}; mod registration_queue; pub(crate) use registration_queue::RegistrationQueue; mod timer; pub(crate) use timer::Timer; mod wheel; pub(super) use wheel::Wheel; mod wake_queue; pub(crate) use wake_queue::WakeQueue; #[cfg(test)] mod tests;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time_alt/cancellation_queue.rs
tokio/src/runtime/time_alt/cancellation_queue.rs
use super::{CancellationQueueEntry, Entry, EntryHandle}; use crate::loom::sync::{Arc, Mutex}; use crate::util::linked_list; type EntryList = linked_list::LinkedList<CancellationQueueEntry, Entry>; #[derive(Debug, Default)] struct Inner { list: EntryList, } impl Drop for Inner { fn drop(&mut self) { // consume all entries while let Some(hdl) = self.list.pop_front() { drop(hdl) } } } impl Inner { fn new() -> Self { Self { list: EntryList::new(), } } /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// - `hdl` must not in any [`super::cancellation_queue`], and also mus not in any [`super::WakeQueue`]. unsafe fn push_front(&mut self, hdl: EntryHandle) { self.list.push_front(hdl); } fn into_iter(self) -> impl Iterator<Item = EntryHandle> { struct Iter(Inner); impl Iterator for Iter { type Item = EntryHandle; fn next(&mut self) -> Option<Self::Item> { self.0.list.pop_front() } } Iter(self) } } #[derive(Debug, Clone)] pub(crate) struct Sender { inner: Arc<Mutex<Inner>>, } impl Sender { /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// - `hdl` must not in any cancellation queue. pub(crate) unsafe fn send(&self, hdl: EntryHandle) { unsafe { self.inner.lock().push_front(hdl); } } } #[derive(Debug)] pub(crate) struct Receiver { inner: Arc<Mutex<Inner>>, } impl Receiver { pub(crate) fn recv_all(&mut self) -> impl Iterator<Item = EntryHandle> { std::mem::take(&mut *self.inner.lock()).into_iter() } } pub(crate) fn new() -> (Sender, Receiver) { let inner = Arc::new(Mutex::new(Inner::new())); ( Sender { inner: inner.clone(), }, Receiver { inner }, ) } #[cfg(test)] mod tests;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time_alt/context.rs
tokio/src/runtime/time_alt/context.rs
use super::{cancellation_queue, RegistrationQueue, Wheel}; /// Local context for the time driver, used when the runtime wants to /// fire/cancel timers. pub(crate) struct LocalContext { pub(crate) wheel: Wheel, pub(crate) registration_queue: RegistrationQueue, pub(crate) canc_tx: cancellation_queue::Sender, pub(crate) canc_rx: cancellation_queue::Receiver, } impl LocalContext { pub(crate) fn new() -> Self { let (canc_tx, canc_rx) = cancellation_queue::new(); Self { wheel: Wheel::new(), registration_queue: RegistrationQueue::new(), canc_tx, canc_rx, } } } pub(crate) enum TempLocalContext<'a> { /// The runtime is running, we can access it. Running { registration_queue: &'a mut RegistrationQueue, elapsed: u64, }, #[cfg(feature = "rt-multi-thread")] /// The runtime is shutting down, no timers can be registered. Shutdown, } impl<'a> TempLocalContext<'a> { pub(crate) fn new_running(cx: &'a mut LocalContext) -> Self { TempLocalContext::Running { registration_queue: &mut cx.registration_queue, elapsed: cx.wheel.elapsed(), } } #[cfg(feature = "rt-multi-thread")] pub(crate) fn new_shutdown() -> Self { TempLocalContext::Shutdown } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time_alt/registration_queue.rs
tokio/src/runtime/time_alt/registration_queue.rs
use super::{Entry, EntryHandle, RegistrationQueueEntry}; use crate::util::linked_list; type EntryList = linked_list::LinkedList<RegistrationQueueEntry, Entry>; /// A queue of entries that need to be registered in the timer wheel. #[derive(Debug)] pub(crate) struct RegistrationQueue { list: EntryList, } impl Drop for RegistrationQueue { fn drop(&mut self) { // drain all entries without waking them up while let Some(hdl) = self.list.pop_front() { drop(hdl); } } } impl RegistrationQueue { pub(crate) fn new() -> Self { Self { list: EntryList::new(), } } /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// - [`Entry::extra_pointers`] of `hdl` must not being used. pub(crate) unsafe fn push_front(&mut self, hdl: EntryHandle) { self.list.push_front(hdl); } pub(crate) fn pop_front(&mut self) -> Option<EntryHandle> { self.list.pop_front() } } #[cfg(test)] mod tests;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time_alt/entry.rs
tokio/src/runtime/time_alt/entry.rs
use super::cancellation_queue::Sender; use crate::loom::sync::{Arc, Mutex}; use crate::util::linked_list; use std::marker::PhantomPinned; use std::ptr::NonNull; use std::task::Waker; pub(super) type EntryList = linked_list::LinkedList<Entry, Entry>; #[derive(Debug)] struct State { cancelled: bool, woken_up: bool, waker: Option<Waker>, cancel_tx: Option<Sender>, } #[derive(Debug)] pub(crate) struct Entry { /// The intrusive pointer used by [`super::cancellation_queue`]. cancel_pointers: linked_list::Pointers<Entry>, /// The intrusive pointer used by any of the following queues: /// /// - [`Wheel`] /// - [`RegistrationQueue`] /// - [`WakeQueue`] /// /// We can guarantee that this pointer is only used by one of the above /// at any given time. See below for the journey of this pointer. /// /// Initially, this pointer is used by the [`RegistrationQueue`]. /// /// And then, before parking the resource driver, /// the scheduler removes the entry from the [`RegistrationQueue`] /// [`RegistrationQueue`] and insert it into the [`Wheel`]. /// /// Finally, after parking the resource driver, the scheduler removes /// the entry from the [`Wheel`] and insert it into the [`WakeQueue`]. /// /// [`RegistrationQueue`]: super::RegistrationQueue /// [`Wheel`]: super::Wheel /// [`WakeQueue`]: super::WakeQueue extra_pointers: linked_list::Pointers<Entry>, /// The tick when this entry is scheduled to expire. deadline: u64, state: Mutex<State>, /// Make the type `!Unpin` to prevent LLVM from emitting /// the `noalias` attribute for mutable references. /// /// See <https://github.com/rust-lang/rust/pull/82834>. _pin: PhantomPinned, } // Safety: `Entry` is always in an `Arc`. 
unsafe impl linked_list::Link for Entry { type Handle = Handle; type Target = Entry; fn as_raw(hdl: &Self::Handle) -> NonNull<Self::Target> { unsafe { NonNull::new_unchecked(Arc::as_ptr(&hdl.entry).cast_mut()) } } unsafe fn from_raw(ptr: NonNull<Self::Target>) -> Self::Handle { Handle { entry: unsafe { Arc::from_raw(ptr.as_ptr()) }, } } unsafe fn pointers( target: NonNull<Self::Target>, ) -> NonNull<linked_list::Pointers<Self::Target>> { let this = target.as_ptr(); let field = unsafe { std::ptr::addr_of_mut!((*this).extra_pointers) }; unsafe { NonNull::new_unchecked(field) } } } /// An ZST to allow [`super::registration_queue`] to utilize the [`Entry::extra_pointers`] /// by impl [`linked_list::Link`] as we cannot impl it on [`Entry`] /// directly due to the conflicting implementations. /// /// This type should never be constructed. pub(super) struct RegistrationQueueEntry; // Safety: `Entry` is always in an `Arc`. unsafe impl linked_list::Link for RegistrationQueueEntry { type Handle = Handle; type Target = Entry; fn as_raw(hdl: &Self::Handle) -> NonNull<Self::Target> { unsafe { NonNull::new_unchecked(Arc::as_ptr(&hdl.entry).cast_mut()) } } unsafe fn from_raw(ptr: NonNull<Self::Target>) -> Self::Handle { Handle { entry: unsafe { Arc::from_raw(ptr.as_ptr()) }, } } unsafe fn pointers( target: NonNull<Self::Target>, ) -> NonNull<linked_list::Pointers<Self::Target>> { let this = target.as_ptr(); let field = unsafe { std::ptr::addr_of_mut!((*this).extra_pointers) }; unsafe { NonNull::new_unchecked(field) } } } /// An ZST to allow [`super::cancellation_queue`] to utilize the [`Entry::cancel_pointers`] /// by impl [`linked_list::Link`] as we cannot impl it on [`Entry`] /// directly due to the conflicting implementations. /// /// This type should never be constructed. pub(super) struct CancellationQueueEntry; // Safety: `Entry` is always in an `Arc`. 
unsafe impl linked_list::Link for CancellationQueueEntry { type Handle = Handle; type Target = Entry; fn as_raw(hdl: &Self::Handle) -> NonNull<Self::Target> { unsafe { NonNull::new_unchecked(Arc::as_ptr(&hdl.entry).cast_mut()) } } unsafe fn from_raw(ptr: NonNull<Self::Target>) -> Self::Handle { Handle { entry: unsafe { Arc::from_raw(ptr.as_ptr()) }, } } unsafe fn pointers( target: NonNull<Self::Target>, ) -> NonNull<linked_list::Pointers<Self::Target>> { let this = target.as_ptr(); let field = unsafe { std::ptr::addr_of_mut!((*this).cancel_pointers) }; unsafe { NonNull::new_unchecked(field) } } } /// An ZST to allow [`super::WakeQueue`] to utilize the [`Entry::extra_pointers`] /// by impl [`linked_list::Link`] as we cannot impl it on [`Entry`] /// directly due to the conflicting implementations. /// /// This type should never be constructed. pub(super) struct WakeQueueEntry; // Safety: `Entry` is always in an `Arc`. unsafe impl linked_list::Link for WakeQueueEntry { type Handle = Handle; type Target = Entry; fn as_raw(hdl: &Self::Handle) -> NonNull<Self::Target> { unsafe { NonNull::new_unchecked(Arc::as_ptr(&hdl.entry).cast_mut()) } } unsafe fn from_raw(ptr: NonNull<Self::Target>) -> Self::Handle { Handle { entry: unsafe { Arc::from_raw(ptr.as_ptr()) }, } } unsafe fn pointers( target: NonNull<Self::Target>, ) -> NonNull<linked_list::Pointers<Self::Target>> { let this = target.as_ptr(); let field = unsafe { std::ptr::addr_of_mut!((*this).extra_pointers) }; unsafe { NonNull::new_unchecked(field) } } } #[derive(Debug, Clone)] pub(crate) struct Handle { pub(crate) entry: Arc<Entry>, } impl From<&Handle> for NonNull<Entry> { fn from(hdl: &Handle) -> Self { // Safety: entry is in an `Arc`, so the pointer is valid. 
unsafe { NonNull::new_unchecked(Arc::as_ptr(&hdl.entry) as *mut Entry) } } } impl Handle { pub(crate) fn new(deadline: u64, waker: Waker) -> Self { let state = State { cancelled: false, woken_up: false, waker: Some(waker), cancel_tx: None, }; let entry = Arc::new(Entry { cancel_pointers: linked_list::Pointers::new(), extra_pointers: linked_list::Pointers::new(), deadline, state: Mutex::new(state), _pin: PhantomPinned, }); Handle { entry } } /// Wake the entry if it is already in the pending queue of the timer wheel. pub(crate) fn wake(&self) { let mut lock = self.entry.state.lock(); if !lock.cancelled { lock.woken_up = true; if let Some(waker) = lock.waker.take() { // unlock before calling waker drop(lock); waker.wake(); } } } pub(crate) fn register_cancel_tx(&self, cancel_tx: Sender) { let mut lock = self.entry.state.lock(); if !lock.cancelled && !lock.woken_up { let old_tx = lock.cancel_tx.replace(cancel_tx); // don't unlock — poisoning the `Mutex` stops others from using the bad state. assert!(old_tx.is_none(), "cancel_tx is already registered"); } } pub(crate) fn register_waker(&self, waker: Waker) { let mut lock = self.entry.state.lock(); if !lock.cancelled && !lock.woken_up { let maybe_old_waker = lock.waker.replace(waker); // unlock before calling waker drop(lock); drop(maybe_old_waker); } } pub(crate) fn cancel(&self) { let mut lock = self.entry.state.lock(); if !lock.cancelled { lock.cancelled = true; if let Some(cancel_tx) = lock.cancel_tx.take() { drop(lock); // Safety: we can guarantee that `self` is not in any cancellation queue // because the `self.cancelled` was just set to `true`. unsafe { cancel_tx.send(self.clone()); } } } } pub(crate) fn deadline(&self) -> u64 { self.entry.deadline } pub(crate) fn is_woken_up(&self) -> bool { let lock = self.entry.state.lock(); lock.woken_up } pub(crate) fn is_cancelled(&self) -> bool { let lock = self.entry.state.lock(); lock.cancelled } #[cfg(test)] /// Only used for unit tests. 
pub(crate) fn inner_strong_count(&self) -> usize { Arc::strong_count(&self.entry) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time_alt/cancellation_queue/tests.rs
tokio/src/runtime/time_alt/cancellation_queue/tests.rs
use super::*; use futures::task::noop_waker; #[cfg(loom)] const NUM_ITEMS: usize = 16; #[cfg(not(loom))] const NUM_ITEMS: usize = 64; fn new_handle() -> EntryHandle { EntryHandle::new(0, noop_waker()) } fn model<F: Fn() + Send + Sync + 'static>(f: F) { #[cfg(loom)] loom::model(f); #[cfg(not(loom))] f(); } #[test] fn single_thread() { model(|| { for i in 0..NUM_ITEMS { let (tx, mut rx) = new(); for _ in 0..i { unsafe { tx.send(new_handle()) }; } assert_eq!(rx.recv_all().count(), i); } }); } #[test] #[cfg(not(target_os = "wasi"))] // No thread on wasi. fn multi_thread() { use crate::loom::sync::atomic::{AtomicUsize, Ordering::SeqCst}; use crate::loom::sync::Arc; use crate::loom::thread; #[cfg(loom)] const NUM_THREADS: usize = 3; #[cfg(not(loom))] const NUM_THREADS: usize = 8; model(|| { let (tx, mut rx) = new(); let mut jhs = Vec::new(); let sent = Arc::new(AtomicUsize::new(0)); for _ in 0..NUM_THREADS { let tx = tx.clone(); let sent = sent.clone(); jhs.push(thread::spawn(move || { for _ in 0..NUM_ITEMS { unsafe { tx.send(new_handle()) }; sent.fetch_add(1, SeqCst); } })); } let mut count = 0; loop { count += rx.recv_all().count(); if sent.fetch_add(0, SeqCst) == NUM_ITEMS * NUM_THREADS { jhs.into_iter().for_each(|jh| { jh.join().unwrap(); }); count += rx.recv_all().count(); break; } thread::yield_now(); } assert_eq!(count, NUM_ITEMS * NUM_THREADS); }) } #[test] fn drop_iter_should_not_leak_memory() { model(|| { let (tx, mut rx) = new(); let hdls = (0..NUM_ITEMS).map(|_| new_handle()).collect::<Vec<_>>(); for hdl in hdls.iter() { unsafe { tx.send(hdl.clone()) }; } drop(rx.recv_all()); assert!(hdls.into_iter().all(|hdl| hdl.inner_strong_count() == 1)); }); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time_alt/wake_queue/tests.rs
tokio/src/runtime/time_alt/wake_queue/tests.rs
use super::*; use futures_test::task::{new_count_waker, AwokenCount}; #[cfg(loom)] const NUM_ITEMS: usize = 16; #[cfg(not(loom))] const NUM_ITEMS: usize = 64; fn new_handle() -> (EntryHandle, AwokenCount) { let (waker, count) = new_count_waker(); (EntryHandle::new(0, waker), count) } fn model<F: Fn() + Send + Sync + 'static>(f: F) { #[cfg(loom)] loom::model(f); #[cfg(not(loom))] f(); } #[test] fn sanity() { model(|| { let mut queue = WakeQueue::new(); let mut counts = Vec::new(); for _ in 0..NUM_ITEMS { let (hdl, count) = new_handle(); counts.push(count); unsafe { queue.push_front(hdl); } } assert!(!queue.is_empty()); queue.wake_all(); assert!(counts.into_iter().all(|c| c.get() == 1)); }); } #[test] fn drop_should_not_leak_memory() { model(|| { let mut queue = WakeQueue::new(); let mut hdls = vec![]; let mut counts = vec![]; for _ in 0..NUM_ITEMS { let (hdl, count) = new_handle(); hdls.push(hdl); counts.push(count); } for hdl in hdls.iter() { unsafe { queue.push_front(hdl.clone()) }; } drop(queue); assert!(hdls.into_iter().all(|hdl| hdl.inner_strong_count() == 1)); // drop should not wake any entries assert!(counts.into_iter().all(|count| count.get() == 0)); }); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time_alt/registration_queue/tests.rs
tokio/src/runtime/time_alt/registration_queue/tests.rs
use super::*; use futures::task::noop_waker; #[cfg(loom)] const NUM_ITEMS: usize = 16; #[cfg(not(loom))] const NUM_ITEMS: usize = 64; fn new_handle() -> EntryHandle { EntryHandle::new(0, noop_waker()) } fn model<F: Fn() + Send + Sync + 'static>(f: F) { #[cfg(loom)] loom::model(f); #[cfg(not(loom))] f(); } #[test] fn sanity() { model(|| { let mut queue = RegistrationQueue::new(); for _ in 0..NUM_ITEMS { unsafe { queue.push_front(new_handle()); } } for _ in 0..NUM_ITEMS { assert!(queue.pop_front().is_some()); } assert!(queue.pop_front().is_none()); }); } #[test] fn drop_should_not_leak_memory() { model(|| { let mut queue = RegistrationQueue::new(); let hdls = (0..NUM_ITEMS).map(|_| new_handle()).collect::<Vec<_>>(); for hdl in hdls.iter() { unsafe { queue.push_front(hdl.clone()) }; } drop(queue); assert!(hdls.into_iter().all(|hdl| hdl.inner_strong_count() == 1)); }); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time_alt/wheel/level.rs
tokio/src/runtime/time_alt/wheel/level.rs
use super::{EntryHandle, EntryList}; use std::ptr::NonNull; use std::{array, fmt}; /// Wheel for a single level in the timer. This wheel contains 64 slots. pub(crate) struct Level { level: usize, /// Bit field tracking which slots currently contain entries. /// /// Using a bit field to track slots that contain entries allows avoiding a /// scan to find entries. This field is updated when entries are added or /// removed from a slot. /// /// The least-significant bit represents slot zero. occupied: u64, /// Slots. We access these via the EntryInner `current_list` as well, so this needs to be an `UnsafeCell`. slot: [EntryList; LEVEL_MULT], } /// Indicates when a slot must be processed next. #[derive(Debug)] pub(crate) struct Expiration { /// The level containing the slot. pub(crate) level: usize, /// The slot index. pub(crate) slot: usize, /// The instant at which the slot needs to be processed. pub(crate) deadline: u64, } /// Level multiplier. /// /// Being a power of 2 is very important. const LEVEL_MULT: usize = 64; impl Level { pub(crate) fn new(level: usize) -> Level { Level { level, occupied: 0, slot: array::from_fn(|_| EntryList::default()), } } /// Finds the slot that needs to be processed next and returns the slot and /// `Instant` at which this slot must be processed. pub(crate) fn next_expiration(&self, now: u64) -> Option<Expiration> { // Use the `occupied` bit field to get the index of the next slot that // needs to be processed. let slot = self.next_occupied_slot(now)?; // From the slot index, calculate the `Instant` at which it needs to be // processed. This value *must* be in the future with respect to `now`. let level_range = level_range(self.level); let slot_range = slot_range(self.level); // Compute the start date of the current level by masking the low bits // of `now` (`level_range` is a power of 2). 
let level_start = now & !(level_range - 1); let mut deadline = level_start + slot as u64 * slot_range; if deadline <= now { // A timer is in a slot "prior" to the current time. This can occur // because we do not have an infinite hierarchy of timer levels, and // eventually a timer scheduled for a very distant time might end up // being placed in a slot that is beyond the end of all of the // arrays. // // To deal with this, we first limit timers to being scheduled no // more than MAX_DURATION ticks in the future; that is, they're at // most one rotation of the top level away. Then, we force timers // that logically would go into the top+1 level, to instead go into // the top level's slots. // // What this means is that the top level's slots act as a // pseudo-ring buffer, and we rotate around them indefinitely. If we // compute a deadline before now, and it's the top level, it // therefore means we're actually looking at a slot in the future. debug_assert_eq!(self.level, super::NUM_LEVELS - 1); deadline += level_range; } debug_assert!( deadline >= now, "deadline={:016X}; now={:016X}; level={}; lr={:016X}, sr={:016X}, slot={}; occupied={:b}", deadline, now, self.level, level_range, slot_range, slot, self.occupied ); Some(Expiration { level: self.level, slot, deadline, }) } fn next_occupied_slot(&self, now: u64) -> Option<usize> { if self.occupied == 0 { return None; } // Get the slot for now using Maths let now_slot = (now / slot_range(self.level)) as usize; let occupied = self.occupied.rotate_right(now_slot as u32); let zeros = occupied.trailing_zeros() as usize; let slot = (zeros + now_slot) % LEVEL_MULT; Some(slot) } pub(crate) unsafe fn add_entry(&mut self, hdl: EntryHandle) { // Safety: the associated entry must be valid. 
let deadline = hdl.deadline(); let slot = slot_for(deadline, self.level); self.slot[slot].push_front(hdl); self.occupied |= occupied_bit(slot); } pub(crate) unsafe fn remove_entry(&mut self, hdl: EntryHandle) { let slot = slot_for(hdl.deadline(), self.level); unsafe { self.slot[slot].remove(NonNull::from(&hdl)) }; if self.slot[slot].is_empty() { // The bit is currently set debug_assert!(self.occupied & occupied_bit(slot) != 0); // Unset the bit self.occupied ^= occupied_bit(slot); } } pub(crate) fn take_slot(&mut self, slot: usize) -> EntryList { self.occupied &= !occupied_bit(slot); std::mem::take(&mut self.slot[slot]) } } impl fmt::Debug for Level { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("Level") .field("occupied", &self.occupied) .finish() } } fn occupied_bit(slot: usize) -> u64 { 1 << slot } fn slot_range(level: usize) -> u64 { LEVEL_MULT.pow(level as u32) as u64 } fn level_range(level: usize) -> u64 { LEVEL_MULT as u64 * slot_range(level) } /// Converts a duration (milliseconds) and a level to a slot position. fn slot_for(duration: u64, level: usize) -> usize { ((duration >> (level * 6)) % LEVEL_MULT as u64) as usize } #[cfg(all(test, not(loom)))] mod test { use super::*; #[test] fn test_slot_for() { for pos in 0..64 { assert_eq!(pos as usize, slot_for(pos, 0)); } for level in 1..5 { for pos in level..64 { let a = pos * 64_usize.pow(level as u32); assert_eq!(pos, slot_for(a as u64, level)); } } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/runtime/time_alt/wheel/mod.rs
tokio/src/runtime/time_alt/wheel/mod.rs
mod level; pub(crate) use self::level::Expiration; use self::level::Level; use super::cancellation_queue::Sender; use super::{EntryHandle, EntryList, WakeQueue}; use std::array; /// Hashed timing wheel implementation. /// /// See [`Driver`] documentation for some implementation notes. /// /// [`Driver`]: crate::runtime::time::Driver #[derive(Debug)] pub(crate) struct Wheel { /// The number of milliseconds elapsed since the wheel started. elapsed: u64, /// Timer wheel. /// /// Levels: /// /// * 1 ms slots / 64 ms range /// * 64 ms slots / ~ 4 sec range /// * ~ 4 sec slots / ~ 4 min range /// * ~ 4 min slots / ~ 4 hr range /// * ~ 4 hr slots / ~ 12 day range /// * ~ 12 day slots / ~ 2 yr range levels: Box<[Level; NUM_LEVELS]>, } /// Number of levels. Each level has 64 slots. By using 6 levels with 64 slots /// each, the timer is able to track time up to 2 years into the future with a /// precision of 1 millisecond. const NUM_LEVELS: usize = 6; /// The maximum duration of a `Sleep`. pub(super) const MAX_DURATION: u64 = (1 << (6 * NUM_LEVELS)) - 1; impl Wheel { /// Creates a new timing wheel. pub(crate) fn new() -> Wheel { Wheel { elapsed: 0, levels: Box::new(array::from_fn(Level::new)), } } /// Returns the number of milliseconds that have elapsed since the timing /// wheel's creation. pub(crate) fn elapsed(&self) -> u64 { self.elapsed } /// Inserts an entry into the timing wheel. /// /// # Arguments /// /// * `hdl`: The entry handle to insert into the wheel. /// /// # Safety /// /// The caller must ensure: /// /// * The entry is not already registered in ANY wheel. 
pub(crate) unsafe fn insert(&mut self, hdl: EntryHandle, cancel_tx: Sender) { let deadline = hdl.deadline(); assert!(deadline > self.elapsed); hdl.register_cancel_tx(cancel_tx); // Get the level at which the entry should be stored let level = self.level_for(deadline); unsafe { self.levels[level].add_entry(hdl); } debug_assert!({ self.levels[level] .next_expiration(self.elapsed) .map(|e| e.deadline >= self.elapsed) .unwrap_or(true) }); } /// Removes `item` from the timing wheel. /// /// # Safety /// /// The caller must ensure: /// /// * The entry is already registered in THIS wheel. pub(crate) unsafe fn remove(&mut self, hdl: EntryHandle) { let deadline = hdl.deadline(); debug_assert!( self.elapsed <= deadline, "elapsed={}; deadline={}", self.elapsed, deadline ); let level = self.level_for(deadline); unsafe { self.levels[level].remove_entry(hdl.clone()) }; } /// Advances the timer up to the instant represented by `now`. pub(crate) fn take_expired(&mut self, now: u64, wake_queue: &mut WakeQueue) { while let Some(expiration) = self .next_expiration() .filter(|expiration| expiration.deadline <= now) { self.process_expiration(&expiration, wake_queue); self.set_elapsed(expiration.deadline); } self.set_elapsed(now); } /// Returns the instant at which the next timeout expires. fn next_expiration(&self) -> Option<Expiration> { // Check all levels self.levels .iter() .enumerate() .find_map(|(level_num, level)| { let expiration = level.next_expiration(self.elapsed)?; // There cannot be any expirations at a higher level that happen // before this one. debug_assert!(self.no_expirations_before(level_num + 1, expiration.deadline)); Some(expiration) }) } /// Returns the tick at which this timer wheel next needs to perform some /// processing, or None if there are no timers registered. 
pub(crate) fn next_expiration_time(&self) -> Option<u64> { self.next_expiration().map(|ex| ex.deadline) } /// Used for debug assertions fn no_expirations_before(&self, start_level: usize, before: u64) -> bool { self.levels[start_level..] .iter() .flat_map(|level| level.next_expiration(self.elapsed)) .all(|e2| before <= e2.deadline) } /// iteratively find entries that are between the wheel's current /// time and the expiration time. for each in that population either /// queue it for notification (in the case of the last level) or tier /// it down to the next level (in all other cases). pub(crate) fn process_expiration( &mut self, expiration: &Expiration, wake_queue: &mut WakeQueue, ) { // Note that we need to take _all_ of the entries off the list before // processing any of them. This is important because it's possible that // those entries might need to be reinserted into the same slot. // // This happens only on the highest level, when an entry is inserted // more than MAX_DURATION into the future. When this happens, we wrap // around, and process some entries a multiple of MAX_DURATION before // they actually need to be dropped down a level. We then reinsert them // back into the same position; we must make sure we don't then process // those entries again or we'll end up in an infinite loop. let mut entries = self.take_entries(expiration); while let Some(hdl) = entries.pop_back() { if expiration.level == 0 { debug_assert_eq!(hdl.deadline(), expiration.deadline); } let deadline = hdl.deadline(); if deadline > expiration.deadline { let level = level_for(expiration.deadline, deadline); unsafe { self.levels[level].add_entry(hdl); } } else { unsafe { wake_queue.push_front(hdl); } } } } fn set_elapsed(&mut self, when: u64) { assert!( self.elapsed <= when, "elapsed={:?}; when={:?}", self.elapsed, when ); if when > self.elapsed { self.elapsed = when; } } /// Obtains the list of entries that need processing for the given expiration. 
fn take_entries(&mut self, expiration: &Expiration) -> EntryList { self.levels[expiration.level].take_slot(expiration.slot) } fn level_for(&self, when: u64) -> usize { level_for(self.elapsed, when) } } fn level_for(elapsed: u64, when: u64) -> usize { const SLOT_MASK: u64 = (1 << 6) - 1; // Mask in the trailing bits ignored by the level calculation in order to cap // the possible leading zeros let mut masked = elapsed ^ when | SLOT_MASK; if masked >= MAX_DURATION { // Fudge the timer into the top level masked = MAX_DURATION - 1; } let leading_zeros = masked.leading_zeros() as usize; let significant = 63 - leading_zeros; significant / NUM_LEVELS } #[cfg(all(test, not(loom)))] mod test { use super::*; #[test] fn test_level_for() { for pos in 0..64 { assert_eq!(0, level_for(0, pos), "level_for({pos}) -- binary = {pos:b}"); } for level in 1..5 { for pos in level..64 { let a = pos * 64_usize.pow(level as u32); assert_eq!( level, level_for(0, a as u64), "level_for({a}) -- binary = {a:b}" ); if pos > level { let a = a - 1; assert_eq!( level, level_for(0, a as u64), "level_for({a}) -- binary = {a:b}" ); } if pos < 64 { let a = a + 1; assert_eq!( level, level_for(0, a as u64), "level_for({a}) -- binary = {a:b}" ); } } } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/time/timeout.rs
tokio/src/time/timeout.rs
//! Allows a future to execute for a maximum amount of time. //! //! See [`Timeout`] documentation for more details. //! //! [`Timeout`]: struct@Timeout use crate::{ task::coop, time::{error::Elapsed, sleep_until, Duration, Instant, Sleep}, util::trace, }; use pin_project_lite::pin_project; use std::future::{Future, IntoFuture}; use std::pin::Pin; use std::task::{self, Poll}; /// Requires a `Future` to complete before the specified duration has elapsed. /// /// If the future completes before the duration has elapsed, then the completed /// value is returned. Otherwise, an error is returned and the future is /// canceled. /// /// Note that the timeout is checked before polling the future, so if the future /// does not yield during execution then it is possible for the future to complete /// and exceed the timeout _without_ returning an error. /// /// This function returns a future whose return type is [`Result`]`<T,`[`Elapsed`]`>`, where `T` is the /// return type of the provided future. /// /// If the provided future completes immediately, then the future returned from /// this function is guaranteed to complete immediately with an [`Ok`] variant /// no matter the provided duration. /// /// [`Ok`]: std::result::Result::Ok /// [`Result`]: std::result::Result /// [`Elapsed`]: crate::time::error::Elapsed /// /// # Cancellation /// /// Cancelling a timeout is done by dropping the future. No additional cleanup /// or other work is required. /// /// The original future may be obtained by calling [`Timeout::into_inner`]. This /// consumes the `Timeout`. /// /// # Examples /// /// Create a new `Timeout` set to expire in 10 milliseconds. /// /// ```rust /// use tokio::time::timeout; /// use tokio::sync::oneshot; /// /// use std::time::Duration; /// /// # async fn dox() { /// let (tx, rx) = oneshot::channel(); /// # tx.send(()).unwrap(); /// /// // Wrap the future with a `Timeout` set to expire in 10 milliseconds. 
/// if let Err(_) = timeout(Duration::from_millis(10), rx).await { /// println!("did not receive value within 10 ms"); /// } /// # } /// ``` /// /// # Panics /// /// This function panics if there is no current timer set. /// /// It can be triggered when [`Builder::enable_time`] or /// [`Builder::enable_all`] are not included in the builder. /// /// It can also panic whenever a timer is created outside of a /// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic, /// since the function is executed outside of the runtime. /// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. /// And this is because wrapping the function on an async makes it lazy, /// and so gets executed inside the runtime successfully without /// panicking. /// /// [`Builder::enable_time`]: crate::runtime::Builder::enable_time /// [`Builder::enable_all`]: crate::runtime::Builder::enable_all #[track_caller] pub fn timeout<F>(duration: Duration, future: F) -> Timeout<F::IntoFuture> where F: IntoFuture, { let location = trace::caller_location(); let deadline = Instant::now().checked_add(duration); let delay = match deadline { Some(deadline) => Sleep::new_timeout(deadline, location), None => Sleep::far_future(location), }; Timeout::new_with_delay(future.into_future(), delay) } /// Requires a `Future` to complete before the specified instant in time. /// /// If the future completes before the instant is reached, then the completed /// value is returned. Otherwise, an error is returned. /// /// This function returns a future whose return type is [`Result`]`<T,`[`Elapsed`]`>`, where `T` is the /// return type of the provided future. /// /// If the provided future completes immediately, then the future returned from /// this function is guaranteed to complete immediately with an [`Ok`] variant /// no matter the provided deadline. 
/// /// [`Ok`]: std::result::Result::Ok /// [`Result`]: std::result::Result /// [`Elapsed`]: crate::time::error::Elapsed /// /// # Cancellation /// /// Cancelling a timeout is done by dropping the future. No additional cleanup /// or other work is required. /// /// The original future may be obtained by calling [`Timeout::into_inner`]. This /// consumes the `Timeout`. /// /// # Examples /// /// Create a new `Timeout` set to expire in 10 milliseconds. /// /// ```rust /// use tokio::time::{Instant, timeout_at}; /// use tokio::sync::oneshot; /// /// use std::time::Duration; /// /// # async fn dox() { /// let (tx, rx) = oneshot::channel(); /// # tx.send(()).unwrap(); /// /// // Wrap the future with a `Timeout` set to expire 10 milliseconds into the /// // future. /// if let Err(_) = timeout_at(Instant::now() + Duration::from_millis(10), rx).await { /// println!("did not receive value within 10 ms"); /// } /// # } /// ``` pub fn timeout_at<F>(deadline: Instant, future: F) -> Timeout<F::IntoFuture> where F: IntoFuture, { let delay = sleep_until(deadline); Timeout { value: future.into_future(), delay, } } pin_project! { /// Future returned by [`timeout`](timeout) and [`timeout_at`](timeout_at). #[must_use = "futures do nothing unless you `.await` or poll them"] #[derive(Debug)] pub struct Timeout<T> { #[pin] value: T, #[pin] delay: Sleep, } } impl<T> Timeout<T> { pub(crate) fn new_with_delay(value: T, delay: Sleep) -> Timeout<T> { Timeout { value, delay } } /// Gets a reference to the underlying value in this timeout. pub fn get_ref(&self) -> &T { &self.value } /// Gets a mutable reference to the underlying value in this timeout. pub fn get_mut(&mut self) -> &mut T { &mut self.value } /// Consumes this timeout, returning the underlying value. 
pub fn into_inner(self) -> T { self.value } } impl<T> Future for Timeout<T> where T: Future, { type Output = Result<T::Output, Elapsed>; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> { let me = self.project(); let had_budget_before = coop::has_budget_remaining(); // First, try polling the future if let Poll::Ready(v) = me.value.poll(cx) { return Poll::Ready(Ok(v)); } poll_delay(had_budget_before, me.delay, cx).map(Err) } } // The T-invariant portion of Timeout::<T>::poll. Pulling this out reduces the // amount of code that gets duplicated during monomorphization. fn poll_delay( had_budget_before: bool, delay: Pin<&mut Sleep>, cx: &mut task::Context<'_>, ) -> Poll<Elapsed> { let delay_poll = || match delay.poll(cx) { Poll::Ready(()) => Poll::Ready(Elapsed::new()), Poll::Pending => Poll::Pending, }; let has_budget_now = coop::has_budget_remaining(); if let (true, false) = (had_budget_before, has_budget_now) { // if it is the underlying future that exhausted the budget, we poll // the `delay` with an unconstrained one. This prevents pathological // cases where the underlying future always exhausts the budget and // we never get a chance to evaluate whether the timeout was hit or // not. coop::with_unconstrained(delay_poll) } else { delay_poll() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/time/instant.rs
tokio/src/time/instant.rs
#![allow(clippy::trivially_copy_pass_by_ref)] use std::fmt; use std::ops; use std::time::Duration; /// A measurement of a monotonically nondecreasing clock. /// Opaque and useful only with `Duration`. /// /// Instants are always guaranteed to be no less than any previously measured /// instant when created, and are often useful for tasks such as measuring /// benchmarks or timing how long an operation takes. /// /// Note, however, that instants are not guaranteed to be **steady**. In other /// words, each tick of the underlying clock may not be the same length (e.g. /// some seconds may be longer than others). An instant may jump forwards or /// experience time dilation (slow down or speed up), but it will never go /// backwards. /// /// Instants are opaque types that can only be compared to one another. There is /// no method to get "the number of seconds" from an instant. Instead, it only /// allows measuring the duration between two instants (or comparing two /// instants). /// /// The size of an `Instant` struct may vary depending on the target operating /// system. /// /// # Note /// /// This type wraps the inner `std` variant and is used to align the Tokio /// clock for uses of `now()`. This can be useful for testing where you can /// take advantage of `time::pause()` and `time::advance()`. #[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Hash)] pub struct Instant { std: std::time::Instant, } impl Instant { /// Returns an instant corresponding to "now". /// /// # Examples /// /// ``` /// use tokio::time::Instant; /// /// let now = Instant::now(); /// ``` pub fn now() -> Instant { variant::now() } /// Create a `tokio::time::Instant` from a `std::time::Instant`. pub fn from_std(std: std::time::Instant) -> Instant { Instant { std } } pub(crate) fn far_future() -> Instant { // Roughly 30 years from now. // API does not provide a way to obtain max `Instant` // or convert specific date in the future to instant. 
// 1000 years overflows on macOS, 100 years overflows on FreeBSD. Self::now() + Duration::from_secs(86400 * 365 * 30) } /// Convert the value into a `std::time::Instant`. pub fn into_std(self) -> std::time::Instant { self.std } /// Returns the amount of time elapsed from another instant to this one, or /// zero duration if that instant is later than this one. pub fn duration_since(&self, earlier: Instant) -> Duration { self.std.saturating_duration_since(earlier.std) } /// Returns the amount of time elapsed from another instant to this one, or /// None if that instant is later than this one. /// /// # Examples /// /// ``` /// use tokio::time::{Duration, Instant, sleep}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let now = Instant::now(); /// sleep(Duration::new(1, 0)).await; /// let new_now = Instant::now(); /// println!("{:?}", new_now.checked_duration_since(now)); /// println!("{:?}", now.checked_duration_since(new_now)); // None /// # } /// ``` pub fn checked_duration_since(&self, earlier: Instant) -> Option<Duration> { self.std.checked_duration_since(earlier.std) } /// Returns the amount of time elapsed from another instant to this one, or /// zero duration if that instant is later than this one. /// /// # Examples /// /// ``` /// use tokio::time::{Duration, Instant, sleep}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let now = Instant::now(); /// sleep(Duration::new(1, 0)).await; /// let new_now = Instant::now(); /// println!("{:?}", new_now.saturating_duration_since(now)); /// println!("{:?}", now.saturating_duration_since(new_now)); // 0ns /// } /// ``` pub fn saturating_duration_since(&self, earlier: Instant) -> Duration { self.std.saturating_duration_since(earlier.std) } /// Returns the amount of time elapsed since this instant was created, /// or zero duration if this instant is in the future. 
/// /// # Examples /// /// ``` /// use tokio::time::{Duration, Instant, sleep}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let instant = Instant::now(); /// let three_secs = Duration::from_secs(3); /// sleep(three_secs).await; /// assert!(instant.elapsed() >= three_secs); /// # } /// ``` pub fn elapsed(&self) -> Duration { Instant::now().saturating_duration_since(*self) } /// Returns `Some(t)` where `t` is the time `self + duration` if `t` can be /// represented as `Instant` (which means it's inside the bounds of the /// underlying data structure), `None` otherwise. pub fn checked_add(&self, duration: Duration) -> Option<Instant> { self.std.checked_add(duration).map(Instant::from_std) } /// Returns `Some(t)` where `t` is the time `self - duration` if `t` can be /// represented as `Instant` (which means it's inside the bounds of the /// underlying data structure), `None` otherwise. pub fn checked_sub(&self, duration: Duration) -> Option<Instant> { self.std.checked_sub(duration).map(Instant::from_std) } } impl From<std::time::Instant> for Instant { fn from(time: std::time::Instant) -> Instant { Instant::from_std(time) } } impl From<Instant> for std::time::Instant { fn from(time: Instant) -> std::time::Instant { time.into_std() } } impl ops::Add<Duration> for Instant { type Output = Instant; fn add(self, other: Duration) -> Instant { Instant::from_std(self.std + other) } } impl ops::AddAssign<Duration> for Instant { fn add_assign(&mut self, rhs: Duration) { *self = *self + rhs; } } impl ops::Sub for Instant { type Output = Duration; fn sub(self, rhs: Instant) -> Duration { self.std.saturating_duration_since(rhs.std) } } impl ops::Sub<Duration> for Instant { type Output = Instant; fn sub(self, rhs: Duration) -> Instant { Instant::from_std(std::time::Instant::sub(self.std, rhs)) } } impl ops::SubAssign<Duration> for Instant { fn sub_assign(&mut self, rhs: Duration) { *self = *self - rhs; } } impl fmt::Debug for Instant { fn 
fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { self.std.fmt(fmt) } } #[cfg(not(feature = "test-util"))] mod variant { use super::Instant; pub(super) fn now() -> Instant { Instant::from_std(std::time::Instant::now()) } } #[cfg(feature = "test-util")] mod variant { use super::Instant; pub(super) fn now() -> Instant { crate::time::clock::now() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/time/sleep.rs
tokio/src/time/sleep.rs
use crate::runtime::Timer; use crate::time::{error::Error, Duration, Instant}; use crate::util::trace; use pin_project_lite::pin_project; use std::future::Future; use std::panic::Location; use std::pin::Pin; use std::task::{self, ready, Poll}; /// Waits until `deadline` is reached. /// /// No work is performed while awaiting on the sleep future to complete. `Sleep` /// operates at millisecond granularity and should not be used for tasks that /// require high-resolution timers. /// /// To run something regularly on a schedule, see [`interval`]. /// /// # Cancellation /// /// Canceling a sleep instance is done by dropping the returned future. No additional /// cleanup work is required. /// /// # Examples /// /// Wait 100ms and print "100 ms have elapsed". /// /// ``` /// use tokio::time::{sleep_until, Instant, Duration}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// sleep_until(Instant::now() + Duration::from_millis(100)).await; /// println!("100 ms have elapsed"); /// # } /// ``` /// /// See the documentation for the [`Sleep`] type for more examples. /// /// # Panics /// /// This function panics if there is no current timer set. /// /// It can be triggered when [`Builder::enable_time`] or /// [`Builder::enable_all`] are not included in the builder. /// /// It can also panic whenever a timer is created outside of a /// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic, /// since the function is executed outside of the runtime. /// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. /// And this is because wrapping the function on an async makes it lazy, /// and so gets executed inside the runtime successfully without /// panicking. 
/// /// [`Sleep`]: struct@crate::time::Sleep /// [`interval`]: crate::time::interval() /// [`Builder::enable_time`]: crate::runtime::Builder::enable_time /// [`Builder::enable_all`]: crate::runtime::Builder::enable_all // Alias for old name in 0.x #[cfg_attr(docsrs, doc(alias = "delay_until"))] #[track_caller] pub fn sleep_until(deadline: Instant) -> Sleep { Sleep::new_timeout(deadline, trace::caller_location()) } /// Waits until `duration` has elapsed. /// /// Equivalent to `sleep_until(Instant::now() + duration)`. An asynchronous /// analog to `std::thread::sleep`. /// /// No work is performed while awaiting on the sleep future to complete. `Sleep` /// operates at millisecond granularity and should not be used for tasks that /// require high-resolution timers. The implementation is platform specific, /// and some platforms (specifically Windows) will provide timers with a /// larger resolution than 1 ms. /// /// To run something regularly on a schedule, see [`interval`]. /// /// # Cancellation /// /// Canceling a sleep instance is done by dropping the returned future. No additional /// cleanup work is required. /// /// # Examples /// /// Wait 100ms and print "100 ms have elapsed". /// /// ``` /// use tokio::time::{sleep, Duration}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// sleep(Duration::from_millis(100)).await; /// println!("100 ms have elapsed"); /// # } /// ``` /// /// See the documentation for the [`Sleep`] type for more examples. /// /// # Panics /// /// This function panics if there is no current timer set. /// /// It can be triggered when [`Builder::enable_time`] or /// [`Builder::enable_all`] are not included in the builder. /// /// It can also panic whenever a timer is created outside of a /// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic, /// since the function is executed outside of the runtime. /// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. 
/// And this is because wrapping the function on an async makes it lazy, /// and so gets executed inside the runtime successfully without /// panicking. /// /// [`Sleep`]: struct@crate::time::Sleep /// [`interval`]: crate::time::interval() /// [`Builder::enable_time`]: crate::runtime::Builder::enable_time /// [`Builder::enable_all`]: crate::runtime::Builder::enable_all // Alias for old name in 0.x #[cfg_attr(docsrs, doc(alias = "delay_for"))] #[cfg_attr(docsrs, doc(alias = "wait"))] #[track_caller] pub fn sleep(duration: Duration) -> Sleep { let location = trace::caller_location(); match Instant::now().checked_add(duration) { Some(deadline) => Sleep::new_timeout(deadline, location), None => Sleep::new_timeout(Instant::far_future(), location), } } pin_project! { /// Future returned by [`sleep`](sleep) and [`sleep_until`](sleep_until). /// /// This type does not implement the `Unpin` trait, which means that if you /// use it with [`select!`] or by calling `poll`, you have to pin it first. /// If you use it with `.await`, this does not apply. /// /// # Examples /// /// Wait 100ms and print "100 ms have elapsed". /// /// ``` /// use tokio::time::{sleep, Duration}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// sleep(Duration::from_millis(100)).await; /// println!("100 ms have elapsed"); /// # } /// ``` /// /// Use with [`select!`]. Pinning the `Sleep` with [`tokio::pin!`] is /// necessary when the same `Sleep` is selected on multiple times. /// ```no_run /// use tokio::time::{self, Duration, Instant}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let sleep = time::sleep(Duration::from_millis(10)); /// tokio::pin!(sleep); /// /// loop { /// tokio::select! { /// () = &mut sleep => { /// println!("timer elapsed"); /// sleep.as_mut().reset(Instant::now() + Duration::from_millis(50)); /// }, /// } /// } /// # } /// ``` /// Use in a struct with boxing. 
By pinning the `Sleep` with a `Box`, the /// `HasSleep` struct implements `Unpin`, even though `Sleep` does not. /// ``` /// use std::future::Future; /// use std::pin::Pin; /// use std::task::{Context, Poll}; /// use tokio::time::Sleep; /// /// struct HasSleep { /// sleep: Pin<Box<Sleep>>, /// } /// /// impl Future for HasSleep { /// type Output = (); /// /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { /// self.sleep.as_mut().poll(cx) /// } /// } /// ``` /// Use in a struct with pin projection. This method avoids the `Box`, but /// the `HasSleep` struct will not be `Unpin` as a consequence. /// ``` /// use std::future::Future; /// use std::pin::Pin; /// use std::task::{Context, Poll}; /// use tokio::time::Sleep; /// use pin_project_lite::pin_project; /// /// pin_project! { /// struct HasSleep { /// #[pin] /// sleep: Sleep, /// } /// } /// /// impl Future for HasSleep { /// type Output = (); /// /// fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { /// self.project().sleep.poll(cx) /// } /// } /// ``` /// /// [`select!`]: ../macro.select.html /// [`tokio::pin!`]: ../macro.pin.html #[project(!Unpin)] // Alias for old name in 0.2 #[cfg_attr(docsrs, doc(alias = "Delay"))] #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct Sleep { inner: Inner, // The link between the `Sleep` instance and the timer that drives it. #[pin] entry: Timer, } } cfg_trace! { #[derive(Debug)] struct Inner { ctx: trace::AsyncOpTracingCtx, } } cfg_not_trace! 
{ #[derive(Debug)] struct Inner { } } impl Sleep { #[cfg_attr(not(all(tokio_unstable, feature = "tracing")), allow(unused_variables))] #[track_caller] pub(crate) fn new_timeout( deadline: Instant, location: Option<&'static Location<'static>>, ) -> Sleep { use crate::runtime::scheduler; let handle = scheduler::Handle::current(); let entry = Timer::new(handle, deadline); #[cfg(all(tokio_unstable, feature = "tracing"))] let inner = { let handle = scheduler::Handle::current(); let clock = handle.driver().clock(); let handle = &handle.driver().time(); let time_source = handle.time_source(); let deadline_tick = time_source.deadline_to_tick(deadline); let duration = deadline_tick.saturating_sub(time_source.now(clock)); let location = location.expect("should have location if tracing"); let resource_span = tracing::trace_span!( parent: None, "runtime.resource", concrete_type = "Sleep", kind = "timer", loc.file = location.file(), loc.line = location.line(), loc.col = location.column(), ); let async_op_span = resource_span.in_scope(|| { tracing::trace!( target: "runtime::resource::state_update", duration = duration, duration.unit = "ms", duration.op = "override", ); tracing::trace_span!("runtime.resource.async_op", source = "Sleep::new_timeout") }); let async_op_poll_span = async_op_span.in_scope(|| tracing::trace_span!("runtime.resource.async_op.poll")); let ctx = trace::AsyncOpTracingCtx { async_op_span, async_op_poll_span, resource_span, }; Inner { ctx } }; #[cfg(not(all(tokio_unstable, feature = "tracing")))] let inner = Inner {}; Sleep { inner, entry } } pub(crate) fn far_future(location: Option<&'static Location<'static>>) -> Sleep { Self::new_timeout(Instant::far_future(), location) } /// Returns the instant at which the future will complete. pub fn deadline(&self) -> Instant { self.entry.deadline() } /// Returns `true` if `Sleep` has elapsed. /// /// A `Sleep` instance is elapsed when the requested duration has elapsed. 
pub fn is_elapsed(&self) -> bool { self.entry.is_elapsed() } /// Resets the `Sleep` instance to a new deadline. /// /// Calling this function allows changing the instant at which the `Sleep` /// future completes without having to create new associated state. /// /// This function can be called both before and after the future has /// completed. /// /// To call this method, you will usually combine the call with /// [`Pin::as_mut`], which lets you call the method without consuming the /// `Sleep` itself. /// /// # Example /// /// ``` /// use tokio::time::{Duration, Instant}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let sleep = tokio::time::sleep(Duration::from_millis(10)); /// tokio::pin!(sleep); /// /// sleep.as_mut().reset(Instant::now() + Duration::from_millis(20)); /// # } /// ``` /// /// See also the top-level examples. /// /// [`Pin::as_mut`]: fn@std::pin::Pin::as_mut pub fn reset(self: Pin<&mut Self>, deadline: Instant) { self.reset_inner(deadline); } /// Resets the `Sleep` instance to a new deadline without reregistering it /// to be woken up. /// /// Calling this function allows changing the instant at which the `Sleep` /// future completes without having to create new associated state and /// without having it registered. This is required in e.g. the /// [`crate::time::Interval`] where we want to reset the internal [Sleep] /// without having it wake up the last task that polled it. 
pub(crate) fn reset_without_reregister(self: Pin<&mut Self>, deadline: Instant) { let mut me = self.project(); match me.entry.as_ref().flavor() { crate::runtime::TimerFlavor::Traditional => { me.entry.as_mut().reset(deadline, false); } #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] crate::runtime::TimerFlavor::Alternative => { let handle = me.entry.as_ref().scheduler_handle().clone(); me.entry.set(Timer::new(handle, deadline)); } } } fn reset_inner(self: Pin<&mut Self>, deadline: Instant) { let mut me = self.project(); match me.entry.as_ref().flavor() { crate::runtime::TimerFlavor::Traditional => { me.entry.as_mut().reset(deadline, true); } #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] crate::runtime::TimerFlavor::Alternative => { let handle = me.entry.as_ref().scheduler_handle().clone(); me.entry.set(Timer::new(handle, deadline)); } } #[cfg(all(tokio_unstable, feature = "tracing"))] { let _resource_enter = me.inner.ctx.resource_span.enter(); me.inner.ctx.async_op_span = tracing::trace_span!("runtime.resource.async_op", source = "Sleep::reset"); let _async_op_enter = me.inner.ctx.async_op_span.enter(); me.inner.ctx.async_op_poll_span = tracing::trace_span!("runtime.resource.async_op.poll"); let duration = { let clock = me.entry.as_ref().clock(); let time_source = me.entry.as_ref().driver().time_source(); let now = time_source.now(clock); let deadline_tick = time_source.deadline_to_tick(deadline); deadline_tick.saturating_sub(now) }; tracing::trace!( target: "runtime::resource::state_update", duration = duration, duration.unit = "ms", duration.op = "override", ); } } fn poll_elapsed(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Result<(), Error>> { let me = self.project(); ready!(crate::trace::trace_leaf(cx)); // Keep track of task budget #[cfg(all(tokio_unstable, feature = "tracing"))] let coop = ready!(trace_poll_op!( "poll_elapsed", crate::task::coop::poll_proceed(cx), )); #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] 
let coop = ready!(crate::task::coop::poll_proceed(cx)); let result = me.entry.poll_elapsed(cx).map(move |r| { coop.made_progress(); r }); #[cfg(all(tokio_unstable, feature = "tracing"))] return trace_poll_op!("poll_elapsed", result); #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] return result; } } impl Future for Sleep { type Output = (); // `poll_elapsed` can return an error in two cases: // // - AtCapacity: this is a pathological case where far too many // sleep instances have been scheduled. // - Shutdown: No timer has been setup, which is a misuse error. // // Both cases are extremely rare, and pretty accurately fit into // "logic errors", so we just panic in this case. A user couldn't // really do much better if we passed the error onwards. fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> { #[cfg(all(tokio_unstable, feature = "tracing"))] let _res_span = self.inner.ctx.resource_span.clone().entered(); #[cfg(all(tokio_unstable, feature = "tracing"))] let _ao_span = self.inner.ctx.async_op_span.clone().entered(); #[cfg(all(tokio_unstable, feature = "tracing"))] let _ao_poll_span = self.inner.ctx.async_op_poll_span.clone().entered(); match ready!(self.as_mut().poll_elapsed(cx)) { Ok(()) => Poll::Ready(()), Err(e) => panic!("timer error: {e}"), } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/time/clock.rs
tokio/src/time/clock.rs
#![cfg_attr(not(feature = "rt"), allow(dead_code))] //! Source of time abstraction. //! //! By default, `std::time::Instant::now()` is used. However, when the //! `test-util` feature flag is enabled, the values returned for `now()` are //! configurable. cfg_not_test_util! { use crate::time::{Instant}; #[derive(Debug, Clone)] pub(crate) struct Clock {} pub(crate) fn now() -> Instant { Instant::from_std(std::time::Instant::now()) } impl Clock { pub(crate) fn new(_enable_pausing: bool, _start_paused: bool) -> Clock { Clock {} } pub(crate) fn now(&self) -> Instant { now() } } } cfg_test_util! { use crate::time::{Duration, Instant}; use crate::loom::sync::Mutex; use crate::loom::sync::atomic::Ordering; use std::sync::atomic::AtomicBool as StdAtomicBool; cfg_rt! { #[track_caller] fn with_clock<R>(f: impl FnOnce(Option<&Clock>) -> Result<R, &'static str>) -> R { use crate::runtime::Handle; let res = match Handle::try_current() { Ok(handle) => f(Some(handle.inner.driver().clock())), Err(ref e) if e.is_missing_context() => f(None), Err(_) => panic!("{}", crate::util::error::THREAD_LOCAL_DESTROYED_ERROR), }; match res { Ok(ret) => ret, Err(msg) => panic!("{}", msg), } } } cfg_not_rt! { #[track_caller] fn with_clock<R>(f: impl FnOnce(Option<&Clock>) -> Result<R, &'static str>) -> R { match f(None) { Ok(ret) => ret, Err(msg) => panic!("{}", msg), } } } /// A handle to a source of time. #[derive(Debug)] pub(crate) struct Clock { inner: Mutex<Inner>, } // Used to track if the clock was ever paused. This is an optimization to // avoid touching the mutex if `test-util` was accidentally enabled in // release mode. // // A static is used so we can avoid accessing the thread-local as well. The // `std` AtomicBool is used directly because loom does not support static // atomics. static DID_PAUSE_CLOCK: StdAtomicBool = StdAtomicBool::new(false); #[derive(Debug)] struct Inner { /// True if the ability to pause time is enabled. 
enable_pausing: bool, /// Instant to use as the clock's base instant. base: std::time::Instant, /// Instant at which the clock was last unfrozen. unfrozen: Option<std::time::Instant>, /// Number of `inhibit_auto_advance` calls still in effect. auto_advance_inhibit_count: usize, } /// Pauses time. /// /// The current value of `Instant::now()` is saved and all subsequent calls /// to `Instant::now()` will return the saved value. The saved value can be /// changed by [`advance`] or by the time auto-advancing once the runtime /// has no work to do. This only affects the `Instant` type in Tokio, and /// the `Instant` in std continues to work as normal. /// /// Pausing time requires the `current_thread` Tokio runtime. This is the /// default runtime used by `#[tokio::test]`. The runtime can be initialized /// with time in a paused state using the `Builder::start_paused` method. /// /// For cases where time is immediately paused, it is better to pause /// the time using the `main` or `test` macro: /// ``` /// #[tokio::main(flavor = "current_thread", start_paused = true)] /// async fn main() { /// println!("Hello world"); /// } /// ``` /// /// # Panics /// /// Panics if time is already frozen or if called from outside of a /// `current_thread` Tokio runtime. /// /// # Auto-advance /// /// If time is paused and the runtime has no work to do, the clock is /// auto-advanced to the next pending timer. This means that [`Sleep`] or /// other timer-backed primitives can cause the runtime to advance the /// current time when awaited. /// /// [`Sleep`]: crate::time::Sleep /// [`advance`]: crate::time::advance #[track_caller] pub fn pause() { with_clock(|maybe_clock| { match maybe_clock { Some(clock) => clock.pause(), None => Err("time cannot be frozen from outside the Tokio runtime"), } }); } /// Resumes time. /// /// Clears the saved `Instant::now()` value. Subsequent calls to /// `Instant::now()` will return the value returned by the system call. 
/// /// # Panics /// /// Panics if time is not frozen or if called from outside of the Tokio /// runtime. #[track_caller] pub fn resume() { with_clock(|maybe_clock| { let clock = match maybe_clock { Some(clock) => clock, None => return Err("time cannot be frozen from outside the Tokio runtime"), }; let mut inner = clock.inner.lock(); if inner.unfrozen.is_some() { return Err("time is not frozen"); } inner.unfrozen = Some(std::time::Instant::now()); Ok(()) }); } /// Advances time. /// /// Increments the saved `Instant::now()` value by `duration`. Subsequent /// calls to `Instant::now()` will return the result of the increment. /// /// This function will make the current time jump forward by the given /// duration in one jump. This means that all `sleep` calls with a deadline /// before the new time will immediately complete "at the same time", and /// the runtime is free to poll them in any order. Additionally, this /// method will not wait for the `sleep` calls it advanced past to complete. /// If you want to do that, you should instead call [`sleep`] and rely on /// the runtime's auto-advance feature. /// /// Note that calls to `sleep` are not guaranteed to complete the first time /// they are polled after a call to `advance`. For example, this can happen /// if the runtime has not yet touched the timer driver after the call to /// `advance`. However if they don't, the runtime will poll the task again /// shortly. /// /// # Panics /// /// Panics if any of the following conditions are met: /// /// - The clock is not frozen, which means that you must /// call [`pause`] before calling this method. /// - If called outside of the Tokio runtime. /// - If the input `duration` is too large (such as [`Duration::MAX`]) /// to be safely added to the current time without causing an overflow. /// /// # Caveats /// /// Using a very large `duration` is not recommended, /// as it may cause panicking due to overflow. 
/// /// # Auto-advance /// /// If the time is paused and there is no work to do, the runtime advances /// time to the next timer. See [`pause`](pause#auto-advance) for more /// details. /// /// [`sleep`]: fn@crate::time::sleep pub async fn advance(duration: Duration) { with_clock(|maybe_clock| { let clock = match maybe_clock { Some(clock) => clock, None => return Err("time cannot be frozen from outside the Tokio runtime"), }; clock.advance(duration) }); crate::task::yield_now().await; } /// Returns the current instant, factoring in frozen time. pub(crate) fn now() -> Instant { if !DID_PAUSE_CLOCK.load(Ordering::Acquire) { return Instant::from_std(std::time::Instant::now()); } with_clock(|maybe_clock| { Ok(if let Some(clock) = maybe_clock { clock.now() } else { Instant::from_std(std::time::Instant::now()) }) }) } impl Clock { /// Returns a new `Clock` instance that uses the current execution context's /// source of time. pub(crate) fn new(enable_pausing: bool, start_paused: bool) -> Clock { let now = std::time::Instant::now(); let clock = Clock { inner: Mutex::new(Inner { enable_pausing, base: now, unfrozen: Some(now), auto_advance_inhibit_count: 0, }), }; if start_paused { if let Err(msg) = clock.pause() { panic!("{}", msg); } } clock } pub(crate) fn pause(&self) -> Result<(), &'static str> { let mut inner = self.inner.lock(); if !inner.enable_pausing { return Err("`time::pause()` requires the `current_thread` Tokio runtime. \ This is the default Runtime used by `#[tokio::test]."); } // Track that we paused the clock DID_PAUSE_CLOCK.store(true, Ordering::Release); let elapsed = match inner.unfrozen.as_ref() { Some(v) => v.elapsed(), None => return Err("time is already frozen") }; inner.base += elapsed; inner.unfrozen = None; Ok(()) } /// Temporarily stop auto-advancing the clock (see `tokio::time::pause`). 
pub(crate) fn inhibit_auto_advance(&self) { let mut inner = self.inner.lock(); inner.auto_advance_inhibit_count += 1; } pub(crate) fn allow_auto_advance(&self) { let mut inner = self.inner.lock(); inner.auto_advance_inhibit_count -= 1; } pub(crate) fn can_auto_advance(&self) -> bool { let inner = self.inner.lock(); inner.unfrozen.is_none() && inner.auto_advance_inhibit_count == 0 } pub(crate) fn advance(&self, duration: Duration) -> Result<(), &'static str> { let mut inner = self.inner.lock(); if inner.unfrozen.is_some() { return Err("time is not frozen"); } inner.base += duration; Ok(()) } pub(crate) fn now(&self) -> Instant { let inner = self.inner.lock(); let mut ret = inner.base; if let Some(unfrozen) = inner.unfrozen { ret += unfrozen.elapsed(); } Instant::from_std(ret) } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/time/error.rs
tokio/src/time/error.rs
//! Time error types. use std::error; use std::fmt; /// Errors encountered by the timer implementation. /// /// Currently, there are two different errors that can occur: /// /// * `shutdown` occurs when a timer operation is attempted, but the timer /// instance has been dropped. In this case, the operation will never be able /// to complete and the `shutdown` error is returned. This is a permanent /// error, i.e., once this error is observed, timer operations will never /// succeed in the future. /// /// * `at_capacity` occurs when a timer operation is attempted, but the timer /// instance is currently handling its maximum number of outstanding sleep instances. /// In this case, the operation is not able to be performed at the current /// moment, and `at_capacity` is returned. This is a transient error, i.e., at /// some point in the future, if the operation is attempted again, it might /// succeed. Callers that observe this error should attempt to [shed load]. One /// way to do this would be dropping the future that issued the timer operation. /// /// [shed load]: https://en.wikipedia.org/wiki/Load_Shedding #[derive(Debug, Copy, Clone)] pub struct Error(Kind); #[derive(Debug, Clone, Copy, Eq, PartialEq)] #[repr(u8)] pub(crate) enum Kind { Shutdown = 1, AtCapacity = 2, Invalid = 3, } impl From<Kind> for Error { fn from(k: Kind) -> Self { Error(k) } } /// Errors returned by `Timeout`. /// /// This error is returned when a timeout expires before the function was able /// to finish. #[derive(Debug, PartialEq, Eq)] pub struct Elapsed(()); #[derive(Debug)] pub(crate) enum InsertError { Elapsed, } // ===== impl Error ===== impl Error { /// Creates an error representing a shutdown timer. pub fn shutdown() -> Error { Error(Kind::Shutdown) } /// Returns `true` if the error was caused by the timer being shutdown. pub fn is_shutdown(&self) -> bool { matches!(self.0, Kind::Shutdown) } /// Creates an error representing a timer at capacity. 
pub fn at_capacity() -> Error { Error(Kind::AtCapacity) } /// Returns `true` if the error was caused by the timer being at capacity. pub fn is_at_capacity(&self) -> bool { matches!(self.0, Kind::AtCapacity) } /// Creates an error representing a misconfigured timer. pub fn invalid() -> Error { Error(Kind::Invalid) } /// Returns `true` if the error was caused by the timer being misconfigured. pub fn is_invalid(&self) -> bool { matches!(self.0, Kind::Invalid) } } impl error::Error for Error {} impl fmt::Display for Error { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { let descr = match self.0 { Kind::Shutdown => { "the timer is shutdown, must be called from the context of Tokio runtime" } Kind::AtCapacity => "timer is at capacity and cannot create a new entry", Kind::Invalid => "timer duration exceeds maximum duration", }; write!(fmt, "{descr}") } } // ===== impl Elapsed ===== impl Elapsed { pub(crate) fn new() -> Self { Elapsed(()) } } impl fmt::Display for Elapsed { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { "deadline has elapsed".fmt(fmt) } } impl std::error::Error for Elapsed {} impl From<Elapsed> for std::io::Error { fn from(_err: Elapsed) -> std::io::Error { std::io::ErrorKind::TimedOut.into() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/time/mod.rs
tokio/src/time/mod.rs
//! Utilities for tracking time. //! //! This module provides a number of types for executing code after a set period //! of time. //! //! * [`Sleep`] is a future that does no work and completes at a specific [`Instant`] //! in time. //! //! * [`Interval`] is a stream yielding a value at a fixed period. It is //! initialized with a [`Duration`] and repeatedly yields each time the duration //! elapses. //! //! * [`Timeout`]: Wraps a future or stream, setting an upper bound to the amount //! of time it is allowed to execute. If the future or stream does not //! complete in time, then it is canceled and an error is returned. //! //! These types are sufficient for handling a large number of scenarios //! involving time. //! //! These types must be used from within the context of the [`Runtime`](crate::runtime::Runtime). //! //! # Examples //! //! Wait 100ms and print "100 ms have elapsed" //! //! ``` //! use std::time::Duration; //! use tokio::time::sleep; //! //! # #[tokio::main(flavor = "current_thread")] //! # async fn main() { //! sleep(Duration::from_millis(100)).await; //! println!("100 ms have elapsed"); //! # } //! ``` //! //! Require that an operation takes no more than 1s. //! //! ``` //! use tokio::time::{timeout, Duration}; //! //! async fn long_future() { //! // do work here //! } //! //! # async fn dox() { //! let res = timeout(Duration::from_secs(1), long_future()).await; //! //! if res.is_err() { //! println!("operation timed out"); //! } //! # } //! ``` //! //! A simple example using [`interval`] to execute a task every two seconds. //! //! The difference between [`interval`] and [`sleep`] is that an [`interval`] //! measures the time since the last tick, which means that `.tick().await` may //! wait for a shorter time than the duration specified for the interval //! if some time has passed between calls to `.tick().await`. //! //! If the tick in the example below was replaced with [`sleep`], the task //! 
would only be executed once every three seconds, and not every two //! seconds. //! //! ``` //! use tokio::time; //! //! async fn task_that_takes_a_second() { //! println!("hello"); //! time::sleep(time::Duration::from_secs(1)).await //! } //! //! # #[tokio::main(flavor = "current_thread")] //! # async fn main() { //! let mut interval = time::interval(time::Duration::from_secs(2)); //! for _i in 0..5 { //! interval.tick().await; //! task_that_takes_a_second().await; //! } //! # } //! ``` //! //! [`interval`]: crate::time::interval() //! [`sleep`]: sleep() mod clock; pub(crate) use self::clock::Clock; cfg_test_util! { pub use clock::{advance, pause, resume}; } pub mod error; mod instant; pub use self::instant::Instant; mod interval; pub use interval::{interval, interval_at, Interval, MissedTickBehavior}; mod sleep; pub use sleep::{sleep, sleep_until, Sleep}; mod timeout; #[doc(inline)] pub use timeout::{timeout, timeout_at, Timeout}; // Re-export for convenience #[doc(no_inline)] pub use std::time::Duration;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/time/interval.rs
tokio/src/time/interval.rs
use crate::time::{sleep_until, Duration, Instant, Sleep}; use crate::util::trace; use std::future::{poll_fn, Future}; use std::panic::Location; use std::pin::Pin; use std::task::{ready, Context, Poll}; /// Creates new [`Interval`] that yields with interval of `period`. The first /// tick completes immediately. The default [`MissedTickBehavior`] is /// [`Burst`](MissedTickBehavior::Burst), but this can be configured /// by calling [`set_missed_tick_behavior`](Interval::set_missed_tick_behavior). /// /// An interval will tick indefinitely. At any time, the [`Interval`] value can /// be dropped. This cancels the interval. /// /// This function is equivalent to /// [`interval_at(Instant::now(), period)`](interval_at). /// /// # Panics /// /// This function panics if `period` is zero. /// /// # Examples /// /// ``` /// use tokio::time::{self, Duration}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut interval = time::interval(Duration::from_millis(10)); /// /// interval.tick().await; // ticks immediately /// interval.tick().await; // ticks after 10ms /// interval.tick().await; // ticks after 10ms /// /// // approximately 20ms have elapsed. /// # } /// ``` /// /// A simple example using `interval` to execute a task every two seconds. /// /// The difference between `interval` and [`sleep`] is that an [`Interval`] /// measures the time since the last tick, which means that [`.tick().await`] /// may wait for a shorter time than the duration specified for the interval /// if some time has passed between calls to [`.tick().await`]. /// /// If the tick in the example below was replaced with [`sleep`], the task /// would only be executed once every three seconds, and not every two /// seconds. 
/// /// ``` /// use tokio::time; /// /// async fn task_that_takes_a_second() { /// println!("hello"); /// time::sleep(time::Duration::from_secs(1)).await /// } /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut interval = time::interval(time::Duration::from_secs(2)); /// for _i in 0..5 { /// interval.tick().await; /// task_that_takes_a_second().await; /// } /// # } /// ``` /// /// [`sleep`]: crate::time::sleep() /// [`.tick().await`]: Interval::tick #[track_caller] pub fn interval(period: Duration) -> Interval { assert!(period > Duration::new(0, 0), "`period` must be non-zero."); internal_interval_at(Instant::now(), period, trace::caller_location()) } /// Creates new [`Interval`] that yields with interval of `period` with the /// first tick completing at `start`. The default [`MissedTickBehavior`] is /// [`Burst`](MissedTickBehavior::Burst), but this can be configured /// by calling [`set_missed_tick_behavior`](Interval::set_missed_tick_behavior). /// /// An interval will tick indefinitely. At any time, the [`Interval`] value can /// be dropped. This cancels the interval. /// /// # Panics /// /// This function panics if `period` is zero. /// /// # Examples /// /// ``` /// use tokio::time::{interval_at, Duration, Instant}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let start = Instant::now() + Duration::from_millis(50); /// let mut interval = interval_at(start, Duration::from_millis(10)); /// /// interval.tick().await; // ticks after 50ms /// interval.tick().await; // ticks after 10ms /// interval.tick().await; // ticks after 10ms /// /// // approximately 70ms have elapsed. 
/// # } /// ``` #[track_caller] pub fn interval_at(start: Instant, period: Duration) -> Interval { assert!(period > Duration::new(0, 0), "`period` must be non-zero."); internal_interval_at(start, period, trace::caller_location()) } #[cfg_attr(not(all(tokio_unstable, feature = "tracing")), allow(unused_variables))] fn internal_interval_at( start: Instant, period: Duration, location: Option<&'static Location<'static>>, ) -> Interval { #[cfg(all(tokio_unstable, feature = "tracing"))] let resource_span = { let location = location.expect("should have location if tracing"); tracing::trace_span!( parent: None, "runtime.resource", concrete_type = "Interval", kind = "timer", loc.file = location.file(), loc.line = location.line(), loc.col = location.column(), ) }; #[cfg(all(tokio_unstable, feature = "tracing"))] let delay = resource_span.in_scope(|| Box::pin(sleep_until(start))); #[cfg(not(all(tokio_unstable, feature = "tracing")))] let delay = Box::pin(sleep_until(start)); Interval { delay, period, missed_tick_behavior: MissedTickBehavior::default(), #[cfg(all(tokio_unstable, feature = "tracing"))] resource_span, } } /// Defines the behavior of an [`Interval`] when it misses a tick. /// /// Sometimes, an [`Interval`]'s tick is missed. For example, consider the /// following: /// /// ``` /// use tokio::time::{self, Duration}; /// # async fn task_that_takes_one_to_three_millis() {} /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// // ticks every 2 milliseconds /// let mut interval = time::interval(Duration::from_millis(2)); /// for _ in 0..5 { /// interval.tick().await; /// // if this takes more than 2 milliseconds, a tick will be delayed /// task_that_takes_one_to_three_millis().await; /// } /// # } /// ``` /// /// Generally, a tick is missed if too much time is spent without calling /// [`Interval::tick()`]. 
/// /// By default, when a tick is missed, [`Interval`] fires ticks as quickly as it /// can until it is "caught up" in time to where it should be. /// `MissedTickBehavior` can be used to specify a different behavior for /// [`Interval`] to exhibit. Each variant represents a different strategy. /// /// Note that because the executor cannot guarantee exact precision with timers, /// these strategies will only apply when the delay is greater than 5 /// milliseconds. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum MissedTickBehavior { /// Ticks as fast as possible until caught up. /// /// When this strategy is used, [`Interval`] schedules ticks "normally" (the /// same as it would have if the ticks hadn't been delayed), which results /// in it firing ticks as fast as possible until it is caught up in time to /// where it should be. Unlike [`Delay`] and [`Skip`], the ticks yielded /// when `Burst` is used (the [`Instant`]s that [`tick`](Interval::tick) /// yields) aren't different than they would have been if a tick had not /// been missed. Like [`Skip`], and unlike [`Delay`], the ticks may be /// shortened. /// /// This looks something like this: /// ```text /// Expected ticks: | 1 | 2 | 3 | 4 | 5 | 6 | /// Actual ticks: | work -----| delay | work | work | work -| work -----| /// ``` /// /// In code: /// /// ``` /// use tokio::time::{interval, Duration}; /// # async fn task_that_takes_200_millis() {} /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut interval = interval(Duration::from_millis(50)); /// /// // First tick resolves immediately after creation /// interval.tick().await; /// /// task_that_takes_200_millis().await; /// // The `Interval` has missed a tick /// /// // Since we have exceeded our timeout, this will resolve immediately /// interval.tick().await; /// /// // Since we are more than 100ms after the start of `interval`, this will /// // also resolve immediately. 
/// interval.tick().await; /// /// // Also resolves immediately, because it was supposed to resolve at /// // 150ms after the start of `interval` /// interval.tick().await; /// /// // Resolves immediately /// interval.tick().await; /// /// // Since we have gotten to 200ms after the start of `interval`, this /// // will resolve after 50ms /// interval.tick().await; /// # } /// ``` /// /// This is the default behavior when [`Interval`] is created with /// [`interval`] and [`interval_at`]. /// /// [`Delay`]: MissedTickBehavior::Delay /// [`Skip`]: MissedTickBehavior::Skip Burst, /// Tick at multiples of `period` from when [`tick`] was called, rather than /// from `start`. /// /// When this strategy is used and [`Interval`] has missed a tick, instead /// of scheduling ticks to fire at multiples of `period` from `start` (the /// time when the first tick was fired), it schedules all future ticks to /// happen at a regular `period` from the point when [`tick`] was called. /// Unlike [`Burst`] and [`Skip`], ticks are not shortened, and they aren't /// guaranteed to happen at a multiple of `period` from `start` any longer. 
/// /// This looks something like this: /// ```text /// Expected ticks: | 1 | 2 | 3 | 4 | 5 | 6 | /// Actual ticks: | work -----| delay | work -----| work -----| work -----| /// ``` /// /// In code: /// /// ``` /// use tokio::time::{interval, Duration, MissedTickBehavior}; /// # async fn task_that_takes_more_than_50_millis() {} /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut interval = interval(Duration::from_millis(50)); /// interval.set_missed_tick_behavior(MissedTickBehavior::Delay); /// /// task_that_takes_more_than_50_millis().await; /// // The `Interval` has missed a tick /// /// // Since we have exceeded our timeout, this will resolve immediately /// interval.tick().await; /// /// // But this one, rather than also resolving immediately, as might happen /// // with the `Burst` or `Skip` behaviors, will not resolve until /// // 50ms after the call to `tick` up above. That is, in `tick`, when we /// // recognize that we missed a tick, we schedule the next tick to happen /// // 50ms (or whatever the `period` is) from right then, not from when /// // were *supposed* to tick /// interval.tick().await; /// # } /// ``` /// /// [`Burst`]: MissedTickBehavior::Burst /// [`Skip`]: MissedTickBehavior::Skip /// [`tick`]: Interval::tick Delay, /// Skips missed ticks and tick on the next multiple of `period` from /// `start`. /// /// When this strategy is used, [`Interval`] schedules the next tick to fire /// at the next-closest tick that is a multiple of `period` away from /// `start` (the point where [`Interval`] first ticked). Like [`Burst`], all /// ticks remain multiples of `period` away from `start`, but unlike /// [`Burst`], the ticks may not be *one* multiple of `period` away from the /// last tick. 
Like [`Delay`], the ticks are no longer the same as they /// would have been if ticks had not been missed, but unlike [`Delay`], and /// like [`Burst`], the ticks may be shortened to be less than one `period` /// away from each other. /// /// This looks something like this: /// ```text /// Expected ticks: | 1 | 2 | 3 | 4 | 5 | 6 | /// Actual ticks: | work -----| delay | work ---| work -----| work -----| /// ``` /// /// In code: /// /// ``` /// use tokio::time::{interval, Duration, MissedTickBehavior}; /// # async fn task_that_takes_75_millis() {} /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut interval = interval(Duration::from_millis(50)); /// interval.set_missed_tick_behavior(MissedTickBehavior::Skip); /// /// task_that_takes_75_millis().await; /// // The `Interval` has missed a tick /// /// // Since we have exceeded our timeout, this will resolve immediately /// interval.tick().await; /// /// // This one will resolve after 25ms, 100ms after the start of /// // `interval`, which is the closest multiple of `period` from the start /// // of `interval` after the call to `tick` up above. /// interval.tick().await; /// # } /// ``` /// /// [`Burst`]: MissedTickBehavior::Burst /// [`Delay`]: MissedTickBehavior::Delay Skip, } impl MissedTickBehavior { /// If a tick is missed, this method is called to determine when the next tick should happen. fn next_timeout(&self, timeout: Instant, now: Instant, period: Duration) -> Instant { match self { Self::Burst => timeout + period, Self::Delay => now + period, Self::Skip => { now + period - Duration::from_nanos( ((now - timeout).as_nanos() % period.as_nanos()) .try_into() // This operation is practically guaranteed not to // fail, as in order for it to fail, `period` would // have to be longer than `now - timeout`, and both // would have to be longer than 584 years. // // If it did fail, there's not a good way to pass // the error along to the user, so we just panic. 
.expect( "too much time has elapsed since the interval was supposed to tick", ), ) } } } } impl Default for MissedTickBehavior { /// Returns [`MissedTickBehavior::Burst`]. /// /// For most usecases, the [`Burst`] strategy is what is desired. /// Additionally, to preserve backwards compatibility, the [`Burst`] /// strategy must be the default. For these reasons, /// [`MissedTickBehavior::Burst`] is the default for [`MissedTickBehavior`]. /// See [`Burst`] for more details. /// /// [`Burst`]: MissedTickBehavior::Burst fn default() -> Self { Self::Burst } } /// Interval returned by [`interval`] and [`interval_at`]. /// /// This type allows you to wait on a sequence of instants with a certain /// duration between each instant. Unlike calling [`sleep`] in a loop, this lets /// you count the time spent between the calls to [`sleep`] as well. /// /// An `Interval` can be turned into a `Stream` with [`IntervalStream`]. /// /// [`IntervalStream`]: https://docs.rs/tokio-stream/latest/tokio_stream/wrappers/struct.IntervalStream.html /// [`sleep`]: crate::time::sleep() #[derive(Debug)] pub struct Interval { /// Future that completes the next time the `Interval` yields a value. delay: Pin<Box<Sleep>>, /// The duration between values yielded by `Interval`. period: Duration, /// The strategy `Interval` should use when a tick is missed. missed_tick_behavior: MissedTickBehavior, #[cfg(all(tokio_unstable, feature = "tracing"))] resource_span: tracing::Span, } impl Interval { /// Completes when the next instant in the interval has been reached. /// /// # Cancel safety /// /// This method is cancellation safe. If `tick` is used as the branch in a `tokio::select!` and /// another branch completes first, then no tick has been consumed. 
/// /// # Examples /// /// ``` /// use tokio::time; /// /// use std::time::Duration; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut interval = time::interval(Duration::from_millis(10)); /// /// interval.tick().await; /// // approximately 0ms have elapsed. The first tick completes immediately. /// interval.tick().await; /// interval.tick().await; /// /// // approximately 20ms have elapsed. /// # } /// ``` pub async fn tick(&mut self) -> Instant { #[cfg(all(tokio_unstable, feature = "tracing"))] let resource_span = self.resource_span.clone(); #[cfg(all(tokio_unstable, feature = "tracing"))] let instant = trace::async_op( || poll_fn(|cx| self.poll_tick(cx)), resource_span, "Interval::tick", "poll_tick", false, ); #[cfg(not(all(tokio_unstable, feature = "tracing")))] let instant = poll_fn(|cx| self.poll_tick(cx)); instant.await } /// Polls for the next instant in the interval to be reached. /// /// This method can return the following values: /// /// * `Poll::Pending` if the next instant has not yet been reached. /// * `Poll::Ready(instant)` if the next instant has been reached. /// /// When this method returns `Poll::Pending`, the current task is scheduled /// to receive a wakeup when the instant has elapsed. Note that on multiple /// calls to `poll_tick`, only the [`Waker`](std::task::Waker) from the /// [`Context`] passed to the most recent call is scheduled to receive a /// wakeup. 
pub fn poll_tick(&mut self, cx: &mut Context<'_>) -> Poll<Instant> { // Wait for the delay to be done ready!(Pin::new(&mut self.delay).poll(cx)); // Get the time when we were scheduled to tick let timeout = self.delay.deadline(); let now = Instant::now(); // If a tick was not missed, and thus we are being called before the // next tick is due, just schedule the next tick normally, one `period` // after `timeout` // // However, if a tick took excessively long and we are now behind, // schedule the next tick according to how the user specified with // `MissedTickBehavior` let next = if now > timeout + Duration::from_millis(5) { self.missed_tick_behavior .next_timeout(timeout, now, self.period) } else { timeout .checked_add(self.period) .unwrap_or_else(Instant::far_future) }; // When we arrive here, the internal delay returned `Poll::Ready`. // Reset the delay but do not register it. It should be registered with // the next call to [`poll_tick`]. self.delay.as_mut().reset_without_reregister(next); // Return the time when we were scheduled to tick Poll::Ready(timeout) } /// Resets the interval to complete one period after the current time. /// /// This method ignores [`MissedTickBehavior`] strategy. /// /// This is equivalent to calling `reset_at(Instant::now() + period)`. /// /// # Examples /// /// ``` /// use tokio::time; /// /// use std::time::Duration; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut interval = time::interval(Duration::from_millis(100)); /// /// interval.tick().await; /// /// time::sleep(Duration::from_millis(50)).await; /// interval.reset(); /// /// interval.tick().await; /// interval.tick().await; /// /// // approximately 250ms have elapsed. /// # } /// ``` pub fn reset(&mut self) { self.delay.as_mut().reset(Instant::now() + self.period); } /// Resets the interval immediately. /// /// This method ignores [`MissedTickBehavior`] strategy. /// /// This is equivalent to calling `reset_at(Instant::now())`. 
/// /// # Examples /// /// ``` /// use tokio::time; /// /// use std::time::Duration; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut interval = time::interval(Duration::from_millis(100)); /// /// interval.tick().await; /// /// time::sleep(Duration::from_millis(50)).await; /// interval.reset_immediately(); /// /// interval.tick().await; /// interval.tick().await; /// /// // approximately 150ms have elapsed. /// # } /// ``` pub fn reset_immediately(&mut self) { self.delay.as_mut().reset(Instant::now()); } /// Resets the interval after the specified [`std::time::Duration`]. /// /// This method ignores [`MissedTickBehavior`] strategy. /// /// This is equivalent to calling `reset_at(Instant::now() + after)`. /// /// # Examples /// /// ``` /// use tokio::time; /// /// use std::time::Duration; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut interval = time::interval(Duration::from_millis(100)); /// interval.tick().await; /// /// time::sleep(Duration::from_millis(50)).await; /// /// let after = Duration::from_millis(20); /// interval.reset_after(after); /// /// interval.tick().await; /// interval.tick().await; /// /// // approximately 170ms have elapsed. /// # } /// ``` pub fn reset_after(&mut self, after: Duration) { self.delay.as_mut().reset(Instant::now() + after); } /// Resets the interval to a [`crate::time::Instant`] deadline. /// /// Sets the next tick to expire at the given instant. If the instant is in /// the past, then the [`MissedTickBehavior`] strategy will be used to /// catch up. If the instant is in the future, then the next tick will /// complete at the given instant, even if that means that it will sleep for /// longer than the duration of this [`Interval`]. If the [`Interval`] had /// any missed ticks before calling this method, then those are discarded. 
/// /// # Examples /// /// ``` /// use tokio::time::{self, Instant}; /// /// use std::time::Duration; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut interval = time::interval(Duration::from_millis(100)); /// interval.tick().await; /// /// time::sleep(Duration::from_millis(50)).await; /// /// let deadline = Instant::now() + Duration::from_millis(30); /// interval.reset_at(deadline); /// /// interval.tick().await; /// interval.tick().await; /// /// // approximately 180ms have elapsed. /// # } /// ``` pub fn reset_at(&mut self, deadline: Instant) { self.delay.as_mut().reset(deadline); } /// Returns the [`MissedTickBehavior`] strategy currently being used. pub fn missed_tick_behavior(&self) -> MissedTickBehavior { self.missed_tick_behavior } /// Sets the [`MissedTickBehavior`] strategy that should be used. pub fn set_missed_tick_behavior(&mut self, behavior: MissedTickBehavior) { self.missed_tick_behavior = behavior; } /// Returns the period of the interval. pub fn period(&self) -> Duration { self.period } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/lookup_host.rs
tokio/src/net/lookup_host.rs
cfg_net! { use crate::net::addr::{self, ToSocketAddrs}; use std::io; use std::net::SocketAddr; /// Performs a DNS resolution. /// /// The returned iterator may not actually yield any values depending on the /// outcome of any resolution performed. /// /// This API is not intended to cover all DNS use cases. Anything beyond the /// basic use case should be done with a specialized library. /// /// # Examples /// /// To resolve a DNS entry: /// /// ```no_run /// use tokio::net; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// for addr in net::lookup_host("localhost:3000").await? { /// println!("socket address is {}", addr); /// } /// /// Ok(()) /// } /// ``` pub async fn lookup_host<T>(host: T) -> io::Result<impl Iterator<Item = SocketAddr>> where T: ToSocketAddrs { addr::to_socket_addrs(host).await } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/udp.rs
tokio/src/net/udp.rs
use crate::io::{Interest, PollEvented, ReadBuf, Ready}; use crate::net::{to_socket_addrs, ToSocketAddrs}; use crate::util::check_socket_for_blocking; use std::fmt; use std::io; use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::task::{ready, Context, Poll}; cfg_io_util! { use bytes::BufMut; } cfg_net! { /// A UDP socket. /// /// UDP is "connectionless", unlike TCP. Meaning, regardless of what address you've bound to, a `UdpSocket` /// is free to communicate with many different remotes. In tokio there are basically two main ways to use `UdpSocket`: /// /// * one to many: [`bind`](`UdpSocket::bind`) and use [`send_to`](`UdpSocket::send_to`) /// and [`recv_from`](`UdpSocket::recv_from`) to communicate with many different addresses /// * one to one: [`connect`](`UdpSocket::connect`) and associate with a single address, using [`send`](`UdpSocket::send`) /// and [`recv`](`UdpSocket::recv`) to communicate only with that remote address /// /// This type does not provide a `split` method, because this functionality /// can be achieved by instead wrapping the socket in an [`Arc`]. Note that /// you do not need a `Mutex` to share the `UdpSocket` — an `Arc<UdpSocket>` /// is enough. This is because all of the methods take `&self` instead of /// `&mut self`. Once you have wrapped it in an `Arc`, you can call /// `.clone()` on the `Arc<UdpSocket>` to get multiple shared handles to the /// same socket. An example of such usage can be found further down. /// /// [`Arc`]: std::sync::Arc /// /// # Streams /// /// If you need to listen over UDP and produce a [`Stream`], you can look /// at [`UdpFramed`]. 
/// /// [`UdpFramed`]: https://docs.rs/tokio-util/latest/tokio_util/udp/struct.UdpFramed.html /// [`Stream`]: https://docs.rs/futures/0.3/futures/stream/trait.Stream.html /// /// # Example: one to many (bind) /// /// Using `bind` we can create a simple echo server that sends and recv's with many different clients: /// ```no_run /// use tokio::net::UdpSocket; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let sock = UdpSocket::bind("0.0.0.0:8080").await?; /// let mut buf = [0; 1024]; /// loop { /// let (len, addr) = sock.recv_from(&mut buf).await?; /// println!("{:?} bytes received from {:?}", len, addr); /// /// let len = sock.send_to(&buf[..len], addr).await?; /// println!("{:?} bytes sent", len); /// } /// } /// ``` /// /// # Example: one to one (connect) /// /// Or using `connect` we can echo with a single remote address using `send` and `recv`: /// ```no_run /// use tokio::net::UdpSocket; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let sock = UdpSocket::bind("0.0.0.0:8080").await?; /// /// let remote_addr = "127.0.0.1:59611"; /// sock.connect(remote_addr).await?; /// let mut buf = [0; 1024]; /// loop { /// let len = sock.recv(&mut buf).await?; /// println!("{:?} bytes received from {:?}", len, remote_addr); /// /// let len = sock.send(&buf[..len]).await?; /// println!("{:?} bytes sent", len); /// } /// } /// ``` /// /// # Example: Splitting with `Arc` /// /// Because `send_to` and `recv_from` take `&self`. It's perfectly alright /// to use an `Arc<UdpSocket>` and share the references to multiple tasks. 
/// Here is a similar "echo" example that supports concurrent /// sending/receiving: /// /// ```no_run /// use tokio::{net::UdpSocket, sync::mpsc}; /// use std::{io, net::SocketAddr, sync::Arc}; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let sock = UdpSocket::bind("0.0.0.0:8080".parse::<SocketAddr>().unwrap()).await?; /// let r = Arc::new(sock); /// let s = r.clone(); /// let (tx, mut rx) = mpsc::channel::<(Vec<u8>, SocketAddr)>(1_000); /// /// tokio::spawn(async move { /// while let Some((bytes, addr)) = rx.recv().await { /// let len = s.send_to(&bytes, &addr).await.unwrap(); /// println!("{:?} bytes sent", len); /// } /// }); /// /// let mut buf = [0; 1024]; /// loop { /// let (len, addr) = r.recv_from(&mut buf).await?; /// println!("{:?} bytes received from {:?}", len, addr); /// tx.send((buf[..len].to_vec(), addr)).await.unwrap(); /// } /// } /// ``` /// pub struct UdpSocket { io: PollEvented<mio::net::UdpSocket>, } } impl UdpSocket { /// This function will create a new UDP socket and attempt to bind it to /// the `addr` provided. /// /// Binding with a port number of 0 will request that the OS assigns a port /// to this listener. The port allocated can be queried via the `local_addr` /// method. /// /// # Example /// /// ```no_run /// use tokio::net::UdpSocket; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// # if cfg!(miri) { return Ok(()); } // No `socket` in miri. 
/// let sock = UdpSocket::bind("0.0.0.0:8080").await?; /// // use `sock` /// # let _ = sock; /// Ok(()) /// } /// ``` pub async fn bind<A: ToSocketAddrs>(addr: A) -> io::Result<UdpSocket> { let addrs = to_socket_addrs(addr).await?; let mut last_err = None; for addr in addrs { match UdpSocket::bind_addr(addr) { Ok(socket) => return Ok(socket), Err(e) => last_err = Some(e), } } Err(last_err.unwrap_or_else(|| { io::Error::new( io::ErrorKind::InvalidInput, "could not resolve to any address", ) })) } fn bind_addr(addr: SocketAddr) -> io::Result<UdpSocket> { let sys = mio::net::UdpSocket::bind(addr)?; UdpSocket::new(sys) } #[track_caller] fn new(socket: mio::net::UdpSocket) -> io::Result<UdpSocket> { let io = PollEvented::new(socket)?; Ok(UdpSocket { io }) } /// Creates new `UdpSocket` from a previously bound `std::net::UdpSocket`. /// /// This function is intended to be used to wrap a UDP socket from the /// standard library in the Tokio equivalent. /// /// This can be used in conjunction with `socket2`'s `Socket` interface to /// configure a socket before it's handed off, such as setting options like /// `reuse_address` or binding to multiple addresses. /// /// # Notes /// /// The caller is responsible for ensuring that the socket is in /// non-blocking mode. Otherwise all I/O operations on the socket /// will block the thread, which will cause unexpected behavior. /// Non-blocking mode can be set using [`set_nonblocking`]. /// /// Passing a listener in blocking mode is always erroneous, /// and the behavior in that case may change in the future. /// For example, it could panic. /// /// [`set_nonblocking`]: std::net::UdpSocket::set_nonblocking /// /// # Panics /// /// This function panics if thread-local runtime is not set. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. 
/// /// # Example /// /// ```no_run /// use tokio::net::UdpSocket; /// # use std::{io, net::SocketAddr}; /// /// # #[tokio::main] /// # async fn main() -> io::Result<()> { /// let addr = "0.0.0.0:8080".parse::<SocketAddr>().unwrap(); /// let std_sock = std::net::UdpSocket::bind(addr)?; /// std_sock.set_nonblocking(true)?; /// let sock = UdpSocket::from_std(std_sock)?; /// // use `sock` /// # Ok(()) /// # } /// ``` #[track_caller] pub fn from_std(socket: net::UdpSocket) -> io::Result<UdpSocket> { check_socket_for_blocking(&socket)?; let io = mio::net::UdpSocket::from_std(socket); UdpSocket::new(io) } /// Turns a [`tokio::net::UdpSocket`] into a [`std::net::UdpSocket`]. /// /// The returned [`std::net::UdpSocket`] will have nonblocking mode set as /// `true`. Use [`set_nonblocking`] to change the blocking mode if needed. /// /// # Examples /// /// ```rust,no_run /// use std::error::Error; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let tokio_socket = tokio::net::UdpSocket::bind("127.0.0.1:0").await?; /// let std_socket = tokio_socket.into_std()?; /// std_socket.set_nonblocking(false)?; /// Ok(()) /// } /// ``` /// /// [`tokio::net::UdpSocket`]: UdpSocket /// [`std::net::UdpSocket`]: std::net::UdpSocket /// [`set_nonblocking`]: fn@std::net::UdpSocket::set_nonblocking pub fn into_std(self) -> io::Result<std::net::UdpSocket> { #[cfg(unix)] { use std::os::unix::io::{FromRawFd, IntoRawFd}; self.io .into_inner() .map(IntoRawFd::into_raw_fd) .map(|raw_fd| unsafe { std::net::UdpSocket::from_raw_fd(raw_fd) }) } #[cfg(windows)] { use std::os::windows::io::{FromRawSocket, IntoRawSocket}; self.io .into_inner() .map(|io| io.into_raw_socket()) .map(|raw_socket| unsafe { std::net::UdpSocket::from_raw_socket(raw_socket) }) } } fn as_socket(&self) -> socket2::SockRef<'_> { socket2::SockRef::from(self) } /// Returns the local address that this socket is bound to. 
/// /// # Example /// /// ```no_run /// use tokio::net::UdpSocket; /// # use std::{io, net::SocketAddr}; /// /// # #[tokio::main] /// # async fn main() -> io::Result<()> { /// let addr = "0.0.0.0:8080".parse::<SocketAddr>().unwrap(); /// let sock = UdpSocket::bind(addr).await?; /// // the address the socket is bound to /// let local_addr = sock.local_addr()?; /// # Ok(()) /// # } /// ``` pub fn local_addr(&self) -> io::Result<SocketAddr> { self.io.local_addr() } /// Returns the socket address of the remote peer this socket was connected to. /// /// # Example /// /// ``` /// use tokio::net::UdpSocket; /// /// # use std::{io, net::SocketAddr}; /// # #[tokio::main] /// # async fn main() -> io::Result<()> { /// # if cfg!(miri) { return Ok(()); } // No `socket` in miri. /// let addr = "0.0.0.0:8080".parse::<SocketAddr>().unwrap(); /// let peer = "127.0.0.1:11100".parse::<SocketAddr>().unwrap(); /// let sock = UdpSocket::bind(addr).await?; /// sock.connect(peer).await?; /// assert_eq!(peer, sock.peer_addr()?); /// # Ok(()) /// # } /// ``` pub fn peer_addr(&self) -> io::Result<SocketAddr> { self.io.peer_addr() } /// Connects the UDP socket setting the default destination for send() and /// limiting packets that are read via `recv` from the address specified in /// `addr`. 
/// /// # Example /// /// ```no_run /// use tokio::net::UdpSocket; /// # use std::{io, net::SocketAddr}; /// /// # #[tokio::main] /// # async fn main() -> io::Result<()> { /// let sock = UdpSocket::bind("0.0.0.0:8080".parse::<SocketAddr>().unwrap()).await?; /// /// let remote_addr = "127.0.0.1:59600".parse::<SocketAddr>().unwrap(); /// sock.connect(remote_addr).await?; /// let mut buf = [0u8; 32]; /// // recv from remote_addr /// let len = sock.recv(&mut buf).await?; /// // send to remote_addr /// let _len = sock.send(&buf[..len]).await?; /// # Ok(()) /// # } /// ``` pub async fn connect<A: ToSocketAddrs>(&self, addr: A) -> io::Result<()> { let addrs = to_socket_addrs(addr).await?; let mut last_err = None; for addr in addrs { match self.io.connect(addr) { Ok(()) => return Ok(()), Err(e) => last_err = Some(e), } } Err(last_err.unwrap_or_else(|| { io::Error::new( io::ErrorKind::InvalidInput, "could not resolve to any address", ) })) } /// Waits for any of the requested ready states. /// /// This function is usually paired with `try_recv()` or `try_send()`. It /// can be used to concurrently `recv` / `send` to the same socket on a single /// task without splitting the socket. /// /// The function may complete without the socket being ready. This is a /// false-positive and attempting an operation will return with /// `io::ErrorKind::WouldBlock`. The function can also return with an empty /// [`Ready`] set, so you should always check the returned value and possibly /// wait again if the requested states are not set. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read or write that fails with `WouldBlock` or /// `Poll::Pending`. /// /// # Examples /// /// Concurrently receive from and send to the socket on the same task /// without splitting. 
/// /// ```no_run /// use tokio::io::{self, Interest}; /// use tokio::net::UdpSocket; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; /// socket.connect("127.0.0.1:8081").await?; /// /// loop { /// let ready = socket.ready(Interest::READABLE | Interest::WRITABLE).await?; /// /// if ready.is_readable() { /// // The buffer is **not** included in the async task and will only exist /// // on the stack. /// let mut data = [0; 1024]; /// match socket.try_recv(&mut data[..]) { /// Ok(n) => { /// println!("received {:?}", &data[..n]); /// } /// // False-positive, continue /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} /// Err(e) => { /// return Err(e); /// } /// } /// } /// /// if ready.is_writable() { /// // Write some data /// match socket.try_send(b"hello world") { /// Ok(n) => { /// println!("sent {} bytes", n); /// } /// // False-positive, continue /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} /// Err(e) => { /// return Err(e); /// } /// } /// } /// } /// } /// ``` pub async fn ready(&self, interest: Interest) -> io::Result<Ready> { let event = self.io.registration().readiness(interest).await?; Ok(event.ready) } /// Waits for the socket to become writable. /// /// This function is equivalent to `ready(Interest::WRITABLE)` and is /// usually paired with `try_send()` or `try_send_to()`. /// /// The function may complete without the socket being writable. This is a /// false-positive and attempting a `try_send()` will return with /// `io::ErrorKind::WouldBlock`. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to write that fails with `WouldBlock` or /// `Poll::Pending`. 
/// /// # Examples /// /// ```no_run /// use tokio::net::UdpSocket; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// // Bind socket /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; /// socket.connect("127.0.0.1:8081").await?; /// /// loop { /// // Wait for the socket to be writable /// socket.writable().await?; /// /// // Try to send data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match socket.try_send(b"hello world") { /// Ok(n) => { /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub async fn writable(&self) -> io::Result<()> { self.ready(Interest::WRITABLE).await?; Ok(()) } /// Polls for write/send readiness. /// /// If the udp stream is not currently ready for sending, this method will /// store a clone of the `Waker` from the provided `Context`. When the udp /// stream becomes ready for sending, `Waker::wake` will be called on the /// waker. /// /// Note that on multiple calls to `poll_send_ready` or `poll_send`, only /// the `Waker` from the `Context` passed to the most recent call is /// scheduled to receive a wakeup. (However, `poll_recv_ready` retains a /// second, independent waker.) /// /// This function is intended for cases where creating and pinning a future /// via [`writable`] is not feasible. Where possible, using [`writable`] is /// preferred, as this supports polling from multiple tasks at once. /// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if the udp stream is not ready for writing. /// * `Poll::Ready(Ok(()))` if the udp stream is ready for writing. /// * `Poll::Ready(Err(e))` if an error is encountered. /// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. 
/// /// [`writable`]: method@Self::writable pub fn poll_send_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.io.registration().poll_write_ready(cx).map_ok(|_| ()) } /// Sends data on the socket to the remote address that the socket is /// connected to. /// /// The [`connect`] method will connect this socket to a remote address. /// This method will fail if the socket is not connected. /// /// [`connect`]: method@Self::connect /// /// # Return /// /// On success, the number of bytes sent is returned, otherwise, the /// encountered error is returned. /// /// # Cancel safety /// /// This method is cancel safe. If `send` is used as the event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, then it is guaranteed that the message was not sent. /// /// # Examples /// /// ```no_run /// use tokio::io; /// use tokio::net::UdpSocket; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// // Bind socket /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; /// socket.connect("127.0.0.1:8081").await?; /// /// // Send a message /// socket.send(b"hello world").await?; /// /// Ok(()) /// } /// ``` pub async fn send(&self, buf: &[u8]) -> io::Result<usize> { self.io .registration() .async_io(Interest::WRITABLE, || self.io.send(buf)) .await } /// Attempts to send data on the socket to the remote address to which it /// was previously `connect`ed. /// /// The [`connect`] method will connect this socket to a remote address. /// This method will fail if the socket is not connected. /// /// Note that on multiple calls to a `poll_*` method in the send direction, /// only the `Waker` from the `Context` passed to the most recent call will /// be scheduled to receive a wakeup. /// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if the socket is not available to write /// * `Poll::Ready(Ok(n))` `n` is the number of bytes sent /// * `Poll::Ready(Err(e))` if an error is encountered. 
/// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. /// /// [`connect`]: method@Self::connect pub fn poll_send(&self, cx: &mut Context<'_>, buf: &[u8]) -> Poll<io::Result<usize>> { self.io .registration() .poll_write_io(cx, || self.io.send(buf)) } /// Tries to send data on the socket to the remote address to which it is /// connected. /// /// When the socket buffer is full, `Err(io::ErrorKind::WouldBlock)` is /// returned. This function is usually paired with `writable()`. /// /// # Returns /// /// If successful, `Ok(n)` is returned, where `n` is the number of bytes /// sent. If the socket is not ready to send data, /// `Err(ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::UdpSocket; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// // Bind a UDP socket /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; /// /// // Connect to a peer /// socket.connect("127.0.0.1:8081").await?; /// /// loop { /// // Wait for the socket to be writable /// socket.writable().await?; /// /// // Try to send data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match socket.try_send(b"hello world") { /// Ok(n) => { /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_send(&self, buf: &[u8]) -> io::Result<usize> { self.io .registration() .try_io(Interest::WRITABLE, || self.io.send(buf)) } /// Waits for the socket to become readable. /// /// This function is equivalent to `ready(Interest::READABLE)` and is usually /// paired with `try_recv()`. /// /// The function may complete without the socket being readable. This is a /// false-positive and attempting a `try_recv()` will return with /// `io::ErrorKind::WouldBlock`. /// /// # Cancel safety /// /// This method is cancel safe. 
Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read that fails with `WouldBlock` or /// `Poll::Pending`. /// /// # Examples /// /// ```no_run /// use tokio::net::UdpSocket; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// // Connect to a peer /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; /// socket.connect("127.0.0.1:8081").await?; /// /// loop { /// // Wait for the socket to be readable /// socket.readable().await?; /// /// // The buffer is **not** included in the async task and will /// // only exist on the stack. /// let mut buf = [0; 1024]; /// /// // Try to recv data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match socket.try_recv(&mut buf) { /// Ok(n) => { /// println!("GOT {:?}", &buf[..n]); /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub async fn readable(&self) -> io::Result<()> { self.ready(Interest::READABLE).await?; Ok(()) } /// Polls for read/receive readiness. /// /// If the udp stream is not currently ready for receiving, this method will /// store a clone of the `Waker` from the provided `Context`. When the udp /// socket becomes ready for reading, `Waker::wake` will be called on the /// waker. /// /// Note that on multiple calls to `poll_recv_ready`, `poll_recv` or /// `poll_peek`, only the `Waker` from the `Context` passed to the most /// recent call is scheduled to receive a wakeup. (However, /// `poll_send_ready` retains a second, independent waker.) /// /// This function is intended for cases where creating and pinning a future /// via [`readable`] is not feasible. Where possible, using [`readable`] is /// preferred, as this supports polling from multiple tasks at once. 
/// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if the udp stream is not ready for reading. /// * `Poll::Ready(Ok(()))` if the udp stream is ready for reading. /// * `Poll::Ready(Err(e))` if an error is encountered. /// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. /// /// [`readable`]: method@Self::readable pub fn poll_recv_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.io.registration().poll_read_ready(cx).map_ok(|_| ()) } /// Receives a single datagram message on the socket from the remote address /// to which it is connected. On success, returns the number of bytes read. /// /// The function must be called with valid byte array `buf` of sufficient /// size to hold the message bytes. If a message is too long to fit in the /// supplied buffer, excess bytes may be discarded. /// /// The [`connect`] method will connect this socket to a remote address. /// This method will fail if the socket is not connected. /// /// # Cancel safety /// /// This method is cancel safe. If `recv` is used as the event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, it is guaranteed that no messages were received on this /// socket. 
/// /// [`connect`]: method@Self::connect /// /// ```no_run /// use tokio::net::UdpSocket; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// // Bind socket /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; /// socket.connect("127.0.0.1:8081").await?; /// /// let mut buf = vec![0; 10]; /// let n = socket.recv(&mut buf).await?; /// /// println!("received {} bytes {:?}", n, &buf[..n]); /// /// Ok(()) /// } /// ``` pub async fn recv(&self, buf: &mut [u8]) -> io::Result<usize> { self.io .registration() .async_io(Interest::READABLE, || self.io.recv(buf)) .await } /// Attempts to receive a single datagram message on the socket from the remote /// address to which it is `connect`ed. /// /// The [`connect`] method will connect this socket to a remote address. This method /// resolves to an error if the socket is not connected. /// /// Note that on multiple calls to a `poll_*` method in the `recv` direction, only the /// `Waker` from the `Context` passed to the most recent call will be scheduled to /// receive a wakeup. /// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if the socket is not ready to read /// * `Poll::Ready(Ok(()))` reads data `ReadBuf` if the socket is ready /// * `Poll::Ready(Err(e))` if an error is encountered. /// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. /// /// [`connect`]: method@Self::connect pub fn poll_recv(&self, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<io::Result<()>> { #[allow(clippy::blocks_in_conditions)] let n = ready!(self.io.registration().poll_read_io(cx, || { // Safety: will not read the maybe uninitialized bytes. let b = unsafe { &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit<u8>] as *mut [u8]) }; self.io.recv(b) }))?; // Safety: We trust `recv` to have filled up `n` bytes in the buffer. 
unsafe { buf.assume_init(n); } buf.advance(n); Poll::Ready(Ok(())) } /// Tries to receive a single datagram message on the socket from the remote /// address to which it is connected. On success, returns the number of /// bytes read. /// /// This method must be called with valid byte array `buf` of sufficient size /// to hold the message bytes. If a message is too long to fit in the /// supplied buffer, excess bytes may be discarded. /// /// When there is no pending data, `Err(io::ErrorKind::WouldBlock)` is /// returned. This function is usually paired with `readable()`. /// /// # Examples /// /// ```no_run /// use tokio::net::UdpSocket; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// // Connect to a peer /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; /// socket.connect("127.0.0.1:8081").await?; /// /// loop { /// // Wait for the socket to be readable /// socket.readable().await?; /// /// // The buffer is **not** included in the async task and will /// // only exist on the stack. /// let mut buf = [0; 1024]; /// /// // Try to recv data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match socket.try_recv(&mut buf) { /// Ok(n) => { /// println!("GOT {:?}", &buf[..n]); /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_recv(&self, buf: &mut [u8]) -> io::Result<usize> { self.io .registration() .try_io(Interest::READABLE, || self.io.recv(buf)) } cfg_io_util! { /// Tries to receive data from the stream into the provided buffer, advancing the /// buffer's internal cursor, returning how many bytes were read. /// /// This method must be called with valid byte array `buf` of sufficient size /// to hold the message bytes. If a message is too long to fit in the /// supplied buffer, excess bytes may be discarded. 
/// /// This method can be used even if `buf` is uninitialized. /// /// When there is no pending data, `Err(io::ErrorKind::WouldBlock)` is /// returned. This function is usually paired with `readable()`. /// /// # Examples /// /// ```no_run /// use tokio::net::UdpSocket; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// // Connect to a peer
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
true
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/mod.rs
tokio/src/net/mod.rs
#![cfg(not(loom))] //! TCP/UDP/Unix bindings for `tokio`. //! //! This module contains the TCP/UDP/Unix networking types, similar to the standard //! library, which can be used to implement networking protocols. //! //! # Organization //! //! * [`TcpListener`] and [`TcpStream`] provide functionality for communication over TCP //! * [`UdpSocket`] provides functionality for communication over UDP //! * [`UnixListener`] and [`UnixStream`] provide functionality for communication over a //! Unix Domain Stream Socket **(available on Unix only)** //! * [`UnixDatagram`] provides functionality for communication //! over Unix Domain Datagram Socket **(available on Unix only)** //! * [`tokio::net::unix::pipe`] for FIFO pipes **(available on Unix only)** //! * [`tokio::net::windows::named_pipe`] for Named Pipes **(available on Windows only)** //! //! For IO resources not available in `tokio::net`, you can use [`AsyncFd`]. //! //! [`TcpListener`]: TcpListener //! [`TcpStream`]: TcpStream //! [`UdpSocket`]: UdpSocket //! [`UnixListener`]: UnixListener //! [`UnixStream`]: UnixStream //! [`UnixDatagram`]: UnixDatagram //! [`tokio::net::unix::pipe`]: unix::pipe //! [`tokio::net::windows::named_pipe`]: windows::named_pipe //! [`AsyncFd`]: crate::io::unix::AsyncFd mod addr; cfg_not_wasi! { #[cfg(feature = "net")] pub(crate) use addr::to_socket_addrs; } pub use addr::ToSocketAddrs; cfg_net! { mod lookup_host; pub use lookup_host::lookup_host; pub mod tcp; pub use tcp::listener::TcpListener; pub use tcp::stream::TcpStream; cfg_not_wasi! { pub use tcp::socket::TcpSocket; mod udp; #[doc(inline)] pub use udp::UdpSocket; } } cfg_net_unix! { pub mod unix; pub use unix::datagram::socket::UnixDatagram; pub use unix::listener::UnixListener; pub use unix::stream::UnixStream; pub use unix::socket::UnixSocket; } cfg_net_windows! { pub mod windows; }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/addr.rs
tokio/src/net/addr.rs
use std::future; use std::io; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; /// Converts or resolves without blocking to one or more `SocketAddr` values. /// /// # DNS /// /// Implementations of `ToSocketAddrs` for string types require a DNS lookup. /// /// # Calling /// /// Currently, this trait is only used as an argument to Tokio functions that /// need to reference a target socket address. To perform a `SocketAddr` /// conversion directly, use [`lookup_host()`](super::lookup_host()). /// /// This trait is sealed and is intended to be opaque. The details of the trait /// will change. Stabilization is pending enhancements to the Rust language. pub trait ToSocketAddrs: sealed::ToSocketAddrsPriv {} type ReadyFuture<T> = future::Ready<io::Result<T>>; cfg_net! { pub(crate) fn to_socket_addrs<T>(arg: T) -> T::Future where T: ToSocketAddrs, { arg.to_socket_addrs(sealed::Internal) } } // ===== impl &impl ToSocketAddrs ===== impl<T: ToSocketAddrs + ?Sized> ToSocketAddrs for &T {} impl<T> sealed::ToSocketAddrsPriv for &T where T: sealed::ToSocketAddrsPriv + ?Sized, { type Iter = T::Iter; type Future = T::Future; fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { (**self).to_socket_addrs(sealed::Internal) } } // ===== impl SocketAddr ===== impl ToSocketAddrs for SocketAddr {} impl sealed::ToSocketAddrsPriv for SocketAddr { type Iter = std::option::IntoIter<SocketAddr>; type Future = ReadyFuture<Self::Iter>; fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { let iter = Some(*self).into_iter(); future::ready(Ok(iter)) } } // ===== impl SocketAddrV4 ===== impl ToSocketAddrs for SocketAddrV4 {} impl sealed::ToSocketAddrsPriv for SocketAddrV4 { type Iter = std::option::IntoIter<SocketAddr>; type Future = ReadyFuture<Self::Iter>; fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { SocketAddr::V4(*self).to_socket_addrs(sealed::Internal) } } // ===== impl SocketAddrV6 ===== impl ToSocketAddrs for 
SocketAddrV6 {} impl sealed::ToSocketAddrsPriv for SocketAddrV6 { type Iter = std::option::IntoIter<SocketAddr>; type Future = ReadyFuture<Self::Iter>; fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { SocketAddr::V6(*self).to_socket_addrs(sealed::Internal) } } // ===== impl (IpAddr, u16) ===== impl ToSocketAddrs for (IpAddr, u16) {} impl sealed::ToSocketAddrsPriv for (IpAddr, u16) { type Iter = std::option::IntoIter<SocketAddr>; type Future = ReadyFuture<Self::Iter>; fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { let iter = Some(SocketAddr::from(*self)).into_iter(); future::ready(Ok(iter)) } } // ===== impl (Ipv4Addr, u16) ===== impl ToSocketAddrs for (Ipv4Addr, u16) {} impl sealed::ToSocketAddrsPriv for (Ipv4Addr, u16) { type Iter = std::option::IntoIter<SocketAddr>; type Future = ReadyFuture<Self::Iter>; fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { let (ip, port) = *self; SocketAddrV4::new(ip, port).to_socket_addrs(sealed::Internal) } } // ===== impl (Ipv6Addr, u16) ===== impl ToSocketAddrs for (Ipv6Addr, u16) {} impl sealed::ToSocketAddrsPriv for (Ipv6Addr, u16) { type Iter = std::option::IntoIter<SocketAddr>; type Future = ReadyFuture<Self::Iter>; fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { let (ip, port) = *self; SocketAddrV6::new(ip, port, 0, 0).to_socket_addrs(sealed::Internal) } } // ===== impl &[SocketAddr] ===== impl ToSocketAddrs for &[SocketAddr] {} impl sealed::ToSocketAddrsPriv for &[SocketAddr] { type Iter = std::vec::IntoIter<SocketAddr>; type Future = ReadyFuture<Self::Iter>; fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { #[inline] fn slice_to_vec(addrs: &[SocketAddr]) -> Vec<SocketAddr> { addrs.to_vec() } // This uses a helper method because clippy doesn't like the `to_vec()` // call here (it will allocate, whereas `self.iter().copied()` would // not), but it's actually necessary in order to ensure that the // returned iterator is valid for the `'static` 
lifetime, which the // borrowed `slice::Iter` iterator would not be. // // Note that we can't actually add an `allow` attribute for // `clippy::unnecessary_to_owned` here, as Tokio's CI runs clippy lints // on Rust 1.52 to avoid breaking LTS releases of Tokio. Users of newer // Rust versions who see this lint should just ignore it. let iter = slice_to_vec(self).into_iter(); future::ready(Ok(iter)) } } cfg_net! { // ===== impl str ===== impl ToSocketAddrs for str {} impl sealed::ToSocketAddrsPriv for str { type Iter = sealed::OneOrMore; type Future = sealed::MaybeReady; fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { use crate::blocking::spawn_blocking; use sealed::MaybeReady; // First check if the input parses as a socket address let res: Result<SocketAddr, _> = self.parse(); if let Ok(addr) = res { return MaybeReady(sealed::State::Ready(Some(addr))); } // Run DNS lookup on the blocking pool let s = self.to_owned(); MaybeReady(sealed::State::Blocking(spawn_blocking(move || { std::net::ToSocketAddrs::to_socket_addrs(&s) }))) } } // ===== impl (&str, u16) ===== impl ToSocketAddrs for (&str, u16) {} impl sealed::ToSocketAddrsPriv for (&str, u16) { type Iter = sealed::OneOrMore; type Future = sealed::MaybeReady; fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { use crate::blocking::spawn_blocking; use sealed::MaybeReady; let (host, port) = *self; // try to parse the host as a regular IP address first if let Ok(addr) = host.parse::<Ipv4Addr>() { let addr = SocketAddrV4::new(addr, port); let addr = SocketAddr::V4(addr); return MaybeReady(sealed::State::Ready(Some(addr))); } if let Ok(addr) = host.parse::<Ipv6Addr>() { let addr = SocketAddrV6::new(addr, port, 0, 0); let addr = SocketAddr::V6(addr); return MaybeReady(sealed::State::Ready(Some(addr))); } let host = host.to_owned(); MaybeReady(sealed::State::Blocking(spawn_blocking(move || { std::net::ToSocketAddrs::to_socket_addrs(&(&host[..], port)) }))) } } // ===== impl (String, u16) ===== 
impl ToSocketAddrs for (String, u16) {} impl sealed::ToSocketAddrsPriv for (String, u16) { type Iter = sealed::OneOrMore; type Future = sealed::MaybeReady; fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { (self.0.as_str(), self.1).to_socket_addrs(sealed::Internal) } } // ===== impl String ===== impl ToSocketAddrs for String {} impl sealed::ToSocketAddrsPriv for String { type Iter = <str as sealed::ToSocketAddrsPriv>::Iter; type Future = <str as sealed::ToSocketAddrsPriv>::Future; fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { self[..].to_socket_addrs(sealed::Internal) } } } pub(crate) mod sealed { //! The contents of this trait are intended to remain private and __not__ //! part of the `ToSocketAddrs` public API. The details will change over //! time. use std::future::Future; use std::io; use std::net::SocketAddr; #[doc(hidden)] pub trait ToSocketAddrsPriv { type Iter: Iterator<Item = SocketAddr> + Send + 'static; type Future: Future<Output = io::Result<Self::Iter>> + Send + 'static; fn to_socket_addrs(&self, internal: Internal) -> Self::Future; } #[allow(missing_debug_implementations)] pub struct Internal; cfg_net! 
{ use crate::blocking::JoinHandle; use std::option; use std::pin::Pin; use std::task::{ready,Context, Poll}; use std::vec; #[doc(hidden)] #[derive(Debug)] pub struct MaybeReady(pub(super) State); #[derive(Debug)] pub(super) enum State { Ready(Option<SocketAddr>), Blocking(JoinHandle<io::Result<vec::IntoIter<SocketAddr>>>), } #[doc(hidden)] #[derive(Debug)] pub enum OneOrMore { One(option::IntoIter<SocketAddr>), More(vec::IntoIter<SocketAddr>), } impl Future for MaybeReady { type Output = io::Result<OneOrMore>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { match self.0 { State::Ready(ref mut i) => { let iter = OneOrMore::One(i.take().into_iter()); Poll::Ready(Ok(iter)) } State::Blocking(ref mut rx) => { let res = ready!(Pin::new(rx).poll(cx))?.map(OneOrMore::More); Poll::Ready(res) } } } } impl Iterator for OneOrMore { type Item = SocketAddr; fn next(&mut self) -> Option<Self::Item> { match self { OneOrMore::One(i) => i.next(), OneOrMore::More(i) => i.next(), } } fn size_hint(&self) -> (usize, Option<usize>) { match self { OneOrMore::One(i) => i.size_hint(), OneOrMore::More(i) => i.size_hint(), } } } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/tcp/stream.rs
tokio/src/net/tcp/stream.rs
cfg_not_wasi! { use crate::net::{to_socket_addrs, ToSocketAddrs}; use std::future::poll_fn; use std::time::Duration; } use crate::io::{AsyncRead, AsyncWrite, Interest, PollEvented, ReadBuf, Ready}; use crate::net::tcp::split::{split, ReadHalf, WriteHalf}; use crate::net::tcp::split_owned::{split_owned, OwnedReadHalf, OwnedWriteHalf}; use crate::util::check_socket_for_blocking; use std::fmt; use std::io; use std::net::{Shutdown, SocketAddr}; use std::pin::Pin; use std::task::{ready, Context, Poll}; cfg_io_util! { use bytes::BufMut; } cfg_net! { /// A TCP stream between a local and a remote socket. /// /// A TCP stream can either be created by connecting to an endpoint, via the /// [`connect`] method, or by [accepting] a connection from a [listener]. A /// TCP stream can also be created via the [`TcpSocket`] type. /// /// Reading and writing to a `TcpStream` is usually done using the /// convenience methods found on the [`AsyncReadExt`] and [`AsyncWriteExt`] /// traits. /// /// [`connect`]: method@TcpStream::connect /// [accepting]: method@crate::net::TcpListener::accept /// [listener]: struct@crate::net::TcpListener /// [`TcpSocket`]: struct@crate::net::TcpSocket /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use tokio::io::AsyncWriteExt; /// use std::error::Error; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// // Write some data. /// stream.write_all(b"hello world!").await?; /// /// Ok(()) /// } /// ``` /// /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. /// /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt /// /// To shut down the stream in the write direction, you can call the /// [`shutdown()`] method. 
This will cause the other peer to receive a read of /// length 0, indicating that no more data will be sent. This only closes /// the stream in one direction. /// /// [`shutdown()`]: fn@crate::io::AsyncWriteExt::shutdown pub struct TcpStream { io: PollEvented<mio::net::TcpStream>, } } impl TcpStream { cfg_not_wasi! { /// Opens a TCP connection to a remote host. /// /// `addr` is an address of the remote host. Anything which implements the /// [`ToSocketAddrs`] trait can be supplied as the address. If `addr` /// yields multiple addresses, connect will be attempted with each of the /// addresses until a connection is successful. If none of the addresses /// result in a successful connection, the error returned from the last /// connection attempt (the last address) is returned. /// /// To configure the socket before connecting, you can use the [`TcpSocket`] /// type. /// /// [`ToSocketAddrs`]: trait@crate::net::ToSocketAddrs /// [`TcpSocket`]: struct@crate::net::TcpSocket /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use tokio::io::AsyncWriteExt; /// use std::error::Error; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// // Write some data. /// stream.write_all(b"hello world!").await?; /// /// Ok(()) /// } /// ``` /// /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. 
/// /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt pub async fn connect<A: ToSocketAddrs>(addr: A) -> io::Result<TcpStream> { let addrs = to_socket_addrs(addr).await?; let mut last_err = None; for addr in addrs { match TcpStream::connect_addr(addr).await { Ok(stream) => return Ok(stream), Err(e) => last_err = Some(e), } } Err(last_err.unwrap_or_else(|| { io::Error::new( io::ErrorKind::InvalidInput, "could not resolve to any address", ) })) } /// Establishes a connection to the specified `addr`. async fn connect_addr(addr: SocketAddr) -> io::Result<TcpStream> { let sys = mio::net::TcpStream::connect(addr)?; TcpStream::connect_mio(sys).await } pub(crate) async fn connect_mio(sys: mio::net::TcpStream) -> io::Result<TcpStream> { let stream = TcpStream::new(sys)?; // Once we've connected, wait for the stream to be writable as // that's when the actual connection has been initiated. Once we're // writable we check for `take_socket_error` to see if the connect // actually hit an error or not. // // If all that succeeded then we ship everything on up. poll_fn(|cx| stream.io.registration().poll_write_ready(cx)).await?; if let Some(e) = stream.io.take_error()? { return Err(e); } Ok(stream) } } pub(crate) fn new(connected: mio::net::TcpStream) -> io::Result<TcpStream> { let io = PollEvented::new(connected)?; Ok(TcpStream { io }) } /// Creates new `TcpStream` from a `std::net::TcpStream`. /// /// This function is intended to be used to wrap a TCP stream from the /// standard library in the Tokio equivalent. /// /// # Notes /// /// The caller is responsible for ensuring that the stream is in /// non-blocking mode. Otherwise all I/O operations on the stream /// will block the thread, which will cause unexpected behavior. /// Non-blocking mode can be set using [`set_nonblocking`]. /// /// Passing a listener in blocking mode is always erroneous, /// and the behavior in that case may change in the future. 
/// For example, it could panic. /// /// [`set_nonblocking`]: std::net::TcpStream::set_nonblocking /// /// # Examples /// /// ```rust,no_run /// use std::error::Error; /// use tokio::net::TcpStream; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let std_stream = std::net::TcpStream::connect("127.0.0.1:34254")?; /// std_stream.set_nonblocking(true)?; /// let stream = TcpStream::from_std(std_stream)?; /// Ok(()) /// } /// ``` /// /// # Panics /// /// This function panics if it is not called from within a runtime with /// IO enabled. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. #[track_caller] pub fn from_std(stream: std::net::TcpStream) -> io::Result<TcpStream> { check_socket_for_blocking(&stream)?; let io = mio::net::TcpStream::from_std(stream); let io = PollEvented::new(io)?; Ok(TcpStream { io }) } /// Turns a [`tokio::net::TcpStream`] into a [`std::net::TcpStream`]. /// /// The returned [`std::net::TcpStream`] will have nonblocking mode set as `true`. /// Use [`set_nonblocking`] to change the blocking mode if needed. /// /// # Examples /// /// ``` /// use std::error::Error; /// use std::io::Read; /// use tokio::net::TcpListener; /// # use tokio::net::TcpStream; /// # use tokio::io::AsyncWriteExt; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// # if cfg!(miri) { return Ok(()); } // No `socket` in miri. 
/// let mut data = [0u8; 12]; /// # if false { /// let listener = TcpListener::bind("127.0.0.1:34254").await?; /// # } /// # let listener = TcpListener::bind("127.0.0.1:0").await?; /// # let addr = listener.local_addr().unwrap(); /// # let handle = tokio::spawn(async move { /// # let mut stream: TcpStream = TcpStream::connect(addr).await.unwrap(); /// # stream.write_all(b"Hello world!").await.unwrap(); /// # }); /// let (tokio_tcp_stream, _) = listener.accept().await?; /// let mut std_tcp_stream = tokio_tcp_stream.into_std()?; /// # handle.await.expect("The task being joined has panicked"); /// std_tcp_stream.set_nonblocking(false)?; /// std_tcp_stream.read_exact(&mut data)?; /// # assert_eq!(b"Hello world!", &data); /// Ok(()) /// } /// ``` /// [`tokio::net::TcpStream`]: TcpStream /// [`std::net::TcpStream`]: std::net::TcpStream /// [`set_nonblocking`]: fn@std::net::TcpStream::set_nonblocking pub fn into_std(self) -> io::Result<std::net::TcpStream> { #[cfg(unix)] { use std::os::unix::io::{FromRawFd, IntoRawFd}; self.io .into_inner() .map(IntoRawFd::into_raw_fd) .map(|raw_fd| unsafe { std::net::TcpStream::from_raw_fd(raw_fd) }) } #[cfg(windows)] { use std::os::windows::io::{FromRawSocket, IntoRawSocket}; self.io .into_inner() .map(|io| io.into_raw_socket()) .map(|raw_socket| unsafe { std::net::TcpStream::from_raw_socket(raw_socket) }) } #[cfg(target_os = "wasi")] { use std::os::wasi::io::{FromRawFd, IntoRawFd}; self.io .into_inner() .map(|io| io.into_raw_fd()) .map(|raw_fd| unsafe { std::net::TcpStream::from_raw_fd(raw_fd) }) } } /// Returns the local address that this stream is bound to. 
/// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// println!("{:?}", stream.local_addr()?); /// # Ok(()) /// # } /// ``` pub fn local_addr(&self) -> io::Result<SocketAddr> { self.io.local_addr() } /// Returns the value of the `SO_ERROR` option. pub fn take_error(&self) -> io::Result<Option<io::Error>> { self.io.take_error() } /// Returns the remote address that this stream is connected to. /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// println!("{:?}", stream.peer_addr()?); /// # Ok(()) /// # } /// ``` pub fn peer_addr(&self) -> io::Result<SocketAddr> { self.io.peer_addr() } /// Attempts to receive data on the socket, without removing that data from /// the queue, registering the current task for wakeup if data is not yet /// available. /// /// Note that on multiple calls to `poll_peek`, `poll_read` or /// `poll_read_ready`, only the `Waker` from the `Context` passed to the /// most recent call is scheduled to receive a wakeup. (However, /// `poll_write` retains a second, independent waker.) /// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if data is not yet available. /// * `Poll::Ready(Ok(n))` if data is available. `n` is the number of bytes peeked. /// * `Poll::Ready(Err(e))` if an error is encountered. /// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. 
/// /// # Examples /// /// ```no_run /// use tokio::io::{self, ReadBuf}; /// use tokio::net::TcpStream; /// /// use std::future::poll_fn; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let stream = TcpStream::connect("127.0.0.1:8000").await?; /// let mut buf = [0; 10]; /// let mut buf = ReadBuf::new(&mut buf); /// /// poll_fn(|cx| { /// stream.poll_peek(cx, &mut buf) /// }).await?; /// /// Ok(()) /// } /// ``` pub fn poll_peek( &self, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<usize>> { loop { let ev = ready!(self.io.registration().poll_read_ready(cx))?; let b = unsafe { &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit<u8>] as *mut [u8]) }; match self.io.peek(b) { Ok(ret) => { unsafe { buf.assume_init(ret) }; buf.advance(ret); return Poll::Ready(Ok(ret)); } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { self.io.registration().clear_readiness(ev); } Err(e) => return Poll::Ready(Err(e)), } } } /// Waits for any of the requested ready states. /// /// This function is usually paired with `try_read()` or `try_write()`. It /// can be used to concurrently read / write to the same socket on a single /// task without splitting the socket. /// /// The function may complete without the socket being ready. This is a /// false-positive and attempting an operation will return with /// `io::ErrorKind::WouldBlock`. The function can also return with an empty /// [`Ready`] set, so you should always check the returned value and possibly /// wait again if the requested states are not set. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read or write that fails with `WouldBlock` or /// `Poll::Pending`. /// /// # Examples /// /// Concurrently read and write to the stream on the same task without /// splitting. 
/// /// ```no_run /// use tokio::io::Interest; /// use tokio::net::TcpStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// loop { /// let ready = stream.ready(Interest::READABLE | Interest::WRITABLE).await?; /// /// if ready.is_readable() { /// let mut data = vec![0; 1024]; /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_read(&mut data) { /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// /// } /// /// if ready.is_writable() { /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_write(b"hello world") { /// Ok(n) => { /// println!("write {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// } /// } /// ``` pub async fn ready(&self, interest: Interest) -> io::Result<Ready> { let event = self.io.registration().readiness(interest).await?; Ok(event.ready) } /// Waits for the socket to become readable. /// /// This function is equivalent to `ready(Interest::READABLE)` and is usually /// paired with `try_read()`. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read that fails with `WouldBlock` or /// `Poll::Pending`. 
/// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// let mut msg = vec![0; 1024]; /// /// loop { /// // Wait for the socket to be readable /// stream.readable().await?; /// /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_read(&mut msg) { /// Ok(n) => { /// msg.truncate(n); /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// println!("GOT = {:?}", msg); /// Ok(()) /// } /// ``` pub async fn readable(&self) -> io::Result<()> { self.ready(Interest::READABLE).await?; Ok(()) } /// Polls for read readiness. /// /// If the tcp stream is not currently ready for reading, this method will /// store a clone of the `Waker` from the provided `Context`. When the tcp /// stream becomes ready for reading, `Waker::wake` will be called on the /// waker. /// /// Note that on multiple calls to `poll_read_ready`, `poll_read` or /// `poll_peek`, only the `Waker` from the `Context` passed to the most /// recent call is scheduled to receive a wakeup. (However, /// `poll_write_ready` retains a second, independent waker.) /// /// This function is intended for cases where creating and pinning a future /// via [`readable`] is not feasible. Where possible, using [`readable`] is /// preferred, as this supports polling from multiple tasks at once. /// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if the tcp stream is not ready for reading. /// * `Poll::Ready(Ok(()))` if the tcp stream is ready for reading. /// * `Poll::Ready(Err(e))` if an error is encountered. 
/// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. /// /// [`readable`]: method@Self::readable pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.io.registration().poll_read_ready(cx).map_ok(|_| ()) } /// Tries to read data from the stream into the provided buffer, returning how /// many bytes were read. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read()` is non-blocking, the buffer does not have to be stored by /// the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`readable()`]: TcpStream::readable() /// [`ready()`]: TcpStream::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: /// /// 1. The stream's read half is closed and will no longer yield data. /// 2. The specified buffer was 0 bytes in length. /// /// If the stream is not ready to read data, /// `Err(io::ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// loop { /// // Wait for the socket to be readable /// stream.readable().await?; /// /// // Creating the buffer **after** the `await` prevents it from /// // being stored in the async task. /// let mut buf = [0; 4096]; /// /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. 
/// match stream.try_read(&mut buf) { /// Ok(0) => break, /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_read(&self, buf: &mut [u8]) -> io::Result<usize> { use std::io::Read; self.io .registration() .try_io(Interest::READABLE, || (&*self.io).read(buf)) } /// Tries to read data from the stream into the provided buffers, returning /// how many bytes were read. /// /// Data is copied to fill each buffer in order, with the final buffer /// written to possibly being only partially filled. This method behaves /// equivalently to a single call to [`try_read()`] with concatenated /// buffers. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read_vectored()` is non-blocking, the buffer does not have to be /// stored by the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`try_read()`]: TcpStream::try_read() /// [`readable()`]: TcpStream::readable() /// [`ready()`]: TcpStream::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. `Ok(0)` indicates the stream's read half is closed /// and will no longer yield data. If the stream is not ready to read data /// `Err(io::ErrorKind::WouldBlock)` is returned. 
/// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use std::error::Error; /// use std::io::{self, IoSliceMut}; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// loop { /// // Wait for the socket to be readable /// stream.readable().await?; /// /// // Creating the buffer **after** the `await` prevents it from /// // being stored in the async task. /// let mut buf_a = [0; 512]; /// let mut buf_b = [0; 1024]; /// let mut bufs = [ /// IoSliceMut::new(&mut buf_a), /// IoSliceMut::new(&mut buf_b), /// ]; /// /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_read_vectored(&mut bufs) { /// Ok(0) => break, /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> { use std::io::Read; self.io .registration() .try_io(Interest::READABLE, || (&*self.io).read_vectored(bufs)) } cfg_io_util! { /// Tries to read data from the stream into the provided buffer, advancing the /// buffer's internal cursor, returning how many bytes were read. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by /// the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`readable()`]: TcpStream::readable() /// [`ready()`]: TcpStream::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. 
`Ok(0)` indicates the stream's read half is closed /// and will no longer yield data. If the stream is not ready to read data /// `Err(io::ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// loop { /// // Wait for the socket to be readable /// stream.readable().await?; /// /// let mut buf = Vec::with_capacity(4096); /// /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_read_buf(&mut buf) { /// Ok(0) => break, /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_read_buf<B: BufMut>(&self, buf: &mut B) -> io::Result<usize> { self.io.registration().try_io(Interest::READABLE, || { use std::io::Read; let dst = buf.chunk_mut(); let dst = unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit<u8>] as *mut [u8]) }; // Safety: We trust `TcpStream::read` to have filled up `n` bytes in the // buffer. let n = (&*self.io).read(dst)?; unsafe { buf.advance_mut(n); } Ok(n) }) } } /// Waits for the socket to become writable. /// /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually /// paired with `try_write()`. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to write that fails with `WouldBlock` or /// `Poll::Pending`. 
/// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// loop { /// // Wait for the socket to be writable /// stream.writable().await?; /// /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_write(b"hello world") { /// Ok(n) => { /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub async fn writable(&self) -> io::Result<()> { self.ready(Interest::WRITABLE).await?; Ok(()) } /// Polls for write readiness. /// /// If the tcp stream is not currently ready for writing, this method will /// store a clone of the `Waker` from the provided `Context`. When the tcp /// stream becomes ready for writing, `Waker::wake` will be called on the /// waker. /// /// Note that on multiple calls to `poll_write_ready` or `poll_write`, only /// the `Waker` from the `Context` passed to the most recent call is /// scheduled to receive a wakeup. (However, `poll_read_ready` retains a /// second, independent waker.) /// /// This function is intended for cases where creating and pinning a future /// via [`writable`] is not feasible. Where possible, using [`writable`] is /// preferred, as this supports polling from multiple tasks at once. /// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if the tcp stream is not ready for writing. /// * `Poll::Ready(Ok(()))` if the tcp stream is ready for writing. /// * `Poll::Ready(Err(e))` if an error is encountered. /// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. 
/// /// [`writable`]: method@Self::writable pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.io.registration().poll_write_ready(cx).map_ok(|_| ()) } /// Try to write a buffer to the stream, returning how many bytes were /// written. /// /// The function will attempt to write the entire contents of `buf`, but /// only part of the buffer may be written. /// /// This function is usually paired with `writable()`. /// /// # Return /// /// If data is successfully written, `Ok(n)` is returned, where `n` is the /// number of bytes written. If the stream is not ready to write data, /// `Err(io::ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// loop { /// // Wait for the socket to be writable /// stream.writable().await?; /// /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_write(b"hello world") {
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
true
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/tcp/split_owned.rs
tokio/src/net/tcp/split_owned.rs
//! `TcpStream` owned split support. //! //! A `TcpStream` can be split into an `OwnedReadHalf` and a `OwnedWriteHalf` //! with the `TcpStream::into_split` method. `OwnedReadHalf` implements //! `AsyncRead` while `OwnedWriteHalf` implements `AsyncWrite`. //! //! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized //! split has no associated overhead and enforces all invariants at the type //! level. use crate::io::{AsyncRead, AsyncWrite, Interest, ReadBuf, Ready}; use crate::net::TcpStream; use std::error::Error; use std::future::poll_fn; use std::net::{Shutdown, SocketAddr}; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use std::{fmt, io}; cfg_io_util! { use bytes::BufMut; } /// Owned read half of a [`TcpStream`], created by [`into_split`]. /// /// Reading from an `OwnedReadHalf` is usually done using the convenience methods found /// on the [`AsyncReadExt`] trait. /// /// [`TcpStream`]: TcpStream /// [`into_split`]: TcpStream::into_split() /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt #[derive(Debug)] pub struct OwnedReadHalf { inner: Arc<TcpStream>, } /// Owned write half of a [`TcpStream`], created by [`into_split`]. /// /// Note that in the [`AsyncWrite`] implementation of this type, [`poll_shutdown`] will /// shut down the TCP stream in the write direction. Dropping the write half /// will also shut down the write half of the TCP stream. /// /// Writing to an `OwnedWriteHalf` is usually done using the convenience methods found /// on the [`AsyncWriteExt`] trait. 
/// /// [`TcpStream`]: TcpStream /// [`into_split`]: TcpStream::into_split() /// [`AsyncWrite`]: trait@crate::io::AsyncWrite /// [`poll_shutdown`]: fn@crate::io::AsyncWrite::poll_shutdown /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt #[derive(Debug)] pub struct OwnedWriteHalf { inner: Arc<TcpStream>, shutdown_on_drop: bool, } pub(crate) fn split_owned(stream: TcpStream) -> (OwnedReadHalf, OwnedWriteHalf) { let arc = Arc::new(stream); let read = OwnedReadHalf { inner: Arc::clone(&arc), }; let write = OwnedWriteHalf { inner: arc, shutdown_on_drop: true, }; (read, write) } pub(crate) fn reunite( read: OwnedReadHalf, write: OwnedWriteHalf, ) -> Result<TcpStream, ReuniteError> { if Arc::ptr_eq(&read.inner, &write.inner) { write.forget(); // This unwrap cannot fail as the api does not allow creating more than two Arcs, // and we just dropped the other half. Ok(Arc::try_unwrap(read.inner).expect("TcpStream: try_unwrap failed in reunite")) } else { Err(ReuniteError(read, write)) } } /// Error indicating that two halves were not from the same socket, and thus could /// not be reunited. #[derive(Debug)] pub struct ReuniteError(pub OwnedReadHalf, pub OwnedWriteHalf); impl fmt::Display for ReuniteError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "tried to reunite halves that are not from the same socket" ) } } impl Error for ReuniteError {} impl OwnedReadHalf { /// Attempts to put the two halves of a `TcpStream` back together and /// recover the original socket. Succeeds only if the two halves /// originated from the same call to [`into_split`]. /// /// [`into_split`]: TcpStream::into_split() pub fn reunite(self, other: OwnedWriteHalf) -> Result<TcpStream, ReuniteError> { reunite(self, other) } /// Attempt to receive data on the socket, without removing that data from /// the queue, registering the current task for wakeup if data is not yet /// available. 
/// /// Note that on multiple calls to `poll_peek` or `poll_read`, only the /// `Waker` from the `Context` passed to the most recent call is scheduled /// to receive a wakeup. /// /// See the [`TcpStream::poll_peek`] level documentation for more details. /// /// # Examples /// /// ```no_run /// use tokio::io::{self, ReadBuf}; /// use tokio::net::TcpStream; /// /// use std::future::poll_fn; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let stream = TcpStream::connect("127.0.0.1:8000").await?; /// let (mut read_half, _) = stream.into_split(); /// let mut buf = [0; 10]; /// let mut buf = ReadBuf::new(&mut buf); /// /// poll_fn(|cx| { /// read_half.poll_peek(cx, &mut buf) /// }).await?; /// /// Ok(()) /// } /// ``` /// /// [`TcpStream::poll_peek`]: TcpStream::poll_peek pub fn poll_peek( &mut self, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<usize>> { self.inner.poll_peek(cx, buf) } /// Receives data on the socket from the remote address to which it is /// connected, without removing that data from the queue. On success, /// returns the number of bytes peeked. /// /// See the [`TcpStream::peek`] level documentation for more details. /// /// [`TcpStream::peek`]: TcpStream::peek /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use tokio::io::AsyncReadExt; /// use std::error::Error; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// let (mut read_half, _) = stream.into_split(); /// /// let mut b1 = [0; 10]; /// let mut b2 = [0; 10]; /// /// // Peek at the data /// let n = read_half.peek(&mut b1).await?; /// /// // Read the data /// assert_eq!(n, read_half.read(&mut b2[..n]).await?); /// assert_eq!(&b1[..n], &b2[..n]); /// /// Ok(()) /// } /// ``` /// /// The [`read`] method is defined on the [`AsyncReadExt`] trait. 
/// /// [`read`]: fn@crate::io::AsyncReadExt::read /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt pub async fn peek(&mut self, buf: &mut [u8]) -> io::Result<usize> { let mut buf = ReadBuf::new(buf); poll_fn(|cx| self.poll_peek(cx, &mut buf)).await } /// Waits for any of the requested ready states. /// /// This function is usually paired with [`try_read()`]. It can be used instead /// of [`readable()`] to check the returned ready set for [`Ready::READABLE`] /// and [`Ready::READ_CLOSED`] events. /// /// The function may complete without the socket being ready. This is a /// false-positive and attempting an operation will return with /// `io::ErrorKind::WouldBlock`. The function can also return with an empty /// [`Ready`] set, so you should always check the returned value and possibly /// wait again if the requested states are not set. /// /// This function is equivalent to [`TcpStream::ready`]. /// /// [`try_read()`]: Self::try_read /// [`readable()`]: Self::readable /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read or write that fails with `WouldBlock` or /// `Poll::Pending`. pub async fn ready(&self, interest: Interest) -> io::Result<Ready> { self.inner.ready(interest).await } /// Waits for the socket to become readable. /// /// This function is equivalent to `ready(Interest::READABLE)` and is usually /// paired with `try_read()`. /// /// This function is also equivalent to [`TcpStream::ready`]. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read that fails with `WouldBlock` or /// `Poll::Pending`. 
pub async fn readable(&self) -> io::Result<()> { self.inner.readable().await } /// Tries to read data from the stream into the provided buffer, returning how /// many bytes were read. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read()` is non-blocking, the buffer does not have to be stored by /// the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`readable()`]: Self::readable() /// [`ready()`]: Self::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: /// /// 1. The stream's read half is closed and will no longer yield data. /// 2. The specified buffer was 0 bytes in length. /// /// If the stream is not ready to read data, /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_read(&self, buf: &mut [u8]) -> io::Result<usize> { self.inner.try_read(buf) } /// Tries to read data from the stream into the provided buffers, returning /// how many bytes were read. /// /// Data is copied to fill each buffer in order, with the final buffer /// written to possibly being only partially filled. This method behaves /// equivalently to a single call to [`try_read()`] with concatenated /// buffers. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read_vectored()` is non-blocking, the buffer does not have to be /// stored by the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. 
/// /// [`try_read()`]: Self::try_read() /// [`readable()`]: Self::readable() /// [`ready()`]: Self::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. `Ok(0)` indicates the stream's read half is closed /// and will no longer yield data. If the stream is not ready to read data /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> { self.inner.try_read_vectored(bufs) } cfg_io_util! { /// Tries to read data from the stream into the provided buffer, advancing the /// buffer's internal cursor, returning how many bytes were read. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by /// the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`readable()`]: Self::readable() /// [`ready()`]: Self::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. `Ok(0)` indicates the stream's read half is closed /// and will no longer yield data. If the stream is not ready to read data /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_read_buf<B: BufMut>(&self, buf: &mut B) -> io::Result<usize> { self.inner.try_read_buf(buf) } } /// Returns the remote address that this stream is connected to. pub fn peer_addr(&self) -> io::Result<SocketAddr> { self.inner.peer_addr() } /// Returns the local address that this stream is bound to. 
pub fn local_addr(&self) -> io::Result<SocketAddr> { self.inner.local_addr() } } impl AsyncRead for OwnedReadHalf { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { self.inner.poll_read_priv(cx, buf) } } impl OwnedWriteHalf { /// Attempts to put the two halves of a `TcpStream` back together and /// recover the original socket. Succeeds only if the two halves /// originated from the same call to [`into_split`]. /// /// [`into_split`]: TcpStream::into_split() pub fn reunite(self, other: OwnedReadHalf) -> Result<TcpStream, ReuniteError> { reunite(other, self) } /// Destroys the write half, but don't close the write half of the stream /// until the read half is dropped. If the read half has already been /// dropped, this closes the stream. pub fn forget(mut self) { self.shutdown_on_drop = false; drop(self); } /// Waits for any of the requested ready states. /// /// This function is usually paired with [`try_write()`]. It can be used instead /// of [`writable()`] to check the returned ready set for [`Ready::WRITABLE`] /// and [`Ready::WRITE_CLOSED`] events. /// /// The function may complete without the socket being ready. This is a /// false-positive and attempting an operation will return with /// `io::ErrorKind::WouldBlock`. The function can also return with an empty /// [`Ready`] set, so you should always check the returned value and possibly /// wait again if the requested states are not set. /// /// This function is equivalent to [`TcpStream::ready`]. /// /// [`try_write()`]: Self::try_write /// [`writable()`]: Self::writable /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read or write that fails with `WouldBlock` or /// `Poll::Pending`. 
pub async fn ready(&self, interest: Interest) -> io::Result<Ready> { self.inner.ready(interest).await } /// Waits for the socket to become writable. /// /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually /// paired with `try_write()`. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to write that fails with `WouldBlock` or /// `Poll::Pending`. pub async fn writable(&self) -> io::Result<()> { self.inner.writable().await } /// Tries to write a buffer to the stream, returning how many bytes were /// written. /// /// The function will attempt to write the entire contents of `buf`, but /// only part of the buffer may be written. /// /// This function is usually paired with `writable()`. /// /// # Return /// /// If data is successfully written, `Ok(n)` is returned, where `n` is the /// number of bytes written. If the stream is not ready to write data, /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_write(&self, buf: &[u8]) -> io::Result<usize> { self.inner.try_write(buf) } /// Tries to write several buffers to the stream, returning how many bytes /// were written. /// /// Data is written from each buffer in order, with the final buffer read /// from possible being only partially consumed. This method behaves /// equivalently to a single call to [`try_write()`] with concatenated /// buffers. /// /// This function is usually paired with `writable()`. /// /// [`try_write()`]: Self::try_write() /// /// # Return /// /// If data is successfully written, `Ok(n)` is returned, where `n` is the /// number of bytes written. If the stream is not ready to write data, /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_write_vectored(&self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> { self.inner.try_write_vectored(bufs) } /// Returns the remote address that this stream is connected to. 
pub fn peer_addr(&self) -> io::Result<SocketAddr> { self.inner.peer_addr() } /// Returns the local address that this stream is bound to. pub fn local_addr(&self) -> io::Result<SocketAddr> { self.inner.local_addr() } } impl Drop for OwnedWriteHalf { fn drop(&mut self) { if self.shutdown_on_drop { let _ = self.inner.shutdown_std(Shutdown::Write); } } } impl AsyncWrite for OwnedWriteHalf { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { self.inner.poll_write_priv(cx, buf) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll<io::Result<usize>> { self.inner.poll_write_vectored_priv(cx, bufs) } fn is_write_vectored(&self) -> bool { self.inner.is_write_vectored() } #[inline] fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { // tcp flush is a no-op Poll::Ready(Ok(())) } // `poll_shutdown` on a write half shutdowns the stream in the "write" direction. fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { let res = self.inner.shutdown_std(Shutdown::Write); if res.is_ok() { Pin::into_inner(self).shutdown_on_drop = false; } res.into() } } impl AsRef<TcpStream> for OwnedReadHalf { fn as_ref(&self) -> &TcpStream { &self.inner } } impl AsRef<TcpStream> for OwnedWriteHalf { fn as_ref(&self) -> &TcpStream { &self.inner } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/tcp/listener.rs
tokio/src/net/tcp/listener.rs
use crate::io::{Interest, PollEvented}; use crate::net::tcp::TcpStream; use crate::util::check_socket_for_blocking; cfg_not_wasi! { use crate::net::{to_socket_addrs, ToSocketAddrs}; } use std::fmt; use std::io; use std::net::{self, SocketAddr}; use std::task::{ready, Context, Poll}; cfg_net! { /// A TCP socket server, listening for connections. /// /// You can accept a new connection by using the [`accept`](`TcpListener::accept`) /// method. /// /// A `TcpListener` can be turned into a `Stream` with [`TcpListenerStream`]. /// /// The socket will be closed when the value is dropped. /// /// [`TcpListenerStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.TcpListenerStream.html /// /// # Errors /// /// Note that accepting a connection can lead to various errors and not all /// of them are necessarily fatal ‒ for example having too many open file /// descriptors or the other side closing the connection while it waits in /// an accept queue. These would terminate the stream if not handled in any /// way. /// /// # Examples /// /// Using `accept`: /// ```no_run /// use tokio::net::TcpListener; /// /// use std::io; /// /// async fn process_socket<T>(socket: T) { /// # drop(socket); /// // do work with socket here /// } /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let listener = TcpListener::bind("127.0.0.1:8080").await?; /// /// loop { /// let (socket, _) = listener.accept().await?; /// process_socket(socket).await; /// } /// } /// ``` pub struct TcpListener { io: PollEvented<mio::net::TcpListener>, } } impl TcpListener { cfg_not_wasi! { /// Creates a new `TcpListener`, which will be bound to the specified address. /// /// The returned listener is ready for accepting connections. /// /// Binding with a port number of 0 will request that the OS assigns a port /// to this listener. The port allocated can be queried via the `local_addr` /// method. /// /// The address type can be any implementor of the [`ToSocketAddrs`] trait. 
/// If `addr` yields multiple addresses, bind will be attempted with each of /// the addresses until one succeeds and returns the listener. If none of /// the addresses succeed in creating a listener, the error returned from /// the last attempt (the last address) is returned. /// /// This function sets the `SO_REUSEADDR` option on the socket on Unix. /// /// To configure the socket before binding, you can use the [`TcpSocket`] /// type. /// /// [`ToSocketAddrs`]: trait@crate::net::ToSocketAddrs /// [`TcpSocket`]: struct@crate::net::TcpSocket /// /// # Examples /// /// ```no_run /// use tokio::net::TcpListener; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// # if cfg!(miri) { return Ok(()); } // No `socket` in miri. /// let listener = TcpListener::bind("127.0.0.1:2345").await?; /// /// // use the listener /// /// # let _ = listener; /// Ok(()) /// } /// ``` pub async fn bind<A: ToSocketAddrs>(addr: A) -> io::Result<TcpListener> { let addrs = to_socket_addrs(addr).await?; let mut last_err = None; for addr in addrs { match TcpListener::bind_addr(addr) { Ok(listener) => return Ok(listener), Err(e) => last_err = Some(e), } } Err(last_err.unwrap_or_else(|| { io::Error::new( io::ErrorKind::InvalidInput, "could not resolve to any address", ) })) } fn bind_addr(addr: SocketAddr) -> io::Result<TcpListener> { let listener = mio::net::TcpListener::bind(addr)?; TcpListener::new(listener) } } /// Accepts a new incoming connection from this listener. /// /// This function will yield once a new TCP connection is established. When /// established, the corresponding [`TcpStream`] and the remote peer's /// address will be returned. /// /// # Cancel safety /// /// This method is cancel safe. If the method is used as the event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, then it is guaranteed that no new connections were /// accepted by this method. 
/// /// [`TcpStream`]: struct@crate::net::TcpStream /// /// # Examples /// /// ```no_run /// use tokio::net::TcpListener; /// /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let listener = TcpListener::bind("127.0.0.1:8080").await?; /// /// match listener.accept().await { /// Ok((_socket, addr)) => println!("new client: {:?}", addr), /// Err(e) => println!("couldn't get client: {:?}", e), /// } /// /// Ok(()) /// } /// ``` pub async fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { let (mio, addr) = self .io .registration() .async_io(Interest::READABLE, || self.io.accept()) .await?; let stream = TcpStream::new(mio)?; Ok((stream, addr)) } /// Polls to accept a new incoming connection to this listener. /// /// If there is no connection to accept, `Poll::Pending` is returned and the /// current task will be notified by a waker. Note that on multiple calls /// to `poll_accept`, only the `Waker` from the `Context` passed to the most /// recent call is scheduled to receive a wakeup. pub fn poll_accept(&self, cx: &mut Context<'_>) -> Poll<io::Result<(TcpStream, SocketAddr)>> { loop { let ev = ready!(self.io.registration().poll_read_ready(cx))?; match self.io.accept() { Ok((io, addr)) => { let io = TcpStream::new(io)?; return Poll::Ready(Ok((io, addr))); } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { self.io.registration().clear_readiness(ev); } Err(e) => return Poll::Ready(Err(e)), } } } /// Creates new `TcpListener` from a `std::net::TcpListener`. /// /// This function is intended to be used to wrap a TCP listener from the /// standard library in the Tokio equivalent. /// /// This API is typically paired with the `socket2` crate and the `Socket` /// type to build up and customize a listener before it's shipped off to the /// backing event loop. This allows configuration of options like /// `SO_REUSEPORT`, binding to multiple addresses, etc. 
/// /// # Notes /// /// The caller is responsible for ensuring that the listener is in /// non-blocking mode. Otherwise all I/O operations on the listener /// will block the thread, which will cause unexpected behavior. /// Non-blocking mode can be set using [`set_nonblocking`]. /// /// Passing a listener in blocking mode is always erroneous, /// and the behavior in that case may change in the future. /// For example, it could panic. /// /// [`set_nonblocking`]: std::net::TcpListener::set_nonblocking /// /// # Examples /// /// ```rust,no_run /// use std::error::Error; /// use tokio::net::TcpListener; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let std_listener = std::net::TcpListener::bind("127.0.0.1:0")?; /// std_listener.set_nonblocking(true)?; /// let listener = TcpListener::from_std(std_listener)?; /// Ok(()) /// } /// ``` /// /// # Panics /// /// This function panics if it is not called from within a runtime with /// IO enabled. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. #[track_caller] pub fn from_std(listener: net::TcpListener) -> io::Result<TcpListener> { check_socket_for_blocking(&listener)?; let io = mio::net::TcpListener::from_std(listener); let io = PollEvented::new(io)?; Ok(TcpListener { io }) } /// Turns a [`tokio::net::TcpListener`] into a [`std::net::TcpListener`]. /// /// The returned [`std::net::TcpListener`] will have nonblocking mode set as /// `true`. Use [`set_nonblocking`] to change the blocking mode if needed. 
/// /// # Examples /// /// ```rust,no_run /// use std::error::Error; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let tokio_listener = tokio::net::TcpListener::bind("127.0.0.1:0").await?; /// let std_listener = tokio_listener.into_std()?; /// std_listener.set_nonblocking(false)?; /// Ok(()) /// } /// ``` /// /// [`tokio::net::TcpListener`]: TcpListener /// [`std::net::TcpListener`]: std::net::TcpListener /// [`set_nonblocking`]: fn@std::net::TcpListener::set_nonblocking pub fn into_std(self) -> io::Result<std::net::TcpListener> { #[cfg(unix)] { use std::os::unix::io::{FromRawFd, IntoRawFd}; self.io .into_inner() .map(IntoRawFd::into_raw_fd) .map(|raw_fd| unsafe { std::net::TcpListener::from_raw_fd(raw_fd) }) } #[cfg(windows)] { use std::os::windows::io::{FromRawSocket, IntoRawSocket}; self.io .into_inner() .map(|io| io.into_raw_socket()) .map(|raw_socket| unsafe { std::net::TcpListener::from_raw_socket(raw_socket) }) } #[cfg(target_os = "wasi")] { use std::os::wasi::io::{FromRawFd, IntoRawFd}; self.io .into_inner() .map(|io| io.into_raw_fd()) .map(|raw_fd| unsafe { std::net::TcpListener::from_raw_fd(raw_fd) }) } } cfg_not_wasi! { pub(crate) fn new(listener: mio::net::TcpListener) -> io::Result<TcpListener> { let io = PollEvented::new(listener)?; Ok(TcpListener { io }) } } /// Returns the local address that this listener is bound to. /// /// This can be useful, for example, when binding to port 0 to figure out /// which port was actually bound. 
/// /// # Examples /// /// ```rust,no_run /// use tokio::net::TcpListener; /// /// use std::io; /// use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let listener = TcpListener::bind("127.0.0.1:8080").await?; /// /// assert_eq!(listener.local_addr()?, /// SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080))); /// /// Ok(()) /// } /// ``` pub fn local_addr(&self) -> io::Result<SocketAddr> { self.io.local_addr() } /// Gets the value of the `IP_TTL` option for this socket. /// /// For more information about this option, see [`set_ttl`]. /// /// [`set_ttl`]: method@Self::set_ttl /// /// # Examples /// /// ```no_run /// use tokio::net::TcpListener; /// /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let listener = TcpListener::bind("127.0.0.1:0").await?; /// /// listener.set_ttl(100).expect("could not set TTL"); /// assert_eq!(listener.ttl()?, 100); /// /// Ok(()) /// } /// ``` pub fn ttl(&self) -> io::Result<u32> { self.io.ttl() } /// Sets the value for the `IP_TTL` option on this socket. /// /// This value sets the time-to-live field that is used in every packet sent /// from this socket. /// /// # Examples /// /// ```no_run /// use tokio::net::TcpListener; /// /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let listener = TcpListener::bind("127.0.0.1:0").await?; /// /// listener.set_ttl(100).expect("could not set TTL"); /// /// Ok(()) /// } /// ``` pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { self.io.set_ttl(ttl) } } impl TryFrom<net::TcpListener> for TcpListener { type Error = io::Error; /// Consumes stream, returning the tokio I/O object. /// /// This is equivalent to /// [`TcpListener::from_std(stream)`](TcpListener::from_std). 
fn try_from(stream: net::TcpListener) -> Result<Self, Self::Error> { Self::from_std(stream) } } impl fmt::Debug for TcpListener { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { (*self.io).fmt(f) } } #[cfg(unix)] mod sys { use super::TcpListener; use std::os::unix::prelude::*; impl AsRawFd for TcpListener { fn as_raw_fd(&self) -> RawFd { self.io.as_raw_fd() } } impl AsFd for TcpListener { fn as_fd(&self) -> BorrowedFd<'_> { unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } } } } cfg_unstable! { #[cfg(target_os = "wasi")] mod sys { use super::TcpListener; use std::os::wasi::prelude::*; impl AsRawFd for TcpListener { fn as_raw_fd(&self) -> RawFd { self.io.as_raw_fd() } } impl AsFd for TcpListener { fn as_fd(&self) -> BorrowedFd<'_> { unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } } } } } cfg_windows! { use crate::os::windows::io::{AsRawSocket, RawSocket, AsSocket, BorrowedSocket}; impl AsRawSocket for TcpListener { fn as_raw_socket(&self) -> RawSocket { self.io.as_raw_socket() } } impl AsSocket for TcpListener { fn as_socket(&self) -> BorrowedSocket<'_> { unsafe { BorrowedSocket::borrow_raw(self.as_raw_socket()) } } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/tcp/mod.rs
tokio/src/net/tcp/mod.rs
//! TCP utility types. pub(crate) mod listener; cfg_not_wasi! { pub(crate) mod socket; } mod split; pub use split::{ReadHalf, WriteHalf}; mod split_owned; pub use split_owned::{OwnedReadHalf, OwnedWriteHalf, ReuniteError}; pub(crate) mod stream; pub(crate) use stream::TcpStream;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/tcp/socket.rs
tokio/src/net/tcp/socket.rs
use crate::net::{TcpListener, TcpStream}; use std::fmt; use std::io; use std::net::SocketAddr; #[cfg(unix)] use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd}; use std::time::Duration; cfg_windows! { use crate::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket, AsSocket, BorrowedSocket}; } cfg_net! { /// A TCP socket that has not yet been converted to a `TcpStream` or /// `TcpListener`. /// /// `TcpSocket` wraps an operating system socket and enables the caller to /// configure the socket before establishing a TCP connection or accepting /// inbound connections. The caller is able to set socket option and explicitly /// bind the socket with a socket address. /// /// The underlying socket is closed when the `TcpSocket` value is dropped. /// /// `TcpSocket` should only be used directly if the default configuration used /// by `TcpStream::connect` and `TcpListener::bind` does not meet the required /// use case. /// /// Calling `TcpStream::connect("127.0.0.1:8080")` is equivalent to: /// /// ```no_run /// use tokio::net::TcpSocket; /// /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let addr = "127.0.0.1:8080".parse().unwrap(); /// /// let socket = TcpSocket::new_v4()?; /// let stream = socket.connect(addr).await?; /// # drop(stream); /// /// Ok(()) /// } /// ``` /// /// Calling `TcpListener::bind("127.0.0.1:8080")` is equivalent to: /// /// ```no_run /// use tokio::net::TcpSocket; /// /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let addr = "127.0.0.1:8080".parse().unwrap(); /// /// let socket = TcpSocket::new_v4()?; /// // On platforms with Berkeley-derived sockets, this allows to quickly /// // rebind a socket, without needing to wait for the OS to clean up the /// // previous one. /// // /// // On Windows, this allows rebinding sockets which are actively in use, /// // which allows "socket hijacking", so we explicitly don't set it here. 
/// // https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse /// socket.set_reuseaddr(true)?; /// socket.bind(addr)?; /// /// // Note: the actual backlog used by `TcpListener::bind` is platform-dependent, /// // as Tokio relies on Mio's default backlog value configuration. The `1024` here is only /// // illustrative and does not reflect the real value used. /// let listener = socket.listen(1024)?; /// # drop(listener); /// /// Ok(()) /// } /// ``` /// /// Setting socket options not explicitly provided by `TcpSocket` may be done by /// accessing the `RawFd`/`RawSocket` using [`AsRawFd`]/[`AsRawSocket`] and /// setting the option with a crate like [`socket2`]. /// /// [`RawFd`]: https://doc.rust-lang.org/std/os/fd/type.RawFd.html /// [`RawSocket`]: https://doc.rust-lang.org/std/os/windows/io/type.RawSocket.html /// [`AsRawFd`]: https://doc.rust-lang.org/std/os/fd/trait.AsRawFd.html /// [`AsRawSocket`]: https://doc.rust-lang.org/std/os/windows/io/trait.AsRawSocket.html /// [`socket2`]: https://docs.rs/socket2/ #[cfg_attr(docsrs, doc(alias = "connect_std"))] pub struct TcpSocket { inner: socket2::Socket, } } impl TcpSocket { /// Creates a new socket configured for IPv4. /// /// Calls `socket(2)` with `AF_INET` and `SOCK_STREAM`. /// /// # Returns /// /// On success, the newly created `TcpSocket` is returned. If an error is /// encountered, it is returned instead. /// /// # Examples /// /// Create a new IPv4 socket and start listening. /// /// ```no_run /// use tokio::net::TcpSocket; /// /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let addr = "127.0.0.1:8080".parse().unwrap(); /// let socket = TcpSocket::new_v4()?; /// socket.bind(addr)?; /// /// let listener = socket.listen(128)?; /// # drop(listener); /// Ok(()) /// } /// ``` pub fn new_v4() -> io::Result<TcpSocket> { TcpSocket::new(socket2::Domain::IPV4) } /// Creates a new socket configured for IPv6. 
/// /// Calls `socket(2)` with `AF_INET6` and `SOCK_STREAM`. /// /// # Returns /// /// On success, the newly created `TcpSocket` is returned. If an error is /// encountered, it is returned instead. /// /// # Examples /// /// Create a new IPv6 socket and start listening. /// /// ```no_run /// use tokio::net::TcpSocket; /// /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let addr = "[::1]:8080".parse().unwrap(); /// let socket = TcpSocket::new_v6()?; /// socket.bind(addr)?; /// /// let listener = socket.listen(128)?; /// # drop(listener); /// Ok(()) /// } /// ``` pub fn new_v6() -> io::Result<TcpSocket> { TcpSocket::new(socket2::Domain::IPV6) } fn new(domain: socket2::Domain) -> io::Result<TcpSocket> { let ty = socket2::Type::STREAM; #[cfg(any( target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", target_os = "linux", target_os = "netbsd", target_os = "openbsd" ))] let ty = ty.nonblocking(); let inner = socket2::Socket::new(domain, ty, Some(socket2::Protocol::TCP))?; #[cfg(not(any( target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", target_os = "linux", target_os = "netbsd", target_os = "openbsd" )))] inner.set_nonblocking(true)?; Ok(TcpSocket { inner }) } /// Sets value for the `SO_KEEPALIVE` option on this socket. pub fn set_keepalive(&self, keepalive: bool) -> io::Result<()> { self.inner.set_keepalive(keepalive) } /// Gets the value of the `SO_KEEPALIVE` option on this socket. pub fn keepalive(&self) -> io::Result<bool> { self.inner.keepalive() } /// Allows the socket to bind to an in-use address. /// /// Behavior is platform specific. Refer to the target platform's /// documentation for more details. 
/// /// # Examples /// /// ```no_run /// use tokio::net::TcpSocket; /// /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let addr = "127.0.0.1:8080".parse().unwrap(); /// /// let socket = TcpSocket::new_v4()?; /// socket.set_reuseaddr(true)?; /// socket.bind(addr)?; /// /// let listener = socket.listen(1024)?; /// # drop(listener); /// /// Ok(()) /// } /// ``` pub fn set_reuseaddr(&self, reuseaddr: bool) -> io::Result<()> { self.inner.set_reuse_address(reuseaddr) } /// Retrieves the value set for `SO_REUSEADDR` on this socket. /// /// # Examples /// /// ```no_run /// use tokio::net::TcpSocket; /// /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let addr = "127.0.0.1:8080".parse().unwrap(); /// /// let socket = TcpSocket::new_v4()?; /// socket.set_reuseaddr(true)?; /// assert!(socket.reuseaddr().unwrap()); /// socket.bind(addr)?; /// /// let listener = socket.listen(1024)?; /// Ok(()) /// } /// ``` pub fn reuseaddr(&self) -> io::Result<bool> { self.inner.reuse_address() } /// Allows the socket to bind to an in-use port. Only available for unix systems /// (excluding Solaris, Illumos, and Cygwin). /// /// Behavior is platform specific. Refer to the target platform's /// documentation for more details. 
/// /// # Examples /// /// ```no_run /// use tokio::net::TcpSocket; /// /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let addr = "127.0.0.1:8080".parse().unwrap(); /// /// let socket = TcpSocket::new_v4()?; /// socket.set_reuseport(true)?; /// socket.bind(addr)?; /// /// let listener = socket.listen(1024)?; /// Ok(()) /// } /// ``` #[cfg(all( unix, not(target_os = "solaris"), not(target_os = "illumos"), not(target_os = "cygwin"), ))] #[cfg_attr( docsrs, doc(cfg(all( unix, not(target_os = "solaris"), not(target_os = "illumos"), not(target_os = "cygwin"), ))) )] pub fn set_reuseport(&self, reuseport: bool) -> io::Result<()> { self.inner.set_reuse_port(reuseport) } /// Allows the socket to bind to an in-use port. Only available for unix systems /// (excluding Solaris, Illumos, and Cygwin). /// /// Behavior is platform specific. Refer to the target platform's /// documentation for more details. /// /// # Examples /// /// ```no_run /// use tokio::net::TcpSocket; /// /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let addr = "127.0.0.1:8080".parse().unwrap(); /// /// let socket = TcpSocket::new_v4()?; /// socket.set_reuseport(true)?; /// assert!(socket.reuseport().unwrap()); /// socket.bind(addr)?; /// /// let listener = socket.listen(1024)?; /// Ok(()) /// } /// ``` #[cfg(all( unix, not(target_os = "solaris"), not(target_os = "illumos"), not(target_os = "cygwin"), ))] #[cfg_attr( docsrs, doc(cfg(all( unix, not(target_os = "solaris"), not(target_os = "illumos"), not(target_os = "cygwin"), ))) )] pub fn reuseport(&self) -> io::Result<bool> { self.inner.reuse_port() } /// Sets the size of the TCP send buffer on this socket. /// /// On most operating systems, this sets the `SO_SNDBUF` socket option. pub fn set_send_buffer_size(&self, size: u32) -> io::Result<()> { self.inner.set_send_buffer_size(size as usize) } /// Returns the size of the TCP send buffer for this socket. 
/// /// On most operating systems, this is the value of the `SO_SNDBUF` socket /// option. /// /// Note that if [`set_send_buffer_size`] has been called on this socket /// previously, the value returned by this function may not be the same as /// the argument provided to `set_send_buffer_size`. This is for the /// following reasons: /// /// * Most operating systems have minimum and maximum allowed sizes for the /// send buffer, and will clamp the provided value if it is below the /// minimum or above the maximum. The minimum and maximum buffer sizes are /// OS-dependent. /// * Linux will double the buffer size to account for internal bookkeeping /// data, and returns the doubled value from `getsockopt(2)`. As per `man /// 7 socket`: /// > Sets or gets the maximum socket send buffer in bytes. The /// > kernel doubles this value (to allow space for bookkeeping /// > overhead) when it is set using `setsockopt(2)`, and this doubled /// > value is returned by `getsockopt(2)`. /// /// [`set_send_buffer_size`]: #method.set_send_buffer_size pub fn send_buffer_size(&self) -> io::Result<u32> { self.inner.send_buffer_size().map(|n| n as u32) } /// Sets the size of the TCP receive buffer on this socket. /// /// On most operating systems, this sets the `SO_RCVBUF` socket option. pub fn set_recv_buffer_size(&self, size: u32) -> io::Result<()> { self.inner.set_recv_buffer_size(size as usize) } /// Returns the size of the TCP receive buffer for this socket. /// /// On most operating systems, this is the value of the `SO_RCVBUF` socket /// option. /// /// Note that if [`set_recv_buffer_size`] has been called on this socket /// previously, the value returned by this function may not be the same as /// the argument provided to `set_recv_buffer_size`. This is for the /// following reasons: /// /// * Most operating systems have minimum and maximum allowed sizes for the /// receive buffer, and will clamp the provided value if it is below the /// minimum or above the maximum. 
The minimum and maximum buffer sizes are /// OS-dependent. /// * Linux will double the buffer size to account for internal bookkeeping /// data, and returns the doubled value from `getsockopt(2)`. As per `man /// 7 socket`: /// > Sets or gets the maximum socket send buffer in bytes. The /// > kernel doubles this value (to allow space for bookkeeping /// > overhead) when it is set using `setsockopt(2)`, and this doubled /// > value is returned by `getsockopt(2)`. /// /// [`set_recv_buffer_size`]: #method.set_recv_buffer_size pub fn recv_buffer_size(&self) -> io::Result<u32> { self.inner.recv_buffer_size().map(|n| n as u32) } /// Sets the linger duration of this socket by setting the `SO_LINGER` option. /// /// This option controls the action taken when a stream has unsent messages and the stream is /// closed. If `SO_LINGER` is set, the system shall block the process until it can transmit the /// data or until the time expires. /// /// If `SO_LINGER` is not specified, and the socket is closed, the system handles the call in a /// way that allows the process to continue as quickly as possible. /// /// This option is deprecated because setting `SO_LINGER` on a socket used with Tokio is always /// incorrect as it leads to blocking the thread when the socket is closed. For more details, /// please see: /// /// > Volumes of communications have been devoted to the intricacies of `SO_LINGER` versus /// > non-blocking (`O_NONBLOCK`) sockets. From what I can tell, the final word is: don't do /// > it. Rely on the `shutdown()`-followed-by-`read()`-eof technique instead. 
/// > /// > From [The ultimate `SO_LINGER` page, or: why is my tcp not reliable](https://blog.netherlabs.nl/articles/2009/01/18/the-ultimate-so_linger-page-or-why-is-my-tcp-not-reliable) #[deprecated = "`SO_LINGER` causes the socket to block the thread on drop"] pub fn set_linger(&self, dur: Option<Duration>) -> io::Result<()> { self.inner.set_linger(dur) } /// Reads the linger duration for this socket by getting the `SO_LINGER` /// option. /// /// For more information about this option, see [`set_linger`]. /// /// [`set_linger`]: TcpSocket::set_linger pub fn linger(&self) -> io::Result<Option<Duration>> { self.inner.linger() } /// Sets the value of the `TCP_NODELAY` option on this socket. /// /// If set, this option disables the Nagle algorithm. This means that segments are always /// sent as soon as possible, even if there is only a small amount of data. When not set, /// data is buffered until there is a sufficient amount to send out, thereby avoiding /// the frequent sending of small packets. /// /// # Examples /// /// ```no_run /// use tokio::net::TcpSocket; /// /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { /// let socket = TcpSocket::new_v4()?; /// /// socket.set_nodelay(true)?; /// # Ok(()) /// # } /// ``` pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { self.inner.set_tcp_nodelay(nodelay) } /// Gets the value of the `TCP_NODELAY` option on this socket. /// /// For more information about this option, see [`set_nodelay`]. /// /// [`set_nodelay`]: TcpSocket::set_nodelay /// /// # Examples /// /// ```no_run /// use tokio::net::TcpSocket; /// /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { /// let socket = TcpSocket::new_v4()?; /// /// println!("{:?}", socket.nodelay()?); /// # Ok(()) /// # } /// ``` pub fn nodelay(&self) -> io::Result<bool> { self.inner.tcp_nodelay() } /// Gets the value of the `IPV6_TCLASS` option for this socket. /// /// For more information about this option, see [`set_tclass_v6`]. 
/// /// [`set_tclass_v6`]: Self::set_tclass_v6 // https://docs.rs/socket2/0.6.1/src/socket2/sys/unix.rs.html#2541 #[cfg(any( target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "linux", target_os = "macos", target_os = "netbsd", target_os = "openbsd", target_os = "cygwin", ))] #[cfg_attr( docsrs, doc(cfg(any( target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "linux", target_os = "macos", target_os = "netbsd", target_os = "openbsd", target_os = "cygwin", ))) )] pub fn tclass_v6(&self) -> io::Result<u32> { self.inner.tclass_v6() } /// Sets the value for the `IPV6_TCLASS` option on this socket. /// /// Specifies the traffic class field that is used in every packet /// sent from this socket. /// /// # Note /// /// This may not have any effect on IPv4 sockets. // https://docs.rs/socket2/0.6.1/src/socket2/sys/unix.rs.html#2566 #[cfg(any( target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "linux", target_os = "macos", target_os = "netbsd", target_os = "openbsd", target_os = "cygwin", ))] #[cfg_attr( docsrs, doc(cfg(any( target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "linux", target_os = "macos", target_os = "netbsd", target_os = "openbsd", target_os = "cygwin", ))) )] pub fn set_tclass_v6(&self, tclass: u32) -> io::Result<()> { self.inner.set_tclass_v6(tclass) } /// Gets the value of the `IP_TOS` option for this socket. /// /// For more information about this option, see [`set_tos_v4`]. 
/// /// [`set_tos_v4`]: Self::set_tos_v4 // https://docs.rs/socket2/0.6.1/src/socket2/socket.rs.html#1585 #[cfg(not(any( target_os = "fuchsia", target_os = "redox", target_os = "solaris", target_os = "illumos", target_os = "haiku" )))] #[cfg_attr( docsrs, doc(cfg(not(any( target_os = "fuchsia", target_os = "redox", target_os = "solaris", target_os = "illumos", target_os = "haiku" )))) )] pub fn tos_v4(&self) -> io::Result<u32> { self.inner.tos_v4() } /// Deprecated. Use [`tos_v4()`] instead. /// /// [`tos_v4()`]: Self::tos_v4 #[deprecated( note = "`tos` related methods have been renamed `tos_v4` since they are IPv4-specific." )] #[doc(hidden)] #[cfg(not(any( target_os = "fuchsia", target_os = "redox", target_os = "solaris", target_os = "illumos", target_os = "haiku" )))] #[cfg_attr( docsrs, doc(cfg(not(any( target_os = "fuchsia", target_os = "redox", target_os = "solaris", target_os = "illumos", target_os = "haiku" )))) )] pub fn tos(&self) -> io::Result<u32> { self.tos_v4() } /// Sets the value for the `IP_TOS` option on this socket. /// /// This value sets the type-of-service field that is used in every packet /// sent from this socket. /// /// # Note /// /// - This may not have any effect on IPv6 sockets. /// - On Windows, `IP_TOS` is only supported on [Windows 8+ or /// Windows Server 2012+.](https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options) // https://docs.rs/socket2/0.6.1/src/socket2/socket.rs.html#1566 #[cfg(not(any( target_os = "fuchsia", target_os = "redox", target_os = "solaris", target_os = "illumos", target_os = "haiku" )))] #[cfg_attr( docsrs, doc(cfg(not(any( target_os = "fuchsia", target_os = "redox", target_os = "solaris", target_os = "illumos", target_os = "haiku" )))) )] pub fn set_tos_v4(&self, tos: u32) -> io::Result<()> { self.inner.set_tos_v4(tos) } /// Deprecated. Use [`set_tos_v4()`] instead. 
/// /// [`set_tos_v4()`]: Self::set_tos_v4 #[deprecated( note = "`tos` related methods have been renamed `tos_v4` since they are IPv4-specific." )] #[doc(hidden)] #[cfg(not(any( target_os = "fuchsia", target_os = "redox", target_os = "solaris", target_os = "illumos", target_os = "haiku" )))] #[cfg_attr( docsrs, doc(cfg(not(any( target_os = "fuchsia", target_os = "redox", target_os = "solaris", target_os = "illumos", target_os = "haiku" )))) )] pub fn set_tos(&self, tos: u32) -> io::Result<()> { self.set_tos_v4(tos) } /// Gets the value for the `SO_BINDTODEVICE` option on this socket /// /// This value gets the socket binded device's interface name. #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux",))] #[cfg_attr( docsrs, doc(cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux",))) )] pub fn device(&self) -> io::Result<Option<Vec<u8>>> { self.inner.device() } /// Sets the value for the `SO_BINDTODEVICE` option on this socket /// /// If a socket is bound to an interface, only packets received from that /// particular interface are processed by the socket. Note that this only /// works for some socket types, particularly `AF_INET` sockets. /// /// If `interface` is `None` or an empty string it removes the binding. #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] #[cfg_attr( docsrs, doc(cfg(all(any(target_os = "android", target_os = "fuchsia", target_os = "linux")))) )] pub fn bind_device(&self, interface: Option<&[u8]>) -> io::Result<()> { self.inner.bind_device(interface) } /// Gets the local address of this socket. /// /// Will fail on windows if called before `bind`. 
/// /// # Examples /// /// ```no_run /// use tokio::net::TcpSocket; /// /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let addr = "127.0.0.1:8080".parse().unwrap(); /// /// let socket = TcpSocket::new_v4()?; /// socket.bind(addr)?; /// assert_eq!(socket.local_addr().unwrap().to_string(), "127.0.0.1:8080"); /// let listener = socket.listen(1024)?; /// Ok(()) /// } /// ``` pub fn local_addr(&self) -> io::Result<SocketAddr> { self.inner.local_addr().and_then(convert_address) } /// Returns the value of the `SO_ERROR` option. pub fn take_error(&self) -> io::Result<Option<io::Error>> { self.inner.take_error() } /// Binds the socket to the given address. /// /// This calls the `bind(2)` operating-system function. Behavior is /// platform specific. Refer to the target platform's documentation for more /// details. /// /// # Examples /// /// Bind a socket before listening. /// /// ```no_run /// use tokio::net::TcpSocket; /// /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let addr = "127.0.0.1:8080".parse().unwrap(); /// /// let socket = TcpSocket::new_v4()?; /// socket.bind(addr)?; /// /// let listener = socket.listen(1024)?; /// # drop(listener); /// /// Ok(()) /// } /// ``` pub fn bind(&self, addr: SocketAddr) -> io::Result<()> { self.inner.bind(&addr.into()) } /// Establishes a TCP connection with a peer at the specified socket address. /// /// The `TcpSocket` is consumed. Once the connection is established, a /// connected [`TcpStream`] is returned. If the connection fails, the /// encountered error is returned. /// /// [`TcpStream`]: TcpStream /// /// This calls the `connect(2)` operating-system function. Behavior is /// platform specific. Refer to the target platform's documentation for more /// details. /// /// # Examples /// /// Connecting to a peer. 
/// /// ```no_run /// use tokio::net::TcpSocket; /// /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let addr = "127.0.0.1:8080".parse().unwrap(); /// /// let socket = TcpSocket::new_v4()?; /// let stream = socket.connect(addr).await?; /// # drop(stream); /// /// Ok(()) /// } /// ``` pub async fn connect(self, addr: SocketAddr) -> io::Result<TcpStream> { if let Err(err) = self.inner.connect(&addr.into()) { #[cfg(unix)] if err.raw_os_error() != Some(libc::EINPROGRESS) { return Err(err); } #[cfg(windows)] if err.kind() != io::ErrorKind::WouldBlock { return Err(err); } } #[cfg(unix)] let mio = { use std::os::unix::io::{FromRawFd, IntoRawFd}; let raw_fd = self.inner.into_raw_fd(); unsafe { mio::net::TcpStream::from_raw_fd(raw_fd) } }; #[cfg(windows)] let mio = { use std::os::windows::io::{FromRawSocket, IntoRawSocket}; let raw_socket = self.inner.into_raw_socket(); unsafe { mio::net::TcpStream::from_raw_socket(raw_socket) } }; TcpStream::connect_mio(mio).await } /// Converts the socket into a `TcpListener`. /// /// `backlog` defines the maximum number of pending connections are queued /// by the operating system at any given time. Connection are removed from /// the queue with [`TcpListener::accept`]. When the queue is full, the /// operating-system will start rejecting connections. /// /// [`TcpListener::accept`]: TcpListener::accept /// /// This calls the `listen(2)` operating-system function, marking the socket /// as a passive socket. Behavior is platform specific. Refer to the target /// platform's documentation for more details. /// /// # Examples /// /// Create a `TcpListener`. 
/// /// ```no_run /// use tokio::net::TcpSocket; /// /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let addr = "127.0.0.1:8080".parse().unwrap(); /// /// let socket = TcpSocket::new_v4()?; /// socket.bind(addr)?; /// /// let listener = socket.listen(1024)?; /// # drop(listener); /// /// Ok(()) /// } /// ``` pub fn listen(self, backlog: u32) -> io::Result<TcpListener> { self.inner.listen(backlog as i32)?; #[cfg(unix)] let mio = { use std::os::unix::io::{FromRawFd, IntoRawFd}; let raw_fd = self.inner.into_raw_fd(); unsafe { mio::net::TcpListener::from_raw_fd(raw_fd) } }; #[cfg(windows)] let mio = { use std::os::windows::io::{FromRawSocket, IntoRawSocket}; let raw_socket = self.inner.into_raw_socket(); unsafe { mio::net::TcpListener::from_raw_socket(raw_socket) } }; TcpListener::new(mio) } /// Converts a [`std::net::TcpStream`] into a `TcpSocket`. The provided /// socket must not have been connected prior to calling this function. This /// function is typically used together with crates such as [`socket2`] to /// configure socket options that are not available on `TcpSocket`. /// /// [`std::net::TcpStream`]: struct@std::net::TcpStream /// [`socket2`]: https://docs.rs/socket2/ /// /// # Notes /// /// The caller is responsible for ensuring that the socket is in /// non-blocking mode. Otherwise all I/O operations on the socket /// will block the thread, which will cause unexpected behavior. /// Non-blocking mode can be set using [`set_nonblocking`]. /// /// [`set_nonblocking`]: std::net::TcpStream::set_nonblocking /// /// # Examples /// /// ``` /// use tokio::net::TcpSocket; /// use socket2::{Domain, Socket, Type}; /// /// #[tokio::main] /// async fn main() -> std::io::Result<()> { /// # if cfg!(miri) { return Ok(()); } // No `socket` in miri. 
/// let socket2_socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; /// socket2_socket.set_nonblocking(true)?; /// /// let socket = TcpSocket::from_std_stream(socket2_socket.into()); /// /// Ok(()) /// } /// ``` pub fn from_std_stream(std_stream: std::net::TcpStream) -> TcpSocket { #[cfg(unix)] { use std::os::unix::io::{FromRawFd, IntoRawFd}; let raw_fd = std_stream.into_raw_fd(); unsafe { TcpSocket::from_raw_fd(raw_fd) } } #[cfg(windows)] { use std::os::windows::io::{FromRawSocket, IntoRawSocket}; let raw_socket = std_stream.into_raw_socket(); unsafe { TcpSocket::from_raw_socket(raw_socket) } } } } fn convert_address(address: socket2::SockAddr) -> io::Result<SocketAddr> { match address.as_socket() { Some(address) => Ok(address), None => Err(io::Error::new( io::ErrorKind::InvalidInput, "invalid address family (not IPv4 or IPv6)", )), } } impl fmt::Debug for TcpSocket { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { self.inner.fmt(fmt) } } // These trait implementations can't be build on Windows, so we completely // ignore them, even when building documentation. #[cfg(unix)] cfg_unix! { impl AsRawFd for TcpSocket { fn as_raw_fd(&self) -> RawFd { self.inner.as_raw_fd() } } impl AsFd for TcpSocket { fn as_fd(&self) -> BorrowedFd<'_> { unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } } } impl FromRawFd for TcpSocket { /// Converts a `RawFd` to a `TcpSocket`. /// /// # Notes /// /// The caller is responsible for ensuring that the socket is in /// non-blocking mode. unsafe fn from_raw_fd(fd: RawFd) -> TcpSocket { // Safety: exactly the same safety requirements as the // `FromRawFd::from_raw_fd` trait method. let inner = unsafe { socket2::Socket::from_raw_fd(fd) }; TcpSocket { inner } } } impl IntoRawFd for TcpSocket { fn into_raw_fd(self) -> RawFd { self.inner.into_raw_fd() } } } cfg_windows! 
{ impl IntoRawSocket for TcpSocket { fn into_raw_socket(self) -> RawSocket { self.inner.into_raw_socket() } } impl AsRawSocket for TcpSocket { fn as_raw_socket(&self) -> RawSocket { self.inner.as_raw_socket() } } impl AsSocket for TcpSocket { fn as_socket(&self) -> BorrowedSocket<'_> {
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
true
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/tcp/split.rs
tokio/src/net/tcp/split.rs
//! `TcpStream` split support. //! //! A `TcpStream` can be split into a `ReadHalf` and a //! `WriteHalf` with the `TcpStream::split` method. `ReadHalf` //! implements `AsyncRead` while `WriteHalf` implements `AsyncWrite`. //! //! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized //! split has no associated overhead and enforces all invariants at the type //! level. use crate::io::{AsyncRead, AsyncWrite, Interest, ReadBuf, Ready}; use crate::net::TcpStream; use std::future::poll_fn; use std::io; use std::net::{Shutdown, SocketAddr}; use std::pin::Pin; use std::task::{Context, Poll}; cfg_io_util! { use bytes::BufMut; } /// Borrowed read half of a [`TcpStream`], created by [`split`]. /// /// Reading from a `ReadHalf` is usually done using the convenience methods found on the /// [`AsyncReadExt`] trait. /// /// [`TcpStream`]: TcpStream /// [`split`]: TcpStream::split() /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt #[derive(Debug)] pub struct ReadHalf<'a>(&'a TcpStream); /// Borrowed write half of a [`TcpStream`], created by [`split`]. /// /// Note that in the [`AsyncWrite`] implementation of this type, [`poll_shutdown`] will /// shut down the TCP stream in the write direction. /// /// Writing to an `WriteHalf` is usually done using the convenience methods found /// on the [`AsyncWriteExt`] trait. /// /// [`TcpStream`]: TcpStream /// [`split`]: TcpStream::split() /// [`AsyncWrite`]: trait@crate::io::AsyncWrite /// [`poll_shutdown`]: fn@crate::io::AsyncWrite::poll_shutdown /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt #[derive(Debug)] pub struct WriteHalf<'a>(&'a TcpStream); pub(crate) fn split(stream: &mut TcpStream) -> (ReadHalf<'_>, WriteHalf<'_>) { (ReadHalf(&*stream), WriteHalf(&*stream)) } impl ReadHalf<'_> { /// Attempts to receive data on the socket, without removing that data from /// the queue, registering the current task for wakeup if data is not yet /// available. 
/// /// Note that on multiple calls to `poll_peek` or `poll_read`, only the /// `Waker` from the `Context` passed to the most recent call is scheduled /// to receive a wakeup. /// /// See the [`TcpStream::poll_peek`] level documentation for more details. /// /// # Examples /// /// ```no_run /// use tokio::io::{self, ReadBuf}; /// use tokio::net::TcpStream; /// /// use std::future::poll_fn; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut stream = TcpStream::connect("127.0.0.1:8000").await?; /// let (mut read_half, _) = stream.split(); /// let mut buf = [0; 10]; /// let mut buf = ReadBuf::new(&mut buf); /// /// poll_fn(|cx| { /// read_half.poll_peek(cx, &mut buf) /// }).await?; /// /// Ok(()) /// } /// ``` /// /// [`TcpStream::poll_peek`]: TcpStream::poll_peek pub fn poll_peek( &mut self, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<usize>> { self.0.poll_peek(cx, buf) } /// Receives data on the socket from the remote address to which it is /// connected, without removing that data from the queue. On success, /// returns the number of bytes peeked. /// /// See the [`TcpStream::peek`] level documentation for more details. /// /// [`TcpStream::peek`]: TcpStream::peek /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use tokio::io::AsyncReadExt; /// use std::error::Error; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?; /// let (mut read_half, _) = stream.split(); /// /// let mut b1 = [0; 10]; /// let mut b2 = [0; 10]; /// /// // Peek at the data /// let n = read_half.peek(&mut b1).await?; /// /// // Read the data /// assert_eq!(n, read_half.read(&mut b2[..n]).await?); /// assert_eq!(&b1[..n], &b2[..n]); /// /// Ok(()) /// } /// ``` /// /// The [`read`] method is defined on the [`AsyncReadExt`] trait. 
/// /// [`read`]: fn@crate::io::AsyncReadExt::read /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt pub async fn peek(&mut self, buf: &mut [u8]) -> io::Result<usize> { let mut buf = ReadBuf::new(buf); poll_fn(|cx| self.poll_peek(cx, &mut buf)).await } /// Waits for any of the requested ready states. /// /// This function is usually paired with [`try_read()`]. It can be used instead /// of [`readable()`] to check the returned ready set for [`Ready::READABLE`] /// and [`Ready::READ_CLOSED`] events. /// /// The function may complete without the socket being ready. This is a /// false-positive and attempting an operation will return with /// `io::ErrorKind::WouldBlock`. The function can also return with an empty /// [`Ready`] set, so you should always check the returned value and possibly /// wait again if the requested states are not set. /// /// This function is equivalent to [`TcpStream::ready`]. /// /// [`try_read()`]: Self::try_read /// [`readable()`]: Self::readable /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read or write that fails with `WouldBlock` or /// `Poll::Pending`. pub async fn ready(&self, interest: Interest) -> io::Result<Ready> { self.0.ready(interest).await } /// Waits for the socket to become readable. /// /// This function is equivalent to `ready(Interest::READABLE)` and is usually /// paired with `try_read()`. /// /// This function is also equivalent to [`TcpStream::ready`]. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read that fails with `WouldBlock` or /// `Poll::Pending`. 
pub async fn readable(&self) -> io::Result<()> { self.0.readable().await } /// Tries to read data from the stream into the provided buffer, returning how /// many bytes were read. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read()` is non-blocking, the buffer does not have to be stored by /// the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`readable()`]: Self::readable() /// [`ready()`]: Self::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: /// /// 1. The stream's read half is closed and will no longer yield data. /// 2. The specified buffer was 0 bytes in length. /// /// If the stream is not ready to read data, /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_read(&self, buf: &mut [u8]) -> io::Result<usize> { self.0.try_read(buf) } /// Tries to read data from the stream into the provided buffers, returning /// how many bytes were read. /// /// Data is copied to fill each buffer in order, with the final buffer /// written to possibly being only partially filled. This method behaves /// equivalently to a single call to [`try_read()`] with concatenated /// buffers. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read_vectored()` is non-blocking, the buffer does not have to be /// stored by the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. 
/// /// [`try_read()`]: Self::try_read() /// [`readable()`]: Self::readable() /// [`ready()`]: Self::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. `Ok(0)` indicates the stream's read half is closed /// and will no longer yield data. If the stream is not ready to read data /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> { self.0.try_read_vectored(bufs) } cfg_io_util! { /// Tries to read data from the stream into the provided buffer, advancing the /// buffer's internal cursor, returning how many bytes were read. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by /// the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`readable()`]: Self::readable() /// [`ready()`]: Self::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. `Ok(0)` indicates the stream's read half is closed /// and will no longer yield data. If the stream is not ready to read data /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_read_buf<B: BufMut>(&self, buf: &mut B) -> io::Result<usize> { self.0.try_read_buf(buf) } } /// Returns the remote address that this stream is connected to. pub fn peer_addr(&self) -> io::Result<SocketAddr> { self.0.peer_addr() } /// Returns the local address that this stream is bound to. pub fn local_addr(&self) -> io::Result<SocketAddr> { self.0.local_addr() } } impl WriteHalf<'_> { /// Waits for any of the requested ready states. /// /// This function is usually paired with [`try_write()`]. 
It can be used instead /// of [`writable()`] to check the returned ready set for [`Ready::WRITABLE`] /// and [`Ready::WRITE_CLOSED`] events. /// /// The function may complete without the socket being ready. This is a /// false-positive and attempting an operation will return with /// `io::ErrorKind::WouldBlock`. The function can also return with an empty /// [`Ready`] set, so you should always check the returned value and possibly /// wait again if the requested states are not set. /// /// This function is equivalent to [`TcpStream::ready`]. /// /// [`try_write()`]: Self::try_write /// [`writable()`]: Self::writable /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read or write that fails with `WouldBlock` or /// `Poll::Pending`. pub async fn ready(&self, interest: Interest) -> io::Result<Ready> { self.0.ready(interest).await } /// Waits for the socket to become writable. /// /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually /// paired with `try_write()`. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to write that fails with `WouldBlock` or /// `Poll::Pending`. pub async fn writable(&self) -> io::Result<()> { self.0.writable().await } /// Tries to write a buffer to the stream, returning how many bytes were /// written. /// /// The function will attempt to write the entire contents of `buf`, but /// only part of the buffer may be written. /// /// This function is usually paired with `writable()`. /// /// # Return /// /// If data is successfully written, `Ok(n)` is returned, where `n` is the /// number of bytes written. If the stream is not ready to write data, /// `Err(io::ErrorKind::WouldBlock)` is returned. 
pub fn try_write(&self, buf: &[u8]) -> io::Result<usize> { self.0.try_write(buf) } /// Tries to write several buffers to the stream, returning how many bytes /// were written. /// /// Data is written from each buffer in order, with the final buffer read /// from possible being only partially consumed. This method behaves /// equivalently to a single call to [`try_write()`] with concatenated /// buffers. /// /// This function is usually paired with `writable()`. /// /// [`try_write()`]: Self::try_write() /// /// # Return /// /// If data is successfully written, `Ok(n)` is returned, where `n` is the /// number of bytes written. If the stream is not ready to write data, /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_write_vectored(&self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> { self.0.try_write_vectored(bufs) } /// Returns the remote address that this stream is connected to. pub fn peer_addr(&self) -> io::Result<SocketAddr> { self.0.peer_addr() } /// Returns the local address that this stream is bound to. pub fn local_addr(&self) -> io::Result<SocketAddr> { self.0.local_addr() } } impl AsyncRead for ReadHalf<'_> { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { self.0.poll_read_priv(cx, buf) } } impl AsyncWrite for WriteHalf<'_> { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { self.0.poll_write_priv(cx, buf) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll<io::Result<usize>> { self.0.poll_write_vectored_priv(cx, bufs) } fn is_write_vectored(&self) -> bool { self.0.is_write_vectored() } #[inline] fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { // tcp flush is a no-op Poll::Ready(Ok(())) } // `poll_shutdown` on a write half shutdowns the stream in the "write" direction. 
fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { self.0.shutdown_std(Shutdown::Write).into() } } impl AsRef<TcpStream> for ReadHalf<'_> { fn as_ref(&self) -> &TcpStream { self.0 } } impl AsRef<TcpStream> for WriteHalf<'_> { fn as_ref(&self) -> &TcpStream { self.0 } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/unix/stream.rs
tokio/src/net/unix/stream.rs
use crate::io::{AsyncRead, AsyncWrite, Interest, PollEvented, ReadBuf, Ready}; use crate::net::unix::split::{split, ReadHalf, WriteHalf}; use crate::net::unix::split_owned::{split_owned, OwnedReadHalf, OwnedWriteHalf}; use crate::net::unix::ucred::{self, UCred}; use crate::net::unix::SocketAddr; use crate::util::check_socket_for_blocking; use std::fmt; use std::future::poll_fn; use std::io::{self, Read, Write}; use std::net::Shutdown; #[cfg(target_os = "android")] use std::os::android::net::SocketAddrExt; #[cfg(target_os = "linux")] use std::os::linux::net::SocketAddrExt; #[cfg(any(target_os = "linux", target_os = "android"))] use std::os::unix::ffi::OsStrExt; use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd}; use std::os::unix::net::{self, SocketAddr as StdSocketAddr}; use std::path::Path; use std::pin::Pin; use std::task::{Context, Poll}; cfg_io_util! { use bytes::BufMut; } cfg_net_unix! { /// A structure representing a connected Unix socket. /// /// This socket can be connected directly with [`UnixStream::connect`] or accepted /// from a listener with [`UnixListener::accept`]. Additionally, a pair of /// anonymous Unix sockets can be created with `UnixStream::pair`. /// /// To shut down the stream in the write direction, you can call the /// [`shutdown()`] method. This will cause the other peer to receive a read of /// length 0, indicating that no more data will be sent. This only closes /// the stream in one direction. /// /// [`shutdown()`]: fn@crate::io::AsyncWriteExt::shutdown /// [`UnixListener::accept`]: crate::net::UnixListener::accept #[cfg_attr(docsrs, doc(alias = "uds"))] pub struct UnixStream { io: PollEvented<mio::net::UnixStream>, } } impl UnixStream { pub(crate) async fn connect_mio(sys: mio::net::UnixStream) -> io::Result<UnixStream> { let stream = UnixStream::new(sys)?; // Once we've connected, wait for the stream to be writable as // that's when the actual connection has been initiated. 
Once we're // writable we check for `take_socket_error` to see if the connect // actually hit an error or not. // // If all that succeeded then we ship everything on up. poll_fn(|cx| stream.io.registration().poll_write_ready(cx)).await?; if let Some(e) = stream.io.take_error()? { return Err(e); } Ok(stream) } /// Connects to the socket named by `path`. /// /// This function will create a new Unix socket and connect to the path /// specified, associating the returned stream with the default event loop's /// handle. pub async fn connect<P>(path: P) -> io::Result<UnixStream> where P: AsRef<Path>, { // On linux, abstract socket paths need to be considered. #[cfg(any(target_os = "linux", target_os = "android"))] let addr = { let os_str_bytes = path.as_ref().as_os_str().as_bytes(); if os_str_bytes.starts_with(b"\0") { StdSocketAddr::from_abstract_name(&os_str_bytes[1..])? } else { StdSocketAddr::from_pathname(path)? } }; #[cfg(not(any(target_os = "linux", target_os = "android")))] let addr = StdSocketAddr::from_pathname(path)?; let stream = mio::net::UnixStream::connect_addr(&addr)?; let stream = UnixStream::new(stream)?; poll_fn(|cx| stream.io.registration().poll_write_ready(cx)).await?; if let Some(e) = stream.io.take_error()? { return Err(e); } Ok(stream) } /// Waits for any of the requested ready states. /// /// This function is usually paired with `try_read()` or `try_write()`. It /// can be used to concurrently read / write to the same socket on a single /// task without splitting the socket. /// /// The function may complete without the socket being ready. This is a /// false-positive and attempting an operation will return with /// `io::ErrorKind::WouldBlock`. The function can also return with an empty /// [`Ready`] set, so you should always check the returned value and possibly /// wait again if the requested states are not set. /// /// # Cancel safety /// /// This method is cancel safe. 
Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read or write that fails with `WouldBlock` or /// `Poll::Pending`. /// /// # Examples /// /// Concurrently read and write to the stream on the same task without /// splitting. /// /// ```no_run /// use tokio::io::Interest; /// use tokio::net::UnixStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let dir = tempfile::tempdir().unwrap(); /// let bind_path = dir.path().join("bind_path"); /// let stream = UnixStream::connect(bind_path).await?; /// /// loop { /// let ready = stream.ready(Interest::READABLE | Interest::WRITABLE).await?; /// /// if ready.is_readable() { /// let mut data = vec![0; 1024]; /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_read(&mut data) { /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// /// } /// /// if ready.is_writable() { /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_write(b"hello world") { /// Ok(n) => { /// println!("write {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// } /// } /// ``` pub async fn ready(&self, interest: Interest) -> io::Result<Ready> { let event = self.io.registration().readiness(interest).await?; Ok(event.ready) } /// Waits for the socket to become readable. /// /// This function is equivalent to `ready(Interest::READABLE)` and is usually /// paired with `try_read()`. /// /// # Cancel safety /// /// This method is cancel safe. 
Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read that fails with `WouldBlock` or /// `Poll::Pending`. /// /// # Examples /// /// ```no_run /// use tokio::net::UnixStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let dir = tempfile::tempdir().unwrap(); /// let bind_path = dir.path().join("bind_path"); /// let stream = UnixStream::connect(bind_path).await?; /// /// let mut msg = vec![0; 1024]; /// /// loop { /// // Wait for the socket to be readable /// stream.readable().await?; /// /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_read(&mut msg) { /// Ok(n) => { /// msg.truncate(n); /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// println!("GOT = {:?}", msg); /// Ok(()) /// } /// ``` pub async fn readable(&self) -> io::Result<()> { self.ready(Interest::READABLE).await?; Ok(()) } /// Polls for read readiness. /// /// If the unix stream is not currently ready for reading, this method will /// store a clone of the `Waker` from the provided `Context`. When the unix /// stream becomes ready for reading, `Waker::wake` will be called on the /// waker. /// /// Note that on multiple calls to `poll_read_ready` or `poll_read`, only /// the `Waker` from the `Context` passed to the most recent call is /// scheduled to receive a wakeup. (However, `poll_write_ready` retains a /// second, independent waker.) /// /// This function is intended for cases where creating and pinning a future /// via [`readable`] is not feasible. Where possible, using [`readable`] is /// preferred, as this supports polling from multiple tasks at once. 
/// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if the unix stream is not ready for reading. /// * `Poll::Ready(Ok(()))` if the unix stream is ready for reading. /// * `Poll::Ready(Err(e))` if an error is encountered. /// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. /// /// [`readable`]: method@Self::readable pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.io.registration().poll_read_ready(cx).map_ok(|_| ()) } /// Try to read data from the stream into the provided buffer, returning how /// many bytes were read. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read()` is non-blocking, the buffer does not have to be stored by /// the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`readable()`]: UnixStream::readable() /// [`ready()`]: UnixStream::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: /// /// 1. The stream's read half is closed and will no longer yield data. /// 2. The specified buffer was 0 bytes in length. /// /// If the stream is not ready to read data, /// `Err(io::ErrorKind::WouldBlock)` is returned. 
/// /// # Examples /// /// ```no_run /// use tokio::net::UnixStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let dir = tempfile::tempdir().unwrap(); /// let bind_path = dir.path().join("bind_path"); /// let stream = UnixStream::connect(bind_path).await?; /// /// loop { /// // Wait for the socket to be readable /// stream.readable().await?; /// /// // Creating the buffer **after** the `await` prevents it from /// // being stored in the async task. /// let mut buf = [0; 4096]; /// /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_read(&mut buf) { /// Ok(0) => break, /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_read(&self, buf: &mut [u8]) -> io::Result<usize> { self.io .registration() .try_io(Interest::READABLE, || (&*self.io).read(buf)) } /// Tries to read data from the stream into the provided buffers, returning /// how many bytes were read. /// /// Data is copied to fill each buffer in order, with the final buffer /// written to possibly being only partially filled. This method behaves /// equivalently to a single call to [`try_read()`] with concatenated /// buffers. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read_vectored()` is non-blocking, the buffer does not have to be /// stored by the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. 
/// /// [`try_read()`]: UnixStream::try_read() /// [`readable()`]: UnixStream::readable() /// [`ready()`]: UnixStream::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. `Ok(0)` indicates the stream's read half is closed /// and will no longer yield data. If the stream is not ready to read data /// `Err(io::ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::UnixStream; /// use std::error::Error; /// use std::io::{self, IoSliceMut}; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let dir = tempfile::tempdir().unwrap(); /// let bind_path = dir.path().join("bind_path"); /// let stream = UnixStream::connect(bind_path).await?; /// /// loop { /// // Wait for the socket to be readable /// stream.readable().await?; /// /// // Creating the buffer **after** the `await` prevents it from /// // being stored in the async task. /// let mut buf_a = [0; 512]; /// let mut buf_b = [0; 1024]; /// let mut bufs = [ /// IoSliceMut::new(&mut buf_a), /// IoSliceMut::new(&mut buf_b), /// ]; /// /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_read_vectored(&mut bufs) { /// Ok(0) => break, /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> { self.io .registration() .try_io(Interest::READABLE, || (&*self.io).read_vectored(bufs)) } cfg_io_util! { /// Tries to read data from the stream into the provided buffer, advancing the /// buffer's internal cursor, returning how many bytes were read. 
/// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by /// the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`readable()`]: UnixStream::readable() /// [`ready()`]: UnixStream::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. `Ok(0)` indicates the stream's read half is closed /// and will no longer yield data. If the stream is not ready to read data /// `Err(io::ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::UnixStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let dir = tempfile::tempdir().unwrap(); /// let bind_path = dir.path().join("bind_path"); /// let stream = UnixStream::connect(bind_path).await?; /// /// loop { /// // Wait for the socket to be readable /// stream.readable().await?; /// /// let mut buf = Vec::with_capacity(4096); /// /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_read_buf(&mut buf) { /// Ok(0) => break, /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_read_buf<B: BufMut>(&self, buf: &mut B) -> io::Result<usize> { self.io.registration().try_io(Interest::READABLE, || { use std::io::Read; let dst = buf.chunk_mut(); let dst = unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit<u8>] as *mut [u8]) }; // Safety: We trust `UnixStream::read` to have filled up `n` bytes in the // buffer. 
let n = (&*self.io).read(dst)?; unsafe { buf.advance_mut(n); } Ok(n) }) } } /// Waits for the socket to become writable. /// /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually /// paired with `try_write()`. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to write that fails with `WouldBlock` or /// `Poll::Pending`. /// /// # Examples /// /// ```no_run /// use tokio::net::UnixStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let dir = tempfile::tempdir().unwrap(); /// let bind_path = dir.path().join("bind_path"); /// let stream = UnixStream::connect(bind_path).await?; /// /// loop { /// // Wait for the socket to be writable /// stream.writable().await?; /// /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_write(b"hello world") { /// Ok(n) => { /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub async fn writable(&self) -> io::Result<()> { self.ready(Interest::WRITABLE).await?; Ok(()) } /// Polls for write readiness. /// /// If the unix stream is not currently ready for writing, this method will /// store a clone of the `Waker` from the provided `Context`. When the unix /// stream becomes ready for writing, `Waker::wake` will be called on the /// waker. /// /// Note that on multiple calls to `poll_write_ready` or `poll_write`, only /// the `Waker` from the `Context` passed to the most recent call is /// scheduled to receive a wakeup. (However, `poll_read_ready` retains a /// second, independent waker.) 
/// /// This function is intended for cases where creating and pinning a future /// via [`writable`] is not feasible. Where possible, using [`writable`] is /// preferred, as this supports polling from multiple tasks at once. /// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if the unix stream is not ready for writing. /// * `Poll::Ready(Ok(()))` if the unix stream is ready for writing. /// * `Poll::Ready(Err(e))` if an error is encountered. /// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. /// /// [`writable`]: method@Self::writable pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.io.registration().poll_write_ready(cx).map_ok(|_| ()) } /// Tries to write a buffer to the stream, returning how many bytes were /// written. /// /// The function will attempt to write the entire contents of `buf`, but /// only part of the buffer may be written. /// /// This function is usually paired with `writable()`. /// /// # Return /// /// If data is successfully written, `Ok(n)` is returned, where `n` is the /// number of bytes written. If the stream is not ready to write data, /// `Err(io::ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::UnixStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let dir = tempfile::tempdir().unwrap(); /// let bind_path = dir.path().join("bind_path"); /// let stream = UnixStream::connect(bind_path).await?; /// /// loop { /// // Wait for the socket to be writable /// stream.writable().await?; /// /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. 
/// match stream.try_write(b"hello world") { /// Ok(n) => { /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_write(&self, buf: &[u8]) -> io::Result<usize> { self.io .registration() .try_io(Interest::WRITABLE, || (&*self.io).write(buf)) } /// Tries to write several buffers to the stream, returning how many bytes /// were written. /// /// Data is written from each buffer in order, with the final buffer read /// from possible being only partially consumed. This method behaves /// equivalently to a single call to [`try_write()`] with concatenated /// buffers. /// /// This function is usually paired with `writable()`. /// /// [`try_write()`]: UnixStream::try_write() /// /// # Return /// /// If data is successfully written, `Ok(n)` is returned, where `n` is the /// number of bytes written. If the stream is not ready to write data, /// `Err(io::ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::UnixStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let dir = tempfile::tempdir().unwrap(); /// let bind_path = dir.path().join("bind_path"); /// let stream = UnixStream::connect(bind_path).await?; /// /// let bufs = [io::IoSlice::new(b"hello "), io::IoSlice::new(b"world")]; /// /// loop { /// // Wait for the socket to be writable /// stream.writable().await?; /// /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. 
/// match stream.try_write_vectored(&bufs) { /// Ok(n) => { /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_write_vectored(&self, buf: &[io::IoSlice<'_>]) -> io::Result<usize> { self.io .registration() .try_io(Interest::WRITABLE, || (&*self.io).write_vectored(buf)) } /// Tries to read or write from the socket using a user-provided IO operation. /// /// If the socket is ready, the provided closure is called. The closure /// should attempt to perform IO operation on the socket by manually /// calling the appropriate syscall. If the operation fails because the /// socket is not actually ready, then the closure should return a /// `WouldBlock` error and the readiness flag is cleared. The return value /// of the closure is then returned by `try_io`. /// /// If the socket is not ready, then the closure is not called /// and a `WouldBlock` error is returned. /// /// The closure should only return a `WouldBlock` error if it has performed /// an IO operation on the socket that failed due to the socket not being /// ready. Returning a `WouldBlock` error in any other situation will /// incorrectly clear the readiness flag, which can cause the socket to /// behave incorrectly. /// /// The closure should not perform the IO operation using any of the methods /// defined on the Tokio `UnixStream` type, as this will mess with the /// readiness flag and can cause the socket to behave incorrectly. /// /// This method is not intended to be used with combined interests. /// The closure should perform only one type of IO operation, so it should not /// require more than one ready state. This method may panic or sleep forever /// if it is called with a combined interest. /// /// Usually, [`readable()`], [`writable()`] or [`ready()`] is used with this function. 
/// /// [`readable()`]: UnixStream::readable() /// [`writable()`]: UnixStream::writable() /// [`ready()`]: UnixStream::ready() pub fn try_io<R>( &self, interest: Interest, f: impl FnOnce() -> io::Result<R>, ) -> io::Result<R> { self.io .registration() .try_io(interest, || self.io.try_io(f)) } /// Reads or writes from the socket using a user-provided IO operation. /// /// The readiness of the socket is awaited and when the socket is ready, /// the provided closure is called. The closure should attempt to perform /// IO operation on the socket by manually calling the appropriate syscall. /// If the operation fails because the socket is not actually ready, /// then the closure should return a `WouldBlock` error. In such case the /// readiness flag is cleared and the socket readiness is awaited again. /// This loop is repeated until the closure returns an `Ok` or an error /// other than `WouldBlock`. /// /// The closure should only return a `WouldBlock` error if it has performed /// an IO operation on the socket that failed due to the socket not being /// ready. Returning a `WouldBlock` error in any other situation will /// incorrectly clear the readiness flag, which can cause the socket to /// behave incorrectly. /// /// The closure should not perform the IO operation using any of the methods /// defined on the Tokio `UnixStream` type, as this will mess with the /// readiness flag and can cause the socket to behave incorrectly. /// /// This method is not intended to be used with combined interests. /// The closure should perform only one type of IO operation, so it should not /// require more than one ready state. This method may panic or sleep forever /// if it is called with a combined interest. pub async fn async_io<R>( &self, interest: Interest, mut f: impl FnMut() -> io::Result<R>, ) -> io::Result<R> { self.io .registration() .async_io(interest, || self.io.try_io(&mut f)) .await } /// Creates new [`UnixStream`] from a [`std::os::unix::net::UnixStream`]. 
/// /// This function is intended to be used to wrap a `UnixStream` from the /// standard library in the Tokio equivalent. /// /// # Notes /// /// The caller is responsible for ensuring that the stream is in /// non-blocking mode. Otherwise all I/O operations on the stream /// will block the thread, which will cause unexpected behavior. /// Non-blocking mode can be set using [`set_nonblocking`]. /// /// Passing a listener in blocking mode is always erroneous, /// and the behavior in that case may change in the future. /// For example, it could panic. /// /// [`set_nonblocking`]: std::os::unix::net::UnixStream::set_nonblocking /// /// # Examples /// /// ```no_run /// use tokio::net::UnixStream; /// use std::os::unix::net::UnixStream as StdUnixStream; /// # use std::error::Error; /// /// # async fn dox() -> Result<(), Box<dyn Error>> { /// let std_stream = StdUnixStream::connect("/path/to/the/socket")?; /// std_stream.set_nonblocking(true)?; /// let stream = UnixStream::from_std(std_stream)?; /// # Ok(()) /// # } /// ``` /// /// # Panics /// /// This function panics if it is not called from within a runtime with /// IO enabled. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. #[track_caller] pub fn from_std(stream: net::UnixStream) -> io::Result<UnixStream> { check_socket_for_blocking(&stream)?; let stream = mio::net::UnixStream::from_std(stream); let io = PollEvented::new(stream)?; Ok(UnixStream { io }) } /// Turns a [`tokio::net::UnixStream`] into a [`std::os::unix::net::UnixStream`]. /// /// The returned [`std::os::unix::net::UnixStream`] will have nonblocking /// mode set as `true`. Use [`set_nonblocking`] to change the blocking /// mode if needed. 
/// /// # Examples /// /// ``` /// use std::error::Error; /// use std::io::Read; /// use tokio::net::UnixListener; /// # use tokio::net::UnixStream; /// # use tokio::io::AsyncWriteExt; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// # if cfg!(miri) { return Ok(()); } // No `socket` in miri. /// let dir = tempfile::tempdir().unwrap(); /// let bind_path = dir.path().join("bind_path"); /// /// let mut data = [0u8; 12]; /// let listener = UnixListener::bind(&bind_path)?;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
true
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/unix/split_owned.rs
tokio/src/net/unix/split_owned.rs
//! `UnixStream` owned split support. //! //! A `UnixStream` can be split into an `OwnedReadHalf` and a `OwnedWriteHalf` //! with the `UnixStream::into_split` method. `OwnedReadHalf` implements //! `AsyncRead` while `OwnedWriteHalf` implements `AsyncWrite`. //! //! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized //! split has no associated overhead and enforces all invariants at the type //! level. use crate::io::{AsyncRead, AsyncWrite, Interest, ReadBuf, Ready}; use crate::net::UnixStream; use crate::net::unix::SocketAddr; use std::error::Error; use std::net::Shutdown; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use std::{fmt, io}; cfg_io_util! { use bytes::BufMut; } /// Owned read half of a [`UnixStream`], created by [`into_split`]. /// /// Reading from an `OwnedReadHalf` is usually done using the convenience methods found /// on the [`AsyncReadExt`] trait. /// /// [`UnixStream`]: crate::net::UnixStream /// [`into_split`]: crate::net::UnixStream::into_split() /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt #[derive(Debug)] pub struct OwnedReadHalf { inner: Arc<UnixStream>, } /// Owned write half of a [`UnixStream`], created by [`into_split`]. /// /// Note that in the [`AsyncWrite`] implementation of this type, /// [`poll_shutdown`] will shut down the stream in the write direction. /// Dropping the write half will also shut down the write half of the stream. /// /// Writing to an `OwnedWriteHalf` is usually done using the convenience methods /// found on the [`AsyncWriteExt`] trait. 
/// /// [`UnixStream`]: crate::net::UnixStream /// [`into_split`]: crate::net::UnixStream::into_split() /// [`AsyncWrite`]: trait@crate::io::AsyncWrite /// [`poll_shutdown`]: fn@crate::io::AsyncWrite::poll_shutdown /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt #[derive(Debug)] pub struct OwnedWriteHalf { inner: Arc<UnixStream>, shutdown_on_drop: bool, } pub(crate) fn split_owned(stream: UnixStream) -> (OwnedReadHalf, OwnedWriteHalf) { let arc = Arc::new(stream); let read = OwnedReadHalf { inner: Arc::clone(&arc), }; let write = OwnedWriteHalf { inner: arc, shutdown_on_drop: true, }; (read, write) } pub(crate) fn reunite( read: OwnedReadHalf, write: OwnedWriteHalf, ) -> Result<UnixStream, ReuniteError> { if Arc::ptr_eq(&read.inner, &write.inner) { write.forget(); // This unwrap cannot fail as the api does not allow creating more than two Arcs, // and we just dropped the other half. Ok(Arc::try_unwrap(read.inner).expect("UnixStream: try_unwrap failed in reunite")) } else { Err(ReuniteError(read, write)) } } /// Error indicating that two halves were not from the same socket, and thus could /// not be reunited. #[derive(Debug)] pub struct ReuniteError(pub OwnedReadHalf, pub OwnedWriteHalf); impl fmt::Display for ReuniteError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "tried to reunite halves that are not from the same socket" ) } } impl Error for ReuniteError {} impl OwnedReadHalf { /// Attempts to put the two halves of a `UnixStream` back together and /// recover the original socket. Succeeds only if the two halves /// originated from the same call to [`into_split`]. /// /// [`into_split`]: crate::net::UnixStream::into_split() pub fn reunite(self, other: OwnedWriteHalf) -> Result<UnixStream, ReuniteError> { reunite(self, other) } /// Waits for any of the requested ready states. /// /// This function is usually paired with [`try_read()`]. 
It can be used instead /// of [`readable()`] to check the returned ready set for [`Ready::READABLE`] /// and [`Ready::READ_CLOSED`] events. /// /// The function may complete without the socket being ready. This is a /// false-positive and attempting an operation will return with /// `io::ErrorKind::WouldBlock`. The function can also return with an empty /// [`Ready`] set, so you should always check the returned value and possibly /// wait again if the requested states are not set. /// /// This function is equivalent to [`UnixStream::ready`]. /// /// [`try_read()`]: Self::try_read /// [`readable()`]: Self::readable /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read or write that fails with `WouldBlock` or /// `Poll::Pending`. pub async fn ready(&self, interest: Interest) -> io::Result<Ready> { self.inner.ready(interest).await } /// Waits for the socket to become readable. /// /// This function is equivalent to `ready(Interest::READABLE)` and is usually /// paired with `try_read()`. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read that fails with `WouldBlock` or /// `Poll::Pending`. pub async fn readable(&self) -> io::Result<()> { self.inner.readable().await } /// Tries to read data from the stream into the provided buffer, returning how /// many bytes were read. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read()` is non-blocking, the buffer does not have to be stored by /// the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. 
/// /// [`readable()`]: Self::readable() /// [`ready()`]: Self::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: /// /// 1. The stream's read half is closed and will no longer yield data. /// 2. The specified buffer was 0 bytes in length. /// /// If the stream is not ready to read data, /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_read(&self, buf: &mut [u8]) -> io::Result<usize> { self.inner.try_read(buf) } cfg_io_util! { /// Tries to read data from the stream into the provided buffer, advancing the /// buffer's internal cursor, returning how many bytes were read. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by /// the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`readable()`]: Self::readable() /// [`ready()`]: Self::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. `Ok(0)` indicates the stream's read half is closed /// and will no longer yield data. If the stream is not ready to read data /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_read_buf<B: BufMut>(&self, buf: &mut B) -> io::Result<usize> { self.inner.try_read_buf(buf) } } /// Tries to read data from the stream into the provided buffers, returning /// how many bytes were read. /// /// Data is copied to fill each buffer in order, with the final buffer /// written to possibly being only partially filled. This method behaves /// equivalently to a single call to [`try_read()`] with concatenated /// buffers. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. 
On success, returns the number of bytes read. Because /// `try_read_vectored()` is non-blocking, the buffer does not have to be /// stored by the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`try_read()`]: Self::try_read() /// [`readable()`]: Self::readable() /// [`ready()`]: Self::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. `Ok(0)` indicates the stream's read half is closed /// and will no longer yield data. If the stream is not ready to read data /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> { self.inner.try_read_vectored(bufs) } /// Returns the socket address of the remote half of this connection. pub fn peer_addr(&self) -> io::Result<SocketAddr> { self.inner.peer_addr() } /// Returns the socket address of the local half of this connection. pub fn local_addr(&self) -> io::Result<SocketAddr> { self.inner.local_addr() } } impl AsyncRead for OwnedReadHalf { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { self.inner.poll_read_priv(cx, buf) } } impl OwnedWriteHalf { /// Attempts to put the two halves of a `UnixStream` back together and /// recover the original socket. Succeeds only if the two halves /// originated from the same call to [`into_split`]. /// /// [`into_split`]: crate::net::UnixStream::into_split() pub fn reunite(self, other: OwnedReadHalf) -> Result<UnixStream, ReuniteError> { reunite(other, self) } /// Destroys the write half, but don't close the write half of the stream /// until the read half is dropped. If the read half has already been /// dropped, this closes the stream. pub fn forget(mut self) { self.shutdown_on_drop = false; drop(self); } /// Waits for any of the requested ready states. 
/// /// This function is usually paired with [`try_write()`]. It can be used instead /// of [`writable()`] to check the returned ready set for [`Ready::WRITABLE`] /// and [`Ready::WRITE_CLOSED`] events. /// /// The function may complete without the socket being ready. This is a /// false-positive and attempting an operation will return with /// `io::ErrorKind::WouldBlock`. The function can also return with an empty /// [`Ready`] set, so you should always check the returned value and possibly /// wait again if the requested states are not set. /// /// This function is equivalent to [`UnixStream::ready`]. /// /// [`try_write()`]: Self::try_write /// [`writable()`]: Self::writable /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read or write that fails with `WouldBlock` or /// `Poll::Pending`. pub async fn ready(&self, interest: Interest) -> io::Result<Ready> { self.inner.ready(interest).await } /// Waits for the socket to become writable. /// /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually /// paired with `try_write()`. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to write that fails with `WouldBlock` or /// `Poll::Pending`. pub async fn writable(&self) -> io::Result<()> { self.inner.writable().await } /// Tries to write a buffer to the stream, returning how many bytes were /// written. /// /// The function will attempt to write the entire contents of `buf`, but /// only part of the buffer may be written. /// /// This function is usually paired with `writable()`. /// /// # Return /// /// If data is successfully written, `Ok(n)` is returned, where `n` is the /// number of bytes written. 
If the stream is not ready to write data, /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_write(&self, buf: &[u8]) -> io::Result<usize> { self.inner.try_write(buf) } /// Tries to write several buffers to the stream, returning how many bytes /// were written. /// /// Data is written from each buffer in order, with the final buffer read /// from possible being only partially consumed. This method behaves /// equivalently to a single call to [`try_write()`] with concatenated /// buffers. /// /// This function is usually paired with `writable()`. /// /// [`try_write()`]: Self::try_write() /// /// # Return /// /// If data is successfully written, `Ok(n)` is returned, where `n` is the /// number of bytes written. If the stream is not ready to write data, /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_write_vectored(&self, buf: &[io::IoSlice<'_>]) -> io::Result<usize> { self.inner.try_write_vectored(buf) } /// Returns the socket address of the remote half of this connection. pub fn peer_addr(&self) -> io::Result<SocketAddr> { self.inner.peer_addr() } /// Returns the socket address of the local half of this connection. 
pub fn local_addr(&self) -> io::Result<SocketAddr> { self.inner.local_addr() } } impl Drop for OwnedWriteHalf { fn drop(&mut self) { if self.shutdown_on_drop { let _ = self.inner.shutdown_std(Shutdown::Write); } } } impl AsyncWrite for OwnedWriteHalf { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { self.inner.poll_write_priv(cx, buf) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll<io::Result<usize>> { self.inner.poll_write_vectored_priv(cx, bufs) } fn is_write_vectored(&self) -> bool { self.inner.is_write_vectored() } #[inline] fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { // flush is a no-op Poll::Ready(Ok(())) } // `poll_shutdown` on a write half shutdowns the stream in the "write" direction. fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { let res = self.inner.shutdown_std(Shutdown::Write); if res.is_ok() { Pin::into_inner(self).shutdown_on_drop = false; } res.into() } } impl AsRef<UnixStream> for OwnedReadHalf { fn as_ref(&self) -> &UnixStream { &self.inner } } impl AsRef<UnixStream> for OwnedWriteHalf { fn as_ref(&self) -> &UnixStream { &self.inner } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/unix/listener.rs
tokio/src/net/unix/listener.rs
use crate::io::{Interest, PollEvented}; use crate::net::unix::{SocketAddr, UnixStream}; use crate::util::check_socket_for_blocking; use std::fmt; use std::io; #[cfg(target_os = "android")] use std::os::android::net::SocketAddrExt; #[cfg(target_os = "linux")] use std::os::linux::net::SocketAddrExt; #[cfg(any(target_os = "linux", target_os = "android"))] use std::os::unix::ffi::OsStrExt; use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd}; use std::os::unix::net::{self, SocketAddr as StdSocketAddr}; use std::path::Path; use std::task::{ready, Context, Poll}; cfg_net_unix! { /// A Unix socket which can accept connections from other Unix sockets. /// /// You can accept a new connection by using the [`accept`](`UnixListener::accept`) method. /// /// A `UnixListener` can be turned into a `Stream` with [`UnixListenerStream`]. /// /// [`UnixListenerStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.UnixListenerStream.html /// /// # Errors /// /// Note that accepting a connection can lead to various errors and not all /// of them are necessarily fatal ‒ for example having too many open file /// descriptors or the other side closing the connection while it waits in /// an accept queue. These would terminate the stream if not handled in any /// way. 
/// /// # Examples /// /// ```no_run /// use tokio::net::UnixListener; /// /// #[tokio::main] /// async fn main() { /// let listener = UnixListener::bind("/path/to/the/socket").unwrap(); /// loop { /// match listener.accept().await { /// Ok((stream, _addr)) => { /// println!("new client!"); /// } /// Err(e) => { /* connection failed */ } /// } /// } /// } /// ``` #[cfg_attr(docsrs, doc(alias = "uds"))] pub struct UnixListener { io: PollEvented<mio::net::UnixListener>, } } impl UnixListener { pub(crate) fn new(listener: mio::net::UnixListener) -> io::Result<UnixListener> { let io = PollEvented::new(listener)?; Ok(UnixListener { io }) } /// Creates a new `UnixListener` bound to the specified path. /// /// # Panics /// /// This function panics if it is not called from within a runtime with /// IO enabled. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. #[track_caller] pub fn bind<P>(path: P) -> io::Result<UnixListener> where P: AsRef<Path>, { // For now, we handle abstract socket paths on linux here. #[cfg(any(target_os = "linux", target_os = "android"))] let addr = { let os_str_bytes = path.as_ref().as_os_str().as_bytes(); if os_str_bytes.starts_with(b"\0") { StdSocketAddr::from_abstract_name(&os_str_bytes[1..])? } else { StdSocketAddr::from_pathname(path)? } }; #[cfg(not(any(target_os = "linux", target_os = "android")))] let addr = StdSocketAddr::from_pathname(path)?; let listener = mio::net::UnixListener::bind_addr(&addr)?; let io = PollEvented::new(listener)?; Ok(UnixListener { io }) } /// Creates new [`UnixListener`] from a [`std::os::unix::net::UnixListener`]. /// /// This function is intended to be used to wrap a `UnixListener` from the /// standard library in the Tokio equivalent. 
/// /// # Notes /// /// The caller is responsible for ensuring that the listener is in /// non-blocking mode. Otherwise all I/O operations on the listener /// will block the thread, which will cause unexpected behavior. /// Non-blocking mode can be set using [`set_nonblocking`]. /// /// Passing a listener in blocking mode is always erroneous, /// and the behavior in that case may change in the future. /// For example, it could panic. /// /// [`set_nonblocking`]: std::os::unix::net::UnixListener::set_nonblocking /// /// # Examples /// /// ```no_run /// use tokio::net::UnixListener; /// use std::os::unix::net::UnixListener as StdUnixListener; /// # use std::error::Error; /// /// # async fn dox() -> Result<(), Box<dyn Error>> { /// let std_listener = StdUnixListener::bind("/path/to/the/socket")?; /// std_listener.set_nonblocking(true)?; /// let listener = UnixListener::from_std(std_listener)?; /// # Ok(()) /// # } /// ``` /// /// # Panics /// /// This function panics if it is not called from within a runtime with /// IO enabled. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. #[track_caller] pub fn from_std(listener: net::UnixListener) -> io::Result<UnixListener> { check_socket_for_blocking(&listener)?; let listener = mio::net::UnixListener::from_std(listener); let io = PollEvented::new(listener)?; Ok(UnixListener { io }) } /// Turns a [`tokio::net::UnixListener`] into a [`std::os::unix::net::UnixListener`]. /// /// The returned [`std::os::unix::net::UnixListener`] will have nonblocking mode /// set as `true`. Use [`set_nonblocking`] to change the blocking mode if needed. 
/// /// # Examples /// /// ```rust,no_run /// # use std::error::Error; /// # async fn dox() -> Result<(), Box<dyn Error>> { /// let tokio_listener = tokio::net::UnixListener::bind("/path/to/the/socket")?; /// let std_listener = tokio_listener.into_std()?; /// std_listener.set_nonblocking(false)?; /// # Ok(()) /// # } /// ``` /// /// [`tokio::net::UnixListener`]: UnixListener /// [`std::os::unix::net::UnixListener`]: std::os::unix::net::UnixListener /// [`set_nonblocking`]: fn@std::os::unix::net::UnixListener::set_nonblocking pub fn into_std(self) -> io::Result<std::os::unix::net::UnixListener> { self.io .into_inner() .map(IntoRawFd::into_raw_fd) .map(|raw_fd| unsafe { net::UnixListener::from_raw_fd(raw_fd) }) } /// Returns the local socket address of this listener. pub fn local_addr(&self) -> io::Result<SocketAddr> { self.io.local_addr().map(SocketAddr) } /// Returns the value of the `SO_ERROR` option. pub fn take_error(&self) -> io::Result<Option<io::Error>> { self.io.take_error() } /// Accepts a new incoming connection to this listener. /// /// # Cancel safety /// /// This method is cancel safe. If the method is used as the event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, then it is guaranteed that no new connections were /// accepted by this method. pub async fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> { let (mio, addr) = self .io .registration() .async_io(Interest::READABLE, || self.io.accept()) .await?; let addr = SocketAddr(addr); let stream = UnixStream::new(mio)?; Ok((stream, addr)) } /// Polls to accept a new incoming connection to this listener. /// /// If there is no connection to accept, `Poll::Pending` is returned and the /// current task will be notified by a waker. Note that on multiple calls /// to `poll_accept`, only the `Waker` from the `Context` passed to the most /// recent call is scheduled to receive a wakeup. 
pub fn poll_accept(&self, cx: &mut Context<'_>) -> Poll<io::Result<(UnixStream, SocketAddr)>> { let (sock, addr) = ready!(self.io.registration().poll_read_io(cx, || self.io.accept()))?; let addr = SocketAddr(addr); let sock = UnixStream::new(sock)?; Poll::Ready(Ok((sock, addr))) } } impl TryFrom<std::os::unix::net::UnixListener> for UnixListener { type Error = io::Error; /// Consumes stream, returning the tokio I/O object. /// /// This is equivalent to /// [`UnixListener::from_std(stream)`](UnixListener::from_std). fn try_from(stream: std::os::unix::net::UnixListener) -> io::Result<Self> { Self::from_std(stream) } } impl fmt::Debug for UnixListener { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { (*self.io).fmt(f) } } impl AsRawFd for UnixListener { fn as_raw_fd(&self) -> RawFd { self.io.as_raw_fd() } } impl AsFd for UnixListener { fn as_fd(&self) -> BorrowedFd<'_> { unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/unix/ucred.rs
tokio/src/net/unix/ucred.rs
use crate::net::unix; /// Credentials of a process. #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] pub struct UCred { /// PID (process ID) of the process. pid: Option<unix::pid_t>, /// UID (user ID) of the process. uid: unix::uid_t, /// GID (group ID) of the process. gid: unix::gid_t, } impl UCred { /// Gets UID (user ID) of the process. pub fn uid(&self) -> unix::uid_t { self.uid } /// Gets GID (group ID) of the process. pub fn gid(&self) -> unix::gid_t { self.gid } /// Gets PID (process ID) of the process. /// /// This is only implemented under Linux, Android, iOS, macOS, Solaris, /// Illumos and Cygwin. On other platforms this will always return `None`. pub fn pid(&self) -> Option<unix::pid_t> { self.pid } } #[cfg(any( target_os = "linux", target_os = "redox", target_os = "android", target_os = "openbsd", target_os = "haiku", target_os = "cygwin" ))] pub(crate) use self::impl_linux::get_peer_cred; #[cfg(any(target_os = "netbsd", target_os = "nto"))] pub(crate) use self::impl_netbsd::get_peer_cred; #[cfg(any(target_os = "dragonfly", target_os = "freebsd"))] pub(crate) use self::impl_bsd::get_peer_cred; #[cfg(any( target_os = "macos", target_os = "ios", target_os = "tvos", target_os = "watchos", target_os = "visionos" ))] pub(crate) use self::impl_macos::get_peer_cred; #[cfg(any(target_os = "solaris", target_os = "illumos"))] pub(crate) use self::impl_solaris::get_peer_cred; #[cfg(target_os = "aix")] pub(crate) use self::impl_aix::get_peer_cred; #[cfg(any(target_os = "espidf", target_os = "vita"))] pub(crate) use self::impl_noproc::get_peer_cred; #[cfg(any( target_os = "linux", target_os = "redox", target_os = "android", target_os = "openbsd", target_os = "haiku", target_os = "cygwin" ))] pub(crate) mod impl_linux { use crate::net::unix::{self, UnixStream}; use libc::{c_void, getsockopt, socklen_t, SOL_SOCKET, SO_PEERCRED}; use std::{io, mem}; #[cfg(target_os = "openbsd")] use libc::sockpeercred as ucred; #[cfg(any( target_os = "linux", target_os = "redox", 
target_os = "android", target_os = "haiku", target_os = "cygwin" ))] use libc::ucred; pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result<super::UCred> { use std::os::unix::io::AsRawFd; unsafe { let raw_fd = sock.as_raw_fd(); let mut ucred = ucred { pid: 0, uid: 0, gid: 0, }; let ucred_size = mem::size_of::<ucred>(); // These paranoid checks should be optimized-out assert!(mem::size_of::<u32>() <= mem::size_of::<usize>()); assert!(ucred_size <= u32::MAX as usize); let mut ucred_size = ucred_size as socklen_t; let ret = getsockopt( raw_fd, SOL_SOCKET, SO_PEERCRED, &mut ucred as *mut ucred as *mut c_void, &mut ucred_size, ); if ret == 0 && ucred_size as usize == mem::size_of::<ucred>() { Ok(super::UCred { uid: ucred.uid as unix::uid_t, gid: ucred.gid as unix::gid_t, pid: Some(ucred.pid as unix::pid_t), }) } else { Err(io::Error::last_os_error()) } } } } #[cfg(any(target_os = "netbsd", target_os = "nto"))] pub(crate) mod impl_netbsd { use crate::net::unix::{self, UnixStream}; use libc::{c_void, getsockopt, socklen_t, unpcbid, LOCAL_PEEREID, SOL_SOCKET}; use std::io; use std::mem::size_of; use std::os::unix::io::AsRawFd; pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result<super::UCred> { unsafe { let raw_fd = sock.as_raw_fd(); let mut unpcbid = unpcbid { unp_pid: 0, unp_euid: 0, unp_egid: 0, }; let unpcbid_size = size_of::<unpcbid>(); let mut unpcbid_size = unpcbid_size as socklen_t; let ret = getsockopt( raw_fd, SOL_SOCKET, LOCAL_PEEREID, &mut unpcbid as *mut unpcbid as *mut c_void, &mut unpcbid_size, ); if ret == 0 && unpcbid_size as usize == size_of::<unpcbid>() { Ok(super::UCred { uid: unpcbid.unp_euid as unix::uid_t, gid: unpcbid.unp_egid as unix::gid_t, pid: Some(unpcbid.unp_pid as unix::pid_t), }) } else { Err(io::Error::last_os_error()) } } } } #[cfg(any(target_os = "dragonfly", target_os = "freebsd"))] pub(crate) mod impl_bsd { use crate::net::unix::{self, UnixStream}; use libc::getpeereid; use std::io; use std::mem::MaybeUninit; use 
std::os::unix::io::AsRawFd; pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result<super::UCred> { unsafe { let raw_fd = sock.as_raw_fd(); let mut uid = MaybeUninit::uninit(); let mut gid = MaybeUninit::uninit(); let ret = getpeereid(raw_fd, uid.as_mut_ptr(), gid.as_mut_ptr()); if ret == 0 { Ok(super::UCred { uid: uid.assume_init() as unix::uid_t, gid: gid.assume_init() as unix::gid_t, pid: None, }) } else { Err(io::Error::last_os_error()) } } } } #[cfg(any( target_os = "macos", target_os = "ios", target_os = "tvos", target_os = "watchos", target_os = "visionos" ))] pub(crate) mod impl_macos { use crate::net::unix::{self, UnixStream}; use libc::{c_void, getpeereid, getsockopt, pid_t, LOCAL_PEEREPID, SOL_LOCAL}; use std::io; use std::mem::size_of; use std::mem::MaybeUninit; use std::os::unix::io::AsRawFd; pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result<super::UCred> { unsafe { let raw_fd = sock.as_raw_fd(); let mut uid = MaybeUninit::uninit(); let mut gid = MaybeUninit::uninit(); let mut pid: MaybeUninit<pid_t> = MaybeUninit::uninit(); let mut pid_size: MaybeUninit<u32> = MaybeUninit::new(size_of::<pid_t>() as u32); if getsockopt( raw_fd, SOL_LOCAL, LOCAL_PEEREPID, pid.as_mut_ptr() as *mut c_void, pid_size.as_mut_ptr(), ) != 0 { return Err(io::Error::last_os_error()); } assert!(pid_size.assume_init() == (size_of::<pid_t>() as u32)); let ret = getpeereid(raw_fd, uid.as_mut_ptr(), gid.as_mut_ptr()); if ret == 0 { Ok(super::UCred { uid: uid.assume_init() as unix::uid_t, gid: gid.assume_init() as unix::gid_t, pid: Some(pid.assume_init() as unix::pid_t), }) } else { Err(io::Error::last_os_error()) } } } } #[cfg(any(target_os = "solaris", target_os = "illumos"))] pub(crate) mod impl_solaris { use crate::net::unix::{self, UnixStream}; use std::io; use std::os::unix::io::AsRawFd; use std::ptr; pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result<super::UCred> { unsafe { let raw_fd = sock.as_raw_fd(); let mut cred = ptr::null_mut(); let ret = 
libc::getpeerucred(raw_fd, &mut cred); if ret == 0 { let uid = libc::ucred_geteuid(cred); let gid = libc::ucred_getegid(cred); let pid = libc::ucred_getpid(cred); libc::ucred_free(cred); Ok(super::UCred { uid: uid as unix::uid_t, gid: gid as unix::gid_t, pid: Some(pid as unix::pid_t), }) } else { Err(io::Error::last_os_error()) } } } } #[cfg(target_os = "aix")] pub(crate) mod impl_aix { use crate::net::unix::UnixStream; use std::io; use std::os::unix::io::AsRawFd; pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result<super::UCred> { unsafe { let raw_fd = sock.as_raw_fd(); let mut uid = std::mem::MaybeUninit::uninit(); let mut gid = std::mem::MaybeUninit::uninit(); let ret = libc::getpeereid(raw_fd, uid.as_mut_ptr(), gid.as_mut_ptr()); if ret == 0 { Ok(super::UCred { uid: uid.assume_init(), gid: gid.assume_init(), pid: None, }) } else { Err(io::Error::last_os_error()) } } } } #[cfg(any(target_os = "espidf", target_os = "vita"))] pub(crate) mod impl_noproc { use crate::net::unix::UnixStream; use std::io; pub(crate) fn get_peer_cred(_sock: &UnixStream) -> io::Result<super::UCred> { Ok(super::UCred { uid: 0, gid: 0, pid: None, }) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/unix/mod.rs
tokio/src/net/unix/mod.rs
//! Unix specific network types. // This module does not currently provide any public API, but it was // unintentionally defined as a public module. Hide it from the documentation // instead of changing it to a private module to avoid breakage. #[doc(hidden)] pub mod datagram; pub(crate) mod listener; pub(crate) mod socket; mod split; pub use split::{ReadHalf, WriteHalf}; mod split_owned; pub use split_owned::{OwnedReadHalf, OwnedWriteHalf, ReuniteError}; mod socketaddr; pub use socketaddr::SocketAddr; pub(crate) mod stream; pub(crate) use stream::UnixStream; mod ucred; pub use ucred::UCred; pub mod pipe; /// A type representing user ID. #[allow(non_camel_case_types)] pub type uid_t = u32; /// A type representing group ID. #[allow(non_camel_case_types)] pub type gid_t = u32; /// A type representing process and process group IDs. #[allow(non_camel_case_types)] pub type pid_t = i32;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/unix/pipe.rs
tokio/src/net/unix/pipe.rs
//! Unix pipe types. use crate::io::interest::Interest; use crate::io::{AsyncRead, AsyncWrite, PollEvented, ReadBuf, Ready}; use mio::unix::pipe as mio_pipe; use std::fs::File; use std::io::{self, Read, Write}; use std::os::unix::fs::OpenOptionsExt; use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd}; use std::path::Path; use std::pin::Pin; use std::task::{Context, Poll}; cfg_io_util! { use bytes::BufMut; } /// Creates a new anonymous Unix pipe. /// /// This function will open a new pipe and associate both pipe ends with the default /// event loop. /// /// If you need to create a pipe for communication with a spawned process, you can /// use [`Stdio::piped()`] instead. /// /// [`Stdio::piped()`]: std::process::Stdio::piped /// /// # Errors /// /// If creating a pipe fails, this function will return with the related OS error. /// /// # Examples /// /// Create a pipe and pass the writing end to a spawned process. /// /// ```no_run /// use tokio::net::unix::pipe; /// use tokio::process::Command; /// # use tokio::io::AsyncReadExt; /// # use std::error::Error; /// /// # async fn dox() -> Result<(), Box<dyn Error>> { /// let (tx, mut rx) = pipe::pipe()?; /// let mut buffer = String::new(); /// /// let status = Command::new("echo") /// .arg("Hello, world!") /// .stdout(tx.into_blocking_fd()?) /// .status(); /// rx.read_to_string(&mut buffer).await?; /// /// assert!(status.await?.success()); /// assert_eq!(buffer, "Hello, world!\n"); /// # Ok(()) /// # } /// ``` /// /// # Panics /// /// This function panics if it is not called from within a runtime with /// IO enabled. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. 
pub fn pipe() -> io::Result<(Sender, Receiver)> { let (tx, rx) = mio_pipe::new()?; Ok((Sender::from_mio(tx)?, Receiver::from_mio(rx)?)) } /// Options and flags which can be used to configure how a FIFO file is opened. /// /// This builder allows configuring how to create a pipe end from a FIFO file. /// Generally speaking, when using `OpenOptions`, you'll first call [`new`], /// then chain calls to methods to set each option, then call either /// [`open_receiver`] or [`open_sender`], passing the path of the FIFO file you /// are trying to open. This will give you a [`io::Result`] with a pipe end /// inside that you can further operate on. /// /// [`new`]: OpenOptions::new /// [`open_receiver`]: OpenOptions::open_receiver /// [`open_sender`]: OpenOptions::open_sender /// /// # Examples /// /// Opening a pair of pipe ends from a FIFO file: /// /// ```no_run /// use tokio::net::unix::pipe; /// # use std::error::Error; /// /// const FIFO_NAME: &str = "path/to/a/fifo"; /// /// # async fn dox() -> Result<(), Box<dyn Error>> { /// let rx = pipe::OpenOptions::new().open_receiver(FIFO_NAME)?; /// let tx = pipe::OpenOptions::new().open_sender(FIFO_NAME)?; /// # Ok(()) /// # } /// ``` /// /// Opening a [`Sender`] on Linux when you are sure the file is a FIFO: /// /// ```ignore /// use tokio::net::unix::pipe; /// use nix::{unistd::mkfifo, sys::stat::Mode}; /// # use std::error::Error; /// /// // Our program has exclusive access to this path. /// const FIFO_NAME: &str = "path/to/a/new/fifo"; /// /// # async fn dox() -> Result<(), Box<dyn Error>> { /// mkfifo(FIFO_NAME, Mode::S_IRWXU)?; /// let tx = pipe::OpenOptions::new() /// .read_write(true) /// .unchecked(true) /// .open_sender(FIFO_NAME)?; /// # Ok(()) /// # } /// ``` #[derive(Clone, Debug)] pub struct OpenOptions { #[cfg(any(target_os = "linux", target_os = "android"))] read_write: bool, unchecked: bool, } impl OpenOptions { /// Creates a blank new set of options ready for configuration. 
/// /// All options are initially set to `false`. pub fn new() -> OpenOptions { OpenOptions { #[cfg(any(target_os = "linux", target_os = "android"))] read_write: false, unchecked: false, } } /// Sets the option for read-write access. /// /// This option, when true, will indicate that a FIFO file will be opened /// in read-write access mode. This operation is not defined by the POSIX /// standard and is only guaranteed to work on Linux. /// /// # Examples /// /// Opening a [`Sender`] even if there are no open reading ends: /// /// ```ignore /// use tokio::net::unix::pipe; /// /// let tx = pipe::OpenOptions::new() /// .read_write(true) /// .open_sender("path/to/a/fifo"); /// ``` /// /// Opening a resilient [`Receiver`] i.e. a reading pipe end which will not /// fail with [`UnexpectedEof`] during reading if all writing ends of the /// pipe close the FIFO file. /// /// [`UnexpectedEof`]: std::io::ErrorKind::UnexpectedEof /// /// ```ignore /// use tokio::net::unix::pipe; /// /// let tx = pipe::OpenOptions::new() /// .read_write(true) /// .open_receiver("path/to/a/fifo"); /// ``` #[cfg(any(target_os = "linux", target_os = "android"))] #[cfg_attr(docsrs, doc(cfg(any(target_os = "linux", target_os = "android"))))] pub fn read_write(&mut self, value: bool) -> &mut Self { self.read_write = value; self } /// Sets the option to skip the check for FIFO file type. /// /// By default, [`open_receiver`] and [`open_sender`] functions will check /// if the opened file is a FIFO file. Set this option to `true` if you are /// sure the file is a FIFO file. /// /// [`open_receiver`]: OpenOptions::open_receiver /// [`open_sender`]: OpenOptions::open_sender /// /// # Examples /// /// ```no_run /// use tokio::net::unix::pipe; /// use nix::{unistd::mkfifo, sys::stat::Mode}; /// # use std::error::Error; /// /// // Our program has exclusive access to this path. 
/// const FIFO_NAME: &str = "path/to/a/new/fifo"; /// /// # async fn dox() -> Result<(), Box<dyn Error>> { /// mkfifo(FIFO_NAME, Mode::S_IRWXU)?; /// let rx = pipe::OpenOptions::new() /// .unchecked(true) /// .open_receiver(FIFO_NAME)?; /// # Ok(()) /// # } /// ``` pub fn unchecked(&mut self, value: bool) -> &mut Self { self.unchecked = value; self } /// Creates a [`Receiver`] from a FIFO file with the options specified by `self`. /// /// This function will open the FIFO file at the specified path, possibly /// check if it is a pipe, and associate the pipe with the default event /// loop for reading. /// /// # Errors /// /// If the file type check fails, this function will fail with `io::ErrorKind::InvalidInput`. /// This function may also fail with other standard OS errors. /// /// # Panics /// /// This function panics if it is not called from within a runtime with /// IO enabled. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. pub fn open_receiver<P: AsRef<Path>>(&self, path: P) -> io::Result<Receiver> { let file = self.open(path.as_ref(), PipeEnd::Receiver)?; Receiver::from_file_unchecked(file) } /// Creates a [`Sender`] from a FIFO file with the options specified by `self`. /// /// This function will open the FIFO file at the specified path, possibly /// check if it is a pipe, and associate the pipe with the default event /// loop for writing. /// /// # Errors /// /// If the file type check fails, this function will fail with `io::ErrorKind::InvalidInput`. /// If the file is not opened in read-write access mode and the file is not /// currently open for reading, this function will fail with `ENXIO`. /// This function may also fail with other standard OS errors. /// /// # Panics /// /// This function panics if it is not called from within a runtime with /// IO enabled. 
/// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. pub fn open_sender<P: AsRef<Path>>(&self, path: P) -> io::Result<Sender> { let file = self.open(path.as_ref(), PipeEnd::Sender)?; Sender::from_file_unchecked(file) } fn open(&self, path: &Path, pipe_end: PipeEnd) -> io::Result<File> { let mut options = std::fs::OpenOptions::new(); options .read(pipe_end == PipeEnd::Receiver) .write(pipe_end == PipeEnd::Sender) .custom_flags(libc::O_NONBLOCK); #[cfg(any(target_os = "linux", target_os = "android"))] if self.read_write { options.read(true).write(true); } let file = options.open(path)?; if !self.unchecked && !is_pipe(file.as_fd())? { return Err(io::Error::new(io::ErrorKind::InvalidInput, "not a pipe")); } Ok(file) } } impl Default for OpenOptions { fn default() -> OpenOptions { OpenOptions::new() } } #[derive(Clone, Copy, PartialEq, Eq, Debug)] enum PipeEnd { Sender, Receiver, } /// Writing end of a Unix pipe. /// /// It can be constructed from a FIFO file with [`OpenOptions::open_sender`]. /// /// Opening a named pipe for writing involves a few steps. /// Call to [`OpenOptions::open_sender`] might fail with an error indicating /// different things: /// /// * [`io::ErrorKind::NotFound`] - There is no file at the specified path. /// * [`io::ErrorKind::InvalidInput`] - The file exists, but it is not a FIFO. /// * [`ENXIO`] - The file is a FIFO, but no process has it open for reading. /// Sleep for a while and try again. /// * Other OS errors not specific to opening FIFO files. /// /// Opening a `Sender` from a FIFO file should look like this: /// /// ```no_run /// use tokio::net::unix::pipe; /// use tokio::time::{self, Duration}; /// /// const FIFO_NAME: &str = "path/to/a/fifo"; /// /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { /// // Wait for a reader to open the file. 
/// let tx = loop { /// match pipe::OpenOptions::new().open_sender(FIFO_NAME) { /// Ok(tx) => break tx, /// Err(e) if e.raw_os_error() == Some(libc::ENXIO) => {}, /// Err(e) => return Err(e.into()), /// } /// /// time::sleep(Duration::from_millis(50)).await; /// }; /// # Ok(()) /// # } /// ``` /// /// On Linux, it is possible to create a `Sender` without waiting in a sleeping /// loop. This is done by opening a named pipe in read-write access mode with /// `OpenOptions::read_write`. This way, a `Sender` can at the same time hold /// both a writing end and a reading end, and the latter allows to open a FIFO /// without [`ENXIO`] error since the pipe is open for reading as well. /// /// `Sender` cannot be used to read from a pipe, so in practice the read access /// is only used when a FIFO is opened. However, using a `Sender` in read-write /// mode **may lead to lost data**, because written data will be dropped by the /// system as soon as all pipe ends are closed. To avoid lost data you have to /// make sure that a reading end has been opened before dropping a `Sender`. /// /// Note that using read-write access mode with FIFO files is not defined by /// the POSIX standard and it is only guaranteed to work on Linux. /// /// ```ignore /// use tokio::io::AsyncWriteExt; /// use tokio::net::unix::pipe; /// /// const FIFO_NAME: &str = "path/to/a/fifo"; /// /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { /// let mut tx = pipe::OpenOptions::new() /// .read_write(true) /// .open_sender(FIFO_NAME)?; /// /// // Asynchronously write to the pipe before a reader. 
/// tx.write_all(b"hello world").await?; /// # Ok(()) /// # } /// ``` /// /// [`ENXIO`]: https://docs.rs/libc/latest/libc/constant.ENXIO.html #[derive(Debug)] pub struct Sender { io: PollEvented<mio_pipe::Sender>, } impl Sender { fn from_mio(mio_tx: mio_pipe::Sender) -> io::Result<Sender> { let io = PollEvented::new_with_interest(mio_tx, Interest::WRITABLE)?; Ok(Sender { io }) } /// Creates a new `Sender` from a [`File`]. /// /// This function is intended to construct a pipe from a [`File`] representing /// a special FIFO file. It will check if the file is a pipe and has write access, /// set it in non-blocking mode and perform the conversion. /// /// # Errors /// /// Fails with `io::ErrorKind::InvalidInput` if the file is not a pipe or it /// does not have write access. Also fails with any standard OS error if it occurs. /// /// # Panics /// /// This function panics if it is not called from within a runtime with /// IO enabled. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. pub fn from_file(file: File) -> io::Result<Sender> { Sender::from_owned_fd(file.into()) } /// Creates a new `Sender` from an [`OwnedFd`]. /// /// This function is intended to construct a pipe from an [`OwnedFd`] representing /// an anonymous pipe or a special FIFO file. It will check if the file descriptor /// is a pipe and has write access, set it in non-blocking mode and perform the /// conversion. /// /// # Errors /// /// Fails with `io::ErrorKind::InvalidInput` if the file descriptor is not a pipe /// or it does not have write access. Also fails with any standard OS error if it /// occurs. /// /// # Panics /// /// This function panics if it is not called from within a runtime with /// IO enabled. 
/// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. pub fn from_owned_fd(owned_fd: OwnedFd) -> io::Result<Sender> { if !is_pipe(owned_fd.as_fd())? { return Err(io::Error::new(io::ErrorKind::InvalidInput, "not a pipe")); } let flags = get_file_flags(owned_fd.as_fd())?; if has_write_access(flags) { set_nonblocking(owned_fd.as_fd(), flags)?; Sender::from_owned_fd_unchecked(owned_fd) } else { Err(io::Error::new( io::ErrorKind::InvalidInput, "not in O_WRONLY or O_RDWR access mode", )) } } /// Creates a new `Sender` from a [`File`] without checking pipe properties. /// /// This function is intended to construct a pipe from a File representing /// a special FIFO file. The conversion assumes nothing about the underlying /// file; it is left up to the user to make sure it is opened with write access, /// represents a pipe and is set in non-blocking mode. /// /// # Examples /// /// ```no_run /// use tokio::net::unix::pipe; /// use std::fs::OpenOptions; /// use std::os::unix::fs::{FileTypeExt, OpenOptionsExt}; /// # use std::error::Error; /// /// const FIFO_NAME: &str = "path/to/a/fifo"; /// /// # async fn dox() -> Result<(), Box<dyn Error>> { /// let file = OpenOptions::new() /// .write(true) /// .custom_flags(libc::O_NONBLOCK) /// .open(FIFO_NAME)?; /// if file.metadata()?.file_type().is_fifo() { /// let tx = pipe::Sender::from_file_unchecked(file)?; /// /* use the Sender */ /// } /// # Ok(()) /// # } /// ``` /// /// # Panics /// /// This function panics if it is not called from within a runtime with /// IO enabled. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. 
pub fn from_file_unchecked(file: File) -> io::Result<Sender> { Sender::from_owned_fd_unchecked(file.into()) } /// Creates a new `Sender` from an [`OwnedFd`] without checking pipe properties. /// /// This function is intended to construct a pipe from an [`OwnedFd`] representing /// an anonymous pipe or a special FIFO file. The conversion assumes nothing about /// the underlying pipe; it is left up to the user to make sure that the file /// descriptor represents the writing end of a pipe and the pipe is set in /// non-blocking mode. /// /// # Panics /// /// This function panics if it is not called from within a runtime with /// IO enabled. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. pub fn from_owned_fd_unchecked(owned_fd: OwnedFd) -> io::Result<Sender> { // Safety: OwnedFd represents a valid, open file descriptor. let mio_tx = unsafe { mio_pipe::Sender::from_raw_fd(owned_fd.into_raw_fd()) }; Sender::from_mio(mio_tx) } /// Waits for any of the requested ready states. /// /// This function can be used instead of [`writable()`] to check the returned /// ready set for [`Ready::WRITABLE`] and [`Ready::WRITE_CLOSED`] events. /// /// The function may complete without the pipe being ready. This is a /// false-positive and attempting an operation will return with /// `io::ErrorKind::WouldBlock`. The function can also return with an empty /// [`Ready`] set, so you should always check the returned value and possibly /// wait again if the requested states are not set. /// /// [`writable()`]: Self::writable /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to write that fails with `WouldBlock` or /// `Poll::Pending`. 
pub async fn ready(&self, interest: Interest) -> io::Result<Ready> { let event = self.io.registration().readiness(interest).await?; Ok(event.ready) } /// Waits for the pipe to become writable. /// /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually /// paired with [`try_write()`]. /// /// [`try_write()`]: Self::try_write /// /// # Examples /// /// ```no_run /// use tokio::net::unix::pipe; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// // Open a writing end of a fifo /// let tx = pipe::OpenOptions::new().open_sender("path/to/a/fifo")?; /// /// loop { /// // Wait for the pipe to be writable /// tx.writable().await?; /// /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match tx.try_write(b"hello world") { /// Ok(n) => { /// break; /// } /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub async fn writable(&self) -> io::Result<()> { self.ready(Interest::WRITABLE).await?; Ok(()) } /// Polls for write readiness. /// /// If the pipe is not currently ready for writing, this method will /// store a clone of the `Waker` from the provided `Context`. When the pipe /// becomes ready for writing, `Waker::wake` will be called on the waker. /// /// Note that on multiple calls to `poll_write_ready` or `poll_write`, only /// the `Waker` from the `Context` passed to the most recent call is /// scheduled to receive a wakeup. /// /// This function is intended for cases where creating and pinning a future /// via [`writable`] is not feasible. Where possible, using [`writable`] is /// preferred, as this supports polling from multiple tasks at once. /// /// [`writable`]: Self::writable /// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if the pipe is not ready for writing. 
/// * `Poll::Ready(Ok(()))` if the pipe is ready for writing. /// * `Poll::Ready(Err(e))` if an error is encountered. /// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.io.registration().poll_write_ready(cx).map_ok(|_| ()) } /// Tries to write a buffer to the pipe, returning how many bytes were /// written. /// /// The function will attempt to write the entire contents of `buf`, but /// only part of the buffer may be written. If the length of `buf` is not /// greater than `PIPE_BUF` (an OS constant, 4096 under Linux), then the /// write is guaranteed to be atomic, i.e. either the entire content of /// `buf` will be written or this method will fail with `WouldBlock`. There /// is no such guarantee if `buf` is larger than `PIPE_BUF`. /// /// This function is usually paired with [`writable`]. /// /// [`writable`]: Self::writable /// /// # Return /// /// If data is successfully written, `Ok(n)` is returned, where `n` is the /// number of bytes written. If the pipe is not ready to write data, /// `Err(io::ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::unix::pipe; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// // Open a writing end of a fifo /// let tx = pipe::OpenOptions::new().open_sender("path/to/a/fifo")?; /// /// loop { /// // Wait for the pipe to be writable /// tx.writable().await?; /// /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. 
/// match tx.try_write(b"hello world") { /// Ok(n) => { /// break; /// } /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_write(&self, buf: &[u8]) -> io::Result<usize> { self.io .registration() .try_io(Interest::WRITABLE, || (&*self.io).write(buf)) } /// Tries to write several buffers to the pipe, returning how many bytes /// were written. /// /// Data is written from each buffer in order, with the final buffer read /// from possible being only partially consumed. This method behaves /// equivalently to a single call to [`try_write()`] with concatenated /// buffers. /// /// If the total length of buffers is not greater than `PIPE_BUF` (an OS /// constant, 4096 under Linux), then the write is guaranteed to be atomic, /// i.e. either the entire contents of buffers will be written or this /// method will fail with `WouldBlock`. There is no such guarantee if the /// total length of buffers is greater than `PIPE_BUF`. /// /// This function is usually paired with [`writable`]. /// /// [`try_write()`]: Self::try_write() /// [`writable`]: Self::writable /// /// # Return /// /// If data is successfully written, `Ok(n)` is returned, where `n` is the /// number of bytes written. If the pipe is not ready to write data, /// `Err(io::ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::unix::pipe; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// // Open a writing end of a fifo /// let tx = pipe::OpenOptions::new().open_sender("path/to/a/fifo")?; /// /// let bufs = [io::IoSlice::new(b"hello "), io::IoSlice::new(b"world")]; /// /// loop { /// // Wait for the pipe to be writable /// tx.writable().await?; /// /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. 
/// match tx.try_write_vectored(&bufs) { /// Ok(n) => { /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_write_vectored(&self, buf: &[io::IoSlice<'_>]) -> io::Result<usize> { self.io .registration() .try_io(Interest::WRITABLE, || (&*self.io).write_vectored(buf)) } /// Converts the pipe into an [`OwnedFd`] in blocking mode. /// /// This function will deregister this pipe end from the event loop, set /// it in blocking mode and perform the conversion. pub fn into_blocking_fd(self) -> io::Result<OwnedFd> { let fd = self.into_nonblocking_fd()?; set_blocking(&fd)?; Ok(fd) } /// Converts the pipe into an [`OwnedFd`] in nonblocking mode. /// /// This function will deregister this pipe end from the event loop and /// perform the conversion. The returned file descriptor will be in nonblocking /// mode. pub fn into_nonblocking_fd(self) -> io::Result<OwnedFd> { let mio_pipe = self.io.into_inner()?; // Safety: the pipe is now deregistered from the event loop // and we are the only owner of this pipe end. 
let owned_fd = unsafe { OwnedFd::from_raw_fd(mio_pipe.into_raw_fd()) }; Ok(owned_fd) } } impl AsyncWrite for Sender { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { self.io.poll_write(cx, buf) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll<io::Result<usize>> { self.io.poll_write_vectored(cx, bufs) } fn is_write_vectored(&self) -> bool { true } fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { Poll::Ready(Ok(())) } fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { Poll::Ready(Ok(())) } } impl AsRawFd for Sender { fn as_raw_fd(&self) -> RawFd { self.io.as_raw_fd() } } impl AsFd for Sender { fn as_fd(&self) -> BorrowedFd<'_> { unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } } } /// Reading end of a Unix pipe. /// /// It can be constructed from a FIFO file with [`OpenOptions::open_receiver`]. /// /// # Examples /// /// Receiving messages from a named pipe in a loop: /// /// ```no_run /// use tokio::net::unix::pipe; /// use tokio::io::{self, AsyncReadExt}; /// /// const FIFO_NAME: &str = "path/to/a/fifo"; /// /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { /// let mut rx = pipe::OpenOptions::new().open_receiver(FIFO_NAME)?; /// loop { /// let mut msg = vec![0; 256]; /// match rx.read_exact(&mut msg).await { /// Ok(_) => { /// /* handle the message */ /// } /// Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => { /// // Writing end has been closed, we should reopen the pipe. /// rx = pipe::OpenOptions::new().open_receiver(FIFO_NAME)?; /// } /// Err(e) => return Err(e.into()), /// } /// } /// # } /// ``` /// /// On Linux, you can use a `Receiver` in read-write access mode to implement /// resilient reading from a named pipe. 
Unlike `Receiver` opened in read-only /// mode, read from a pipe in read-write mode will not fail with `UnexpectedEof` /// when the writing end is closed. This way, a `Receiver` can asynchronously /// wait for the next writer to open the pipe. /// /// You should not use functions waiting for EOF such as [`read_to_end`] with /// a `Receiver` in read-write access mode, since it **may wait forever**. /// `Receiver` in this mode also holds an open writing end, which prevents /// receiving EOF. /// /// To set the read-write access mode you can use `OpenOptions::read_write`. /// Note that using read-write access mode with FIFO files is not defined by /// the POSIX standard and it is only guaranteed to work on Linux. /// /// ```ignore /// use tokio::net::unix::pipe; /// use tokio::io::AsyncReadExt; /// # use std::error::Error; /// /// const FIFO_NAME: &str = "path/to/a/fifo"; /// /// # async fn dox() -> Result<(), Box<dyn Error>> { /// let mut rx = pipe::OpenOptions::new() /// .read_write(true) /// .open_receiver(FIFO_NAME)?; /// loop { /// let mut msg = vec![0; 256]; /// rx.read_exact(&mut msg).await?; /// /* handle the message */ /// } /// # } /// ``` /// /// [`read_to_end`]: crate::io::AsyncReadExt::read_to_end #[derive(Debug)] pub struct Receiver { io: PollEvented<mio_pipe::Receiver>, } impl Receiver { fn from_mio(mio_rx: mio_pipe::Receiver) -> io::Result<Receiver> { let io = PollEvented::new_with_interest(mio_rx, Interest::READABLE)?; Ok(Receiver { io }) } /// Creates a new `Receiver` from a [`File`]. /// /// This function is intended to construct a pipe from a [`File`] representing /// a special FIFO file. It will check if the file is a pipe and has read access, /// set it in non-blocking mode and perform the conversion. /// /// # Errors /// /// Fails with `io::ErrorKind::InvalidInput` if the file is not a pipe or it /// does not have read access. Also fails with any standard OS error if it occurs. 
/// /// # Panics /// /// This function panics if it is not called from within a runtime with /// IO enabled. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. pub fn from_file(file: File) -> io::Result<Receiver> { Receiver::from_owned_fd(file.into()) } /// Creates a new `Receiver` from an [`OwnedFd`]. /// /// This function is intended to construct a pipe from an [`OwnedFd`] representing /// an anonymous pipe or a special FIFO file. It will check if the file descriptor /// is a pipe and has read access, set it in non-blocking mode and perform the /// conversion. /// /// # Errors /// /// Fails with `io::ErrorKind::InvalidInput` if the file descriptor is not a pipe /// or it does not have read access. Also fails with any standard OS error if it /// occurs. /// /// # Panics /// /// This function panics if it is not called from within a runtime with /// IO enabled. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. pub fn from_owned_fd(owned_fd: OwnedFd) -> io::Result<Receiver> { if !is_pipe(owned_fd.as_fd())? { return Err(io::Error::new(io::ErrorKind::InvalidInput, "not a pipe")); } let flags = get_file_flags(owned_fd.as_fd())?; if has_read_access(flags) {
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
true
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/unix/socketaddr.rs
tokio/src/net/unix/socketaddr.rs
use std::fmt; use std::path::Path; /// An address associated with a Tokio Unix socket. /// /// This type is a thin wrapper around [`std::os::unix::net::SocketAddr`]. You /// can convert to and from the standard library `SocketAddr` type using the /// [`From`] trait. #[derive(Clone)] pub struct SocketAddr(pub(super) std::os::unix::net::SocketAddr); impl SocketAddr { /// Returns `true` if the address is unnamed. /// /// Documentation reflected in [`SocketAddr`]. /// /// [`SocketAddr`]: std::os::unix::net::SocketAddr pub fn is_unnamed(&self) -> bool { self.0.is_unnamed() } /// Returns the contents of this address if it is a `pathname` address. /// /// Documentation reflected in [`SocketAddr`]. /// /// [`SocketAddr`]: std::os::unix::net::SocketAddr pub fn as_pathname(&self) -> Option<&Path> { self.0.as_pathname() } /// Returns the contents of this address if it is in the abstract namespace. /// /// Documentation reflected in [`SocketAddrExt`]. /// The abstract namespace is a Linux-specific feature. /// /// /// [`SocketAddrExt`]: std::os::linux::net::SocketAddrExt #[cfg(any(target_os = "linux", target_os = "android"))] #[cfg_attr(docsrs, doc(cfg(any(target_os = "linux", target_os = "android"))))] pub fn as_abstract_name(&self) -> Option<&[u8]> { #[cfg(target_os = "android")] use std::os::android::net::SocketAddrExt; #[cfg(target_os = "linux")] use std::os::linux::net::SocketAddrExt; self.0.as_abstract_name() } } impl fmt::Debug for SocketAddr { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(fmt) } } impl From<std::os::unix::net::SocketAddr> for SocketAddr { fn from(value: std::os::unix::net::SocketAddr) -> Self { SocketAddr(value) } } impl From<SocketAddr> for std::os::unix::net::SocketAddr { fn from(value: SocketAddr) -> Self { value.0 } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/unix/socket.rs
tokio/src/net/unix/socket.rs
use std::io; use std::path::Path; use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd}; use crate::net::{UnixDatagram, UnixListener, UnixStream}; cfg_net_unix! { /// A Unix socket that has not yet been converted to a [`UnixStream`], [`UnixDatagram`], or /// [`UnixListener`]. /// /// `UnixSocket` wraps an operating system socket and enables the caller to /// configure the socket before establishing a connection or accepting /// inbound connections. The caller is able to set socket option and explicitly /// bind the socket with a socket address. /// /// The underlying socket is closed when the `UnixSocket` value is dropped. /// /// `UnixSocket` should only be used directly if the default configuration used /// by [`UnixStream::connect`], [`UnixDatagram::bind`], and [`UnixListener::bind`] /// does not meet the required use case. /// /// Calling `UnixStream::connect(path)` effectively performs the same function as: /// /// ```no_run /// use tokio::net::UnixSocket; /// use std::error::Error; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let dir = tempfile::tempdir().unwrap(); /// let path = dir.path().join("bind_path"); /// let socket = UnixSocket::new_stream()?; /// /// let stream = socket.connect(path).await?; /// /// Ok(()) /// } /// ``` /// /// Calling `UnixDatagram::bind(path)` effectively performs the same function as: /// /// ```no_run /// use tokio::net::UnixSocket; /// use std::error::Error; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let dir = tempfile::tempdir().unwrap(); /// let path = dir.path().join("bind_path"); /// let socket = UnixSocket::new_datagram()?; /// socket.bind(path)?; /// /// let datagram = socket.datagram()?; /// /// Ok(()) /// } /// ``` /// /// Calling `UnixListener::bind(path)` effectively performs the same function as: /// /// ```no_run /// use tokio::net::UnixSocket; /// use std::error::Error; /// /// #[tokio::main] /// async fn main() -> 
Result<(), Box<dyn Error>> { /// let dir = tempfile::tempdir().unwrap(); /// let path = dir.path().join("bind_path"); /// let socket = UnixSocket::new_stream()?; /// socket.bind(path)?; /// /// let listener = socket.listen(1024)?; /// /// Ok(()) /// } /// ``` /// /// Setting socket options not explicitly provided by `UnixSocket` may be done by /// accessing the [`RawFd`]/[`RawSocket`] using [`AsRawFd`]/[`AsRawSocket`] and /// setting the option with a crate like [`socket2`]. /// /// [`RawFd`]: std::os::fd::RawFd /// [`RawSocket`]: https://doc.rust-lang.org/std/os/windows/io/type.RawSocket.html /// [`AsRawFd`]: std::os::fd::AsRawFd /// [`AsRawSocket`]: https://doc.rust-lang.org/std/os/windows/io/trait.AsRawSocket.html /// [`socket2`]: https://docs.rs/socket2/ #[derive(Debug)] pub struct UnixSocket { inner: socket2::Socket, } } impl UnixSocket { fn ty(&self) -> socket2::Type { self.inner.r#type().unwrap() } /// Creates a new Unix datagram socket. /// /// Calls `socket(2)` with `AF_UNIX` and `SOCK_DGRAM`. /// /// # Returns /// /// On success, the newly created [`UnixSocket`] is returned. If an error is /// encountered, it is returned instead. pub fn new_datagram() -> io::Result<UnixSocket> { UnixSocket::new(socket2::Type::DGRAM) } /// Creates a new Unix stream socket. /// /// Calls `socket(2)` with `AF_UNIX` and `SOCK_STREAM`. /// /// # Returns /// /// On success, the newly created [`UnixSocket`] is returned. If an error is /// encountered, it is returned instead. 
pub fn new_stream() -> io::Result<UnixSocket> { UnixSocket::new(socket2::Type::STREAM) } fn new(ty: socket2::Type) -> io::Result<UnixSocket> { #[cfg(any( target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", target_os = "linux", target_os = "netbsd", target_os = "openbsd" ))] let ty = ty.nonblocking(); let inner = socket2::Socket::new(socket2::Domain::UNIX, ty, None)?; #[cfg(not(any( target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", target_os = "linux", target_os = "netbsd", target_os = "openbsd" )))] inner.set_nonblocking(true)?; Ok(UnixSocket { inner }) } /// Binds the socket to the given address. /// /// This calls the `bind(2)` operating-system function. pub fn bind(&self, path: impl AsRef<Path>) -> io::Result<()> { let addr = socket2::SockAddr::unix(path)?; self.inner.bind(&addr) } /// Converts the socket into a `UnixListener`. /// /// `backlog` defines the maximum number of pending connections are queued /// by the operating system at any given time. Connection are removed from /// the queue with [`UnixListener::accept`]. When the queue is full, the /// operating-system will start rejecting connections. /// /// Calling this function on a socket created by [`new_datagram`] will return an error. /// /// This calls the `listen(2)` operating-system function, marking the socket /// as a passive socket. 
/// /// [`new_datagram`]: `UnixSocket::new_datagram` pub fn listen(self, backlog: u32) -> io::Result<UnixListener> { if self.ty() == socket2::Type::DGRAM { return Err(io::Error::new( io::ErrorKind::Other, "listen cannot be called on a datagram socket", )); } self.inner.listen(backlog as i32)?; let mio = { use std::os::unix::io::{FromRawFd, IntoRawFd}; let raw_fd = self.inner.into_raw_fd(); unsafe { mio::net::UnixListener::from_raw_fd(raw_fd) } }; UnixListener::new(mio) } /// Establishes a Unix connection with a peer at the specified socket address. /// /// The `UnixSocket` is consumed. Once the connection is established, a /// connected [`UnixStream`] is returned. If the connection fails, the /// encountered error is returned. /// /// Calling this function on a socket created by [`new_datagram`] will return an error. /// /// This calls the `connect(2)` operating-system function. /// /// [`new_datagram`]: `UnixSocket::new_datagram` pub async fn connect(self, path: impl AsRef<Path>) -> io::Result<UnixStream> { if self.ty() == socket2::Type::DGRAM { return Err(io::Error::new( io::ErrorKind::Other, "connect cannot be called on a datagram socket", )); } let addr = socket2::SockAddr::unix(path)?; if let Err(err) = self.inner.connect(&addr) { if err.raw_os_error() != Some(libc::EINPROGRESS) { return Err(err); } } let mio = { use std::os::unix::io::{FromRawFd, IntoRawFd}; let raw_fd = self.inner.into_raw_fd(); unsafe { mio::net::UnixStream::from_raw_fd(raw_fd) } }; UnixStream::connect_mio(mio).await } /// Converts the socket into a [`UnixDatagram`]. /// /// Calling this function on a socket created by [`new_stream`] will return an error. 
/// /// [`new_stream`]: `UnixSocket::new_stream` pub fn datagram(self) -> io::Result<UnixDatagram> { if self.ty() == socket2::Type::STREAM { return Err(io::Error::new( io::ErrorKind::Other, "datagram cannot be called on a stream socket", )); } let mio = { use std::os::unix::io::{FromRawFd, IntoRawFd}; let raw_fd = self.inner.into_raw_fd(); unsafe { mio::net::UnixDatagram::from_raw_fd(raw_fd) } }; UnixDatagram::from_mio(mio) } } impl AsRawFd for UnixSocket { fn as_raw_fd(&self) -> RawFd { self.inner.as_raw_fd() } } impl AsFd for UnixSocket { fn as_fd(&self) -> BorrowedFd<'_> { unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } } } impl FromRawFd for UnixSocket { unsafe fn from_raw_fd(fd: RawFd) -> UnixSocket { // Safety: exactly the same safety requirements as the // `FromRawFd::from_raw_fd` trait method. let inner = unsafe { socket2::Socket::from_raw_fd(fd) }; UnixSocket { inner } } } impl IntoRawFd for UnixSocket { fn into_raw_fd(self) -> RawFd { self.inner.into_raw_fd() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/unix/split.rs
tokio/src/net/unix/split.rs
//! `UnixStream` split support. //! //! A `UnixStream` can be split into a read half and a write half with //! `UnixStream::split`. The read half implements `AsyncRead` while the write //! half implements `AsyncWrite`. //! //! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized //! split has no associated overhead and enforces all invariants at the type //! level. use crate::io::{AsyncRead, AsyncWrite, Interest, ReadBuf, Ready}; use crate::net::UnixStream; use crate::net::unix::SocketAddr; use std::io; use std::net::Shutdown; use std::pin::Pin; use std::task::{Context, Poll}; cfg_io_util! { use bytes::BufMut; } /// Borrowed read half of a [`UnixStream`], created by [`split`]. /// /// Reading from a `ReadHalf` is usually done using the convenience methods found on the /// [`AsyncReadExt`] trait. /// /// [`UnixStream`]: UnixStream /// [`split`]: UnixStream::split() /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt #[derive(Debug)] pub struct ReadHalf<'a>(&'a UnixStream); /// Borrowed write half of a [`UnixStream`], created by [`split`]. /// /// Note that in the [`AsyncWrite`] implementation of this type, [`poll_shutdown`] will /// shut down the [`UnixStream`] stream in the write direction. /// /// Writing to an `WriteHalf` is usually done using the convenience methods found /// on the [`AsyncWriteExt`] trait. /// /// [`UnixStream`]: UnixStream /// [`split`]: UnixStream::split() /// [`AsyncWrite`]: trait@crate::io::AsyncWrite /// [`poll_shutdown`]: fn@crate::io::AsyncWrite::poll_shutdown /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt #[derive(Debug)] pub struct WriteHalf<'a>(&'a UnixStream); pub(crate) fn split(stream: &mut UnixStream) -> (ReadHalf<'_>, WriteHalf<'_>) { (ReadHalf(stream), WriteHalf(stream)) } impl ReadHalf<'_> { /// Wait for any of the requested ready states. /// /// This function is usually paired with [`try_read()`]. 
It can be used instead /// of [`readable()`] to check the returned ready set for [`Ready::READABLE`] /// and [`Ready::READ_CLOSED`] events. /// /// The function may complete without the socket being ready. This is a /// false-positive and attempting an operation will return with /// `io::ErrorKind::WouldBlock`. The function can also return with an empty /// [`Ready`] set, so you should always check the returned value and possibly /// wait again if the requested states are not set. /// /// This function is equivalent to [`UnixStream::ready`]. /// /// [`try_read()`]: Self::try_read /// [`readable()`]: Self::readable /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read or write that fails with `WouldBlock` or /// `Poll::Pending`. pub async fn ready(&self, interest: Interest) -> io::Result<Ready> { self.0.ready(interest).await } /// Waits for the socket to become readable. /// /// This function is equivalent to `ready(Interest::READABLE)` and is usually /// paired with `try_read()`. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read that fails with `WouldBlock` or /// `Poll::Pending`. pub async fn readable(&self) -> io::Result<()> { self.0.readable().await } /// Tries to read data from the stream into the provided buffer, returning how /// many bytes were read. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read()` is non-blocking, the buffer does not have to be stored by /// the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. 
/// /// [`readable()`]: Self::readable() /// [`ready()`]: Self::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: /// /// 1. The stream's read half is closed and will no longer yield data. /// 2. The specified buffer was 0 bytes in length. /// /// If the stream is not ready to read data, /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_read(&self, buf: &mut [u8]) -> io::Result<usize> { self.0.try_read(buf) } cfg_io_util! { /// Tries to read data from the stream into the provided buffer, advancing the /// buffer's internal cursor, returning how many bytes were read. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by /// the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`readable()`]: Self::readable() /// [`ready()`]: Self::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. `Ok(0)` indicates the stream's read half is closed /// and will no longer yield data. If the stream is not ready to read data pub fn try_read_buf<B: BufMut>(&self, buf: &mut B) -> io::Result<usize> { self.0.try_read_buf(buf) } } /// Tries to read data from the stream into the provided buffers, returning /// how many bytes were read. /// /// Data is copied to fill each buffer in order, with the final buffer /// written to possibly being only partially filled. This method behaves /// equivalently to a single call to [`try_read()`] with concatenated /// buffers. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. 
Because /// `try_read_vectored()` is non-blocking, the buffer does not have to be /// stored by the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`try_read()`]: Self::try_read() /// [`readable()`]: Self::readable() /// [`ready()`]: Self::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. `Ok(0)` indicates the stream's read half is closed /// and will no longer yield data. If the stream is not ready to read data /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> { self.0.try_read_vectored(bufs) } /// Returns the socket address of the remote half of this connection. pub fn peer_addr(&self) -> io::Result<SocketAddr> { self.0.peer_addr() } /// Returns the socket address of the local half of this connection. pub fn local_addr(&self) -> io::Result<SocketAddr> { self.0.local_addr() } } impl WriteHalf<'_> { /// Waits for any of the requested ready states. /// /// This function is usually paired with [`try_write()`]. It can be used instead /// of [`writable()`] to check the returned ready set for [`Ready::WRITABLE`] /// and [`Ready::WRITE_CLOSED`] events. /// /// The function may complete without the socket being ready. This is a /// false-positive and attempting an operation will return with /// `io::ErrorKind::WouldBlock`. The function can also return with an empty /// [`Ready`] set, so you should always check the returned value and possibly /// wait again if the requested states are not set. /// /// This function is equivalent to [`UnixStream::ready`]. /// /// [`try_write()`]: Self::try_write /// [`writable()`]: Self::writable /// /// # Cancel safety /// /// This method is cancel safe. 
Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read or write that fails with `WouldBlock` or /// `Poll::Pending`. pub async fn ready(&self, interest: Interest) -> io::Result<Ready> { self.0.ready(interest).await } /// Waits for the socket to become writable. /// /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually /// paired with `try_write()`. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to write that fails with `WouldBlock` or /// `Poll::Pending`. pub async fn writable(&self) -> io::Result<()> { self.0.writable().await } /// Tries to write a buffer to the stream, returning how many bytes were /// written. /// /// The function will attempt to write the entire contents of `buf`, but /// only part of the buffer may be written. /// /// This function is usually paired with `writable()`. /// /// # Return /// /// If data is successfully written, `Ok(n)` is returned, where `n` is the /// number of bytes written. If the stream is not ready to write data, /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_write(&self, buf: &[u8]) -> io::Result<usize> { self.0.try_write(buf) } /// Tries to write several buffers to the stream, returning how many bytes /// were written. /// /// Data is written from each buffer in order, with the final buffer read /// from possible being only partially consumed. This method behaves /// equivalently to a single call to [`try_write()`] with concatenated /// buffers. /// /// This function is usually paired with `writable()`. /// /// [`try_write()`]: Self::try_write() /// /// # Return /// /// If data is successfully written, `Ok(n)` is returned, where `n` is the /// number of bytes written. 
If the stream is not ready to write data, /// `Err(io::ErrorKind::WouldBlock)` is returned. pub fn try_write_vectored(&self, buf: &[io::IoSlice<'_>]) -> io::Result<usize> { self.0.try_write_vectored(buf) } /// Returns the socket address of the remote half of this connection. pub fn peer_addr(&self) -> io::Result<SocketAddr> { self.0.peer_addr() } /// Returns the socket address of the local half of this connection. pub fn local_addr(&self) -> io::Result<SocketAddr> { self.0.local_addr() } } impl AsyncRead for ReadHalf<'_> { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { self.0.poll_read_priv(cx, buf) } } impl AsyncWrite for WriteHalf<'_> { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { self.0.poll_write_priv(cx, buf) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll<io::Result<usize>> { self.0.poll_write_vectored_priv(cx, bufs) } fn is_write_vectored(&self) -> bool { self.0.is_write_vectored() } fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { Poll::Ready(Ok(())) } fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { self.0.shutdown_std(Shutdown::Write).into() } } impl AsRef<UnixStream> for ReadHalf<'_> { fn as_ref(&self) -> &UnixStream { self.0 } } impl AsRef<UnixStream> for WriteHalf<'_> { fn as_ref(&self) -> &UnixStream { self.0 } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/unix/datagram/mod.rs
tokio/src/net/unix/datagram/mod.rs
//! Unix datagram types. pub(crate) mod socket;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/unix/datagram/socket.rs
tokio/src/net/unix/datagram/socket.rs
use crate::io::{Interest, PollEvented, ReadBuf, Ready}; use crate::net::unix::SocketAddr; use crate::util::check_socket_for_blocking; use std::fmt; use std::io; use std::net::Shutdown; use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd}; use std::os::unix::net; use std::path::Path; use std::task::{ready, Context, Poll}; cfg_io_util! { use bytes::BufMut; } cfg_net_unix! { /// An I/O object representing a Unix datagram socket. /// /// A socket can be either named (associated with a filesystem path) or /// unnamed. /// /// This type does not provide a `split` method, because this functionality /// can be achieved by wrapping the socket in an [`Arc`]. Note that you do /// not need a `Mutex` to share the `UnixDatagram` — an `Arc<UnixDatagram>` /// is enough. This is because all of the methods take `&self` instead of /// `&mut self`. /// /// **Note:** named sockets are persisted even after the object is dropped /// and the program has exited, and cannot be reconnected. It is advised /// that you either check for and unlink the existing socket if it exists, /// or use a temporary file that is guaranteed to not already exist. /// /// [`Arc`]: std::sync::Arc /// /// # Examples /// Using named sockets, associated with a filesystem path: /// ``` /// # use std::error::Error; /// # #[tokio::main] /// # async fn main() -> Result<(), Box<dyn Error>> { /// # if cfg!(miri) { return Ok(()); } // No `socket` in miri. /// use tokio::net::UnixDatagram; /// use tempfile::tempdir; /// /// // We use a temporary directory so that the socket /// // files left by the bound sockets will get cleaned up. 
/// let tmp = tempdir()?; /// /// // Bind each socket to a filesystem path /// let tx_path = tmp.path().join("tx"); /// let tx = UnixDatagram::bind(&tx_path)?; /// let rx_path = tmp.path().join("rx"); /// let rx = UnixDatagram::bind(&rx_path)?; /// /// let bytes = b"hello world"; /// tx.send_to(bytes, &rx_path).await?; /// /// let mut buf = vec![0u8; 24]; /// let (size, addr) = rx.recv_from(&mut buf).await?; /// /// let dgram = &buf[..size]; /// assert_eq!(dgram, bytes); /// assert_eq!(addr.as_pathname().unwrap(), &tx_path); /// /// # Ok(()) /// # } /// ``` /// /// Using unnamed sockets, created as a pair /// ``` /// # use std::error::Error; /// # #[tokio::main] /// # async fn main() -> Result<(), Box<dyn Error>> { /// # if cfg!(miri) { return Ok(()); } // No SOCK_DGRAM for `socketpair` in miri. /// use tokio::net::UnixDatagram; /// /// // Create the pair of sockets /// let (sock1, sock2) = UnixDatagram::pair()?; /// /// // Since the sockets are paired, the paired send/recv /// // functions can be used /// let bytes = b"hello world"; /// sock1.send(bytes).await?; /// /// let mut buff = vec![0u8; 24]; /// let size = sock2.recv(&mut buff).await?; /// /// let dgram = &buff[..size]; /// assert_eq!(dgram, bytes); /// /// # Ok(()) /// # } /// ``` #[cfg_attr(docsrs, doc(alias = "uds"))] pub struct UnixDatagram { io: PollEvented<mio::net::UnixDatagram>, } } impl UnixDatagram { pub(crate) fn from_mio(sys: mio::net::UnixDatagram) -> io::Result<UnixDatagram> { let datagram = UnixDatagram::new(sys)?; if let Some(e) = datagram.io.take_error()? { return Err(e); } Ok(datagram) } /// Waits for any of the requested ready states. /// /// This function is usually paired with `try_recv()` or `try_send()`. It /// can be used to concurrently `recv` / `send` to the same socket on a single /// task without splitting the socket. /// /// The function may complete without the socket being ready. 
This is a /// false-positive and attempting an operation will return with /// `io::ErrorKind::WouldBlock`. The function can also return with an empty /// [`Ready`] set, so you should always check the returned value and possibly /// wait again if the requested states are not set. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read or write that fails with `WouldBlock` or /// `Poll::Pending`. /// /// # Examples /// /// Concurrently receive from and send to the socket on the same task /// without splitting. /// /// ```no_run /// use tokio::io::Interest; /// use tokio::net::UnixDatagram; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let dir = tempfile::tempdir().unwrap(); /// let client_path = dir.path().join("client.sock"); /// let server_path = dir.path().join("server.sock"); /// let socket = UnixDatagram::bind(&client_path)?; /// socket.connect(&server_path)?; /// /// loop { /// let ready = socket.ready(Interest::READABLE | Interest::WRITABLE).await?; /// /// if ready.is_readable() { /// let mut data = [0; 1024]; /// match socket.try_recv(&mut data[..]) { /// Ok(n) => { /// println!("received {:?}", &data[..n]); /// } /// // False-positive, continue /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} /// Err(e) => { /// return Err(e); /// } /// } /// } /// /// if ready.is_writable() { /// // Write some data /// match socket.try_send(b"hello world") { /// Ok(n) => { /// println!("sent {} bytes", n); /// } /// // False-positive, continue /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} /// Err(e) => { /// return Err(e); /// } /// } /// } /// } /// } /// ``` pub async fn ready(&self, interest: Interest) -> io::Result<Ready> { let event = self.io.registration().readiness(interest).await?; Ok(event.ready) } /// Waits for the socket to become writable. 
/// /// This function is equivalent to `ready(Interest::WRITABLE)` and is /// usually paired with `try_send()` or `try_send_to()`. /// /// The function may complete without the socket being writable. This is a /// false-positive and attempting a `try_send()` will return with /// `io::ErrorKind::WouldBlock`. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to write that fails with `WouldBlock` or /// `Poll::Pending`. /// /// # Examples /// /// ```no_run /// use tokio::net::UnixDatagram; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let dir = tempfile::tempdir().unwrap(); /// let client_path = dir.path().join("client.sock"); /// let server_path = dir.path().join("server.sock"); /// let socket = UnixDatagram::bind(&client_path)?; /// socket.connect(&server_path)?; /// /// loop { /// // Wait for the socket to be writable /// socket.writable().await?; /// /// // Try to send data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match socket.try_send(b"hello world") { /// Ok(n) => { /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub async fn writable(&self) -> io::Result<()> { self.ready(Interest::WRITABLE).await?; Ok(()) } /// Polls for write/send readiness. /// /// If the socket is not currently ready for sending, this method will /// store a clone of the `Waker` from the provided `Context`. When the socket /// becomes ready for sending, `Waker::wake` will be called on the /// waker. /// /// Note that on multiple calls to `poll_send_ready` or `poll_send`, only /// the `Waker` from the `Context` passed to the most recent call is /// scheduled to receive a wakeup. 
(However, `poll_recv_ready` retains a /// second, independent waker.) /// /// This function is intended for cases where creating and pinning a future /// via [`writable`] is not feasible. Where possible, using [`writable`] is /// preferred, as this supports polling from multiple tasks at once. /// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if the socket is not ready for writing. /// * `Poll::Ready(Ok(()))` if the socket is ready for writing. /// * `Poll::Ready(Err(e))` if an error is encountered. /// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. /// /// [`writable`]: method@Self::writable pub fn poll_send_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.io.registration().poll_write_ready(cx).map_ok(|_| ()) } /// Waits for the socket to become readable. /// /// This function is equivalent to `ready(Interest::READABLE)` and is usually /// paired with `try_recv()`. /// /// The function may complete without the socket being readable. This is a /// false-positive and attempting a `try_recv()` will return with /// `io::ErrorKind::WouldBlock`. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read that fails with `WouldBlock` or /// `Poll::Pending`. 
/// /// # Examples /// /// ```no_run /// use tokio::net::UnixDatagram; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// // Connect to a peer /// let dir = tempfile::tempdir().unwrap(); /// let client_path = dir.path().join("client.sock"); /// let server_path = dir.path().join("server.sock"); /// let socket = UnixDatagram::bind(&client_path)?; /// socket.connect(&server_path)?; /// /// loop { /// // Wait for the socket to be readable /// socket.readable().await?; /// /// // The buffer is **not** included in the async task and will /// // only exist on the stack. /// let mut buf = [0; 1024]; /// /// // Try to recv data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match socket.try_recv(&mut buf) { /// Ok(n) => { /// println!("GOT {:?}", &buf[..n]); /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub async fn readable(&self) -> io::Result<()> { self.ready(Interest::READABLE).await?; Ok(()) } /// Polls for read/receive readiness. /// /// If the socket is not currently ready for receiving, this method will /// store a clone of the `Waker` from the provided `Context`. When the /// socket becomes ready for reading, `Waker::wake` will be called on the /// waker. /// /// Note that on multiple calls to `poll_recv_ready`, `poll_recv` or /// `poll_peek`, only the `Waker` from the `Context` passed to the most /// recent call is scheduled to receive a wakeup. (However, /// `poll_send_ready` retains a second, independent waker.) /// /// This function is intended for cases where creating and pinning a future /// via [`readable`] is not feasible. Where possible, using [`readable`] is /// preferred, as this supports polling from multiple tasks at once. /// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if the socket is not ready for reading. 
/// * `Poll::Ready(Ok(()))` if the socket is ready for reading. /// * `Poll::Ready(Err(e))` if an error is encountered. /// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. /// /// [`readable`]: method@Self::readable pub fn poll_recv_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.io.registration().poll_read_ready(cx).map_ok(|_| ()) } /// Creates a new `UnixDatagram` bound to the specified path. /// /// # Examples /// ``` /// # use std::error::Error; /// # #[tokio::main] /// # async fn main() -> Result<(), Box<dyn Error>> { /// # if cfg!(miri) { return Ok(()); } // No `socket` in miri. /// use tokio::net::UnixDatagram; /// use tempfile::tempdir; /// /// // We use a temporary directory so that the socket /// // files left by the bound sockets will get cleaned up. /// let tmp = tempdir()?; /// /// // Bind the socket to a filesystem path /// let socket_path = tmp.path().join("socket"); /// let socket = UnixDatagram::bind(&socket_path)?; /// /// # Ok(()) /// # } /// ``` pub fn bind<P>(path: P) -> io::Result<UnixDatagram> where P: AsRef<Path>, { let socket = mio::net::UnixDatagram::bind(path)?; UnixDatagram::new(socket) } /// Creates an unnamed pair of connected sockets. /// /// This function will create a pair of interconnected Unix sockets for /// communicating back and forth between one another. /// /// # Examples /// ``` /// # use std::error::Error; /// # #[tokio::main] /// # async fn main() -> Result<(), Box<dyn Error>> { /// # if cfg!(miri) { return Ok(()); } // No SOCK_DGRAM for `socketpair` in miri. 
/// use tokio::net::UnixDatagram; /// /// // Create the pair of sockets /// let (sock1, sock2) = UnixDatagram::pair()?; /// /// // Since the sockets are paired, the paired send/recv /// // functions can be used /// let bytes = b"hail eris"; /// sock1.send(bytes).await?; /// /// let mut buff = vec![0u8; 24]; /// let size = sock2.recv(&mut buff).await?; /// /// let dgram = &buff[..size]; /// assert_eq!(dgram, bytes); /// /// # Ok(()) /// # } /// ``` pub fn pair() -> io::Result<(UnixDatagram, UnixDatagram)> { let (a, b) = mio::net::UnixDatagram::pair()?; let a = UnixDatagram::new(a)?; let b = UnixDatagram::new(b)?; Ok((a, b)) } /// Creates new [`UnixDatagram`] from a [`std::os::unix::net::UnixDatagram`]. /// /// This function is intended to be used to wrap a `UnixDatagram` from the /// standard library in the Tokio equivalent. /// /// # Notes /// /// The caller is responsible for ensuring that the socket is in /// non-blocking mode. Otherwise all I/O operations on the socket /// will block the thread, which will cause unexpected behavior. /// Non-blocking mode can be set using [`set_nonblocking`]. /// /// Passing a listener in blocking mode is always erroneous, /// and the behavior in that case may change in the future. /// For example, it could panic. /// /// [`set_nonblocking`]: std::os::unix::net::UnixDatagram::set_nonblocking /// /// # Panics /// /// This function panics if it is not called from within a runtime with /// IO enabled. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a Tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. /// # Examples /// ``` /// # use std::error::Error; /// # #[tokio::main] /// # async fn main() -> Result<(), Box<dyn Error>> { /// # if cfg!(miri) { return Ok(()); } // No `socket` in miri. 
/// use tokio::net::UnixDatagram; /// use std::os::unix::net::UnixDatagram as StdUDS; /// use tempfile::tempdir; /// /// // We use a temporary directory so that the socket /// // files left by the bound sockets will get cleaned up. /// let tmp = tempdir()?; /// /// // Bind the socket to a filesystem path /// let socket_path = tmp.path().join("socket"); /// let std_socket = StdUDS::bind(&socket_path)?; /// std_socket.set_nonblocking(true)?; /// let tokio_socket = UnixDatagram::from_std(std_socket)?; /// /// # Ok(()) /// # } /// ``` #[track_caller] pub fn from_std(datagram: net::UnixDatagram) -> io::Result<UnixDatagram> { check_socket_for_blocking(&datagram)?; let socket = mio::net::UnixDatagram::from_std(datagram); let io = PollEvented::new(socket)?; Ok(UnixDatagram { io }) } /// Turns a [`tokio::net::UnixDatagram`] into a [`std::os::unix::net::UnixDatagram`]. /// /// The returned [`std::os::unix::net::UnixDatagram`] will have nonblocking /// mode set as `true`. Use [`set_nonblocking`] to change the blocking mode /// if needed. /// /// # Examples /// /// ```rust,no_run /// # use std::error::Error; /// # async fn dox() -> Result<(), Box<dyn Error>> { /// let tokio_socket = tokio::net::UnixDatagram::bind("/path/to/the/socket")?; /// let std_socket = tokio_socket.into_std()?; /// std_socket.set_nonblocking(false)?; /// # Ok(()) /// # } /// ``` /// /// [`tokio::net::UnixDatagram`]: UnixDatagram /// [`std::os::unix::net::UnixDatagram`]: std::os::unix::net::UnixDatagram /// [`set_nonblocking`]: fn@std::os::unix::net::UnixDatagram::set_nonblocking pub fn into_std(self) -> io::Result<std::os::unix::net::UnixDatagram> { self.io .into_inner() .map(IntoRawFd::into_raw_fd) .map(|raw_fd| unsafe { std::os::unix::net::UnixDatagram::from_raw_fd(raw_fd) }) } fn new(socket: mio::net::UnixDatagram) -> io::Result<UnixDatagram> { let io = PollEvented::new(socket)?; Ok(UnixDatagram { io }) } /// Creates a new `UnixDatagram` which is not bound to any address. 
/// /// # Examples /// ``` /// # use std::error::Error; /// # #[tokio::main] /// # async fn main() -> Result<(), Box<dyn Error>> { /// # if cfg!(miri) { return Ok(()); } // No `socket` in miri. /// use tokio::net::UnixDatagram; /// use tempfile::tempdir; /// /// // Create an unbound socket /// let tx = UnixDatagram::unbound()?; /// /// // Create another, bound socket /// let tmp = tempdir()?; /// let rx_path = tmp.path().join("rx"); /// let rx = UnixDatagram::bind(&rx_path)?; /// /// // Send to the bound socket /// let bytes = b"hello world"; /// tx.send_to(bytes, &rx_path).await?; /// /// let mut buf = vec![0u8; 24]; /// let (size, addr) = rx.recv_from(&mut buf).await?; /// /// let dgram = &buf[..size]; /// assert_eq!(dgram, bytes); /// /// # Ok(()) /// # } /// ``` pub fn unbound() -> io::Result<UnixDatagram> { let socket = mio::net::UnixDatagram::unbound()?; UnixDatagram::new(socket) } /// Connects the socket to the specified address. /// /// The `send` method may be used to send data to the specified address. /// `recv` and `recv_from` will only receive data from that address. /// /// # Examples /// ``` /// # use std::error::Error; /// # #[tokio::main] /// # async fn main() -> Result<(), Box<dyn Error>> { /// # if cfg!(miri) { return Ok(()); } // No `socket` in miri. 
/// use tokio::net::UnixDatagram; /// use tempfile::tempdir; /// /// // Create an unbound socket /// let tx = UnixDatagram::unbound()?; /// /// // Create another, bound socket /// let tmp = tempdir()?; /// let rx_path = tmp.path().join("rx"); /// let rx = UnixDatagram::bind(&rx_path)?; /// /// // Connect to the bound socket /// tx.connect(&rx_path)?; /// /// // Send to the bound socket /// let bytes = b"hello world"; /// tx.send(bytes).await?; /// /// let mut buf = vec![0u8; 24]; /// let (size, addr) = rx.recv_from(&mut buf).await?; /// /// let dgram = &buf[..size]; /// assert_eq!(dgram, bytes); /// /// # Ok(()) /// # } /// ``` pub fn connect<P: AsRef<Path>>(&self, path: P) -> io::Result<()> { self.io.connect(path) } /// Sends data on the socket to the socket's peer. /// /// # Cancel safety /// /// This method is cancel safe. If `send` is used as the event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, then it is guaranteed that the message was not sent. /// /// # Examples /// ``` /// # use std::error::Error; /// # #[tokio::main] /// # async fn main() -> Result<(), Box<dyn Error>> { /// # if cfg!(miri) { return Ok(()); } // No SOCK_DGRAM for `socketpair` in miri. /// use tokio::net::UnixDatagram; /// /// // Create the pair of sockets /// let (sock1, sock2) = UnixDatagram::pair()?; /// /// // Since the sockets are paired, the paired send/recv /// // functions can be used /// let bytes = b"hello world"; /// sock1.send(bytes).await?; /// /// let mut buff = vec![0u8; 24]; /// let size = sock2.recv(&mut buff).await?; /// /// let dgram = &buff[..size]; /// assert_eq!(dgram, bytes); /// /// # Ok(()) /// # } /// ``` pub async fn send(&self, buf: &[u8]) -> io::Result<usize> { self.io .registration() .async_io(Interest::WRITABLE, || self.io.send(buf)) .await } /// Tries to send a datagram to the peer without waiting. 
/// /// # Examples /// /// ```no_run /// use tokio::net::UnixDatagram; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let dir = tempfile::tempdir().unwrap(); /// let client_path = dir.path().join("client.sock"); /// let server_path = dir.path().join("server.sock"); /// let socket = UnixDatagram::bind(&client_path)?; /// socket.connect(&server_path)?; /// /// loop { /// // Wait for the socket to be writable /// socket.writable().await?; /// /// // Try to send data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match socket.try_send(b"hello world") { /// Ok(n) => { /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_send(&self, buf: &[u8]) -> io::Result<usize> { self.io .registration() .try_io(Interest::WRITABLE, || self.io.send(buf)) } /// Tries to send a datagram to the peer without waiting. /// /// # Examples /// /// ```no_run /// use tokio::net::UnixDatagram; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let dir = tempfile::tempdir().unwrap(); /// let client_path = dir.path().join("client.sock"); /// let server_path = dir.path().join("server.sock"); /// let socket = UnixDatagram::bind(&client_path)?; /// /// loop { /// // Wait for the socket to be writable /// socket.writable().await?; /// /// // Try to send data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. 
/// match socket.try_send_to(b"hello world", &server_path) { /// Ok(n) => { /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_send_to<P>(&self, buf: &[u8], target: P) -> io::Result<usize> where P: AsRef<Path>, { self.io .registration() .try_io(Interest::WRITABLE, || self.io.send_to(buf, target)) } /// Receives data from the socket. /// /// # Cancel safety /// /// This method is cancel safe. If `recv` is used as the event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, it is guaranteed that no messages were received on this /// socket. /// /// # Examples /// ``` /// # use std::error::Error; /// # #[tokio::main] /// # async fn main() -> Result<(), Box<dyn Error>> { /// # if cfg!(miri) { return Ok(()); } // No SOCK_DGRAM for `socketpair` in miri. /// use tokio::net::UnixDatagram; /// /// // Create the pair of sockets /// let (sock1, sock2) = UnixDatagram::pair()?; /// /// // Since the sockets are paired, the paired send/recv /// // functions can be used /// let bytes = b"hello world"; /// sock1.send(bytes).await?; /// /// let mut buff = vec![0u8; 24]; /// let size = sock2.recv(&mut buff).await?; /// /// let dgram = &buff[..size]; /// assert_eq!(dgram, bytes); /// /// # Ok(()) /// # } /// ``` pub async fn recv(&self, buf: &mut [u8]) -> io::Result<usize> { self.io .registration() .async_io(Interest::READABLE, || self.io.recv(buf)) .await } /// Tries to receive a datagram from the peer without waiting. 
/// /// # Examples /// /// ```no_run /// use tokio::net::UnixDatagram; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// // Connect to a peer /// let dir = tempfile::tempdir().unwrap(); /// let client_path = dir.path().join("client.sock"); /// let server_path = dir.path().join("server.sock"); /// let socket = UnixDatagram::bind(&client_path)?; /// socket.connect(&server_path)?; /// /// loop { /// // Wait for the socket to be readable /// socket.readable().await?; /// /// // The buffer is **not** included in the async task and will /// // only exist on the stack. /// let mut buf = [0; 1024]; /// /// // Try to recv data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match socket.try_recv(&mut buf) { /// Ok(n) => { /// println!("GOT {:?}", &buf[..n]); /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_recv(&self, buf: &mut [u8]) -> io::Result<usize> { self.io .registration() .try_io(Interest::READABLE, || self.io.recv(buf)) } cfg_io_util! { /// Tries to receive data from the socket without waiting. /// /// This method can be used even if `buf` is uninitialized. /// /// # Examples /// /// ```no_run /// use tokio::net::UnixDatagram; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// // Connect to a peer /// let dir = tempfile::tempdir().unwrap(); /// let client_path = dir.path().join("client.sock"); /// let server_path = dir.path().join("server.sock"); /// let socket = UnixDatagram::bind(&client_path)?; /// /// loop { /// // Wait for the socket to be readable /// socket.readable().await?; /// /// let mut buf = Vec::with_capacity(1024); /// /// // Try to recv data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. 
/// match socket.try_recv_buf_from(&mut buf) { /// Ok((n, _addr)) => { /// println!("GOT {:?}", &buf[..n]); /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_recv_buf_from<B: BufMut>(&self, buf: &mut B) -> io::Result<(usize, SocketAddr)> { let (n, addr) = self.io.registration().try_io(Interest::READABLE, || { let dst = buf.chunk_mut(); let dst = unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit<u8>] as *mut [u8]) }; // Safety: We trust `UnixDatagram::recv_from` to have filled up `n` bytes in the // buffer. let (n, addr) = (*self.io).recv_from(dst)?; unsafe { buf.advance_mut(n); } Ok((n, addr)) })?; Ok((n, SocketAddr(addr))) } /// Receives from the socket, advances the /// buffer's internal cursor and returns how many bytes were read and the origin. /// /// This method can be used even if `buf` is uninitialized. /// /// # Examples /// ``` /// # use std::error::Error; /// # #[tokio::main] /// # async fn main() -> Result<(), Box<dyn Error>> { /// # if cfg!(miri) { return Ok(()); } // No `socket` in miri. /// use tokio::net::UnixDatagram; /// use tempfile::tempdir; /// /// // We use a temporary directory so that the socket /// // files left by the bound sockets will get cleaned up. 
/// let tmp = tempdir()?; /// /// // Bind each socket to a filesystem path /// let tx_path = tmp.path().join("tx"); /// let tx = UnixDatagram::bind(&tx_path)?; /// let rx_path = tmp.path().join("rx"); /// let rx = UnixDatagram::bind(&rx_path)?; /// /// let bytes = b"hello world"; /// tx.send_to(bytes, &rx_path).await?; /// /// let mut buf = Vec::with_capacity(24); /// let (size, addr) = rx.recv_buf_from(&mut buf).await?; /// /// let dgram = &buf[..size]; /// assert_eq!(dgram, bytes); /// assert_eq!(addr.as_pathname().unwrap(), &tx_path); /// /// # Ok(()) /// # } /// ``` pub async fn recv_buf_from<B: BufMut>(&self, buf: &mut B) -> io::Result<(usize, SocketAddr)> { self.io.registration().async_io(Interest::READABLE, || {
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
true
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/windows/mod.rs
tokio/src/net/windows/mod.rs
//! Windows specific network types. pub mod named_pipe;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/net/windows/named_pipe.rs
tokio/src/net/windows/named_pipe.rs
//! Tokio support for [Windows named pipes]. //! //! [Windows named pipes]: https://docs.microsoft.com/en-us/windows/win32/ipc/named-pipes use std::ffi::c_void; use std::ffi::OsStr; use std::io::{self, Read, Write}; use std::pin::Pin; use std::ptr; use std::ptr::null_mut; use std::task::{Context, Poll}; use crate::io::{AsyncRead, AsyncWrite, Interest, PollEvented, ReadBuf, Ready}; use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle, FromRawHandle, RawHandle}; cfg_io_util! { use bytes::BufMut; } // Hide imports which are not used when generating documentation. #[cfg(windows)] mod doc { pub(super) use crate::os::windows::ffi::OsStrExt; pub(super) mod windows_sys { pub(crate) use windows_sys::{ Win32::Foundation::*, Win32::Storage::FileSystem::*, Win32::System::Pipes::*, Win32::System::SystemServices::*, }; } pub(super) use mio::windows as mio_windows; } // NB: none of these shows up in public API, so don't document them. #[cfg(not(windows))] mod doc { pub(super) mod mio_windows { pub type NamedPipe = crate::doc::NotDefinedHere; } } use self::doc::*; /// A [Windows named pipe] server. /// /// Accepting client connections involves creating a server with /// [`ServerOptions::create`] and waiting for clients to connect using /// [`NamedPipeServer::connect`]. /// /// To avoid having clients sporadically fail with /// [`std::io::ErrorKind::NotFound`] when they connect to a server, we must /// ensure that at least one server instance is available at all times. This /// means that the typical listen loop for a server is a bit involved, because /// we have to ensure that we never drop a server accidentally while a client /// might connect. 
/// /// So a correctly implemented server looks like this: /// /// ```no_run /// use std::io; /// use tokio::net::windows::named_pipe::ServerOptions; /// /// const PIPE_NAME: &str = r"\\.\pipe\named-pipe-idiomatic-server"; /// /// # #[tokio::main] async fn main() -> std::io::Result<()> { /// // The first server needs to be constructed early so that clients can /// // be correctly connected. Otherwise calling .wait will cause the client to /// // error. /// // /// // Here we also make use of `first_pipe_instance`, which will ensure that /// // there are no other servers up and running already. /// let mut server = ServerOptions::new() /// .first_pipe_instance(true) /// .create(PIPE_NAME)?; /// /// // Spawn the server loop. /// let server = tokio::spawn(async move { /// loop { /// // Wait for a client to connect. /// server.connect().await?; /// let connected_client = server; /// /// // Construct the next server to be connected before sending the one /// // we already have of onto a task. This ensures that the server /// // isn't closed (after it's done in the task) before a new one is /// // available. Otherwise the client might error with /// // `io::ErrorKind::NotFound`. /// server = ServerOptions::new().create(PIPE_NAME)?; /// /// let client = tokio::spawn(async move { /// /* use the connected client */ /// # Ok::<_, std::io::Error>(()) /// }); /// # if true { break } // needed for type inference to work /// } /// /// Ok::<_, io::Error>(()) /// }); /// /// /* do something else not server related here */ /// # Ok(()) } /// ``` /// /// [Windows named pipe]: https://docs.microsoft.com/en-us/windows/win32/ipc/named-pipes #[derive(Debug)] pub struct NamedPipeServer { io: PollEvented<mio_windows::NamedPipe>, } impl NamedPipeServer { /// Constructs a new named pipe server from the specified raw handle. /// /// This function will consume ownership of the handle given, passing /// responsibility for closing the handle to the returned object. 
/// /// This function is also unsafe as the primitives currently returned have /// the contract that they are the sole owner of the file descriptor they /// are wrapping. Usage of this function could accidentally allow violating /// this contract which can cause memory unsafety in code that relies on it /// being true. /// /// # Errors /// /// This errors if called outside of a [Tokio Runtime], or in a runtime that /// has not [enabled I/O], or if any OS-specific I/O errors occur. /// /// [Tokio Runtime]: crate::runtime::Runtime /// [enabled I/O]: crate::runtime::Builder::enable_io pub unsafe fn from_raw_handle(handle: RawHandle) -> io::Result<Self> { let named_pipe = unsafe { mio_windows::NamedPipe::from_raw_handle(handle) }; Ok(Self { io: PollEvented::new(named_pipe)?, }) } /// Retrieves information about the named pipe the server is associated /// with. /// /// ```no_run /// use tokio::net::windows::named_pipe::{PipeEnd, PipeMode, ServerOptions}; /// /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-server-info"; /// /// # #[tokio::main] async fn main() -> std::io::Result<()> { /// let server = ServerOptions::new() /// .pipe_mode(PipeMode::Message) /// .max_instances(5) /// .create(PIPE_NAME)?; /// /// let server_info = server.info()?; /// /// assert_eq!(server_info.end, PipeEnd::Server); /// assert_eq!(server_info.mode, PipeMode::Message); /// assert_eq!(server_info.max_instances, 5); /// # Ok(()) } /// ``` pub fn info(&self) -> io::Result<PipeInfo> { // Safety: we're ensuring the lifetime of the named pipe. unsafe { named_pipe_info(self.io.as_raw_handle()) } } /// Enables a named pipe server process to wait for a client process to /// connect to an instance of a named pipe. A client process connects by /// creating a named pipe with the same name. /// /// This corresponds to the [`ConnectNamedPipe`] system call. 
/// /// # Cancel safety /// /// This method is cancellation safe in the sense that if it is used as the /// event in a [`select!`](crate::select) statement and some other branch /// completes first, then no connection events have been lost. /// /// [`ConnectNamedPipe`]: https://docs.microsoft.com/en-us/windows/win32/api/namedpipeapi/nf-namedpipeapi-connectnamedpipe /// /// # Example /// /// ```no_run /// use tokio::net::windows::named_pipe::ServerOptions; /// /// const PIPE_NAME: &str = r"\\.\pipe\mynamedpipe"; /// /// # #[tokio::main] async fn main() -> std::io::Result<()> { /// let pipe = ServerOptions::new().create(PIPE_NAME)?; /// /// // Wait for a client to connect. /// pipe.connect().await?; /// /// // Use the connected client... /// # Ok(()) } /// ``` pub async fn connect(&self) -> io::Result<()> { match self.io.connect() { Err(e) if e.kind() == io::ErrorKind::WouldBlock => { self.io .registration() .async_io(Interest::WRITABLE, || self.io.connect()) .await } x => x, } } /// Disconnects the server end of a named pipe instance from a client /// process. /// /// ``` /// use tokio::io::AsyncWriteExt; /// use tokio::net::windows::named_pipe::{ClientOptions, ServerOptions}; /// use windows_sys::Win32::Foundation::ERROR_PIPE_NOT_CONNECTED; /// /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-disconnect"; /// /// # #[tokio::main] async fn main() -> std::io::Result<()> { /// let server = ServerOptions::new() /// .create(PIPE_NAME)?; /// /// let mut client = ClientOptions::new() /// .open(PIPE_NAME)?; /// /// // Wait for a client to become connected. /// server.connect().await?; /// /// // Forcibly disconnect the client. /// server.disconnect()?; /// /// // Write fails with an OS-specific error after client has been /// // disconnected. 
/// let e = client.write(b"ping").await.unwrap_err(); /// assert_eq!(e.raw_os_error(), Some(ERROR_PIPE_NOT_CONNECTED as i32)); /// # Ok(()) } /// ``` pub fn disconnect(&self) -> io::Result<()> { self.io.disconnect() } /// Waits for any of the requested ready states. /// /// This function is usually paired with `try_read()` or `try_write()`. It /// can be used to concurrently read / write to the same pipe on a single /// task without splitting the pipe. /// /// The function may complete without the pipe being ready. This is a /// false-positive and attempting an operation will return with /// `io::ErrorKind::WouldBlock`. The function can also return with an empty /// [`Ready`] set, so you should always check the returned value and possibly /// wait again if the requested states are not set. /// /// # Examples /// /// Concurrently read and write to the pipe on the same task without /// splitting. /// /// ```no_run /// use tokio::io::Interest; /// use tokio::net::windows::named_pipe; /// use std::error::Error; /// use std::io; /// /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-server-ready"; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let server = named_pipe::ServerOptions::new() /// .create(PIPE_NAME)?; /// /// loop { /// let ready = server.ready(Interest::READABLE | Interest::WRITABLE).await?; /// /// if ready.is_readable() { /// let mut data = vec![0; 1024]; /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match server.try_read(&mut data) { /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// if ready.is_writable() { /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. 
/// match server.try_write(b"hello world") { /// Ok(n) => { /// println!("write {} bytes", n); /// } /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// } /// } /// ``` pub async fn ready(&self, interest: Interest) -> io::Result<Ready> { let event = self.io.registration().readiness(interest).await?; Ok(event.ready) } /// Waits for the pipe to become readable. /// /// This function is equivalent to `ready(Interest::READABLE)` and is usually /// paired with `try_read()`. /// /// # Examples /// /// ```no_run /// use tokio::net::windows::named_pipe; /// use std::error::Error; /// use std::io; /// /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-server-readable"; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let server = named_pipe::ServerOptions::new() /// .create(PIPE_NAME)?; /// /// let mut msg = vec![0; 1024]; /// /// loop { /// // Wait for the pipe to be readable /// server.readable().await?; /// /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match server.try_read(&mut msg) { /// Ok(n) => { /// msg.truncate(n); /// break; /// } /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// println!("GOT = {:?}", msg); /// Ok(()) /// } /// ``` pub async fn readable(&self) -> io::Result<()> { self.ready(Interest::READABLE).await?; Ok(()) } /// Polls for read readiness. /// /// If the pipe is not currently ready for reading, this method will /// store a clone of the `Waker` from the provided `Context`. When the pipe /// becomes ready for reading, `Waker::wake` will be called on the waker. /// /// Note that on multiple calls to `poll_read_ready` or `poll_read`, only /// the `Waker` from the `Context` passed to the most recent call is /// scheduled to receive a wakeup. 
(However, `poll_write_ready` retains a /// second, independent waker.) /// /// This function is intended for cases where creating and pinning a future /// via [`readable`] is not feasible. Where possible, using [`readable`] is /// preferred, as this supports polling from multiple tasks at once. /// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if the pipe is not ready for reading. /// * `Poll::Ready(Ok(()))` if the pipe is ready for reading. /// * `Poll::Ready(Err(e))` if an error is encountered. /// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. /// /// [`readable`]: method@Self::readable pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.io.registration().poll_read_ready(cx).map_ok(|_| ()) } /// Tries to read data from the pipe into the provided buffer, returning how /// many bytes were read. /// /// Receives any pending data from the pipe but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read()` is non-blocking, the buffer does not have to be stored by /// the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`readable()`]: NamedPipeServer::readable() /// [`ready()`]: NamedPipeServer::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: /// /// 1. The pipe's read half is closed and will no longer yield data. /// 2. The specified buffer was 0 bytes in length. /// /// If the pipe is not ready to read data, /// `Err(io::ErrorKind::WouldBlock)` is returned. 
/// /// # Examples /// /// ```no_run /// use tokio::net::windows::named_pipe; /// use std::error::Error; /// use std::io; /// /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-server-try-read"; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let server = named_pipe::ServerOptions::new() /// .create(PIPE_NAME)?; /// /// loop { /// // Wait for the pipe to be readable /// server.readable().await?; /// /// // Creating the buffer **after** the `await` prevents it from /// // being stored in the async task. /// let mut buf = [0; 4096]; /// /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match server.try_read(&mut buf) { /// Ok(0) => break, /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_read(&self, buf: &mut [u8]) -> io::Result<usize> { self.io .registration() .try_io(Interest::READABLE, || (&*self.io).read(buf)) } /// Tries to read data from the pipe into the provided buffers, returning /// how many bytes were read. /// /// Data is copied to fill each buffer in order, with the final buffer /// written to possibly being only partially filled. This method behaves /// equivalently to a single call to [`try_read()`] with concatenated /// buffers. /// /// Receives any pending data from the pipe but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read_vectored()` is non-blocking, the buffer does not have to be /// stored by the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. 
/// /// [`try_read()`]: NamedPipeServer::try_read() /// [`readable()`]: NamedPipeServer::readable() /// [`ready()`]: NamedPipeServer::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. `Ok(0)` indicates the pipe's read half is closed /// and will no longer yield data. If the pipe is not ready to read data /// `Err(io::ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::windows::named_pipe; /// use std::error::Error; /// use std::io::{self, IoSliceMut}; /// /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-server-try-read-vectored"; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let server = named_pipe::ServerOptions::new() /// .create(PIPE_NAME)?; /// /// loop { /// // Wait for the pipe to be readable /// server.readable().await?; /// /// // Creating the buffer **after** the `await` prevents it from /// // being stored in the async task. /// let mut buf_a = [0; 512]; /// let mut buf_b = [0; 1024]; /// let mut bufs = [ /// IoSliceMut::new(&mut buf_a), /// IoSliceMut::new(&mut buf_b), /// ]; /// /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match server.try_read_vectored(&mut bufs) { /// Ok(0) => break, /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> { self.io .registration() .try_io(Interest::READABLE, || (&*self.io).read_vectored(bufs)) } cfg_io_util! { /// Tries to read data from the stream into the provided buffer, advancing the /// buffer's internal cursor, returning how many bytes were read. /// /// Receives any pending data from the pipe but does not wait for new data /// to arrive. 
On success, returns the number of bytes read. Because /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by /// the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`readable()`]: NamedPipeServer::readable() /// [`ready()`]: NamedPipeServer::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. `Ok(0)` indicates the stream's read half is closed /// and will no longer yield data. If the stream is not ready to read data /// `Err(io::ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::windows::named_pipe; /// use std::error::Error; /// use std::io; /// /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-client-readable"; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let server = named_pipe::ServerOptions::new().create(PIPE_NAME)?; /// /// loop { /// // Wait for the pipe to be readable /// server.readable().await?; /// /// let mut buf = Vec::with_capacity(4096); /// /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match server.try_read_buf(&mut buf) { /// Ok(0) => break, /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_read_buf<B: BufMut>(&self, buf: &mut B) -> io::Result<usize> { self.io.registration().try_io(Interest::READABLE, || { use std::io::Read; let dst = buf.chunk_mut(); let dst = unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit<u8>] as *mut [u8]) }; // Safety: We trust `NamedPipeServer::read` to have filled up `n` bytes in the // buffer. 
let n = (&*self.io).read(dst)?; unsafe { buf.advance_mut(n); } Ok(n) }) } } /// Waits for the pipe to become writable. /// /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually /// paired with `try_write()`. /// /// # Examples /// /// ```no_run /// use tokio::net::windows::named_pipe; /// use std::error::Error; /// use std::io; /// /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-server-writable"; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let server = named_pipe::ServerOptions::new() /// .create(PIPE_NAME)?; /// /// loop { /// // Wait for the pipe to be writable /// server.writable().await?; /// /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match server.try_write(b"hello world") { /// Ok(n) => { /// break; /// } /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub async fn writable(&self) -> io::Result<()> { self.ready(Interest::WRITABLE).await?; Ok(()) } /// Polls for write readiness. /// /// If the pipe is not currently ready for writing, this method will /// store a clone of the `Waker` from the provided `Context`. When the pipe /// becomes ready for writing, `Waker::wake` will be called on the waker. /// /// Note that on multiple calls to `poll_write_ready` or `poll_write`, only /// the `Waker` from the `Context` passed to the most recent call is /// scheduled to receive a wakeup. (However, `poll_read_ready` retains a /// second, independent waker.) /// /// This function is intended for cases where creating and pinning a future /// via [`writable`] is not feasible. Where possible, using [`writable`] is /// preferred, as this supports polling from multiple tasks at once. /// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if the pipe is not ready for writing. 
/// * `Poll::Ready(Ok(()))` if the pipe is ready for writing. /// * `Poll::Ready(Err(e))` if an error is encountered. /// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. /// /// [`writable`]: method@Self::writable pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.io.registration().poll_write_ready(cx).map_ok(|_| ()) } /// Tries to write a buffer to the pipe, returning how many bytes were /// written. /// /// The function will attempt to write the entire contents of `buf`, but /// only part of the buffer may be written. /// /// This function is usually paired with `writable()`. /// /// # Return /// /// If data is successfully written, `Ok(n)` is returned, where `n` is the /// number of bytes written. If the pipe is not ready to write data, /// `Err(io::ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::windows::named_pipe; /// use std::error::Error; /// use std::io; /// /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-server-try-write"; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let server = named_pipe::ServerOptions::new() /// .create(PIPE_NAME)?; /// /// loop { /// // Wait for the pipe to be writable /// server.writable().await?; /// /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match server.try_write(b"hello world") { /// Ok(n) => { /// break; /// } /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_write(&self, buf: &[u8]) -> io::Result<usize> { self.io .registration() .try_io(Interest::WRITABLE, || (&*self.io).write(buf)) } /// Tries to write several buffers to the pipe, returning how many bytes /// were written. 
/// /// Data is written from each buffer in order, with the final buffer read /// from possible being only partially consumed. This method behaves /// equivalently to a single call to [`try_write()`] with concatenated /// buffers. /// /// This function is usually paired with `writable()`. /// /// [`try_write()`]: NamedPipeServer::try_write() /// /// # Return /// /// If data is successfully written, `Ok(n)` is returned, where `n` is the /// number of bytes written. If the pipe is not ready to write data, /// `Err(io::ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::windows::named_pipe; /// use std::error::Error; /// use std::io; /// /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-server-try-write-vectored"; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let server = named_pipe::ServerOptions::new() /// .create(PIPE_NAME)?; /// /// let bufs = [io::IoSlice::new(b"hello "), io::IoSlice::new(b"world")]; /// /// loop { /// // Wait for the pipe to be writable /// server.writable().await?; /// /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match server.try_write_vectored(&bufs) { /// Ok(n) => { /// break; /// } /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_write_vectored(&self, buf: &[io::IoSlice<'_>]) -> io::Result<usize> { self.io .registration() .try_io(Interest::WRITABLE, || (&*self.io).write_vectored(buf)) } /// Tries to read or write from the pipe using a user-provided IO operation. /// /// If the pipe is ready, the provided closure is called. The closure /// should attempt to perform IO operation from the pipe by manually /// calling the appropriate syscall. 
If the operation fails because the /// pipe is not actually ready, then the closure should return a /// `WouldBlock` error and the readiness flag is cleared. The return value /// of the closure is then returned by `try_io`. /// /// If the pipe is not ready, then the closure is not called /// and a `WouldBlock` error is returned. /// /// The closure should only return a `WouldBlock` error if it has performed /// an IO operation on the pipe that failed due to the pipe not being /// ready. Returning a `WouldBlock` error in any other situation will /// incorrectly clear the readiness flag, which can cause the pipe to /// behave incorrectly. /// /// The closure should not perform the IO operation using any of the /// methods defined on the Tokio `NamedPipeServer` type, as this will mess with /// the readiness flag and can cause the pipe to behave incorrectly. /// /// This method is not intended to be used with combined interests. /// The closure should perform only one type of IO operation, so it should not /// require more than one ready state. This method may panic or sleep forever /// if it is called with a combined interest. /// /// Usually, [`readable()`], [`writable()`] or [`ready()`] is used with this function. /// /// [`readable()`]: NamedPipeServer::readable() /// [`writable()`]: NamedPipeServer::writable() /// [`ready()`]: NamedPipeServer::ready() pub fn try_io<R>( &self, interest: Interest, f: impl FnOnce() -> io::Result<R>, ) -> io::Result<R> { self.io.registration().try_io(interest, f) } /// Reads or writes from the pipe using a user-provided IO operation. /// /// The readiness of the pipe is awaited and when the pipe is ready, /// the provided closure is called. The closure should attempt to perform /// IO operation on the pipe by manually calling the appropriate syscall. /// If the operation fails because the pipe is not actually ready, /// then the closure should return a `WouldBlock` error. 
In such case the /// readiness flag is cleared and the pipe readiness is awaited again. /// This loop is repeated until the closure returns an `Ok` or an error /// other than `WouldBlock`. /// /// The closure should only return a `WouldBlock` error if it has performed /// an IO operation on the pipe that failed due to the pipe not being /// ready. Returning a `WouldBlock` error in any other situation will /// incorrectly clear the readiness flag, which can cause the pipe to /// behave incorrectly. ///
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
true
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/signal/unix.rs
tokio/src/signal/unix.rs
//! Unix-specific types for signal handling. //! //! This module is only defined on Unix platforms and contains the primary //! `Signal` type for receiving notifications of signals. #![cfg(unix)] #![cfg_attr(docsrs, doc(cfg(all(unix, feature = "signal"))))] use crate::runtime::scheduler; use crate::runtime::signal::Handle; use crate::signal::registry::{globals, EventId, EventInfo, Globals, Storage}; use crate::signal::RxFuture; use crate::sync::watch; use mio::net::UnixStream; use std::io::{self, Error, ErrorKind, Write}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Once; use std::task::{Context, Poll}; #[cfg(not(any(target_os = "linux", target_os = "illumos")))] pub(crate) struct OsStorage([SignalInfo; 33]); #[cfg(any(target_os = "linux", target_os = "illumos"))] pub(crate) struct OsStorage(Box<[SignalInfo]>); impl OsStorage { fn get(&self, id: EventId) -> Option<&SignalInfo> { self.0.get(id - 1) } } impl Default for OsStorage { fn default() -> Self { // There are reliable signals ranging from 1 to 33 available on every Unix platform. #[cfg(not(any(target_os = "linux", target_os = "illumos")))] let inner = std::array::from_fn(|_| SignalInfo::default()); // On Linux and illumos, there are additional real-time signals // available. (This is also likely true on Solaris, but this should be // verified before being enabled.) 
#[cfg(any(target_os = "linux", target_os = "illumos"))] let inner = std::iter::repeat_with(SignalInfo::default) .take(libc::SIGRTMAX() as usize) .collect(); Self(inner) } } impl Storage for OsStorage { fn event_info(&self, id: EventId) -> Option<&EventInfo> { self.get(id).map(|si| &si.event_info) } fn for_each<'a, F>(&'a self, f: F) where F: FnMut(&'a EventInfo), { self.0.iter().map(|si| &si.event_info).for_each(f); } } #[derive(Debug)] pub(crate) struct OsExtraData { sender: UnixStream, pub(crate) receiver: UnixStream, } impl Default for OsExtraData { fn default() -> Self { let (receiver, sender) = UnixStream::pair().expect("failed to create UnixStream"); Self { sender, receiver } } } /// Represents the specific kind of signal to listen for. #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub struct SignalKind(libc::c_int); impl SignalKind { /// Allows for listening to any valid OS signal. /// /// For example, this can be used for listening for platform-specific /// signals. /// ```rust,no_run /// # use tokio::signal::unix::SignalKind; /// # let signum = -1; /// // let signum = libc::OS_SPECIFIC_SIGNAL; /// let kind = SignalKind::from_raw(signum); /// ``` // Use `std::os::raw::c_int` on public API to prevent leaking a non-stable // type alias from libc. // `libc::c_int` and `std::os::raw::c_int` are currently the same type, and are // unlikely to change to other types, but technically libc can change this // in the future minor version. // See https://github.com/tokio-rs/tokio/issues/3767 for more. pub const fn from_raw(signum: std::os::raw::c_int) -> Self { Self(signum as libc::c_int) } /// Get the signal's numeric value. /// /// ```rust /// # use tokio::signal::unix::SignalKind; /// let kind = SignalKind::interrupt(); /// assert_eq!(kind.as_raw_value(), libc::SIGINT); /// ``` pub const fn as_raw_value(&self) -> std::os::raw::c_int { self.0 } /// Represents the `SIGALRM` signal. /// /// On Unix systems this signal is sent when a real-time timer has expired. 
/// By default, the process is terminated by this signal. pub const fn alarm() -> Self { Self(libc::SIGALRM) } /// Represents the `SIGCHLD` signal. /// /// On Unix systems this signal is sent when the status of a child process /// has changed. By default, this signal is ignored. pub const fn child() -> Self { Self(libc::SIGCHLD) } /// Represents the `SIGHUP` signal. /// /// On Unix systems this signal is sent when the terminal is disconnected. /// By default, the process is terminated by this signal. pub const fn hangup() -> Self { Self(libc::SIGHUP) } /// Represents the `SIGINFO` signal. /// /// On Unix systems this signal is sent to request a status update from the /// process. By default, this signal is ignored. #[cfg(any( target_os = "dragonfly", target_os = "freebsd", target_os = "macos", target_os = "netbsd", target_os = "openbsd", target_os = "illumos" ))] pub const fn info() -> Self { Self(libc::SIGINFO) } /// Represents the `SIGINT` signal. /// /// On Unix systems this signal is sent to interrupt a program. /// By default, the process is terminated by this signal. pub const fn interrupt() -> Self { Self(libc::SIGINT) } #[cfg(target_os = "haiku")] /// Represents the `SIGPOLL` signal. /// /// On POSIX systems this signal is sent when I/O operations are possible /// on some file descriptor. By default, this signal is ignored. pub const fn io() -> Self { Self(libc::SIGPOLL) } #[cfg(not(target_os = "haiku"))] /// Represents the `SIGIO` signal. /// /// On Unix systems this signal is sent when I/O operations are possible /// on some file descriptor. By default, this signal is ignored. pub const fn io() -> Self { Self(libc::SIGIO) } /// Represents the `SIGPIPE` signal. /// /// On Unix systems this signal is sent when the process attempts to write /// to a pipe which has no reader. By default, the process is terminated by /// this signal. pub const fn pipe() -> Self { Self(libc::SIGPIPE) } /// Represents the `SIGQUIT` signal. 
/// /// On Unix systems this signal is sent to issue a shutdown of the /// process, after which the OS will dump the process core. /// By default, the process is terminated by this signal. pub const fn quit() -> Self { Self(libc::SIGQUIT) } /// Represents the `SIGTERM` signal. /// /// On Unix systems this signal is sent to issue a shutdown of the /// process. By default, the process is terminated by this signal. pub const fn terminate() -> Self { Self(libc::SIGTERM) } /// Represents the `SIGUSR1` signal. /// /// On Unix systems this is a user defined signal. /// By default, the process is terminated by this signal. pub const fn user_defined1() -> Self { Self(libc::SIGUSR1) } /// Represents the `SIGUSR2` signal. /// /// On Unix systems this is a user defined signal. /// By default, the process is terminated by this signal. pub const fn user_defined2() -> Self { Self(libc::SIGUSR2) } /// Represents the `SIGWINCH` signal. /// /// On Unix systems this signal is sent when the terminal window is resized. /// By default, this signal is ignored. pub const fn window_change() -> Self { Self(libc::SIGWINCH) } } impl From<std::os::raw::c_int> for SignalKind { fn from(signum: std::os::raw::c_int) -> Self { Self::from_raw(signum as libc::c_int) } } impl From<SignalKind> for std::os::raw::c_int { fn from(kind: SignalKind) -> Self { kind.as_raw_value() } } pub(crate) struct SignalInfo { event_info: EventInfo, init: Once, initialized: AtomicBool, } impl Default for SignalInfo { fn default() -> SignalInfo { SignalInfo { event_info: EventInfo::default(), init: Once::new(), initialized: AtomicBool::new(false), } } } /// Our global signal handler for all signals registered by this module. /// /// The purpose of this signal handler is to primarily: /// /// 1. Flag that our specific signal was received (e.g. store an atomic flag) /// 2. Wake up the driver by writing a byte to a pipe /// /// Those two operations should both be async-signal safe. 
fn action(globals: &'static Globals, signal: libc::c_int) { globals.record_event(signal as EventId); // Send a wakeup, ignore any errors (anything reasonably possible is // full pipe and then it will wake up anyway). let mut sender = &globals.sender; drop(sender.write(&[1])); } /// Enables this module to receive signal notifications for the `signal` /// provided. /// /// This will register the signal handler if it hasn't already been registered, /// returning any error along the way if that fails. fn signal_enable(signal: SignalKind, handle: &Handle) -> io::Result<()> { let signal = signal.0; if signal <= 0 || signal_hook_registry::FORBIDDEN.contains(&signal) { return Err(Error::new( ErrorKind::Other, format!("Refusing to register signal {signal}"), )); } // Check that we have a signal driver running handle.check_inner()?; let globals = globals(); let siginfo = match globals.storage().get(signal as EventId) { Some(slot) => slot, None => return Err(io::Error::new(io::ErrorKind::Other, "signal too large")), }; let mut registered = Ok(()); siginfo.init.call_once(|| { registered = unsafe { signal_hook_registry::register(signal, move || action(globals, signal)).map(|_| ()) }; if registered.is_ok() { siginfo.initialized.store(true, Ordering::Relaxed); } }); registered?; // If the call_once failed, it won't be retried on the next attempt to register the signal. In // such case it is not run, registered is still `Ok(())`, initialized is still `false`. if siginfo.initialized.load(Ordering::Relaxed) { Ok(()) } else { Err(Error::new( ErrorKind::Other, "Failed to register signal handler", )) } } /// An listener for receiving a particular type of OS signal. /// /// The listener can be turned into a `Stream` using [`SignalStream`]. /// /// [`SignalStream`]: https://docs.rs/tokio-stream/latest/tokio_stream/wrappers/struct.SignalStream.html /// /// In general signal handling on Unix is a pretty tricky topic, and this /// structure is no exception! 
There are some important limitations to keep in /// mind when using `Signal` streams: /// /// * Signals handling in Unix already necessitates coalescing signals /// together sometimes. This `Signal` stream is also no exception here in /// that it will also coalesce signals. That is, even if the signal handler /// for this process runs multiple times, the `Signal` stream may only return /// one signal notification. Specifically, before `poll` is called, all /// signal notifications are coalesced into one item returned from `poll`. /// Once `poll` has been called, however, a further signal is guaranteed to /// be yielded as an item. /// /// Put another way, any element pulled off the returned listener corresponds to /// *at least one* signal, but possibly more. /// /// * Signal handling in general is relatively inefficient. Although some /// improvements are possible in this crate, it's recommended to not plan on /// having millions of signal channels open. /// /// If you've got any questions about this feel free to open an issue on the /// repo! New approaches to alleviate some of these limitations are always /// appreciated! /// /// # Caveats /// /// The first time that a `Signal` instance is registered for a particular /// signal kind, an OS signal-handler is installed which replaces the default /// platform behavior when that signal is received, **for the duration of the /// entire process**. /// /// For example, Unix systems will terminate a process by default when it /// receives `SIGINT`. But, when a `Signal` instance is created to listen for /// this signal, the next `SIGINT` that arrives will be translated to a stream /// event, and the process will continue to execute. **Even if this `Signal` /// instance is dropped, subsequent `SIGINT` deliveries will end up captured by /// Tokio, and the default platform behavior will NOT be reset**. 
/// /// Thus, applications should take care to ensure the expected signal behavior /// occurs as expected after listening for specific signals. /// /// # Examples /// /// Wait for `SIGHUP` /// /// ```rust,no_run /// use tokio::signal::unix::{signal, SignalKind}; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn std::error::Error>> { /// // An infinite stream of hangup signals. /// let mut sig = signal(SignalKind::hangup())?; /// /// // Print whenever a HUP signal is received /// loop { /// sig.recv().await; /// println!("got signal HUP"); /// } /// } /// ``` #[must_use = "streams do nothing unless polled"] #[derive(Debug)] pub struct Signal { inner: RxFuture, } /// Creates a new listener which will receive notifications when the current /// process receives the specified signal `kind`. /// /// This function will create a new stream which binds to the default reactor. /// The `Signal` stream is an infinite stream which will receive /// notifications whenever a signal is received. More documentation can be /// found on `Signal` itself, but to reiterate: /// /// * Signals may be coalesced beyond what the kernel already does. /// * Once a signal handler is registered with the process the underlying /// libc signal handler is never unregistered. /// /// A `Signal` stream can be created for a particular signal number /// multiple times. When a signal is received then all the associated /// channels will receive the signal notification. /// /// # Errors /// /// * If the lower-level C functions fail for some reason. /// * If the previous initialization of this specific signal failed. /// * If the signal is one of /// [`signal_hook::FORBIDDEN`](fn@signal_hook_registry::register#panics) /// /// # Panics /// /// This function panics if there is no current reactor set, or if the `rt` /// feature flag is not enabled. 
#[track_caller] pub fn signal(kind: SignalKind) -> io::Result<Signal> { let handle = scheduler::Handle::current(); let rx = signal_with_handle(kind, handle.driver().signal())?; Ok(Signal { inner: RxFuture::new(rx), }) } pub(crate) fn signal_with_handle( kind: SignalKind, handle: &Handle, ) -> io::Result<watch::Receiver<()>> { // Turn the signal delivery on once we are ready for it signal_enable(kind, handle)?; Ok(globals().register_listener(kind.0 as EventId)) } impl Signal { /// Receives the next signal notification event. /// /// `None` is returned if no more events can be received by this stream. /// /// # Cancel safety /// /// This method is cancel safe. If you use it as the event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, then it is guaranteed that no signal is lost. /// /// # Examples /// /// Wait for `SIGHUP` /// /// ```rust,no_run /// use tokio::signal::unix::{signal, SignalKind}; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn std::error::Error>> { /// // An infinite stream of hangup signals. /// let mut stream = signal(SignalKind::hangup())?; /// /// // Print whenever a HUP signal is received /// loop { /// stream.recv().await; /// println!("got signal HUP"); /// } /// } /// ``` pub async fn recv(&mut self) -> Option<()> { self.inner.recv().await } /// Polls to receive the next signal notification event, outside of an /// `async` context. /// /// This method returns: /// /// * `Poll::Pending` if no signals are available but the channel is not /// closed. /// * `Poll::Ready(Some(()))` if a signal is available. /// * `Poll::Ready(None)` if the channel has been closed and all signals /// sent before it was closed have been received. 
/// /// # Examples /// /// Polling from a manually implemented future /// /// ```rust,no_run /// use std::pin::Pin; /// use std::future::Future; /// use std::task::{Context, Poll}; /// use tokio::signal::unix::Signal; /// /// struct MyFuture { /// signal: Signal, /// } /// /// impl Future for MyFuture { /// type Output = Option<()>; /// /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { /// println!("polling MyFuture"); /// self.signal.poll_recv(cx) /// } /// } /// ``` pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> { self.inner.poll_recv(cx) } } // Work around for abstracting streams internally #[cfg(feature = "process")] pub(crate) trait InternalStream { fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>>; } #[cfg(feature = "process")] impl InternalStream for Signal { fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> { self.poll_recv(cx) } } pub(crate) fn ctrl_c() -> io::Result<Signal> { signal(SignalKind::interrupt()) } #[cfg(all(test, not(loom)))] mod tests { use super::*; #[test] fn signal_enable_error_on_invalid_input() { let inputs = [-1, 0]; for input in inputs { assert_eq!( signal_enable(SignalKind::from_raw(input), &Handle::default()) .unwrap_err() .kind(), ErrorKind::Other, ); } } #[test] fn signal_enable_error_on_forbidden_input() { let inputs = signal_hook_registry::FORBIDDEN; for &input in inputs { assert_eq!( signal_enable(SignalKind::from_raw(input), &Handle::default()) .unwrap_err() .kind(), ErrorKind::Other, ); } } #[test] fn from_c_int() { assert_eq!(SignalKind::from(2), SignalKind::interrupt()); } #[test] fn into_c_int() { let value: std::os::raw::c_int = SignalKind::interrupt().into(); assert_eq!(value, libc::SIGINT as _); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/signal/windows.rs
tokio/src/signal/windows.rs
//! Windows-specific types for signal handling. //! //! This module is only defined on Windows and allows receiving "ctrl-c", //! "ctrl-break", "ctrl-logoff", "ctrl-shutdown", and "ctrl-close" //! notifications. These events are listened for via the `SetConsoleCtrlHandler` //! function which receives the corresponding `windows_sys` event type. #![cfg(any(windows, docsrs))] #![cfg_attr(docsrs, doc(cfg(all(windows, feature = "signal"))))] use crate::signal::RxFuture; use std::io; use std::task::{Context, Poll}; #[cfg(windows)] #[path = "windows/sys.rs"] mod imp; #[cfg(windows)] pub(crate) use self::imp::{OsExtraData, OsStorage}; // For building documentation on Unix machines when the `docsrs` flag is set. #[cfg(not(windows))] #[path = "windows/stub.rs"] mod imp; /// Creates a new listener which receives "ctrl-c" notifications sent to the /// process. /// /// # Examples /// /// ```rust,no_run /// use tokio::signal::windows::ctrl_c; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn std::error::Error>> { /// // A listener of CTRL-C events. /// let mut signal = ctrl_c()?; /// /// // Print whenever a CTRL-C event is received. /// for countdown in (0..3).rev() { /// signal.recv().await; /// println!("got CTRL-C. {} more to exit", countdown); /// } /// /// Ok(()) /// } /// ``` pub fn ctrl_c() -> io::Result<CtrlC> { Ok(CtrlC { inner: self::imp::ctrl_c()?, }) } /// Represents a listener which receives "ctrl-c" notifications sent to the process /// via `SetConsoleCtrlHandler`. /// /// This event can be turned into a `Stream` using [`CtrlCStream`]. /// /// [`CtrlCStream`]: https://docs.rs/tokio-stream/latest/tokio_stream/wrappers/struct.CtrlCStream.html /// /// A notification to this process notifies *all* receivers for /// this event. Moreover, the notifications **are coalesced** if they aren't processed /// quickly enough. This means that if two notifications are received back-to-back, /// then the listener may only receive one item about the two notifications. 
#[must_use = "listeners do nothing unless polled"] #[derive(Debug)] pub struct CtrlC { inner: RxFuture, } impl CtrlC { /// Receives the next signal notification event. /// /// `None` is returned if no more events can be received by the listener. /// /// # Examples /// /// ```rust,no_run /// use tokio::signal::windows::ctrl_c; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn std::error::Error>> { /// let mut signal = ctrl_c()?; /// /// // Print whenever a CTRL-C event is received. /// for countdown in (0..3).rev() { /// signal.recv().await; /// println!("got CTRL-C. {} more to exit", countdown); /// } /// /// Ok(()) /// } /// ``` pub async fn recv(&mut self) -> Option<()> { self.inner.recv().await } /// Polls to receive the next signal notification event, outside of an /// `async` context. /// /// `None` is returned if no more events can be received. /// /// # Examples /// /// Polling from a manually implemented future /// /// ```rust,no_run /// use std::pin::Pin; /// use std::future::Future; /// use std::task::{Context, Poll}; /// use tokio::signal::windows::CtrlC; /// /// struct MyFuture { /// ctrl_c: CtrlC, /// } /// /// impl Future for MyFuture { /// type Output = Option<()>; /// /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { /// println!("polling MyFuture"); /// self.ctrl_c.poll_recv(cx) /// } /// } /// ``` pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> { self.inner.poll_recv(cx) } } /// Represents a listener which receives "ctrl-break" notifications sent to the process /// via `SetConsoleCtrlHandler`. /// /// This listener can be turned into a `Stream` using [`CtrlBreakStream`]. /// /// [`CtrlBreakStream`]: https://docs.rs/tokio-stream/latest/tokio_stream/wrappers/struct.CtrlBreakStream.html /// /// A notification to this process notifies *all* receivers for /// this event. Moreover, the notifications **are coalesced** if they aren't processed /// quickly enough. 
This means that if two notifications are received back-to-back, /// then the listener may only receive one item about the two notifications. #[must_use = "listeners do nothing unless polled"] #[derive(Debug)] pub struct CtrlBreak { inner: RxFuture, } impl CtrlBreak { /// Receives the next signal notification event. /// /// `None` is returned if no more events can be received by this listener. /// /// # Examples /// /// ```rust,no_run /// use tokio::signal::windows::ctrl_break; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn std::error::Error>> { /// // A listener of CTRL-BREAK events. /// let mut signal = ctrl_break()?; /// /// // Print whenever a CTRL-BREAK event is received. /// loop { /// signal.recv().await; /// println!("got signal CTRL-BREAK"); /// } /// } /// ``` pub async fn recv(&mut self) -> Option<()> { self.inner.recv().await } /// Polls to receive the next signal notification event, outside of an /// `async` context. /// /// `None` is returned if no more events can be received by this listener. /// /// # Examples /// /// Polling from a manually implemented future /// /// ```rust,no_run /// use std::pin::Pin; /// use std::future::Future; /// use std::task::{Context, Poll}; /// use tokio::signal::windows::CtrlBreak; /// /// struct MyFuture { /// ctrl_break: CtrlBreak, /// } /// /// impl Future for MyFuture { /// type Output = Option<()>; /// /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { /// println!("polling MyFuture"); /// self.ctrl_break.poll_recv(cx) /// } /// } /// ``` pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> { self.inner.poll_recv(cx) } } /// Creates a new listener which receives "ctrl-break" notifications sent to the /// process. /// /// # Examples /// /// ```rust,no_run /// use tokio::signal::windows::ctrl_break; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn std::error::Error>> { /// // A listener of CTRL-BREAK events. 
/// let mut signal = ctrl_break()?; /// /// // Print whenever a CTRL-BREAK event is received. /// loop { /// signal.recv().await; /// println!("got signal CTRL-BREAK"); /// } /// } /// ``` pub fn ctrl_break() -> io::Result<CtrlBreak> { Ok(CtrlBreak { inner: self::imp::ctrl_break()?, }) } /// Creates a new listener which receives "ctrl-close" notifications sent to the /// process. /// /// # Examples /// /// ```rust,no_run /// use tokio::signal::windows::ctrl_close; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn std::error::Error>> { /// // A listener of CTRL-CLOSE events. /// let mut signal = ctrl_close()?; /// /// // Print whenever a CTRL-CLOSE event is received. /// for countdown in (0..3).rev() { /// signal.recv().await; /// println!("got CTRL-CLOSE. {} more to exit", countdown); /// } /// /// Ok(()) /// } /// ``` pub fn ctrl_close() -> io::Result<CtrlClose> { Ok(CtrlClose { inner: self::imp::ctrl_close()?, }) } /// Represents a listener which receives "ctrl-close" notifications sent to the process /// via `SetConsoleCtrlHandler`. /// /// A notification to this process notifies *all* listeners listening for /// this event. Moreover, the notifications **are coalesced** if they aren't processed /// quickly enough. This means that if two notifications are received back-to-back, /// then the listener may only receive one item about the two notifications. #[must_use = "listeners do nothing unless polled"] #[derive(Debug)] pub struct CtrlClose { inner: RxFuture, } impl CtrlClose { /// Receives the next signal notification event. /// /// `None` is returned if no more events can be received by this listener. /// /// # Examples /// /// ```rust,no_run /// use tokio::signal::windows::ctrl_close; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn std::error::Error>> { /// // A listener of CTRL-CLOSE events. /// let mut signal = ctrl_close()?; /// /// // Print whenever a CTRL-CLOSE event is received. 
/// signal.recv().await; /// println!("got CTRL-CLOSE. Cleaning up before exiting"); /// /// Ok(()) /// } /// ``` pub async fn recv(&mut self) -> Option<()> { self.inner.recv().await } /// Polls to receive the next signal notification event, outside of an /// `async` context. /// /// `None` is returned if no more events can be received by this listener. /// /// # Examples /// /// Polling from a manually implemented future /// /// ```rust,no_run /// use std::pin::Pin; /// use std::future::Future; /// use std::task::{Context, Poll}; /// use tokio::signal::windows::CtrlClose; /// /// struct MyFuture { /// ctrl_close: CtrlClose, /// } /// /// impl Future for MyFuture { /// type Output = Option<()>; /// /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { /// println!("polling MyFuture"); /// self.ctrl_close.poll_recv(cx) /// } /// } /// ``` pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> { self.inner.poll_recv(cx) } } /// Creates a new listener which receives "ctrl-shutdown" notifications sent to the /// process. /// /// # Examples /// /// ```rust,no_run /// use tokio::signal::windows::ctrl_shutdown; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn std::error::Error>> { /// // A listener of CTRL-SHUTDOWN events. /// let mut signal = ctrl_shutdown()?; /// /// signal.recv().await; /// println!("got CTRL-SHUTDOWN. Cleaning up before exiting"); /// /// Ok(()) /// } /// ``` pub fn ctrl_shutdown() -> io::Result<CtrlShutdown> { Ok(CtrlShutdown { inner: self::imp::ctrl_shutdown()?, }) } /// Represents a listener which receives "ctrl-shutdown" notifications sent to the process /// via `SetConsoleCtrlHandler`. /// /// A notification to this process notifies *all* listeners listening for /// this event. Moreover, the notifications **are coalesced** if they aren't processed /// quickly enough. 
This means that if two notifications are received back-to-back, /// then the listener may only receive one item about the two notifications. #[must_use = "listeners do nothing unless polled"] #[derive(Debug)] pub struct CtrlShutdown { inner: RxFuture, } impl CtrlShutdown { /// Receives the next signal notification event. /// /// `None` is returned if no more events can be received by this listener. /// /// # Examples /// /// ```rust,no_run /// use tokio::signal::windows::ctrl_shutdown; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn std::error::Error>> { /// // A listener of CTRL-SHUTDOWN events. /// let mut signal = ctrl_shutdown()?; /// /// // Print whenever a CTRL-SHUTDOWN event is received. /// signal.recv().await; /// println!("got CTRL-SHUTDOWN. Cleaning up before exiting"); /// /// Ok(()) /// } /// ``` pub async fn recv(&mut self) -> Option<()> { self.inner.recv().await } /// Polls to receive the next signal notification event, outside of an /// `async` context. /// /// `None` is returned if no more events can be received by this listener. /// /// # Examples /// /// Polling from a manually implemented future /// /// ```rust,no_run /// use std::pin::Pin; /// use std::future::Future; /// use std::task::{Context, Poll}; /// use tokio::signal::windows::CtrlShutdown; /// /// struct MyFuture { /// ctrl_shutdown: CtrlShutdown, /// } /// /// impl Future for MyFuture { /// type Output = Option<()>; /// /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { /// println!("polling MyFuture"); /// self.ctrl_shutdown.poll_recv(cx) /// } /// } /// ``` pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> { self.inner.poll_recv(cx) } } /// Creates a new listener which receives "ctrl-logoff" notifications sent to the /// process. 
/// /// # Examples /// /// ```rust,no_run /// use tokio::signal::windows::ctrl_logoff; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn std::error::Error>> { /// // A listener of CTRL-LOGOFF events. /// let mut signal = ctrl_logoff()?; /// /// signal.recv().await; /// println!("got CTRL-LOGOFF. Cleaning up before exiting"); /// /// Ok(()) /// } /// ``` pub fn ctrl_logoff() -> io::Result<CtrlLogoff> { Ok(CtrlLogoff { inner: self::imp::ctrl_logoff()?, }) } /// Represents a listener which receives "ctrl-logoff" notifications sent to the process /// via `SetConsoleCtrlHandler`. /// /// A notification to this process notifies *all* listeners listening for /// this event. Moreover, the notifications **are coalesced** if they aren't processed /// quickly enough. This means that if two notifications are received back-to-back, /// then the listener may only receive one item about the two notifications. #[must_use = "listeners do nothing unless polled"] #[derive(Debug)] pub struct CtrlLogoff { inner: RxFuture, } impl CtrlLogoff { /// Receives the next signal notification event. /// /// `None` is returned if no more events can be received by this listener. /// /// # Examples /// /// ```rust,no_run /// use tokio::signal::windows::ctrl_logoff; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn std::error::Error>> { /// // An listener of CTRL-LOGOFF events. /// let mut signal = ctrl_logoff()?; /// /// // Print whenever a CTRL-LOGOFF event is received. /// signal.recv().await; /// println!("got CTRL-LOGOFF. Cleaning up before exiting"); /// /// Ok(()) /// } /// ``` pub async fn recv(&mut self) -> Option<()> { self.inner.recv().await } /// Polls to receive the next signal notification event, outside of an /// `async` context. /// /// `None` is returned if no more events can be received by this listener. 
/// /// # Examples /// /// Polling from a manually implemented future /// /// ```rust,no_run /// use std::pin::Pin; /// use std::future::Future; /// use std::task::{Context, Poll}; /// use tokio::signal::windows::CtrlLogoff; /// /// struct MyFuture { /// ctrl_logoff: CtrlLogoff, /// } /// /// impl Future for MyFuture { /// type Output = Option<()>; /// /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { /// println!("polling MyFuture"); /// self.ctrl_logoff.poll_recv(cx) /// } /// } /// ``` pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> { self.inner.poll_recv(cx) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/signal/registry.rs
tokio/src/signal/registry.rs
use crate::signal::os::{OsExtraData, OsStorage}; use crate::sync::watch; use std::ops; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::OnceLock; pub(crate) type EventId = usize; /// State for a specific event, whether a notification is pending delivery, /// and what listeners are registered. #[derive(Debug)] pub(crate) struct EventInfo { pending: AtomicBool, tx: watch::Sender<()>, } impl Default for EventInfo { fn default() -> Self { let (tx, _rx) = watch::channel(()); Self { pending: AtomicBool::new(false), tx, } } } /// An interface for retrieving the `EventInfo` for a particular `eventId`. pub(crate) trait Storage { /// Gets the `EventInfo` for `id` if it exists. fn event_info(&self, id: EventId) -> Option<&EventInfo>; /// Invokes `f` once for each defined `EventInfo` in this storage. fn for_each<'a, F>(&'a self, f: F) where F: FnMut(&'a EventInfo); } impl Storage for Vec<EventInfo> { fn event_info(&self, id: EventId) -> Option<&EventInfo> { self.get(id) } fn for_each<'a, F>(&'a self, f: F) where F: FnMut(&'a EventInfo), { self.iter().for_each(f); } } /// Manages and distributes event notifications to any registered listeners. /// /// Generic over the underlying storage to allow for domain specific /// optimizations (e.g. `eventIds` may or may not be contiguous). #[derive(Debug)] pub(crate) struct Registry<S> { storage: S, } impl<S> Registry<S> { fn new(storage: S) -> Self { Self { storage } } } impl<S: Storage> Registry<S> { /// Registers a new listener for `event_id`. fn register_listener(&self, event_id: EventId) -> watch::Receiver<()> { self.storage .event_info(event_id) .unwrap_or_else(|| panic!("invalid event_id: {event_id}")) .tx .subscribe() } /// Marks `event_id` as having been delivered, without broadcasting it to /// any listeners. 
fn record_event(&self, event_id: EventId) { if let Some(event_info) = self.storage.event_info(event_id) { event_info.pending.store(true, Ordering::SeqCst); } } /// Broadcasts all previously recorded events to their respective listeners. /// /// Returns `true` if an event was delivered to at least one listener. fn broadcast(&self) -> bool { let mut did_notify = false; self.storage.for_each(|event_info| { // Any signal of this kind arrived since we checked last? if !event_info.pending.swap(false, Ordering::SeqCst) { return; } // Ignore errors if there are no listeners if event_info.tx.send(()).is_ok() { did_notify = true; } }); did_notify } } pub(crate) struct Globals { extra: OsExtraData, registry: Registry<OsStorage>, } impl ops::Deref for Globals { type Target = OsExtraData; fn deref(&self) -> &Self::Target { &self.extra } } impl Globals { /// Registers a new listener for `event_id`. pub(crate) fn register_listener(&self, event_id: EventId) -> watch::Receiver<()> { self.registry.register_listener(event_id) } /// Marks `event_id` as having been delivered, without broadcasting it to /// any listeners. pub(crate) fn record_event(&self, event_id: EventId) { self.registry.record_event(event_id); } /// Broadcasts all previously recorded events to their respective listeners. /// /// Returns `true` if an event was delivered to at least one listener. 
pub(crate) fn broadcast(&self) -> bool { self.registry.broadcast() } #[cfg(unix)] pub(crate) fn storage(&self) -> &OsStorage { &self.registry.storage } } fn globals_init() -> Globals where OsExtraData: 'static + Send + Sync + Default, OsStorage: 'static + Send + Sync + Default, { Globals { extra: OsExtraData::default(), registry: Registry::new(OsStorage::default()), } } pub(crate) fn globals() -> &'static Globals where OsExtraData: 'static + Send + Sync + Default, OsStorage: 'static + Send + Sync + Default, { static GLOBALS: OnceLock<Globals> = OnceLock::new(); GLOBALS.get_or_init(globals_init) } #[cfg(all(test, not(loom)))] mod tests { use super::*; use crate::runtime::{self, Runtime}; use crate::sync::{oneshot, watch}; use futures::future; #[test] fn smoke() { let rt = rt(); rt.block_on(async move { let registry = Registry::new(vec![ EventInfo::default(), EventInfo::default(), EventInfo::default(), ]); let first = registry.register_listener(0); let second = registry.register_listener(1); let third = registry.register_listener(2); let (fire, wait) = oneshot::channel(); crate::spawn(async { wait.await.expect("wait failed"); // Record some events which should get coalesced registry.record_event(0); registry.record_event(0); registry.record_event(1); registry.record_event(1); registry.broadcast(); // Yield so the previous broadcast can get received // // This yields many times since the block_on task is only polled every 61 // ticks. 
for _ in 0..100 { crate::task::yield_now().await; } // Send subsequent signal registry.record_event(0); registry.broadcast(); drop(registry); }); let _ = fire.send(()); let all = future::join3(collect(first), collect(second), collect(third)); let (first_results, second_results, third_results) = all.await; assert_eq!(2, first_results.len()); assert_eq!(1, second_results.len()); assert_eq!(0, third_results.len()); }); } #[test] #[should_panic = "invalid event_id: 1"] fn register_panics_on_invalid_input() { let registry = Registry::new(vec![EventInfo::default()]); registry.register_listener(1); } #[test] fn record_invalid_event_does_nothing() { let registry = Registry::new(vec![EventInfo::default()]); registry.record_event(1302); } #[test] fn broadcast_returns_if_at_least_one_event_fired() { let registry = Registry::new(vec![EventInfo::default(), EventInfo::default()]); registry.record_event(0); assert!(!registry.broadcast()); let first = registry.register_listener(0); let second = registry.register_listener(1); registry.record_event(0); assert!(registry.broadcast()); drop(first); registry.record_event(0); assert!(!registry.broadcast()); drop(second); } fn rt() -> Runtime { runtime::Builder::new_current_thread() .enable_time() .build() .unwrap() } async fn collect(mut rx: watch::Receiver<()>) -> Vec<()> { let mut ret = vec![]; while let Ok(v) = rx.changed().await { ret.push(v); } ret } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/signal/mod.rs
tokio/src/signal/mod.rs
//! Asynchronous signal handling for Tokio. //! //! Note that signal handling is in general a very tricky topic and should be //! used with great care. This crate attempts to implement 'best practice' for //! signal handling, but it should be evaluated for your own applications' needs //! to see if it's suitable. //! //! There are some fundamental limitations of this crate documented on the OS //! specific structures, as well. //! //! # Examples //! //! Print on "ctrl-c" notification. //! //! ```rust,no_run //! use tokio::signal; //! //! #[tokio::main] //! async fn main() -> Result<(), Box<dyn std::error::Error>> { //! signal::ctrl_c().await?; //! println!("ctrl-c received!"); //! Ok(()) //! } //! ``` //! //! Wait for `SIGHUP` on Unix //! //! ```rust,no_run //! # #[cfg(unix)] { //! use tokio::signal::unix::{signal, SignalKind}; //! //! #[tokio::main] //! async fn main() -> Result<(), Box<dyn std::error::Error>> { //! // An infinite stream of hangup signals. //! let mut stream = signal(SignalKind::hangup())?; //! //! // Print whenever a HUP signal is received //! loop { //! stream.recv().await; //! println!("got signal HUP"); //! } //! } //! # } //! 
``` use crate::sync::watch::Receiver; use std::task::{Context, Poll}; #[cfg(feature = "signal")] mod ctrl_c; #[cfg(feature = "signal")] pub use ctrl_c::ctrl_c; pub(crate) mod registry; mod os { #[cfg(unix)] pub(crate) use super::unix::{OsExtraData, OsStorage}; #[cfg(windows)] pub(crate) use super::windows::{OsExtraData, OsStorage}; } pub mod unix; pub mod windows; mod reusable_box; use self::reusable_box::ReusableBoxFuture; #[derive(Debug)] struct RxFuture { inner: ReusableBoxFuture<Receiver<()>>, } async fn make_future(mut rx: Receiver<()>) -> Receiver<()> { rx.changed().await.expect("signal sender went away"); rx } impl RxFuture { fn new(rx: Receiver<()>) -> Self { Self { inner: ReusableBoxFuture::new(make_future(rx)), } } async fn recv(&mut self) -> Option<()> { use std::future::poll_fn; poll_fn(|cx| self.poll_recv(cx)).await } fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> { match self.inner.poll(cx) { Poll::Pending => Poll::Pending, Poll::Ready(rx) => { self.inner.set(make_future(rx)); Poll::Ready(Some(())) } } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/signal/ctrl_c.rs
tokio/src/signal/ctrl_c.rs
#[cfg(unix)] use super::unix::{self as os_impl}; #[cfg(windows)] use super::windows::{self as os_impl}; use std::io; /// Completes when a "ctrl-c" notification is sent to the process. /// /// While signals are handled very differently between Unix and Windows, both /// platforms support receiving a signal on "ctrl-c". This function provides a /// portable API for receiving this notification. /// /// Once the returned future is polled, a listener is registered. The future /// will complete on the first received `ctrl-c` **after** the initial call to /// either `Future::poll` or `.await`. /// /// # Caveats /// /// On Unix platforms, the first time that a `Signal` instance is registered for a /// particular signal kind, an OS signal-handler is installed which replaces the /// default platform behavior when that signal is received, **for the duration of /// the entire process**. /// /// For example, Unix systems will terminate a process by default when it /// receives a signal generated by `"CTRL+C"` on the terminal. But, when a /// `ctrl_c` stream is created to listen for this signal, the time it arrives, /// it will be translated to a stream event, and the process will continue to /// execute. **Even if this `Signal` instance is dropped, subsequent `SIGINT` /// deliveries will end up captured by Tokio, and the default platform behavior /// will NOT be reset**. /// /// Thus, applications should take care to ensure the expected signal behavior /// occurs as expected after listening for specific signals. 
/// /// # Examples /// /// ```rust,no_run /// use tokio::signal; /// /// #[tokio::main] /// async fn main() { /// println!("waiting for ctrl-c"); /// /// signal::ctrl_c().await.expect("failed to listen for event"); /// /// println!("received ctrl-c event"); /// } /// ``` /// /// Listen in the background: /// /// ```rust,no_run /// tokio::spawn(async move { /// tokio::signal::ctrl_c().await.unwrap(); /// // Your handler here /// }); /// ``` pub async fn ctrl_c() -> io::Result<()> { os_impl::ctrl_c()?.recv().await; Ok(()) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/signal/reusable_box.rs
tokio/src/signal/reusable_box.rs
use std::alloc::Layout; use std::future::Future; use std::panic::AssertUnwindSafe; use std::pin::Pin; use std::ptr::{self, NonNull}; use std::task::{Context, Poll}; use std::{fmt, panic}; /// A reusable `Pin<Box<dyn Future<Output = T> + Send>>`. /// /// This type lets you replace the future stored in the box without /// reallocating when the size and alignment permits this. pub(crate) struct ReusableBoxFuture<T> { boxed: NonNull<dyn Future<Output = T> + Send>, } impl<T> ReusableBoxFuture<T> { /// Create a new `ReusableBoxFuture<T>` containing the provided future. pub(crate) fn new<F>(future: F) -> Self where F: Future<Output = T> + Send + 'static, { let boxed: Box<dyn Future<Output = T> + Send> = Box::new(future); let boxed = Box::into_raw(boxed); // SAFETY: Box::into_raw does not return null pointers. let boxed = unsafe { NonNull::new_unchecked(boxed) }; Self { boxed } } /// Replaces the future currently stored in this box. /// /// This reallocates if and only if the layout of the provided future is /// different from the layout of the currently stored future. pub(crate) fn set<F>(&mut self, future: F) where F: Future<Output = T> + Send + 'static, { if let Err(future) = self.try_set(future) { *self = Self::new(future); } } /// Replaces the future currently stored in this box. /// /// This function never reallocates, but returns an error if the provided /// future has a different size or alignment from the currently stored /// future. pub(crate) fn try_set<F>(&mut self, future: F) -> Result<(), F> where F: Future<Output = T> + Send + 'static, { // SAFETY: The pointer is not dangling. let self_layout = { let dyn_future: &(dyn Future<Output = T> + Send) = unsafe { self.boxed.as_ref() }; Layout::for_value(dyn_future) }; if Layout::new::<F>() == self_layout { // SAFETY: We just checked that the layout of F is correct. unsafe { self.set_same_layout(future); } Ok(()) } else { Err(future) } } /// Sets the current future. 
/// /// # Safety /// /// This function requires that the layout of the provided future is the /// same as `self.layout`. unsafe fn set_same_layout<F>(&mut self, future: F) where F: Future<Output = T> + Send + 'static, { // Drop the existing future, catching any panics. let result = panic::catch_unwind(AssertUnwindSafe(|| unsafe { ptr::drop_in_place(self.boxed.as_ptr()); })); // Overwrite the future behind the pointer. This is safe because the // allocation was allocated with the same size and alignment as the type F. let self_ptr: *mut F = self.boxed.as_ptr() as *mut F; // SAFETY: The pointer is valid and the layout is exactly same. unsafe { ptr::write(self_ptr, future); } // Update the vtable of self.boxed. The pointer is not null because we // just got it from self.boxed, which is not null. self.boxed = unsafe { NonNull::new_unchecked(self_ptr) }; // If the old future's destructor panicked, resume unwinding. match result { Ok(()) => {} Err(payload) => { panic::resume_unwind(payload); } } } /// Gets a pinned reference to the underlying future. pub(crate) fn get_pin(&mut self) -> Pin<&mut (dyn Future<Output = T> + Send)> { // SAFETY: The user of this box cannot move the box, and we do not move it // either. unsafe { Pin::new_unchecked(self.boxed.as_mut()) } } /// Polls the future stored inside this box. pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll<T> { self.get_pin().poll(cx) } } impl<T> Future for ReusableBoxFuture<T> { type Output = T; /// Polls the future stored inside this box. fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> { Pin::into_inner(self).get_pin().poll(cx) } } // The future stored inside ReusableBoxFuture<T> must be Send. unsafe impl<T> Send for ReusableBoxFuture<T> {} // The only method called on self.boxed is poll, which takes &mut self, so this // struct being Sync does not permit any invalid access to the Future, even if // the future is not Sync. 
unsafe impl<T> Sync for ReusableBoxFuture<T> {} // Just like a Pin<Box<dyn Future>> is always Unpin, so is this type. impl<T> Unpin for ReusableBoxFuture<T> {} impl<T> Drop for ReusableBoxFuture<T> { fn drop(&mut self) { unsafe { drop(Box::from_raw(self.boxed.as_ptr())); } } } impl<T> fmt::Debug for ReusableBoxFuture<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ReusableBoxFuture").finish() } } #[cfg(test)] mod test { use super::ReusableBoxFuture; use futures::future::FutureExt; use std::alloc::Layout; use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; #[test] fn test_different_futures() { let fut = async move { 10 }; // Not zero sized! assert_eq!(Layout::for_value(&fut).size(), 1); let mut b = ReusableBoxFuture::new(fut); assert_eq!(b.get_pin().now_or_never(), Some(10)); b.try_set(async move { 20 }) .unwrap_or_else(|_| panic!("incorrect size")); assert_eq!(b.get_pin().now_or_never(), Some(20)); b.try_set(async move { 30 }) .unwrap_or_else(|_| panic!("incorrect size")); assert_eq!(b.get_pin().now_or_never(), Some(30)); } #[test] fn test_different_sizes() { let fut1 = async move { 10 }; let val = [0u32; 1000]; let fut2 = async move { val[0] }; let fut3 = ZeroSizedFuture {}; assert_eq!(Layout::for_value(&fut1).size(), 1); assert_eq!(Layout::for_value(&fut2).size(), 4004); assert_eq!(Layout::for_value(&fut3).size(), 0); let mut b = ReusableBoxFuture::new(fut1); assert_eq!(b.get_pin().now_or_never(), Some(10)); b.set(fut2); assert_eq!(b.get_pin().now_or_never(), Some(0)); b.set(fut3); assert_eq!(b.get_pin().now_or_never(), Some(5)); } struct ZeroSizedFuture {} impl Future for ZeroSizedFuture { type Output = u32; fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<u32> { Poll::Ready(5) } } #[test] fn test_zero_sized() { let fut = ZeroSizedFuture {}; // Zero sized! 
assert_eq!(Layout::for_value(&fut).size(), 0); let mut b = ReusableBoxFuture::new(fut); assert_eq!(b.get_pin().now_or_never(), Some(5)); assert_eq!(b.get_pin().now_or_never(), Some(5)); b.try_set(ZeroSizedFuture {}) .unwrap_or_else(|_| panic!("incorrect size")); assert_eq!(b.get_pin().now_or_never(), Some(5)); assert_eq!(b.get_pin().now_or_never(), Some(5)); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/signal/windows/stub.rs
tokio/src/signal/windows/stub.rs
//! Stub implementations for the platform API so that rustdoc can build linkable //! documentation on non-windows platforms. use crate::signal::RxFuture; use std::io; pub(super) fn ctrl_break() -> io::Result<RxFuture> { panic!() } pub(super) fn ctrl_close() -> io::Result<RxFuture> { panic!() } pub(super) fn ctrl_c() -> io::Result<RxFuture> { panic!() } pub(super) fn ctrl_logoff() -> io::Result<RxFuture> { panic!() } pub(super) fn ctrl_shutdown() -> io::Result<RxFuture> { panic!() }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/signal/windows/sys.rs
tokio/src/signal/windows/sys.rs
use std::io; use std::sync::Once; use crate::signal::registry::{globals, EventId, EventInfo, Storage}; use crate::signal::RxFuture; use windows_sys::core::BOOL; use windows_sys::Win32::System::Console as console; pub(super) fn ctrl_break() -> io::Result<RxFuture> { new(console::CTRL_BREAK_EVENT) } pub(super) fn ctrl_close() -> io::Result<RxFuture> { new(console::CTRL_CLOSE_EVENT) } pub(super) fn ctrl_c() -> io::Result<RxFuture> { new(console::CTRL_C_EVENT) } pub(super) fn ctrl_logoff() -> io::Result<RxFuture> { new(console::CTRL_LOGOFF_EVENT) } pub(super) fn ctrl_shutdown() -> io::Result<RxFuture> { new(console::CTRL_SHUTDOWN_EVENT) } fn new(signum: u32) -> io::Result<RxFuture> { global_init()?; let rx = globals().register_listener(signum as EventId); Ok(RxFuture::new(rx)) } fn event_requires_infinite_sleep_in_handler(signum: u32) -> bool { // Returning from the handler function of those events immediately terminates the process. // So for async systems, the easiest solution is to simply never return from // the handler function. 
// // For more information, see: // https://learn.microsoft.com/en-us/windows/console/handlerroutine#remarks match signum { console::CTRL_CLOSE_EVENT => true, console::CTRL_LOGOFF_EVENT => true, console::CTRL_SHUTDOWN_EVENT => true, _ => false, } } #[derive(Debug, Default)] pub(crate) struct OsStorage { ctrl_break: EventInfo, ctrl_close: EventInfo, ctrl_c: EventInfo, ctrl_logoff: EventInfo, ctrl_shutdown: EventInfo, } impl Storage for OsStorage { fn event_info(&self, id: EventId) -> Option<&EventInfo> { match u32::try_from(id) { Ok(console::CTRL_BREAK_EVENT) => Some(&self.ctrl_break), Ok(console::CTRL_CLOSE_EVENT) => Some(&self.ctrl_close), Ok(console::CTRL_C_EVENT) => Some(&self.ctrl_c), Ok(console::CTRL_LOGOFF_EVENT) => Some(&self.ctrl_logoff), Ok(console::CTRL_SHUTDOWN_EVENT) => Some(&self.ctrl_shutdown), _ => None, } } fn for_each<'a, F>(&'a self, mut f: F) where F: FnMut(&'a EventInfo), { f(&self.ctrl_break); f(&self.ctrl_close); f(&self.ctrl_c); f(&self.ctrl_logoff); f(&self.ctrl_shutdown); } } #[derive(Debug, Default)] pub(crate) struct OsExtraData {} fn global_init() -> io::Result<()> { static INIT: Once = Once::new(); let mut init = None; INIT.call_once(|| unsafe { let rc = console::SetConsoleCtrlHandler(Some(handler), 1); let ret = if rc == 0 { Err(io::Error::last_os_error()) } else { Ok(()) }; init = Some(ret); }); init.unwrap_or_else(|| Ok(())) } unsafe extern "system" fn handler(ty: u32) -> BOOL { let globals = globals(); globals.record_event(ty as EventId); // According to https://docs.microsoft.com/en-us/windows/console/handlerroutine // the handler routine is always invoked in a new thread, thus we don't // have the same restrictions as in Unix signal handlers, meaning we can // go ahead and perform the broadcast here. 
let event_was_handled = globals.broadcast(); if event_was_handled && event_requires_infinite_sleep_in_handler(ty) { loop { std::thread::park(); } } if event_was_handled { 1 } else { // No one is listening for this notification any more // let the OS fire the next (possibly the default) handler. 0 } } #[cfg(all(test, not(loom)))] mod tests { use super::*; use crate::runtime::Runtime; use tokio_test::{assert_ok, assert_pending, assert_ready_ok, task}; unsafe fn raise_event(signum: u32) { if event_requires_infinite_sleep_in_handler(signum) { // Those events will enter an infinite loop in `handler`, so // we need to run them on a separate thread std::thread::spawn(move || unsafe { super::handler(signum) }); } else { unsafe { super::handler(signum) }; } } #[test] fn ctrl_c() { let rt = rt(); let _enter = rt.enter(); let mut ctrl_c = task::spawn(crate::signal::ctrl_c()); assert_pending!(ctrl_c.poll()); // Windows doesn't have a good programmatic way of sending events // like sending signals on Unix, so we'll stub out the actual OS // integration and test that our handling works. unsafe { raise_event(console::CTRL_C_EVENT); } assert_ready_ok!(ctrl_c.poll()); } #[test] fn ctrl_break() { let rt = rt(); rt.block_on(async { let mut ctrl_break = assert_ok!(crate::signal::windows::ctrl_break()); // Windows doesn't have a good programmatic way of sending events // like sending signals on Unix, so we'll stub out the actual OS // integration and test that our handling works. unsafe { raise_event(console::CTRL_BREAK_EVENT); } ctrl_break.recv().await.unwrap(); }); } #[test] fn ctrl_close() { let rt = rt(); rt.block_on(async { let mut ctrl_close = assert_ok!(crate::signal::windows::ctrl_close()); // Windows doesn't have a good programmatic way of sending events // like sending signals on Unix, so we'll stub out the actual OS // integration and test that our handling works. 
unsafe { raise_event(console::CTRL_CLOSE_EVENT); } ctrl_close.recv().await.unwrap(); }); } #[test] fn ctrl_shutdown() { let rt = rt(); rt.block_on(async { let mut ctrl_shutdown = assert_ok!(crate::signal::windows::ctrl_shutdown()); // Windows doesn't have a good programmatic way of sending events // like sending signals on Unix, so we'll stub out the actual OS // integration and test that our handling works. unsafe { raise_event(console::CTRL_SHUTDOWN_EVENT); } ctrl_shutdown.recv().await.unwrap(); }); } #[test] fn ctrl_logoff() { let rt = rt(); rt.block_on(async { let mut ctrl_logoff = assert_ok!(crate::signal::windows::ctrl_logoff()); // Windows doesn't have a good programmatic way of sending events // like sending signals on Unix, so we'll stub out the actual OS // integration and test that our handling works. unsafe { raise_event(console::CTRL_LOGOFF_EVENT); } ctrl_logoff.recv().await.unwrap(); }); } fn rt() -> Runtime { crate::runtime::Builder::new_current_thread() .build() .unwrap() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/task/blocking.rs
tokio/src/task/blocking.rs
use crate::task::JoinHandle; cfg_rt_multi_thread! { /// Runs the provided blocking function on the current thread without /// blocking the executor. /// /// In general, issuing a blocking call or performing a lot of compute in a /// future without yielding is problematic, as it may prevent the executor /// from driving other tasks forward. Calling this function informs the /// executor that the currently executing task is about to block the thread, /// so the executor is able to hand off any other tasks it has to a new /// worker thread before that happens. See the [CPU-bound tasks and blocking /// code][blocking] section for more information. /// /// Be aware that although this function avoids starving other independently /// spawned tasks, any other code running concurrently in the same task will /// be suspended during the call to `block_in_place`. This can happen e.g. /// when using the [`join!`] macro. To avoid this issue, use /// [`spawn_blocking`] instead of `block_in_place`. /// /// Note that this function cannot be used within a [`current_thread`] runtime /// because in this case there are no other worker threads to hand off tasks /// to. On the other hand, calling the function outside a runtime is /// allowed. In this case, `block_in_place` just calls the provided closure /// normally. /// /// Code running behind `block_in_place` cannot be cancelled. When you shut /// down the executor, it will wait indefinitely for all blocking operations /// to finish. You can use [`shutdown_timeout`] to stop waiting for them /// after a certain timeout. Be aware that this will still not cancel the /// tasks — they are simply allowed to keep running after the method /// returns. 
/// /// [blocking]: ../index.html#cpu-bound-tasks-and-blocking-code /// [`spawn_blocking`]: fn@crate::task::spawn_blocking /// [`join!`]: macro@join /// [`thread::spawn`]: fn@std::thread::spawn /// [`shutdown_timeout`]: fn@crate::runtime::Runtime::shutdown_timeout /// /// # Examples /// /// ``` /// use tokio::task; /// /// # async fn docs() { /// task::block_in_place(move || { /// // do some compute-heavy work or call synchronous code /// }); /// # } /// ``` /// /// Code running inside `block_in_place` may use `block_on` to reenter the /// async context. /// /// ``` /// use tokio::task; /// use tokio::runtime::Handle; /// /// # async fn docs() { /// task::block_in_place(move || { /// Handle::current().block_on(async move { /// // do something async /// }); /// }); /// # } /// ``` /// /// # Panics /// /// This function panics if called from a [`current_thread`] runtime. /// /// [`current_thread`]: fn@crate::runtime::Builder::new_current_thread #[track_caller] pub fn block_in_place<F, R>(f: F) -> R where F: FnOnce() -> R, { crate::runtime::scheduler::block_in_place(f) } } cfg_rt! { /// Runs the provided closure on a thread where blocking is acceptable. /// /// In general, issuing a blocking call or performing a lot of compute in a /// future without yielding is problematic, as it may prevent the executor from /// driving other futures forward. This function runs the provided closure on a /// thread dedicated to blocking operations. See the [CPU-bound tasks and /// blocking code][blocking] section for more information. /// /// Tokio will spawn more blocking threads when they are requested through this /// function until the upper limit configured on the [`Builder`] is reached. /// After reaching the upper limit, the tasks are put in a queue. /// The thread limit is very large by default, because `spawn_blocking` is often /// used for various kinds of IO operations that cannot be performed /// asynchronously. 
When you run CPU-bound code using `spawn_blocking`, you /// should keep this large upper limit in mind. When running many CPU-bound /// computations, a semaphore or some other synchronization primitive should be /// used to limit the number of computation executed in parallel. Specialized /// CPU-bound executors, such as [rayon], may also be a good fit. /// /// This function is intended for non-async operations that eventually finish on /// their own. If you want to spawn an ordinary thread, you should use /// [`thread::spawn`] instead. /// /// Be aware that tasks spawned using `spawn_blocking` cannot be aborted /// because they are not async. If you call [`abort`] on a `spawn_blocking` /// task, then this *will not have any effect*, and the task will continue /// running normally. The exception is if the task has not started running /// yet; in that case, calling `abort` may prevent the task from starting. /// /// When you shut down the executor, it will attempt to `abort` all tasks /// including `spawn_blocking` tasks. However, `spawn_blocking` tasks /// cannot be aborted once they start running, which means that runtime /// shutdown will wait indefinitely for all started `spawn_blocking` to /// finish running. You can use [`shutdown_timeout`] to stop waiting for /// them after a certain timeout. Be aware that this will still not cancel /// the tasks — they are simply allowed to keep running after the method /// returns. It is possible for a blocking task to be cancelled if it has /// not yet started running, but this is not guaranteed. /// /// Note that if you are using the single threaded runtime, this function will /// still spawn additional threads for blocking operations. The current-thread /// scheduler's single thread is only used for asynchronous code. 
/// /// # Related APIs and patterns for bridging asynchronous and blocking code /// /// In simple cases, it is sufficient to have the closure accept input /// parameters at creation time and return a single value (or struct/tuple, etc.). /// /// For more complex situations in which it is desirable to stream data to or from /// the synchronous context, the [`mpsc channel`] has `blocking_send` and /// `blocking_recv` methods for use in non-async code such as the thread created /// by `spawn_blocking`. /// /// Another option is [`SyncIoBridge`] for cases where the synchronous context /// is operating on byte streams. For example, you might use an asynchronous /// HTTP client such as [hyper] to fetch data, but perform complex parsing /// of the payload body using a library written for synchronous I/O. /// /// Finally, see also [Bridging with sync code][bridgesync] for discussions /// around the opposite case of using Tokio as part of a larger synchronous /// codebase. /// /// [`Builder`]: struct@crate::runtime::Builder /// [blocking]: ../index.html#cpu-bound-tasks-and-blocking-code /// [rayon]: https://docs.rs/rayon /// [`mpsc channel`]: crate::sync::mpsc /// [`SyncIoBridge`]: https://docs.rs/tokio-util/latest/tokio_util/io/struct.SyncIoBridge.html /// [hyper]: https://docs.rs/hyper /// [`thread::spawn`]: fn@std::thread::spawn /// [`shutdown_timeout`]: fn@crate::runtime::Runtime::shutdown_timeout /// [bridgesync]: https://tokio.rs/tokio/topics/bridging /// [`AtomicBool`]: struct@std::sync::atomic::AtomicBool /// [`abort`]: crate::task::JoinHandle::abort /// /// # Examples /// /// Pass an input value and receive result of computation: /// /// ``` /// use tokio::task; /// /// # async fn docs() -> Result<(), Box<dyn std::error::Error>>{ /// // Initial input /// let mut v = "Hello, ".to_string(); /// let res = task::spawn_blocking(move || { /// // Stand-in for compute-heavy work or using synchronous APIs /// v.push_str("world"); /// // Pass ownership of the value back to 
the asynchronous context /// v /// }).await?; /// /// // `res` is the value returned from the thread /// assert_eq!(res.as_str(), "Hello, world"); /// # Ok(()) /// # } /// ``` /// /// Use a channel: /// /// ``` /// use tokio::task; /// use tokio::sync::mpsc; /// /// # async fn docs() { /// let (tx, mut rx) = mpsc::channel(2); /// let start = 5; /// let worker = task::spawn_blocking(move || { /// for x in 0..10 { /// // Stand in for complex computation /// tx.blocking_send(start + x).unwrap(); /// } /// }); /// /// let mut acc = 0; /// while let Some(v) = rx.recv().await { /// acc += v; /// } /// assert_eq!(acc, 95); /// worker.await.unwrap(); /// # } /// ``` #[track_caller] pub fn spawn_blocking<F, R>(f: F) -> JoinHandle<R> where F: FnOnce() -> R + Send + 'static, R: Send + 'static, { crate::runtime::spawn_blocking(f) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/task/local.rs
tokio/src/task/local.rs
//! Runs `!Send` futures on the current thread. use crate::loom::cell::UnsafeCell; use crate::loom::sync::{Arc, Mutex}; use crate::runtime; use crate::runtime::task::{ self, JoinHandle, LocalOwnedTasks, SpawnLocation, Task, TaskHarnessScheduleHooks, }; use crate::runtime::{context, ThreadId, BOX_FUTURE_THRESHOLD}; use crate::sync::AtomicWaker; use crate::util::trace::SpawnMeta; use crate::util::RcCell; use std::cell::Cell; use std::collections::VecDeque; use std::fmt; use std::future::Future; use std::marker::PhantomData; use std::mem; use std::pin::Pin; use std::rc::Rc; use std::task::Poll; use pin_project_lite::pin_project; cfg_rt! { /// A set of tasks which are executed on the same thread. /// /// In some cases, it is necessary to run one or more futures that do not /// implement [`Send`] and thus are unsafe to send between threads. In these /// cases, a [local task set] may be used to schedule one or more `!Send` /// futures to run together on the same thread. /// /// For example, the following code will not compile: /// /// ```rust,compile_fail /// use std::rc::Rc; /// /// #[tokio::main] /// async fn main() { /// // `Rc` does not implement `Send`, and thus may not be sent between /// // threads safely. /// let nonsend_data = Rc::new("my nonsend data..."); /// /// let nonsend_data = nonsend_data.clone(); /// // Because the `async` block here moves `nonsend_data`, the future is `!Send`. /// // Since `tokio::spawn` requires the spawned future to implement `Send`, this /// // will not compile. /// tokio::spawn(async move { /// println!("{}", nonsend_data); /// // ... /// }).await.unwrap(); /// } /// ``` /// /// # Use with `run_until` /// /// To spawn `!Send` futures, we can use a local task set to schedule them /// on the thread calling [`Runtime::block_on`]. When running inside of the /// local task set, we can use [`task::spawn_local`], which can spawn /// `!Send` futures. 
For example: /// /// ```rust /// use std::rc::Rc; /// use tokio::task; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let nonsend_data = Rc::new("my nonsend data..."); /// /// // Construct a local task set that can run `!Send` futures. /// let local = task::LocalSet::new(); /// /// // Run the local task set. /// local.run_until(async move { /// let nonsend_data = nonsend_data.clone(); /// // `spawn_local` ensures that the future is spawned on the local /// // task set. /// task::spawn_local(async move { /// println!("{}", nonsend_data); /// // ... /// }).await.unwrap(); /// }).await; /// # } /// ``` /// **Note:** The `run_until` method can only be used in `#[tokio::main]`, /// `#[tokio::test]` or directly inside a call to [`Runtime::block_on`]. It /// cannot be used inside a task spawned with `tokio::spawn`. /// /// ## Awaiting a `LocalSet` /// /// Additionally, a `LocalSet` itself implements `Future`, completing when /// *all* tasks spawned on the `LocalSet` complete. This can be used to run /// several futures on a `LocalSet` and drive the whole set until they /// complete. For example, /// /// ```rust /// use tokio::{task, time}; /// use std::rc::Rc; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let nonsend_data = Rc::new("world"); /// let local = task::LocalSet::new(); /// /// let nonsend_data2 = nonsend_data.clone(); /// local.spawn_local(async move { /// // ... /// println!("hello {}", nonsend_data2) /// }); /// /// local.spawn_local(async move { /// time::sleep(time::Duration::from_millis(100)).await; /// println!("goodbye {}", nonsend_data) /// }); /// /// // ... /// /// local.await; /// # } /// ``` /// **Note:** Awaiting a `LocalSet` can only be done inside /// `#[tokio::main]`, `#[tokio::test]` or directly inside a call to /// [`Runtime::block_on`]. It cannot be used inside a task spawned with /// `tokio::spawn`. 
/// /// ## Use inside `tokio::spawn` /// /// The two methods mentioned above cannot be used inside `tokio::spawn`, so /// to spawn `!Send` futures from inside `tokio::spawn`, we need to do /// something else. The solution is to create the `LocalSet` somewhere else, /// and communicate with it using an [`mpsc`] channel. /// /// The following example puts the `LocalSet` inside a new thread. /// ``` /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::runtime::Builder; /// use tokio::sync::{mpsc, oneshot}; /// use tokio::task::LocalSet; /// /// // This struct describes the task you want to spawn. Here we include /// // some simple examples. The oneshot channel allows sending a response /// // to the spawner. /// #[derive(Debug)] /// enum Task { /// PrintNumber(u32), /// AddOne(u32, oneshot::Sender<u32>), /// } /// /// #[derive(Clone)] /// struct LocalSpawner { /// send: mpsc::UnboundedSender<Task>, /// } /// /// impl LocalSpawner { /// pub fn new() -> Self { /// let (send, mut recv) = mpsc::unbounded_channel(); /// /// let rt = Builder::new_current_thread() /// .enable_all() /// .build() /// .unwrap(); /// /// std::thread::spawn(move || { /// let local = LocalSet::new(); /// /// local.spawn_local(async move { /// while let Some(new_task) = recv.recv().await { /// tokio::task::spawn_local(run_task(new_task)); /// } /// // If the while loop returns, then all the LocalSpawner /// // objects have been dropped. /// }); /// /// // This will return once all senders are dropped and all /// // spawned tasks have returned. /// rt.block_on(local); /// }); /// /// Self { /// send, /// } /// } /// /// pub fn spawn(&self, task: Task) { /// self.send.send(task).expect("Thread with LocalSet has shut down."); /// } /// } /// /// // This task may do !Send stuff. We use printing a number as an example, /// // but it could be anything. /// // /// // The Task struct is an enum to support spawning many different kinds /// // of operations. 
/// async fn run_task(task: Task) { /// match task { /// Task::PrintNumber(n) => { /// println!("{}", n); /// }, /// Task::AddOne(n, response) => { /// // We ignore failures to send the response. /// let _ = response.send(n + 1); /// }, /// } /// } /// /// #[tokio::main] /// async fn main() { /// let spawner = LocalSpawner::new(); /// /// let (send, response) = oneshot::channel(); /// spawner.spawn(Task::AddOne(10, send)); /// let eleven = response.await.unwrap(); /// assert_eq!(eleven, 11); /// } /// # } /// ``` /// /// [`Send`]: trait@std::marker::Send /// [local task set]: struct@LocalSet /// [`Runtime::block_on`]: method@crate::runtime::Runtime::block_on /// [`task::spawn_local`]: fn@spawn_local /// [`mpsc`]: mod@crate::sync::mpsc pub struct LocalSet { /// Current scheduler tick. tick: Cell<u8>, /// State available from thread-local. context: Rc<Context>, /// This type should not be Send. _not_send: PhantomData<*const ()>, } } /// State available from the thread-local. struct Context { /// State shared between threads. shared: Arc<Shared>, /// True if a task panicked without being handled and the local set is /// configured to shutdown on unhandled panic. unhandled_panic: Cell<bool>, } /// `LocalSet` state shared between threads. struct Shared { /// # Safety /// /// This field must *only* be accessed from the thread that owns the /// `LocalSet` (i.e., `Thread::current().id() == owner`). local_state: LocalState, /// Remote run queue sender. queue: Mutex<Option<VecDeque<task::Notified<Arc<Shared>>>>>, /// Wake the `LocalSet` task. waker: AtomicWaker, /// How to respond to unhandled task panics. #[cfg(tokio_unstable)] pub(crate) unhandled_panic: crate::runtime::UnhandledPanic, } /// Tracks the `LocalSet` state that must only be accessed from the thread that /// created the `LocalSet`. struct LocalState { /// The `ThreadId` of the thread that owns the `LocalSet`. owner: ThreadId, /// Local run queue sender and receiver. 
local_queue: UnsafeCell<VecDeque<task::Notified<Arc<Shared>>>>, /// Collection of all active tasks spawned onto this executor. owned: LocalOwnedTasks<Arc<Shared>>, } pin_project! { #[derive(Debug)] struct RunUntil<'a, F> { local_set: &'a LocalSet, #[pin] future: F, } } tokio_thread_local!(static CURRENT: LocalData = const { LocalData { ctx: RcCell::new(), wake_on_schedule: Cell::new(false), } }); struct LocalData { ctx: RcCell<Context>, wake_on_schedule: Cell<bool>, } impl LocalData { /// Should be called except when we call `LocalSet::enter`. /// Especially when we poll a `LocalSet`. #[must_use = "dropping this guard will reset the entered state"] fn enter(&self, ctx: Rc<Context>) -> LocalDataEnterGuard<'_> { let ctx = self.ctx.replace(Some(ctx)); let wake_on_schedule = self.wake_on_schedule.replace(false); LocalDataEnterGuard { local_data_ref: self, ctx, wake_on_schedule, } } } /// A guard for `LocalData::enter()` struct LocalDataEnterGuard<'a> { local_data_ref: &'a LocalData, ctx: Option<Rc<Context>>, wake_on_schedule: bool, } impl<'a> Drop for LocalDataEnterGuard<'a> { fn drop(&mut self) { self.local_data_ref.ctx.set(self.ctx.take()); self.local_data_ref .wake_on_schedule .set(self.wake_on_schedule) } } cfg_rt! { /// Spawns a `!Send` future on the current [`LocalSet`] or [`LocalRuntime`]. /// /// This is possible when either using one of these types /// explicitly, or (with `tokio_unstable`) by opting to use the /// `"local"` runtime flavor in `tokio::main`: /// /// ```ignore /// #[tokio::main(flavor = "local")] /// ``` /// /// The spawned future will run on the same thread that called `spawn_local`. /// /// The provided future will start running in the background immediately /// when `spawn_local` is called, even if you don't await the returned /// `JoinHandle`. /// /// # Panics /// /// This function panics if called outside of a [`LocalSet`] or [`LocalRuntime`]. 
/// /// Note that if [`tokio::spawn`] is used from within a `LocalSet`, the /// resulting new task will _not_ be inside the `LocalSet`, so you must use /// `spawn_local` if you want to stay within the `LocalSet`. /// /// # Examples /// /// With `LocalSet`: /// /// ```rust /// use std::rc::Rc; /// use tokio::task; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let nonsend_data = Rc::new("my nonsend data..."); /// /// let local = task::LocalSet::new(); /// /// // Run the local task set. /// local.run_until(async move { /// let nonsend_data = nonsend_data.clone(); /// task::spawn_local(async move { /// println!("{}", nonsend_data); /// // ... /// }).await.unwrap(); /// }).await; /// # } /// ``` /// With local runtime flavor ([Unstable API][unstable] only). /// /// ```rust /// # #[cfg(tokio_unstable)] /// #[tokio::main(flavor = "local")] /// async fn main() { /// let join = tokio::task::spawn_local(async { /// println!("my nonsend data...") /// }); /// /// join.await.unwrap() /// } /// # #[cfg(not(tokio_unstable))] /// # fn main() {} /// /// ``` /// /// [`LocalSet`]: struct@crate::task::LocalSet /// [`LocalRuntime`]: struct@crate::runtime::LocalRuntime /// [`tokio::spawn`]: fn@crate::task::spawn /// [unstable]: ../../tokio/index.html#unstable-features #[track_caller] pub fn spawn_local<F>(future: F) -> JoinHandle<F::Output> where F: Future + 'static, F::Output: 'static, { let fut_size = std::mem::size_of::<F>(); if fut_size > BOX_FUTURE_THRESHOLD { spawn_local_inner(Box::pin(future), SpawnMeta::new_unnamed(fut_size)) } else { spawn_local_inner(future, SpawnMeta::new_unnamed(fut_size)) } } #[track_caller] pub(super) fn spawn_local_inner<F>(future: F, meta: SpawnMeta<'_>) -> JoinHandle<F::Output> where F: Future + 'static, F::Output: 'static { use crate::runtime::{context, task}; let mut future = Some(future); let res = context::with_current(|handle| { Some(if handle.is_local() { if !handle.can_spawn_local_on_local_runtime() { return 
None; } let future = future.take().unwrap(); #[cfg(all( tokio_unstable, feature = "taskdump", feature = "rt", target_os = "linux", any( target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64" ) ))] let future = task::trace::Trace::root(future); let id = task::Id::next(); let task = crate::util::trace::task(future, "task", meta, id.as_u64()); // safety: we have verified that this is a `LocalRuntime` owned by the current thread unsafe { handle.spawn_local(task, id, meta.spawned_at) } } else { match CURRENT.with(|LocalData { ctx, .. }| ctx.get()) { None => panic!("`spawn_local` called from outside of a `task::LocalSet` or `runtime::LocalRuntime`"), Some(cx) => cx.spawn(future.take().unwrap(), meta) } }) }); match res { Ok(None) => panic!("Local tasks can only be spawned on a LocalRuntime from the thread the runtime was created on"), Ok(Some(join_handle)) => join_handle, Err(_) => match CURRENT.with(|LocalData { ctx, .. }| ctx.get()) { None => panic!("`spawn_local` called from outside of a `task::LocalSet` or `runtime::LocalRuntime`"), Some(cx) => cx.spawn(future.unwrap(), meta) } } } } /// Initial queue capacity. const INITIAL_CAPACITY: usize = 64; /// Max number of tasks to poll per tick. const MAX_TASKS_PER_TICK: usize = 61; /// How often it check the remote queue first. const REMOTE_FIRST_INTERVAL: u8 = 31; /// Context guard for `LocalSet` pub struct LocalEnterGuard { ctx: Option<Rc<Context>>, /// Distinguishes whether the context was entered or being polled. /// When we enter it, the value `wake_on_schedule` is set. In this case /// `spawn_local` refers the context, whereas it is not being polled now. 
wake_on_schedule: bool, } impl Drop for LocalEnterGuard { fn drop(&mut self) { CURRENT.with( |LocalData { ctx, wake_on_schedule, }| { ctx.set(self.ctx.take()); wake_on_schedule.set(self.wake_on_schedule); }, ); } } impl fmt::Debug for LocalEnterGuard { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("LocalEnterGuard").finish() } } impl LocalSet { /// Returns a new local task set. pub fn new() -> LocalSet { let owner = context::thread_id().expect("cannot create LocalSet during thread shutdown"); LocalSet { tick: Cell::new(0), context: Rc::new(Context { shared: Arc::new(Shared { local_state: LocalState { owner, owned: LocalOwnedTasks::new(), local_queue: UnsafeCell::new(VecDeque::with_capacity(INITIAL_CAPACITY)), }, queue: Mutex::new(Some(VecDeque::with_capacity(INITIAL_CAPACITY))), waker: AtomicWaker::new(), #[cfg(tokio_unstable)] unhandled_panic: crate::runtime::UnhandledPanic::Ignore, }), unhandled_panic: Cell::new(false), }), _not_send: PhantomData, } } /// Enters the context of this `LocalSet`. /// /// The [`spawn_local`] method will spawn tasks on the `LocalSet` whose /// context you are inside. /// /// [`spawn_local`]: fn@crate::task::spawn_local pub fn enter(&self) -> LocalEnterGuard { CURRENT.with( |LocalData { ctx, wake_on_schedule, .. }| { let ctx = ctx.replace(Some(self.context.clone())); let wake_on_schedule = wake_on_schedule.replace(true); LocalEnterGuard { ctx, wake_on_schedule, } }, ) } /// Spawns a `!Send` task onto the local task set. /// /// This task is guaranteed to be run on the current thread. /// /// Unlike the free function [`spawn_local`], this method may be used to /// spawn local tasks when the `LocalSet` is _not_ running. The provided /// future will start running once the `LocalSet` is next started, even if /// you don't await the returned `JoinHandle`. 
/// /// # Examples /// /// ```rust /// use tokio::task; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let local = task::LocalSet::new(); /// /// // Spawn a future on the local set. This future will be run when /// // we call `run_until` to drive the task set. /// local.spawn_local(async { /// // ... /// }); /// /// // Run the local task set. /// local.run_until(async move { /// // ... /// }).await; /// /// // When `run` finishes, we can spawn _more_ futures, which will /// // run in subsequent calls to `run_until`. /// local.spawn_local(async { /// // ... /// }); /// /// local.run_until(async move { /// // ... /// }).await; /// # } /// ``` /// [`spawn_local`]: fn@spawn_local #[track_caller] pub fn spawn_local<F>(&self, future: F) -> JoinHandle<F::Output> where F: Future + 'static, F::Output: 'static, { let fut_size = mem::size_of::<F>(); if fut_size > BOX_FUTURE_THRESHOLD { self.spawn_named(Box::pin(future), SpawnMeta::new_unnamed(fut_size)) } else { self.spawn_named(future, SpawnMeta::new_unnamed(fut_size)) } } /// Runs a future to completion on the provided runtime, driving any local /// futures spawned on this task set on the current thread. /// /// This runs the given future on the runtime, blocking until it is /// complete, and yielding its resolved result. Any tasks or timers which /// the future spawns internally will be executed on the runtime. The future /// may also call [`spawn_local`] to `spawn_local` additional local futures on the /// current thread. /// /// This method should not be called from an asynchronous context. /// /// # Panics /// /// This function panics if the executor is at capacity, if the provided /// future panics, or if called within an asynchronous execution context. /// /// # Notes /// /// Since this function internally calls [`Runtime::block_on`], and drives /// futures in the local task set inside that call to `block_on`, the local /// futures may not use [in-place blocking]. 
If a blocking call needs to be /// issued from a local task, the [`spawn_blocking`] API may be used instead. /// /// For example, this will panic: /// ```should_panic,ignore-wasm /// use tokio::runtime::Runtime; /// use tokio::task; /// /// let rt = Runtime::new().unwrap(); /// let local = task::LocalSet::new(); /// local.block_on(&rt, async { /// let join = task::spawn_local(async { /// let blocking_result = task::block_in_place(|| { /// // ... /// }); /// // ... /// }); /// join.await.unwrap(); /// }) /// ``` /// This, however, will not panic: /// ``` /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::runtime::Runtime; /// use tokio::task; /// /// let rt = Runtime::new().unwrap(); /// let local = task::LocalSet::new(); /// local.block_on(&rt, async { /// let join = task::spawn_local(async { /// let blocking_result = task::spawn_blocking(|| { /// // ... /// }).await; /// // ... /// }); /// join.await.unwrap(); /// }) /// # } /// ``` /// /// [`spawn_local`]: fn@spawn_local /// [`Runtime::block_on`]: method@crate::runtime::Runtime::block_on /// [in-place blocking]: fn@crate::task::block_in_place /// [`spawn_blocking`]: fn@crate::task::spawn_blocking #[track_caller] #[cfg(feature = "rt")] #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] pub fn block_on<F>(&self, rt: &crate::runtime::Runtime, future: F) -> F::Output where F: Future, { rt.block_on(self.run_until(future)) } /// Runs a future to completion on the local set, returning its output. /// /// This returns a future that runs the given future with a local set, /// allowing it to call [`spawn_local`] to spawn additional `!Send` futures. /// Any local futures spawned on the local set will be driven in the /// background until the future passed to `run_until` completes. When the future /// passed to `run_until` finishes, any local futures which have not completed /// will remain on the local set, and will be driven on subsequent calls to /// `run_until` or when [awaiting the local set] itself. 
/// /// # Cancel safety /// /// This method is cancel safe when `future` is cancel safe. /// /// # Examples /// /// ```rust /// use tokio::task; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// task::LocalSet::new().run_until(async { /// task::spawn_local(async move { /// // ... /// }).await.unwrap(); /// // ... /// }).await; /// # } /// ``` /// /// [`spawn_local`]: fn@spawn_local /// [awaiting the local set]: #awaiting-a-localset pub async fn run_until<F>(&self, future: F) -> F::Output where F: Future, { let run_until = RunUntil { future, local_set: self, }; run_until.await } #[track_caller] pub(in crate::task) fn spawn_named<F>( &self, future: F, meta: SpawnMeta<'_>, ) -> JoinHandle<F::Output> where F: Future + 'static, F::Output: 'static, { self.spawn_named_inner(future, meta) } #[track_caller] fn spawn_named_inner<F>(&self, future: F, meta: SpawnMeta<'_>) -> JoinHandle<F::Output> where F: Future + 'static, F::Output: 'static, { let handle = self.context.spawn(future, meta); // Because a task was spawned from *outside* the `LocalSet`, wake the // `LocalSet` future to execute the new task, if it hasn't been woken. // // Spawning via the free fn `spawn` does not require this, as it can // only be called from *within* a future executing on the `LocalSet` — // in that case, the `LocalSet` must already be awake. self.context.shared.waker.wake(); handle } /// Ticks the scheduler, returning whether the local future needs to be /// notified again. fn tick(&self) -> bool { for _ in 0..MAX_TASKS_PER_TICK { // Make sure we didn't hit an unhandled panic assert!(!self.context.unhandled_panic.get(), "a spawned task panicked and the LocalSet is configured to shutdown on unhandled panic"); match self.next_task() { // Run the task // // Safety: As spawned tasks are `!Send`, `run_unchecked` must be // used. We are responsible for maintaining the invariant that // `run_unchecked` is only called on threads that spawned the // task initially. 
Because `LocalSet` itself is `!Send`, and // `spawn_local` spawns into the `LocalSet` on the current // thread, the invariant is maintained. Some(task) => crate::task::coop::budget(|| task.run()), // We have fully drained the queue of notified tasks, so the // local future doesn't need to be notified again — it can wait // until something else wakes a task in the local set. None => return false, } } true } fn next_task(&self) -> Option<task::LocalNotified<Arc<Shared>>> { let tick = self.tick.get(); self.tick.set(tick.wrapping_add(1)); let task = if tick % REMOTE_FIRST_INTERVAL == 0 { self.context .shared .queue .lock() .as_mut() .and_then(|queue| queue.pop_front()) .or_else(|| self.pop_local()) } else { self.pop_local().or_else(|| { self.context .shared .queue .lock() .as_mut() .and_then(VecDeque::pop_front) }) }; task.map(|task| unsafe { // Safety: because the `LocalSet` itself is `!Send`, we know we are // on the same thread if we have access to the `LocalSet`, and can // therefore access the local run queue. self.context.shared.local_state.assert_owner(task) }) } fn pop_local(&self) -> Option<task::Notified<Arc<Shared>>> { unsafe { // Safety: because the `LocalSet` itself is `!Send`, we know we are // on the same thread if we have access to the `LocalSet`, and can // therefore access the local run queue. self.context.shared.local_state.task_pop_front() } } fn with<T>(&self, f: impl FnOnce() -> T) -> T { CURRENT.with(|local_data| { let _guard = local_data.enter(self.context.clone()); f() }) } /// This method is like `with`, but it just calls `f` without setting the thread-local if that /// fails. fn with_if_possible<T>(&self, f: impl FnOnce() -> T) -> T { let mut f = Some(f); let res = CURRENT.try_with(|local_data| { let _guard = local_data.enter(self.context.clone()); (f.take().unwrap())() }); match res { Ok(res) => res, Err(_access_error) => (f.take().unwrap())(), } } /// Returns the [`Id`] of the current [`LocalSet`] runtime. 
/// /// # Examples /// /// ```rust /// use tokio::task; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let local_set = task::LocalSet::new(); /// println!("Local set id: {}", local_set.id()); /// # } /// ``` /// /// [`Id`]: struct@crate::runtime::Id pub fn id(&self) -> runtime::Id { runtime::Id::new(self.context.shared.local_state.owned.id) } } cfg_unstable! { impl LocalSet { /// Configure how the `LocalSet` responds to an unhandled panic on a /// spawned task. /// /// By default, an unhandled panic (i.e. a panic not caught by /// [`std::panic::catch_unwind`]) has no impact on the `LocalSet`'s /// execution. The panic is error value is forwarded to the task's /// [`JoinHandle`] and all other spawned tasks continue running. /// /// The `unhandled_panic` option enables configuring this behavior. /// /// * `UnhandledPanic::Ignore` is the default behavior. Panics on /// spawned tasks have no impact on the `LocalSet`'s execution. /// * `UnhandledPanic::ShutdownRuntime` will force the `LocalSet` to /// shutdown immediately when a spawned task panics even if that /// task's `JoinHandle` has not been dropped. All other spawned tasks /// will immediately terminate and further calls to /// [`LocalSet::block_on`] and [`LocalSet::run_until`] will panic. /// /// # Panics /// /// This method panics if called after the `LocalSet` has started /// running. /// /// # Unstable /// /// This option is currently unstable and its implementation is /// incomplete. The API may change or be removed in the future. See /// tokio-rs/tokio#4516 for more details. /// /// # Examples /// /// The following demonstrates a `LocalSet` configured to shutdown on /// panic. The first spawned task panics and results in the `LocalSet` /// shutting down. The second spawned task never has a chance to /// execute. The call to `run_until` will panic due to the runtime being /// forcibly shutdown. 
/// /// ```should_panic /// use tokio::runtime::UnhandledPanic; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// tokio::task::LocalSet::new() /// .unhandled_panic(UnhandledPanic::ShutdownRuntime) /// .run_until(async { /// tokio::task::spawn_local(async { panic!("boom"); }); /// tokio::task::spawn_local(async { /// // This task never completes /// }); /// /// // Do some work, but `run_until` will panic before it completes /// # loop { tokio::task::yield_now().await; } /// }) /// .await; /// # } /// ``` /// /// [`JoinHandle`]: struct@crate::task::JoinHandle pub fn unhandled_panic(&mut self, behavior: crate::runtime::UnhandledPanic) -> &mut Self { // TODO: This should be set as a builder Rc::get_mut(&mut self.context) .and_then(|ctx| Arc::get_mut(&mut ctx.shared)) .expect("Unhandled Panic behavior modified after starting LocalSet") .unhandled_panic = behavior; self } } } impl fmt::Debug for LocalSet { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("LocalSet").finish() } } impl Future for LocalSet { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> { let _no_blocking = crate::runtime::context::disallow_block_in_place(); // Register the waker before starting to work self.context.shared.waker.register_by_ref(cx.waker()); if self.with(|| self.tick()) { // If `tick` returns true, we need to notify the local future again: // there are still tasks remaining in the run queue. cx.waker().wake_by_ref(); Poll::Pending
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
true