repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/task/builder.rs
tokio/src/task/builder.rs
#![allow(unreachable_pub)] use crate::{ runtime::{Handle, BOX_FUTURE_THRESHOLD}, task::{JoinHandle, LocalSet}, util::trace::SpawnMeta, }; use std::{future::Future, io, mem}; /// Factory which is used to configure the properties of a new task. /// /// **Note**: This is an [unstable API][unstable]. The public API of this type /// may break in 1.x releases. See [the documentation on unstable /// features][unstable] for details. /// /// Methods can be chained in order to configure it. /// /// Currently, there is only one configuration option: /// /// - [`name`], which specifies an associated name for /// the task /// /// There are three types of task that can be spawned from a Builder: /// - [`spawn_local`] for executing not [`Send`] futures /// - [`spawn`] for executing [`Send`] futures on the runtime /// - [`spawn_blocking`] for executing blocking code in the /// blocking thread pool. /// /// ## Example /// /// ```no_run /// use tokio::net::{TcpListener, TcpStream}; /// /// use std::io; /// /// async fn process(socket: TcpStream) { /// // ... /// # drop(socket); /// } /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let listener = TcpListener::bind("127.0.0.1:8080").await?; /// /// loop { /// let (socket, _) = listener.accept().await?; /// /// tokio::task::Builder::new() /// .name("tcp connection handler") /// .spawn(async move { /// // Process each socket concurrently. /// process(socket).await /// })?; /// } /// } /// ``` /// [unstable]: crate#unstable-features /// [`name`]: Builder::name /// [`spawn_local`]: Builder::spawn_local /// [`spawn`]: Builder::spawn /// [`spawn_blocking`]: Builder::spawn_blocking #[derive(Default, Debug)] #[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))] pub struct Builder<'a> { name: Option<&'a str>, } impl<'a> Builder<'a> { /// Creates a new task builder. pub fn new() -> Self { Self::default() } /// Assigns a name to the task which will be spawned. 
pub fn name(&self, name: &'a str) -> Self { Self { name: Some(name) } } /// Spawns a task with this builder's settings on the current runtime. /// /// # Panics /// /// This method panics if called outside of a Tokio runtime. /// /// See [`task::spawn`](crate::task::spawn()) for /// more details. #[track_caller] pub fn spawn<Fut>(self, future: Fut) -> io::Result<JoinHandle<Fut::Output>> where Fut: Future + Send + 'static, Fut::Output: Send + 'static, { let fut_size = mem::size_of::<Fut>(); Ok(if fut_size > BOX_FUTURE_THRESHOLD { super::spawn::spawn_inner(Box::pin(future), SpawnMeta::new(self.name, fut_size)) } else { super::spawn::spawn_inner(future, SpawnMeta::new(self.name, fut_size)) }) } /// Spawn a task with this builder's settings on the provided [runtime /// handle]. /// /// See [`Handle::spawn`] for more details. /// /// [runtime handle]: crate::runtime::Handle /// [`Handle::spawn`]: crate::runtime::Handle::spawn #[track_caller] pub fn spawn_on<Fut>(self, future: Fut, handle: &Handle) -> io::Result<JoinHandle<Fut::Output>> where Fut: Future + Send + 'static, Fut::Output: Send + 'static, { let fut_size = mem::size_of::<Fut>(); Ok(if fut_size > BOX_FUTURE_THRESHOLD { handle.spawn_named(Box::pin(future), SpawnMeta::new(self.name, fut_size)) } else { handle.spawn_named(future, SpawnMeta::new(self.name, fut_size)) }) } /// Spawns a `!Send` task on the current [`LocalSet`] or [`LocalRuntime`] with /// this builder's settings. /// /// The spawned future will be run on the same thread that called `spawn_local`. /// This may only be called from the context of a [local task set][`LocalSet`] /// or a [`LocalRuntime`]. /// /// # Panics /// /// This function panics if called outside of a [local task set][`LocalSet`] /// or a [`LocalRuntime`]. /// /// See [`task::spawn_local`] for more details. 
/// /// [`task::spawn_local`]: crate::task::spawn_local /// [`LocalSet`]: crate::task::LocalSet /// [`LocalRuntime`]: crate::runtime::LocalRuntime #[track_caller] pub fn spawn_local<Fut>(self, future: Fut) -> io::Result<JoinHandle<Fut::Output>> where Fut: Future + 'static, Fut::Output: 'static, { let fut_size = mem::size_of::<Fut>(); Ok(if fut_size > BOX_FUTURE_THRESHOLD { super::local::spawn_local_inner(Box::pin(future), SpawnMeta::new(self.name, fut_size)) } else { super::local::spawn_local_inner(future, SpawnMeta::new(self.name, fut_size)) }) } /// Spawns `!Send` a task on the provided [`LocalSet`] with this builder's /// settings. /// /// See [`LocalSet::spawn_local`] for more details. /// /// [`LocalSet::spawn_local`]: crate::task::LocalSet::spawn_local /// [`LocalSet`]: crate::task::LocalSet #[track_caller] pub fn spawn_local_on<Fut>( self, future: Fut, local_set: &LocalSet, ) -> io::Result<JoinHandle<Fut::Output>> where Fut: Future + 'static, Fut::Output: 'static, { let fut_size = mem::size_of::<Fut>(); Ok(if fut_size > BOX_FUTURE_THRESHOLD { local_set.spawn_named(Box::pin(future), SpawnMeta::new(self.name, fut_size)) } else { local_set.spawn_named(future, SpawnMeta::new(self.name, fut_size)) }) } /// Spawns blocking code on the blocking threadpool. /// /// # Panics /// /// This method panics if called outside of a Tokio runtime. /// /// See [`task::spawn_blocking`](crate::task::spawn_blocking) /// for more details. #[track_caller] pub fn spawn_blocking<Function, Output>( self, function: Function, ) -> io::Result<JoinHandle<Output>> where Function: FnOnce() -> Output + Send + 'static, Output: Send + 'static, { let handle = Handle::current(); self.spawn_blocking_on(function, &handle) } /// Spawns blocking code on the provided [runtime handle]'s blocking threadpool. /// /// See [`Handle::spawn_blocking`] for more details. 
/// /// [runtime handle]: crate::runtime::Handle /// [`Handle::spawn_blocking`]: crate::runtime::Handle::spawn_blocking #[track_caller] pub fn spawn_blocking_on<Function, Output>( self, function: Function, handle: &Handle, ) -> io::Result<JoinHandle<Output>> where Function: FnOnce() -> Output + Send + 'static, Output: Send + 'static, { use crate::runtime::Mandatory; let fn_size = mem::size_of::<Function>(); let (join_handle, spawn_result) = if fn_size > BOX_FUTURE_THRESHOLD { handle.inner.blocking_spawner().spawn_blocking_inner( Box::new(function), Mandatory::NonMandatory, SpawnMeta::new(self.name, fn_size), handle, ) } else { handle.inner.blocking_spawner().spawn_blocking_inner( function, Mandatory::NonMandatory, SpawnMeta::new(self.name, fn_size), handle, ) }; spawn_result?; Ok(join_handle) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/task/yield_now.rs
tokio/src/task/yield_now.rs
use crate::runtime::context; use std::future::Future; use std::pin::Pin; use std::task::{ready, Context, Poll}; /// Yields execution back to the Tokio runtime. /// /// A task yields by awaiting on `yield_now()`, and may resume when that future /// completes (with no output.) The current task will be re-added as a pending /// task at the _back_ of the pending queue. Any other pending tasks will be /// scheduled. No other waking is required for the task to continue. /// /// See also the usage example in the [task module](index.html#yield_now). /// /// ## Non-guarantees /// /// This function may not yield all the way up to the executor if there are any /// special combinators above it in the call stack. For example, if a /// [`tokio::select!`] has another branch complete during the same poll as the /// `yield_now()`, then the yield is not propagated all the way up to the /// runtime. /// /// It is generally not guaranteed that the runtime behaves like you expect it /// to when deciding which task to schedule next after a call to `yield_now()`. /// In particular, the runtime may choose to poll the task that just ran /// `yield_now()` again immediately without polling any other tasks first. For /// example, the runtime will not drive the IO driver between every poll of a /// task, and this could result in the runtime polling the current task again /// immediately even if there is another task that could make progress if that /// other task is waiting for a notification from the IO driver. /// /// In general, changes to the order in which the runtime polls tasks is not /// considered a breaking change, and your program should be correct no matter /// which order the runtime polls your tasks in. 
/// /// [`tokio::select!`]: macro@crate::select #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] pub async fn yield_now() { /// Yield implementation struct YieldNow { yielded: bool, } impl Future for YieldNow { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { ready!(crate::trace::trace_leaf(cx)); if self.yielded { return Poll::Ready(()); } self.yielded = true; context::defer(cx.waker()); Poll::Pending } } YieldNow { yielded: false }.await; }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/task/join_set.rs
tokio/src/task/join_set.rs
//! A collection of tasks spawned on a Tokio runtime. //! //! This module provides the [`JoinSet`] type, a collection which stores a set //! of spawned tasks and allows asynchronously awaiting the output of those //! tasks as they complete. See the documentation for the [`JoinSet`] type for //! details. use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; use std::{fmt, panic}; use crate::runtime::Handle; use crate::task::Id; use crate::task::{unconstrained, AbortHandle, JoinError, JoinHandle, LocalSet}; use crate::util::IdleNotifiedSet; /// A collection of tasks spawned on a Tokio runtime. /// /// A `JoinSet` can be used to await the completion of some or all of the tasks /// in the set. The set is not ordered, and the tasks will be returned in the /// order they complete. /// /// All of the tasks must have the same return type `T`. /// /// When the `JoinSet` is dropped, all tasks in the `JoinSet` are immediately aborted. /// /// # Examples /// /// Spawn multiple tasks and wait for them. /// /// ``` /// use tokio::task::JoinSet; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut set = JoinSet::new(); /// /// for i in 0..10 { /// set.spawn(async move { i }); /// } /// /// let mut seen = [false; 10]; /// while let Some(res) = set.join_next().await { /// let idx = res.unwrap(); /// seen[idx] = true; /// } /// /// for i in 0..10 { /// assert!(seen[i]); /// } /// # } /// ``` /// /// # Task ID guarantees /// /// While a task is tracked in a `JoinSet`, that task's ID is unique relative /// to all other running tasks in Tokio. For this purpose, tracking a task in a /// `JoinSet` is equivalent to holding a [`JoinHandle`] to it. See the [task ID] /// documentation for more info. 
/// /// [`JoinHandle`]: crate::task::JoinHandle /// [task ID]: crate::task::Id #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] pub struct JoinSet<T> { inner: IdleNotifiedSet<JoinHandle<T>>, } /// A variant of [`task::Builder`] that spawns tasks on a [`JoinSet`] rather /// than on the current default runtime. /// /// [`task::Builder`]: crate::task::Builder #[cfg(all(tokio_unstable, feature = "tracing"))] #[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))] #[must_use = "builders do nothing unless used to spawn a task"] pub struct Builder<'a, T> { joinset: &'a mut JoinSet<T>, builder: super::Builder<'a>, } impl<T> JoinSet<T> { /// Create a new `JoinSet`. pub fn new() -> Self { Self { inner: IdleNotifiedSet::new(), } } /// Returns the number of tasks currently in the `JoinSet`. pub fn len(&self) -> usize { self.inner.len() } /// Returns whether the `JoinSet` is empty. pub fn is_empty(&self) -> bool { self.inner.is_empty() } } impl<T: 'static> JoinSet<T> { /// Returns a [`Builder`] that can be used to configure a task prior to /// spawning it on this `JoinSet`. /// /// # Examples /// /// ``` /// use tokio::task::JoinSet; /// /// #[tokio::main] /// async fn main() -> std::io::Result<()> { /// let mut set = JoinSet::new(); /// /// // Use the builder to configure a task's name before spawning it. /// set.build_task() /// .name("my_task") /// .spawn(async { /* ... */ })?; /// /// Ok(()) /// } /// ``` #[cfg(all(tokio_unstable, feature = "tracing"))] #[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))] pub fn build_task(&mut self) -> Builder<'_, T> { Builder { builder: super::Builder::new(), joinset: self, } } /// Spawn the provided task on the `JoinSet`, returning an [`AbortHandle`] /// that can be used to remotely cancel the task. /// /// The provided future will start running in the background immediately /// when this method is called, even if you don't await anything on this /// `JoinSet`. 
/// /// # Panics /// /// This method panics if called outside of a Tokio runtime. /// /// [`AbortHandle`]: crate::task::AbortHandle #[track_caller] pub fn spawn<F>(&mut self, task: F) -> AbortHandle where F: Future<Output = T>, F: Send + 'static, T: Send, { self.insert(crate::spawn(task)) } /// Spawn the provided task on the provided runtime and store it in this /// `JoinSet` returning an [`AbortHandle`] that can be used to remotely /// cancel the task. /// /// The provided future will start running in the background immediately /// when this method is called, even if you don't await anything on this /// `JoinSet`. /// /// [`AbortHandle`]: crate::task::AbortHandle #[track_caller] pub fn spawn_on<F>(&mut self, task: F, handle: &Handle) -> AbortHandle where F: Future<Output = T>, F: Send + 'static, T: Send, { self.insert(handle.spawn(task)) } /// Spawn the provided task on the current [`LocalSet`] or [`LocalRuntime`] /// and store it in this `JoinSet`, returning an [`AbortHandle`] that can /// be used to remotely cancel the task. /// /// The provided future will start running in the background immediately /// when this method is called, even if you don't await anything on this /// `JoinSet`. /// /// # Panics /// /// This method panics if it is called outside of a `LocalSet` or `LocalRuntime`. /// /// [`LocalSet`]: crate::task::LocalSet /// [`LocalRuntime`]: crate::runtime::LocalRuntime /// [`AbortHandle`]: crate::task::AbortHandle #[track_caller] pub fn spawn_local<F>(&mut self, task: F) -> AbortHandle where F: Future<Output = T>, F: 'static, { self.insert(crate::task::spawn_local(task)) } /// Spawn the provided task on the provided [`LocalSet`] and store it in /// this `JoinSet`, returning an [`AbortHandle`] that can be used to /// remotely cancel the task. /// /// Unlike the [`spawn_local`] method, this method may be used to spawn local /// tasks on a `LocalSet` that is _not_ currently running. 
The provided /// future will start running whenever the `LocalSet` is next started. /// /// [`LocalSet`]: crate::task::LocalSet /// [`AbortHandle`]: crate::task::AbortHandle /// [`spawn_local`]: Self::spawn_local #[track_caller] pub fn spawn_local_on<F>(&mut self, task: F, local_set: &LocalSet) -> AbortHandle where F: Future<Output = T>, F: 'static, { self.insert(local_set.spawn_local(task)) } /// Spawn the blocking code on the blocking threadpool and store /// it in this `JoinSet`, returning an [`AbortHandle`] that can be /// used to remotely cancel the task. /// /// # Examples /// /// Spawn multiple blocking tasks and wait for them. /// /// ``` /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::task::JoinSet; /// /// #[tokio::main] /// async fn main() { /// let mut set = JoinSet::new(); /// /// for i in 0..10 { /// set.spawn_blocking(move || { i }); /// } /// /// let mut seen = [false; 10]; /// while let Some(res) = set.join_next().await { /// let idx = res.unwrap(); /// seen[idx] = true; /// } /// /// for i in 0..10 { /// assert!(seen[i]); /// } /// } /// # } /// ``` /// /// # Panics /// /// This method panics if called outside of a Tokio runtime. /// /// [`AbortHandle`]: crate::task::AbortHandle #[track_caller] pub fn spawn_blocking<F>(&mut self, f: F) -> AbortHandle where F: FnOnce() -> T, F: Send + 'static, T: Send, { self.insert(crate::runtime::spawn_blocking(f)) } /// Spawn the blocking code on the blocking threadpool of the /// provided runtime and store it in this `JoinSet`, returning an /// [`AbortHandle`] that can be used to remotely cancel the task. 
/// /// [`AbortHandle`]: crate::task::AbortHandle #[track_caller] pub fn spawn_blocking_on<F>(&mut self, f: F, handle: &Handle) -> AbortHandle where F: FnOnce() -> T, F: Send + 'static, T: Send, { self.insert(handle.spawn_blocking(f)) } fn insert(&mut self, jh: JoinHandle<T>) -> AbortHandle { let abort = jh.abort_handle(); let mut entry = self.inner.insert_idle(jh); // Set the waker that is notified when the task completes. entry.with_value_and_context(|jh, ctx| jh.set_join_waker(ctx.waker())); abort } /// Waits until one of the tasks in the set completes and returns its output. /// /// Returns `None` if the set is empty. /// /// # Cancel Safety /// /// This method is cancel safe. If `join_next` is used as the event in a `tokio::select!` /// statement and some other branch completes first, it is guaranteed that no tasks were /// removed from this `JoinSet`. pub async fn join_next(&mut self) -> Option<Result<T, JoinError>> { std::future::poll_fn(|cx| self.poll_join_next(cx)).await } /// Waits until one of the tasks in the set completes and returns its /// output, along with the [task ID] of the completed task. /// /// Returns `None` if the set is empty. /// /// When this method returns an error, then the id of the task that failed can be accessed /// using the [`JoinError::id`] method. /// /// # Cancel Safety /// /// This method is cancel safe. If `join_next_with_id` is used as the event in a `tokio::select!` /// statement and some other branch completes first, it is guaranteed that no tasks were /// removed from this `JoinSet`. /// /// [task ID]: crate::task::Id /// [`JoinError::id`]: fn@crate::task::JoinError::id pub async fn join_next_with_id(&mut self) -> Option<Result<(Id, T), JoinError>> { std::future::poll_fn(|cx| self.poll_join_next_with_id(cx)).await } /// Tries to join one of the tasks in the set that has completed and return its output. /// /// Returns `None` if there are no completed tasks, or if the set is empty. 
pub fn try_join_next(&mut self) -> Option<Result<T, JoinError>> { // Loop over all notified `JoinHandle`s to find one that's ready, or until none are left. loop { let mut entry = self.inner.try_pop_notified()?; let res = entry.with_value_and_context(|jh, ctx| { // Since this function is not async and cannot be forced to yield, we should // disable budgeting when we want to check for the `JoinHandle` readiness. Pin::new(&mut unconstrained(jh)).poll(ctx) }); if let Poll::Ready(res) = res { let _entry = entry.remove(); return Some(res); } } } /// Tries to join one of the tasks in the set that has completed and return its output, /// along with the [task ID] of the completed task. /// /// Returns `None` if there are no completed tasks, or if the set is empty. /// /// When this method returns an error, then the id of the task that failed can be accessed /// using the [`JoinError::id`] method. /// /// [task ID]: crate::task::Id /// [`JoinError::id`]: fn@crate::task::JoinError::id pub fn try_join_next_with_id(&mut self) -> Option<Result<(Id, T), JoinError>> { // Loop over all notified `JoinHandle`s to find one that's ready, or until none are left. loop { let mut entry = self.inner.try_pop_notified()?; let res = entry.with_value_and_context(|jh, ctx| { // Since this function is not async and cannot be forced to yield, we should // disable budgeting when we want to check for the `JoinHandle` readiness. Pin::new(&mut unconstrained(jh)).poll(ctx) }); if let Poll::Ready(res) = res { let entry = entry.remove(); return Some(res.map(|output| (entry.id(), output))); } } } /// Aborts all tasks and waits for them to finish shutting down. /// /// Calling this method is equivalent to calling [`abort_all`] and then calling [`join_next`] in /// a loop until it returns `None`. /// /// This method ignores any panics in the tasks shutting down. When this call returns, the /// `JoinSet` will be empty. 
/// /// [`abort_all`]: fn@Self::abort_all /// [`join_next`]: fn@Self::join_next pub async fn shutdown(&mut self) { self.abort_all(); while self.join_next().await.is_some() {} } /// Awaits the completion of all tasks in this `JoinSet`, returning a vector of their results. /// /// The results will be stored in the order they completed not the order they were spawned. /// This is a convenience method that is equivalent to calling [`join_next`] in /// a loop. If any tasks on the `JoinSet` fail with an [`JoinError`], then this call /// to `join_all` will panic and all remaining tasks on the `JoinSet` are /// cancelled. To handle errors in any other way, manually call [`join_next`] /// in a loop. /// /// # Examples /// /// Spawn multiple tasks and `join_all` them. /// /// ``` /// use tokio::task::JoinSet; /// use std::time::Duration; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut set = JoinSet::new(); /// /// for i in 0..3 { /// set.spawn(async move { /// tokio::time::sleep(Duration::from_secs(3 - i)).await; /// i /// }); /// } /// /// let output = set.join_all().await; /// assert_eq!(output, vec![2, 1, 0]); /// # } /// ``` /// /// Equivalent implementation of `join_all`, using [`join_next`] and loop. 
/// /// ``` /// use tokio::task::JoinSet; /// use std::panic; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut set = JoinSet::new(); /// /// for i in 0..3 { /// set.spawn(async move {i}); /// } /// /// let mut output = Vec::new(); /// while let Some(res) = set.join_next().await{ /// match res { /// Ok(t) => output.push(t), /// Err(err) if err.is_panic() => panic::resume_unwind(err.into_panic()), /// Err(err) => panic!("{err}"), /// } /// } /// assert_eq!(output.len(),3); /// # } /// ``` /// [`join_next`]: fn@Self::join_next /// [`JoinError::id`]: fn@crate::task::JoinError::id pub async fn join_all(mut self) -> Vec<T> { let mut output = Vec::with_capacity(self.len()); while let Some(res) = self.join_next().await { match res { Ok(t) => output.push(t), Err(err) if err.is_panic() => panic::resume_unwind(err.into_panic()), Err(err) => panic!("{err}"), } } output } /// Aborts all tasks on this `JoinSet`. /// /// This does not remove the tasks from the `JoinSet`. To wait for the tasks to complete /// cancellation, you should call `join_next` in a loop until the `JoinSet` is empty. pub fn abort_all(&mut self) { self.inner.for_each(|jh| jh.abort()); } /// Removes all tasks from this `JoinSet` without aborting them. /// /// The tasks removed by this call will continue to run in the background even if the `JoinSet` /// is dropped. pub fn detach_all(&mut self) { self.inner.drain(drop); } /// Polls for one of the tasks in the set to complete. /// /// If this returns `Poll::Ready(Some(_))`, then the task that completed is removed from the set. /// /// When the method returns `Poll::Pending`, the `Waker` in the provided `Context` is scheduled /// to receive a wakeup when a task in the `JoinSet` completes. Note that on multiple calls to /// `poll_join_next`, only the `Waker` from the `Context` passed to the most recent call is /// scheduled to receive a wakeup. 
/// /// # Returns /// /// This function returns: /// /// * `Poll::Pending` if the `JoinSet` is not empty but there is no task whose output is /// available right now. /// * `Poll::Ready(Some(Ok(value)))` if one of the tasks in this `JoinSet` has completed. /// The `value` is the return value of one of the tasks that completed. /// * `Poll::Ready(Some(Err(err)))` if one of the tasks in this `JoinSet` has panicked or been /// aborted. The `err` is the `JoinError` from the panicked/aborted task. /// * `Poll::Ready(None)` if the `JoinSet` is empty. /// /// Note that this method may return `Poll::Pending` even if one of the tasks has completed. /// This can happen if the [coop budget] is reached. /// /// [coop budget]: crate::task::coop#cooperative-scheduling pub fn poll_join_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<T, JoinError>>> { // The call to `pop_notified` moves the entry to the `idle` list. It is moved back to // the `notified` list if the waker is notified in the `poll` call below. let mut entry = match self.inner.pop_notified(cx.waker()) { Some(entry) => entry, None => { if self.is_empty() { return Poll::Ready(None); } else { // The waker was set by `pop_notified`. return Poll::Pending; } } }; let res = entry.with_value_and_context(|jh, ctx| Pin::new(jh).poll(ctx)); if let Poll::Ready(res) = res { let _entry = entry.remove(); Poll::Ready(Some(res)) } else { // A JoinHandle generally won't emit a wakeup without being ready unless // the coop limit has been reached. We yield to the executor in this // case. cx.waker().wake_by_ref(); Poll::Pending } } /// Polls for one of the tasks in the set to complete. /// /// If this returns `Poll::Ready(Some(_))`, then the task that completed is removed from the set. /// /// When the method returns `Poll::Pending`, the `Waker` in the provided `Context` is scheduled /// to receive a wakeup when a task in the `JoinSet` completes. 
Note that on multiple calls to /// `poll_join_next`, only the `Waker` from the `Context` passed to the most recent call is /// scheduled to receive a wakeup. /// /// # Returns /// /// This function returns: /// /// * `Poll::Pending` if the `JoinSet` is not empty but there is no task whose output is /// available right now. /// * `Poll::Ready(Some(Ok((id, value))))` if one of the tasks in this `JoinSet` has completed. /// The `value` is the return value of one of the tasks that completed, and /// `id` is the [task ID] of that task. /// * `Poll::Ready(Some(Err(err)))` if one of the tasks in this `JoinSet` has panicked or been /// aborted. The `err` is the `JoinError` from the panicked/aborted task. /// * `Poll::Ready(None)` if the `JoinSet` is empty. /// /// Note that this method may return `Poll::Pending` even if one of the tasks has completed. /// This can happen if the [coop budget] is reached. /// /// [coop budget]: crate::task::coop#cooperative-scheduling /// [task ID]: crate::task::Id pub fn poll_join_next_with_id( &mut self, cx: &mut Context<'_>, ) -> Poll<Option<Result<(Id, T), JoinError>>> { // The call to `pop_notified` moves the entry to the `idle` list. It is moved back to // the `notified` list if the waker is notified in the `poll` call below. let mut entry = match self.inner.pop_notified(cx.waker()) { Some(entry) => entry, None => { if self.is_empty() { return Poll::Ready(None); } else { // The waker was set by `pop_notified`. return Poll::Pending; } } }; let res = entry.with_value_and_context(|jh, ctx| Pin::new(jh).poll(ctx)); if let Poll::Ready(res) = res { let entry = entry.remove(); // If the task succeeded, add the task ID to the output. Otherwise, the // `JoinError` will already have the task's ID. Poll::Ready(Some(res.map(|output| (entry.id(), output)))) } else { // A JoinHandle generally won't emit a wakeup without being ready unless // the coop limit has been reached. We yield to the executor in this // case. 
cx.waker().wake_by_ref(); Poll::Pending } } } impl<T> Drop for JoinSet<T> { fn drop(&mut self) { self.inner.drain(|join_handle| join_handle.abort()); } } impl<T> fmt::Debug for JoinSet<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("JoinSet").field("len", &self.len()).finish() } } impl<T> Default for JoinSet<T> { fn default() -> Self { Self::new() } } /// Collect an iterator of futures into a [`JoinSet`]. /// /// This is equivalent to calling [`JoinSet::spawn`] on each element of the iterator. /// /// # Examples /// /// The main example from [`JoinSet`]'s documentation can also be written using [`collect`]: /// /// ``` /// use tokio::task::JoinSet; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut set: JoinSet<_> = (0..10).map(|i| async move { i }).collect(); /// /// let mut seen = [false; 10]; /// while let Some(res) = set.join_next().await { /// let idx = res.unwrap(); /// seen[idx] = true; /// } /// /// for i in 0..10 { /// assert!(seen[i]); /// } /// # } /// ``` /// /// [`collect`]: std::iter::Iterator::collect impl<T, F> std::iter::FromIterator<F> for JoinSet<T> where F: Future<Output = T>, F: Send + 'static, T: Send + 'static, { fn from_iter<I: IntoIterator<Item = F>>(iter: I) -> Self { let mut set = Self::new(); iter.into_iter().for_each(|task| { set.spawn(task); }); set } } /// Extend a [`JoinSet`] with futures from an iterator. /// /// This is equivalent to calling [`JoinSet::spawn`] on each element of the iterator. 
/// /// # Examples /// /// ``` /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::task::JoinSet; /// /// #[tokio::main] /// async fn main() { /// let mut set: JoinSet<_> = (0..5).map(|i| async move { i }).collect(); /// /// set.extend((5..10).map(|i| async move { i })); /// /// let mut seen = [false; 10]; /// while let Some(res) = set.join_next().await { /// let idx = res.unwrap(); /// seen[idx] = true; /// } /// /// for i in 0..10 { /// assert!(seen[i]); /// } /// } /// # } /// ``` impl<T, F> std::iter::Extend<F> for JoinSet<T> where F: Future<Output = T>, F: Send + 'static, T: Send + 'static, { fn extend<I>(&mut self, iter: I) where I: IntoIterator<Item = F>, { iter.into_iter().for_each(|task| { self.spawn(task); }); } } // === impl Builder === #[cfg(all(tokio_unstable, feature = "tracing"))] #[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))] impl<'a, T: 'static> Builder<'a, T> { /// Assigns a name to the task which will be spawned. pub fn name(self, name: &'a str) -> Self { let builder = self.builder.name(name); Self { builder, ..self } } /// Spawn the provided task with this builder's settings and store it in the /// [`JoinSet`], returning an [`AbortHandle`] that can be used to remotely /// cancel the task. /// /// # Returns /// /// An [`AbortHandle`] that can be used to remotely cancel the task. /// /// # Panics /// /// This method panics if called outside of a Tokio runtime. /// /// [`AbortHandle`]: crate::task::AbortHandle #[track_caller] pub fn spawn<F>(self, future: F) -> std::io::Result<AbortHandle> where F: Future<Output = T>, F: Send + 'static, T: Send, { Ok(self.joinset.insert(self.builder.spawn(future)?)) } /// Spawn the provided task on the provided [runtime handle] with this /// builder's settings, and store it in the [`JoinSet`]. /// /// # Returns /// /// An [`AbortHandle`] that can be used to remotely cancel the task. 
/// /// /// [`AbortHandle`]: crate::task::AbortHandle /// [runtime handle]: crate::runtime::Handle #[track_caller] pub fn spawn_on<F>(self, future: F, handle: &Handle) -> std::io::Result<AbortHandle> where F: Future<Output = T>, F: Send + 'static, T: Send, { Ok(self.joinset.insert(self.builder.spawn_on(future, handle)?)) } /// Spawn the blocking code on the blocking threadpool with this builder's /// settings, and store it in the [`JoinSet`]. /// /// # Returns /// /// An [`AbortHandle`] that can be used to remotely cancel the task. /// /// # Panics /// /// This method panics if called outside of a Tokio runtime. /// /// [`JoinSet`]: crate::task::JoinSet /// [`AbortHandle`]: crate::task::AbortHandle #[track_caller] pub fn spawn_blocking<F>(self, f: F) -> std::io::Result<AbortHandle> where F: FnOnce() -> T, F: Send + 'static, T: Send, { Ok(self.joinset.insert(self.builder.spawn_blocking(f)?)) } /// Spawn the blocking code on the blocking threadpool of the provided /// runtime handle with this builder's settings, and store it in the /// [`JoinSet`]. /// /// # Returns /// /// An [`AbortHandle`] that can be used to remotely cancel the task. /// /// [`JoinSet`]: crate::task::JoinSet /// [`AbortHandle`]: crate::task::AbortHandle #[track_caller] pub fn spawn_blocking_on<F>(self, f: F, handle: &Handle) -> std::io::Result<AbortHandle> where F: FnOnce() -> T, F: Send + 'static, T: Send, { Ok(self .joinset .insert(self.builder.spawn_blocking_on(f, handle)?)) } /// Spawn the provided task on the current [`LocalSet`] or [`LocalRuntime`] /// with this builder's settings, and store it in the [`JoinSet`]. /// /// # Returns /// /// An [`AbortHandle`] that can be used to remotely cancel the task. /// /// # Panics /// /// This method panics if it is called outside of a `LocalSet` or `LocalRuntime`. 
/// /// [`LocalSet`]: crate::task::LocalSet /// [`LocalRuntime`]: crate::runtime::LocalRuntime /// [`AbortHandle`]: crate::task::AbortHandle #[track_caller] pub fn spawn_local<F>(self, future: F) -> std::io::Result<AbortHandle> where F: Future<Output = T>, F: 'static, { Ok(self.joinset.insert(self.builder.spawn_local(future)?)) } /// Spawn the provided task on the provided [`LocalSet`] with this builder's /// settings, and store it in the [`JoinSet`]. /// /// # Returns /// /// An [`AbortHandle`] that can be used to remotely cancel the task. /// /// [`LocalSet`]: crate::task::LocalSet /// [`AbortHandle`]: crate::task::AbortHandle #[track_caller] pub fn spawn_local_on<F>(self, future: F, local_set: &LocalSet) -> std::io::Result<AbortHandle> where F: Future<Output = T>, F: 'static, { Ok(self .joinset .insert(self.builder.spawn_local_on(future, local_set)?)) } } // Manual `Debug` impl so that `Builder` is `Debug` regardless of whether `T` is // `Debug`. #[cfg(all(tokio_unstable, feature = "tracing"))] #[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))] impl<'a, T> fmt::Debug for Builder<'a, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("join_set::Builder") .field("joinset", &self.joinset) .field("builder", &self.builder) .finish() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/task/spawn.rs
tokio/src/task/spawn.rs
use crate::runtime::BOX_FUTURE_THRESHOLD; use crate::task::JoinHandle; use crate::util::trace::SpawnMeta; use std::future::Future; cfg_rt! { /// Spawns a new asynchronous task, returning a /// [`JoinHandle`](JoinHandle) for it. /// /// The provided future will start running in the background immediately /// when `spawn` is called, even if you don't await the returned /// `JoinHandle`. /// /// Spawning a task enables the task to execute concurrently to other tasks. The /// spawned task may execute on the current thread, or it may be sent to a /// different thread to be executed. The specifics depend on the current /// [`Runtime`](crate::runtime::Runtime) configuration. /// /// It is guaranteed that spawn will not synchronously poll the task being spawned. /// This means that calling spawn while holding a lock does not pose a risk of /// deadlocking with the spawned task. /// /// There is no guarantee that a spawned task will execute to completion. /// When a runtime is shutdown, all outstanding tasks are dropped, /// regardless of the lifecycle of that task. /// /// This function must be called from the context of a Tokio runtime. Tasks running on /// the Tokio runtime are always inside its context, but you can also enter the context /// using the [`Runtime::enter`](crate::runtime::Runtime::enter()) method. /// /// # Examples /// /// In this example, a server is started and `spawn` is used to start a new task /// that processes each received connection. /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::net::{TcpListener, TcpStream}; /// /// use std::io; /// /// async fn process(socket: TcpStream) { /// // ... /// # drop(socket); /// } /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let listener = TcpListener::bind("127.0.0.1:8080").await?; /// /// loop { /// let (socket, _) = listener.accept().await?; /// /// tokio::spawn(async move { /// // Process each socket concurrently. 
/// process(socket).await /// }); /// } /// } /// # } /// ``` /// /// To run multiple tasks in parallel and receive their results, join /// handles can be stored in a vector. /// ``` /// # #[tokio::main(flavor = "current_thread")] async fn main() { /// async fn my_background_op(id: i32) -> String { /// let s = format!("Starting background task {}.", id); /// println!("{}", s); /// s /// } /// /// let ops = vec![1, 2, 3]; /// let mut tasks = Vec::with_capacity(ops.len()); /// for op in ops { /// // This call will make them start running in the background /// // immediately. /// tasks.push(tokio::spawn(my_background_op(op))); /// } /// /// let mut outputs = Vec::with_capacity(tasks.len()); /// for task in tasks { /// outputs.push(task.await.unwrap()); /// } /// println!("{:?}", outputs); /// # } /// ``` /// This example pushes the tasks to `outputs` in the order they were /// started in. If you do not care about the ordering of the outputs, then /// you can also use a [`JoinSet`]. /// /// [`JoinSet`]: struct@crate::task::JoinSet /// /// # Panics /// /// Panics if called from **outside** of the Tokio runtime. /// /// # Using `!Send` values from a task /// /// The task supplied to `spawn` must implement `Send`. However, it is /// possible to **use** `!Send` values from the task as long as they only /// exist between calls to `.await`. 
/// /// For example, this will work: /// /// ``` /// use tokio::task; /// /// use std::rc::Rc; /// /// fn use_rc(rc: Rc<()>) { /// // Do stuff w/ rc /// # drop(rc); /// } /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// tokio::spawn(async { /// // Force the `Rc` to stay in a scope with no `.await` /// { /// let rc = Rc::new(()); /// use_rc(rc.clone()); /// } /// /// task::yield_now().await; /// }).await.unwrap(); /// # } /// ``` /// /// This will **not** work: /// /// ```compile_fail /// use tokio::task; /// /// use std::rc::Rc; /// /// fn use_rc(rc: Rc<()>) { /// // Do stuff w/ rc /// # drop(rc); /// } /// /// #[tokio::main] /// async fn main() { /// tokio::spawn(async { /// let rc = Rc::new(()); /// /// task::yield_now().await; /// /// use_rc(rc.clone()); /// }).await.unwrap(); /// } /// ``` /// /// Holding on to a `!Send` value across calls to `.await` will result in /// an unfriendly compile error message similar to: /// /// ```text /// `[... some type ...]` cannot be sent between threads safely /// ``` /// /// or: /// /// ```text /// error[E0391]: cycle detected when processing `main` /// ``` #[track_caller] pub fn spawn<F>(future: F) -> JoinHandle<F::Output> where F: Future + Send + 'static, F::Output: Send + 'static, { let fut_size = std::mem::size_of::<F>(); if fut_size > BOX_FUTURE_THRESHOLD { spawn_inner(Box::pin(future), SpawnMeta::new_unnamed(fut_size)) } else { spawn_inner(future, SpawnMeta::new_unnamed(fut_size)) } } #[track_caller] pub(super) fn spawn_inner<T>(future: T, meta: SpawnMeta<'_>) -> JoinHandle<T::Output> where T: Future + Send + 'static, T::Output: Send + 'static, { use crate::runtime::{context, task}; #[cfg(all( tokio_unstable, feature = "taskdump", feature = "rt", target_os = "linux", any( target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64" ) ))] let future = task::trace::Trace::root(future); let id = task::Id::next(); let task = crate::util::trace::task(future, "task", meta, id.as_u64()); 
match context::with_current(|handle| handle.spawn(task, id, meta.spawned_at)) { Ok(join_handle) => join_handle, Err(e) => panic!("{}", e), } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/task/mod.rs
tokio/src/task/mod.rs
//! Asynchronous green-threads. //! //! ## What are Tasks? //! //! A _task_ is a light weight, non-blocking unit of execution. A task is similar //! to an OS thread, but rather than being managed by the OS scheduler, they are //! managed by the [Tokio runtime][rt]. Another name for this general pattern is //! [green threads]. If you are familiar with [Go's goroutines], [Kotlin's //! coroutines], or [Erlang's processes], you can think of Tokio's tasks as //! something similar. //! //! Key points about tasks include: //! //! * Tasks are **light weight**. Because tasks are scheduled by the Tokio //! runtime rather than the operating system, creating new tasks or switching //! between tasks does not require a context switch and has fairly low //! overhead. Creating, running, and destroying large numbers of tasks is //! quite cheap, especially compared to OS threads. //! //! * Tasks are scheduled **cooperatively**. Most operating systems implement //! _preemptive multitasking_. This is a scheduling technique where the //! operating system allows each thread to run for a period of time, and then //! _preempts_ it, temporarily pausing that thread and switching to another. //! Tasks, on the other hand, implement _cooperative multitasking_. In //! cooperative multitasking, a task is allowed to run until it _yields_, //! indicating to the Tokio runtime's scheduler that it cannot currently //! continue executing. When a task yields, the Tokio runtime switches to //! executing the next task. //! //! * Tasks are **non-blocking**. Typically, when an OS thread performs I/O or //! must synchronize with another thread, it _blocks_, allowing the OS to //! schedule another thread. When a task cannot continue executing, it must //! yield instead, allowing the Tokio runtime to schedule another task. Tasks //! should generally not perform system calls or other operations that could //! block a thread, as this would prevent other tasks running on the same //! 
thread from executing as well. Instead, this module provides APIs for //! running blocking operations in an asynchronous context. //! //! [rt]: crate::runtime //! [green threads]: https://en.wikipedia.org/wiki/Green_threads //! [Go's goroutines]: https://tour.golang.org/concurrency/1 //! [Kotlin's coroutines]: https://kotlinlang.org/docs/reference/coroutines-overview.html //! [Erlang's processes]: http://erlang.org/doc/getting_started/conc_prog.html#processes //! //! ## Working with Tasks //! //! This module provides the following APIs for working with tasks: //! //! ### Spawning //! //! Perhaps the most important function in this module is [`task::spawn`]. This //! function can be thought of as an async equivalent to the standard library's //! [`thread::spawn`][`std::thread::spawn`]. It takes an `async` block or other //! [future], and creates a new task to run that work concurrently: //! //! ``` //! use tokio::task; //! //! # async fn doc() { //! task::spawn(async { //! // perform some work here... //! }); //! # } //! ``` //! //! Like [`std::thread::spawn`], `task::spawn` returns a [`JoinHandle`] struct. //! A `JoinHandle` is itself a future which may be used to await the output of //! the spawned task. For example: //! //! ``` //! use tokio::task; //! //! # #[tokio::main(flavor = "current_thread")] //! # async fn main() -> Result<(), Box<dyn std::error::Error>> { //! let join = task::spawn(async { //! // ... //! "hello world!" //! }); //! //! // ... //! //! // Await the result of the spawned task. //! let result = join.await?; //! assert_eq!(result, "hello world!"); //! # Ok(()) //! # } //! ``` //! //! Again, like `std::thread`'s [`JoinHandle` type][thread_join], if the spawned //! task panics, awaiting its `JoinHandle` will return a [`JoinError`]. For //! example: //! //! ``` //! # #[cfg(not(target_family = "wasm"))] //! # { //! use tokio::task; //! //! # #[tokio::main] async fn main() { //! let join = task::spawn(async { //! 
panic!("something bad happened!") //! }); //! //! // The returned result indicates that the task failed. //! assert!(join.await.is_err()); //! # } //! # } //! ``` //! //! `spawn`, `JoinHandle`, and `JoinError` are present when the "rt" //! feature flag is enabled. //! //! [`task::spawn`]: crate::task::spawn() //! [future]: std::future::Future //! [`std::thread::spawn`]: std::thread::spawn //! [`JoinHandle`]: crate::task::JoinHandle //! [thread_join]: std::thread::JoinHandle //! [`JoinError`]: crate::task::JoinError //! //! #### Cancellation //! //! Spawned tasks may be cancelled using the [`JoinHandle::abort`] or //! [`AbortHandle::abort`] methods. When one of these methods are called, the //! task is signalled to shut down next time it yields at an `.await` point. If //! the task is already idle, then it will be shut down as soon as possible //! without running again before being shut down. Additionally, shutting down a //! Tokio runtime (e.g. by returning from `#[tokio::main]`) immediately cancels //! all tasks on it. //! //! When tasks are shut down, it will stop running at whichever `.await` it has //! yielded at. All local variables are destroyed by running their destructor. //! Once shutdown has completed, awaiting the [`JoinHandle`] will fail with a //! [cancelled error](crate::task::JoinError::is_cancelled). //! //! Note that aborting a task does not guarantee that it fails with a cancelled //! error, since it may complete normally first. For example, if the task does //! not yield to the runtime at any point between the call to `abort` and the //! end of the task, then the [`JoinHandle`] will instead report that the task //! exited normally. //! //! Be aware that tasks spawned using [`spawn_blocking`] cannot be aborted //! because they are not async. If you call `abort` on a `spawn_blocking` //! task, then this *will not have any effect*, and the task will continue //! running normally. The exception is if the task has not started running //! 
yet; in that case, calling `abort` may prevent the task from starting. //! //! Be aware that calls to [`JoinHandle::abort`] just schedule the task for //! cancellation, and will return before the cancellation has completed. To wait //! for cancellation to complete, wait for the task to finish by awaiting the //! [`JoinHandle`]. Similarly, the [`JoinHandle::is_finished`] method does not //! return `true` until the cancellation has finished. //! //! Calling [`JoinHandle::abort`] multiple times has the same effect as calling //! it once. //! //! Tokio also provides an [`AbortHandle`], which is like the [`JoinHandle`], //! except that it does not provide a mechanism to wait for the task to finish. //! Each task can only have one [`JoinHandle`], but it can have more than one //! [`AbortHandle`]. //! //! [`JoinHandle::abort`]: crate::task::JoinHandle::abort //! [`AbortHandle::abort`]: crate::task::AbortHandle::abort //! [`AbortHandle`]: crate::task::AbortHandle //! [`JoinHandle::is_finished`]: crate::task::JoinHandle::is_finished //! //! ### Blocking and Yielding //! //! As we discussed above, code running in asynchronous tasks should not perform //! operations that can block. A blocking operation performed in a task running //! on a thread that is also running other tasks would block the entire thread, //! preventing other tasks from running. //! //! Instead, Tokio provides two APIs for running blocking operations in an //! asynchronous context: [`task::spawn_blocking`] and [`task::block_in_place`]. //! //! Be aware that if you call a non-async method from async code, that non-async //! method is still inside the asynchronous context, so you should also avoid //! blocking operations there. This includes destructors of objects destroyed in //! async code. //! //! #### `spawn_blocking` //! //! The `task::spawn_blocking` function is similar to the `task::spawn` function //! discussed in the previous section, but rather than spawning an //! 
_non-blocking_ future on the Tokio runtime, it instead spawns a //! _blocking_ function on a dedicated thread pool for blocking tasks. For //! example: //! //! ``` //! use tokio::task; //! //! # async fn docs() { //! task::spawn_blocking(|| { //! // do some compute-heavy work or call synchronous code //! }); //! # } //! ``` //! //! Just like `task::spawn`, `task::spawn_blocking` returns a `JoinHandle` //! which we can use to await the result of the blocking operation: //! //! ```rust //! # use tokio::task; //! # async fn docs() -> Result<(), Box<dyn std::error::Error>>{ //! let join = task::spawn_blocking(|| { //! // do some compute-heavy work or call synchronous code //! "blocking completed" //! }); //! //! let result = join.await?; //! assert_eq!(result, "blocking completed"); //! # Ok(()) //! # } //! ``` //! //! #### `block_in_place` //! //! When using the [multi-threaded runtime][rt-multi-thread], the [`task::block_in_place`] //! function is also available. Like `task::spawn_blocking`, this function //! allows running a blocking operation from an asynchronous context. Unlike //! `spawn_blocking`, however, `block_in_place` works by transitioning the //! _current_ worker thread to a blocking thread, moving other tasks running on //! that thread to another worker thread. This can improve performance by avoiding //! context switches. //! //! For example: //! //! ``` //! # #[cfg(not(target_family = "wasm"))] //! # { //! use tokio::task; //! //! # async fn docs() { //! let result = task::block_in_place(|| { //! // do some compute-heavy work or call synchronous code //! "blocking completed" //! }); //! //! assert_eq!(result, "blocking completed"); //! # } //! # } //! ``` //! //! #### `yield_now` //! //! In addition, this module provides a [`task::yield_now`] async function //! that is analogous to the standard library's [`thread::yield_now`]. Calling //! and `await`ing this function will cause the current task to yield to the //! 
Tokio runtime's scheduler, allowing other tasks to be //! scheduled. Eventually, the yielding task will be polled again, allowing it //! to execute. For example: //! //! ```rust //! use tokio::task; //! //! # #[tokio::main(flavor = "current_thread")] //! # async fn main() { //! async { //! task::spawn(async { //! // ... //! println!("spawned task done!") //! }); //! //! // Yield, allowing the newly-spawned task to execute first. //! task::yield_now().await; //! println!("main task done!"); //! } //! # .await; //! # } //! ``` //! //! [`task::spawn_blocking`]: crate::task::spawn_blocking //! [`task::block_in_place`]: crate::task::block_in_place //! [rt-multi-thread]: ../runtime/index.html#threaded-scheduler //! [`task::yield_now`]: crate::task::yield_now() //! [`thread::yield_now`]: std::thread::yield_now cfg_rt! { pub use crate::runtime::task::{JoinError, JoinHandle}; mod blocking; pub use blocking::spawn_blocking; mod spawn; pub use spawn::spawn; cfg_rt_multi_thread! { pub use blocking::block_in_place; } mod yield_now; pub use yield_now::yield_now; pub mod coop; #[doc(hidden)] #[deprecated = "Moved to tokio::task::coop::consume_budget"] pub use coop::consume_budget; #[doc(hidden)] #[deprecated = "Moved to tokio::task::coop::unconstrained"] pub use coop::unconstrained; #[doc(hidden)] #[deprecated = "Moved to tokio::task::coop::Unconstrained"] pub use coop::Unconstrained; mod local; pub use local::{spawn_local, LocalSet, LocalEnterGuard}; mod task_local; pub use task_local::LocalKey; #[doc(inline)] pub use join_set::JoinSet; pub use crate::runtime::task::AbortHandle; // Uses #[cfg(...)] instead of macro since the macro adds docsrs annotations. #[cfg(not(tokio_unstable))] mod join_set; #[cfg(tokio_unstable)] pub mod join_set; pub use crate::runtime::task::{Id, id, try_id}; cfg_trace! { mod builder; pub use builder::Builder; } /// Task-related futures. pub mod futures { pub use super::task_local::TaskLocalFuture; } } cfg_not_rt! { pub(crate) mod coop; }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/task/task_local.rs
tokio/src/task/task_local.rs
use pin_project_lite::pin_project; use std::cell::RefCell; use std::error::Error; use std::future::Future; use std::marker::PhantomPinned; use std::pin::Pin; use std::task::{Context, Poll}; use std::{fmt, mem, thread}; /// Declares a new task-local key of type [`tokio::task::LocalKey`]. /// /// # Syntax /// /// The macro wraps any number of static declarations and makes them local to the current task. /// Publicity and attributes for each static is preserved. For example: /// /// # Examples /// /// ``` /// # use tokio::task_local; /// task_local! { /// pub static ONE: u32; /// /// #[allow(unused)] /// static TWO: f32; /// } /// # fn main() {} /// ``` /// /// See [`LocalKey` documentation][`tokio::task::LocalKey`] for more /// information. /// /// [`tokio::task::LocalKey`]: struct@crate::task::LocalKey #[macro_export] #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] macro_rules! task_local { // empty (base case for the recursion) () => {}; ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty; $($rest:tt)*) => { $crate::__task_local_inner!($(#[$attr])* $vis $name, $t); $crate::task_local!($($rest)*); }; ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty) => { $crate::__task_local_inner!($(#[$attr])* $vis $name, $t); } } #[doc(hidden)] #[macro_export] macro_rules! __task_local_inner { ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty) => { $(#[$attr])* $vis static $name: $crate::task::LocalKey<$t> = { std::thread_local! { static __KEY: std::cell::RefCell<Option<$t>> = const { std::cell::RefCell::new(None) }; } $crate::task::LocalKey { inner: __KEY } }; }; } /// A key for task-local data. /// /// This type is generated by the [`task_local!`] macro. /// /// Unlike [`std::thread::LocalKey`], `tokio::task::LocalKey` will /// _not_ lazily initialize the value on first access. Instead, the /// value is first initialized when the future containing /// the task-local is first polled by a futures executor, like Tokio. 
/// /// # Examples /// /// ``` /// # async fn dox() { /// tokio::task_local! { /// static NUMBER: u32; /// } /// /// NUMBER.scope(1, async move { /// assert_eq!(NUMBER.get(), 1); /// }).await; /// /// NUMBER.scope(2, async move { /// assert_eq!(NUMBER.get(), 2); /// /// NUMBER.scope(3, async move { /// assert_eq!(NUMBER.get(), 3); /// }).await; /// }).await; /// # } /// ``` /// /// [`std::thread::LocalKey`]: struct@std::thread::LocalKey /// [`task_local!`]: ../macro.task_local.html #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] pub struct LocalKey<T: 'static> { #[doc(hidden)] pub inner: thread::LocalKey<RefCell<Option<T>>>, } impl<T: 'static> LocalKey<T> { /// Sets a value `T` as the task-local value for the future `F`. /// /// On completion of `scope`, the task-local will be dropped. /// /// ### Panics /// /// If you poll the returned future inside a call to [`with`] or /// [`try_with`] on the same `LocalKey`, then the call to `poll` will panic. /// /// ### Examples /// /// ``` /// # async fn dox() { /// tokio::task_local! { /// static NUMBER: u32; /// } /// /// NUMBER.scope(1, async move { /// println!("task local value: {}", NUMBER.get()); /// }).await; /// # } /// ``` /// /// [`with`]: fn@Self::with /// [`try_with`]: fn@Self::try_with pub fn scope<F>(&'static self, value: T, f: F) -> TaskLocalFuture<T, F> where F: Future, { TaskLocalFuture { local: self, slot: Some(value), future: Some(f), _pinned: PhantomPinned, } } /// Sets a value `T` as the task-local value for the closure `F`. /// /// On completion of `sync_scope`, the task-local will be dropped. /// /// ### Panics /// /// This method panics if called inside a call to [`with`] or [`try_with`] /// on the same `LocalKey`. /// /// ### Examples /// /// ``` /// # async fn dox() { /// tokio::task_local! 
{ /// static NUMBER: u32; /// } /// /// NUMBER.sync_scope(1, || { /// println!("task local value: {}", NUMBER.get()); /// }); /// # } /// ``` /// /// [`with`]: fn@Self::with /// [`try_with`]: fn@Self::try_with #[track_caller] pub fn sync_scope<F, R>(&'static self, value: T, f: F) -> R where F: FnOnce() -> R, { let mut value = Some(value); match self.scope_inner(&mut value, f) { Ok(res) => res, Err(err) => err.panic(), } } fn scope_inner<F, R>(&'static self, slot: &mut Option<T>, f: F) -> Result<R, ScopeInnerErr> where F: FnOnce() -> R, { struct Guard<'a, T: 'static> { local: &'static LocalKey<T>, slot: &'a mut Option<T>, } impl<'a, T: 'static> Drop for Guard<'a, T> { fn drop(&mut self) { // This should not panic. // // We know that the RefCell was not borrowed before the call to // `scope_inner`, so the only way for this to panic is if the // closure has created but not destroyed a RefCell guard. // However, we never give user-code access to the guards, so // there's no way for user-code to forget to destroy a guard. // // The call to `with` also should not panic, since the // thread-local wasn't destroyed when we first called // `scope_inner`, and it shouldn't have gotten destroyed since // then. self.local.inner.with(|inner| { let mut ref_mut = inner.borrow_mut(); mem::swap(self.slot, &mut *ref_mut); }); } } self.inner.try_with(|inner| { inner .try_borrow_mut() .map(|mut ref_mut| mem::swap(slot, &mut *ref_mut)) })??; let guard = Guard { local: self, slot }; let res = f(); drop(guard); Ok(res) } /// Accesses the current task-local and runs the provided closure. /// /// # Panics /// /// This function will panic if the task local doesn't have a value set. #[track_caller] pub fn with<F, R>(&'static self, f: F) -> R where F: FnOnce(&T) -> R, { match self.try_with(f) { Ok(res) => res, Err(_) => panic!("cannot access a task-local storage value without setting it first"), } } /// Accesses the current task-local and runs the provided closure. 
/// /// If the task-local with the associated key is not present, this /// method will return an `AccessError`. For a panicking variant, /// see `with`. pub fn try_with<F, R>(&'static self, f: F) -> Result<R, AccessError> where F: FnOnce(&T) -> R, { // If called after the thread-local storing the task-local is destroyed, // then we are outside of a closure where the task-local is set. // // Therefore, it is correct to return an AccessError if `try_with` // returns an error. let try_with_res = self.inner.try_with(|v| { // This call to `borrow` cannot panic because no user-defined code // runs while a `borrow_mut` call is active. v.borrow().as_ref().map(f) }); match try_with_res { Ok(Some(res)) => Ok(res), Ok(None) | Err(_) => Err(AccessError { _private: () }), } } } impl<T: Clone + 'static> LocalKey<T> { /// Returns a copy of the task-local value /// if the task-local value implements `Clone`. /// /// # Panics /// /// This function will panic if the task local doesn't have a value set. #[track_caller] pub fn get(&'static self) -> T { self.with(|v| v.clone()) } /// Returns a copy of the task-local value /// if the task-local value implements `Clone`. /// /// If the task-local with the associated key is not present, this /// method will return an `AccessError`. For a panicking variant, /// see `get`. pub fn try_get(&'static self) -> Result<T, AccessError> { self.try_with(|v| v.clone()) } } impl<T: 'static> fmt::Debug for LocalKey<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("LocalKey { .. }") } } pin_project! { /// A future that sets a value `T` of a task local for the future `F` during /// its execution. /// /// The value of the task-local must be `'static` and will be dropped on the /// completion of the future. /// /// Created by the function [`LocalKey::scope`](self::LocalKey::scope). /// /// ### Examples /// /// ``` /// # async fn dox() { /// tokio::task_local! 
{ /// static NUMBER: u32; /// } /// /// NUMBER.scope(1, async move { /// println!("task local value: {}", NUMBER.get()); /// }).await; /// # } /// ``` pub struct TaskLocalFuture<T, F> where T: 'static, { local: &'static LocalKey<T>, slot: Option<T>, #[pin] future: Option<F>, #[pin] _pinned: PhantomPinned, } impl<T: 'static, F> PinnedDrop for TaskLocalFuture<T, F> { fn drop(this: Pin<&mut Self>) { let this = this.project(); if mem::needs_drop::<F>() && this.future.is_some() { // Drop the future while the task-local is set, if possible. Otherwise // the future is dropped normally when the `Option<F>` field drops. let mut future = this.future; let _ = this.local.scope_inner(this.slot, || { future.set(None); }); } } } } impl<T, F> TaskLocalFuture<T, F> where T: 'static, { /// Returns the value stored in the task local by this `TaskLocalFuture`. /// /// The function returns: /// /// * `Some(T)` if the task local value exists. /// * `None` if the task local value has already been taken. /// /// Note that this function attempts to take the task local value even if /// the future has not yet completed. In that case, the value will no longer /// be available via the task local after the call to `take_value`. /// /// # Examples /// /// ``` /// # async fn dox() { /// tokio::task_local! 
{ /// static KEY: u32; /// } /// /// let fut = KEY.scope(42, async { /// // Do some async work /// }); /// /// let mut pinned = Box::pin(fut); /// /// // Complete the TaskLocalFuture /// let _ = pinned.as_mut().await; /// /// // And here, we can take task local value /// let value = pinned.as_mut().take_value(); /// /// assert_eq!(value, Some(42)); /// # } /// ``` pub fn take_value(self: Pin<&mut Self>) -> Option<T> { let this = self.project(); this.slot.take() } } impl<T: 'static, F: Future> Future for TaskLocalFuture<T, F> { type Output = F::Output; #[track_caller] fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let this = self.project(); let mut future_opt = this.future; let res = this .local .scope_inner(this.slot, || match future_opt.as_mut().as_pin_mut() { Some(fut) => { let res = fut.poll(cx); if res.is_ready() { future_opt.set(None); } Some(res) } None => None, }); match res { Ok(Some(res)) => res, Ok(None) => panic!("`TaskLocalFuture` polled after completion"), Err(err) => err.panic(), } } } impl<T: 'static, F> fmt::Debug for TaskLocalFuture<T, F> where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { /// Format the Option without Some. struct TransparentOption<'a, T> { value: &'a Option<T>, } impl<'a, T: fmt::Debug> fmt::Debug for TransparentOption<'a, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.value.as_ref() { Some(value) => value.fmt(f), // Hitting the None branch should not be possible. None => f.pad("<missing>"), } } } f.debug_struct("TaskLocalFuture") .field("value", &TransparentOption { value: &self.slot }) .finish() } } /// An error returned by [`LocalKey::try_with`](method@LocalKey::try_with). 
#[derive(Clone, Copy, Eq, PartialEq)] pub struct AccessError { _private: (), } impl fmt::Debug for AccessError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("AccessError").finish() } } impl fmt::Display for AccessError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt("task-local value not set", f) } } impl Error for AccessError {} enum ScopeInnerErr { BorrowError, AccessError, } impl ScopeInnerErr { #[track_caller] fn panic(&self) -> ! { match self { Self::BorrowError => panic!("cannot enter a task-local scope while the task-local storage is borrowed"), Self::AccessError => panic!("cannot enter a task-local scope during or after destruction of the underlying thread-local"), } } } impl From<std::cell::BorrowMutError> for ScopeInnerErr { fn from(_: std::cell::BorrowMutError) -> Self { Self::BorrowError } } impl From<std::thread::AccessError> for ScopeInnerErr { fn from(_: std::thread::AccessError) -> Self { Self::AccessError } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/task/coop/mod.rs
tokio/src/task/coop/mod.rs
#![cfg_attr(not(feature = "full"), allow(dead_code))]
#![cfg_attr(not(feature = "rt"), allow(unreachable_pub))]

//! Utilities for improved cooperative scheduling.
//!
//! ### Cooperative scheduling
//!
//! A single call to [`poll`] on a top-level task may potentially do a lot of
//! work before it returns `Poll::Pending`. If a task runs for a long period of
//! time without yielding back to the executor, it can starve other tasks
//! waiting on that executor to execute them, or drive underlying resources.
//! Since Rust does not have a runtime, it is difficult to forcibly preempt a
//! long-running task. Instead, this module provides an opt-in mechanism for
//! futures to collaborate with the executor to avoid starvation.
//!
//! Consider a future like this one:
//!
//! ```
//! # use tokio_stream::{Stream, StreamExt};
//! async fn drop_all<I: Stream + Unpin>(mut input: I) {
//!     while let Some(_) = input.next().await {}
//! }
//! ```
//!
//! It may look harmless, but consider what happens under heavy load if the
//! input stream is _always_ ready. If we spawn `drop_all`, the task will never
//! yield, and will starve other tasks and resources on the same executor.
//!
//! To account for this, Tokio has explicit yield points in a number of library
//! functions, which force tasks to return to the executor periodically.
//!
//!
//! #### unconstrained
//!
//! If necessary, [`task::unconstrained`] lets you opt a future out of Tokio's cooperative
//! scheduling. When a future is wrapped with `unconstrained`, it will never be forced to yield to
//! Tokio. For example:
//!
//! ```
//! # #[tokio::main(flavor = "current_thread")]
//! # async fn main() {
//! use tokio::{task, sync::mpsc};
//!
//! let fut = async {
//!     let (tx, mut rx) = mpsc::unbounded_channel();
//!
//!     for i in 0..1000 {
//!         let _ = tx.send(());
//!         // This will always be ready. If coop was in effect, this code would be forced to yield
//!         // periodically. However, if left unconstrained, then this code will never yield.
//!         rx.recv().await;
//!     }
//! };
//!
//! task::coop::unconstrained(fut).await;
//! # }
//! ```
//! [`poll`]: method@std::future::Future::poll
//! [`task::unconstrained`]: crate::task::unconstrained()

cfg_rt! {
    mod consume_budget;
    pub use consume_budget::consume_budget;

    mod unconstrained;
    pub use unconstrained::{unconstrained, Unconstrained};
}

// ```ignore
// # use tokio_stream::{Stream, StreamExt};
// async fn drop_all<I: Stream + Unpin>(mut input: I) {
//     while let Some(_) = input.next().await {
//         tokio::coop::proceed().await;
//     }
// }
// ```
//
// The `proceed` future will coordinate with the executor to make sure that
// every so often control is yielded back to the executor so it can run other
// tasks.
//
// # Placing yield points
//
// Voluntary yield points should be placed _after_ at least some work has been
// done. If they are not, a future sufficiently deep in the task hierarchy may
// end up _never_ getting to run because of the number of yield points that
// inevitably appear before it is reached. In general, you will want yield
// points to only appear in "leaf" futures -- those that do not themselves poll
// other futures. By doing this, you avoid double-counting each iteration of
// the outer future against the cooperating budget.

use crate::runtime::context;

/// Opaque type tracking the amount of "work" a task may still do before
/// yielding back to the scheduler.
///
/// `None` represents an unconstrained budget (operations are never limited);
/// `Some(n)` is the number of budget units remaining.
#[derive(Debug, Copy, Clone)]
pub(crate) struct Budget(Option<u8>);

/// Outcome of a single [`Budget::decrement`] attempt.
pub(crate) struct BudgetDecrement {
    // Whether a unit of budget could be consumed.
    success: bool,
    // Whether this decrement consumed the final unit (budget reached zero).
    hit_zero: bool,
}

impl Budget {
    /// Budget assigned to a task on each poll.
    ///
    /// The value itself is chosen somewhat arbitrarily. It needs to be high
    /// enough to amortize wakeup and scheduling costs, but low enough that we
    /// do not starve other tasks for too long. The value also needs to be high
    /// enough that particularly deep tasks are able to do at least some useful
    /// work at all.
    ///
    /// Note that as more yield points are added in the ecosystem, this value
    /// will probably also have to be raised.
    const fn initial() -> Budget {
        Budget(Some(128))
    }

    /// Returns an unconstrained budget. Operations will not be limited.
    pub(crate) const fn unconstrained() -> Budget {
        Budget(None)
    }

    /// `true` if at least one budget unit remains (always `true` when
    /// unconstrained).
    fn has_remaining(self) -> bool {
        self.0.map_or(true, |budget| budget > 0)
    }
}

/// Runs the given closure with a cooperative task budget. When the function
/// returns, the budget is reset to the value prior to calling the function.
#[inline(always)]
pub(crate) fn budget<R>(f: impl FnOnce() -> R) -> R {
    with_budget(Budget::initial(), f)
}

/// Runs the given closure with an unconstrained task budget. When the function returns, the budget
/// is reset to the value prior to calling the function.
#[inline(always)]
pub(crate) fn with_unconstrained<R>(f: impl FnOnce() -> R) -> R {
    with_budget(Budget::unconstrained(), f)
}

#[inline(always)]
fn with_budget<R>(budget: Budget, f: impl FnOnce() -> R) -> R {
    // Guard that restores the previous budget when dropped, so the reset
    // happens even if `f` panics.
    struct ResetGuard {
        prev: Budget,
    }

    impl Drop for ResetGuard {
        fn drop(&mut self) {
            let _ = context::budget(|cell| {
                cell.set(self.prev);
            });
        }
    }

    #[allow(unused_variables)]
    let maybe_guard = context::budget(|cell| {
        let prev = cell.get();
        cell.set(budget);

        ResetGuard { prev }
    });

    // The function is called regardless even if the budget is not successfully
    // set due to the thread-local being destroyed.
    f()
}

/// Returns `true` if there is still budget left on the task.
///
/// # Examples
///
/// This example defines a `Timeout` future that requires a given `future` to complete before the
/// specified duration elapses. If it does, its result is returned; otherwise, an error is returned
/// and the future is canceled.
///
/// Note that the future could exhaust the budget before we evaluate the timeout. Using `has_budget_remaining`,
/// we can detect this scenario and ensure the timeout is always checked.
///
/// ```
/// # use std::future::Future;
/// # use std::pin::{pin, Pin};
/// # use std::task::{ready, Context, Poll};
/// # use tokio::task::coop;
/// # use tokio::time::Sleep;
/// pub struct Timeout<T> {
///     future: T,
///     delay: Pin<Box<Sleep>>,
/// }
///
/// impl<T> Future for Timeout<T>
/// where
///     T: Future + Unpin,
/// {
///     type Output = Result<T::Output, ()>;
///
///     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
///         let this = Pin::into_inner(self);
///         let future = Pin::new(&mut this.future);
///         let delay = Pin::new(&mut this.delay);
///
///         // check if the future is ready
///         let had_budget_before = coop::has_budget_remaining();
///         if let Poll::Ready(v) = future.poll(cx) {
///             return Poll::Ready(Ok(v));
///         }
///         let has_budget_now = coop::has_budget_remaining();
///
///         // evaluate the timeout
///         if let (true, false) = (had_budget_before, has_budget_now) {
///             // it is the underlying future that exhausted the budget
///             ready!(pin!(coop::unconstrained(delay)).poll(cx));
///         } else {
///             ready!(delay.poll(cx));
///         }
///         return Poll::Ready(Err(()));
///     }
/// }
/// ```
#[inline(always)]
#[cfg_attr(docsrs, doc(cfg(feature = "rt")))]
pub fn has_budget_remaining() -> bool {
    // If the current budget cannot be accessed due to the thread-local being
    // shutdown, then we assume there is budget remaining.
    context::budget(|cell| cell.get().has_remaining()).unwrap_or(true)
}

cfg_rt_multi_thread! {
    /// Sets the current task's budget.
    pub(crate) fn set(budget: Budget) {
        let _ = context::budget(|cell| cell.set(budget));
    }
}

cfg_rt! {
    /// Forcibly removes the budgeting constraints early.
    ///
    /// Returns the remaining budget
    pub(crate) fn stop() -> Budget {
        context::budget(|cell| {
            let prev = cell.get();
            cell.set(Budget::unconstrained());
            prev
        }).unwrap_or(Budget::unconstrained())
    }
}

cfg_coop! {
    use pin_project_lite::pin_project;
    use std::cell::Cell;
    use std::future::Future;
    use std::marker::PhantomData;
    use std::pin::Pin;
    use std::task::{ready, Context, Poll};

    /// Value returned by the [`poll_proceed`] method.
    #[derive(Debug)]
    #[must_use]
    pub struct RestoreOnPending(Cell<Budget>, PhantomData<*mut ()>);

    impl RestoreOnPending {
        fn new(budget: Budget) -> Self {
            RestoreOnPending(
                Cell::new(budget),
                PhantomData,
            )
        }

        /// Signals that the task that obtained this `RestoreOnPending` was able to make
        /// progress. This prevents the task budget from being restored to the value
        /// it had prior to obtaining this instance when it is dropped.
        pub fn made_progress(&self) {
            self.0.set(Budget::unconstrained());
        }
    }

    impl Drop for RestoreOnPending {
        fn drop(&mut self) {
            // Don't reset if budget was unconstrained or if we made progress.
            // They are both represented as the remembered budget being unconstrained.
            let budget = self.0.get();
            if !budget.is_unconstrained() {
                let _ = context::budget(|cell| {
                    cell.set(budget);
                });
            }
        }
    }

    /// Decrements the task budget and returns [`Poll::Pending`] if the budget is depleted.
    /// This indicates that the task should yield to the scheduler. Otherwise, returns
    /// [`RestoreOnPending`] which can be used to commit the budget consumption.
    ///
    /// The returned [`RestoreOnPending`] will revert the budget to its former
    /// value when dropped unless [`RestoreOnPending::made_progress`]
    /// is called. It is the caller's responsibility to do so when it _was_ able to
    /// make progress after the call to [`poll_proceed`].
    /// Restoring the budget automatically ensures the task can try to make progress in some other
    /// way.
    ///
    /// Note that [`RestoreOnPending`] restores the budget **as it was before [`poll_proceed`]**.
    /// Therefore, if the budget is _further_ adjusted between when [`poll_proceed`] returns and
    /// [`RestoreOnPending`] is dropped, those adjustments are erased unless the caller indicates
    /// that progress was made.
    ///
    /// # Examples
    ///
    /// This example wraps the `futures::channel::mpsc::UnboundedReceiver` to
    /// cooperate with the Tokio scheduler. Each time a value is received, task budget
    /// is consumed. If no budget is available, the task yields to the scheduler.
    ///
    /// ```
    /// use std::pin::Pin;
    /// use std::task::{ready, Context, Poll};
    /// use tokio::task::coop;
    /// use futures::stream::{Stream, StreamExt};
    /// use futures::channel::mpsc::UnboundedReceiver;
    ///
    /// struct CoopUnboundedReceiver<T> {
    ///     receiver: UnboundedReceiver<T>,
    /// }
    ///
    /// impl<T> Stream for CoopUnboundedReceiver<T> {
    ///     type Item = T;
    ///     fn poll_next(
    ///         mut self: Pin<&mut Self>,
    ///         cx: &mut Context<'_>
    ///     ) -> Poll<Option<T>> {
    ///         let coop = ready!(coop::poll_proceed(cx));
    ///         match self.receiver.poll_next_unpin(cx) {
    ///             Poll::Ready(v) => {
    ///                 // We received a value, so consume budget.
    ///                 coop.made_progress();
    ///                 Poll::Ready(v)
    ///             }
    ///             Poll::Pending => Poll::Pending,
    ///         }
    ///     }
    /// }
    /// ```
    #[inline]
    pub fn poll_proceed(cx: &mut Context<'_>) -> Poll<RestoreOnPending> {
        context::budget(|cell| {
            let mut budget = cell.get();

            let decrement = budget.decrement();

            if decrement.success {
                // Capture the *pre-decrement* value so dropping the guard
                // without `made_progress()` restores it.
                let restore = RestoreOnPending::new(cell.get());
                cell.set(budget);

                // avoid double counting
                if decrement.hit_zero {
                    inc_budget_forced_yield_count();
                }

                Poll::Ready(restore)
            } else {
                register_waker(cx);

                Poll::Pending
            }
        }).unwrap_or(Poll::Ready(RestoreOnPending::new(Budget::unconstrained())))
    }

    /// Returns `Poll::Ready` if the current task has budget to consume, and `Poll::Pending` otherwise.
    ///
    /// Note that in contrast to `poll_proceed`, this method does not consume any budget and is used when
    /// polling for budget availability.
    #[inline]
    pub(crate) fn poll_budget_available(cx: &mut Context<'_>) -> Poll<()> {
        if has_budget_remaining() {
            Poll::Ready(())
        } else {
            register_waker(cx);

            Poll::Pending
        }
    }

    cfg_rt! {
        cfg_unstable_metrics! {
            #[inline(always)]
            fn inc_budget_forced_yield_count() {
                let _ = context::with_current(|handle| {
                    handle.scheduler_metrics().inc_budget_forced_yield_count();
                });
            }
        }

        cfg_not_unstable_metrics! {
            #[inline(always)]
            fn inc_budget_forced_yield_count() {}
        }

        fn register_waker(cx: &mut Context<'_>) {
            context::defer(cx.waker());
        }
    }

    cfg_not_rt! {
        #[inline(always)]
        fn inc_budget_forced_yield_count() {}

        fn register_waker(cx: &mut Context<'_>) {
            cx.waker().wake_by_ref()
        }
    }

    impl Budget {
        /// Decrements the budget. Returns `true` if successful. Decrementing fails
        /// when there is not enough remaining budget.
        fn decrement(&mut self) -> BudgetDecrement {
            if let Some(num) = &mut self.0 {
                if *num > 0 {
                    *num -= 1;

                    let hit_zero = *num == 0;

                    BudgetDecrement { success: true, hit_zero }
                } else {
                    BudgetDecrement { success: false, hit_zero: false }
                }
            } else {
                // Unconstrained budgets always succeed and never hit zero.
                BudgetDecrement { success: true, hit_zero: false }
            }
        }

        fn is_unconstrained(self) -> bool {
            self.0.is_none()
        }
    }

    pin_project! {
        /// Future wrapper to ensure cooperative scheduling created by [`cooperative`].
        #[must_use = "futures do nothing unless polled"]
        pub struct Coop<F: Future> {
            #[pin]
            pub(crate) fut: F,
        }
    }

    impl<F: Future> Future for Coop<F> {
        type Output = F::Output;

        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
            let coop = ready!(poll_proceed(cx));
            let me = self.project();
            if let Poll::Ready(ret) = me.fut.poll(cx) {
                coop.made_progress();
                Poll::Ready(ret)
            } else {
                Poll::Pending
            }
        }
    }

    /// Creates a wrapper future that makes the inner future cooperate with the Tokio scheduler.
    ///
    /// When polled, the wrapper will first call [`poll_proceed`] to consume task budget, and
    /// immediately yield if the budget has been depleted. If budget was available, the inner future
    /// is polled. The budget consumption will be made final using [`RestoreOnPending::made_progress`]
    /// if the inner future resolves to its final value.
    ///
    /// # Examples
    ///
    /// When you call `recv` on the `Receiver` of a [`tokio::sync::mpsc`](crate::sync::mpsc)
    /// channel, task budget will automatically be consumed when the next value is returned.
    /// This makes tasks that use Tokio mpsc channels automatically cooperative.
    ///
    /// If you're using [`futures::channel::mpsc`](https://docs.rs/futures/latest/futures/channel/mpsc/index.html)
    /// instead, automatic task budget consumption will not happen. This example shows how you can use
    /// `cooperative` to make `futures::channel::mpsc` channels cooperate with the scheduler in the
    /// same way Tokio channels do.
    ///
    /// ```
    /// use tokio::task::coop::cooperative;
    /// use futures::channel::mpsc::Receiver;
    /// use futures::stream::StreamExt;
    ///
    /// async fn receive_next<T>(receiver: &mut Receiver<T>) -> Option<T> {
    ///     // Use `StreamExt::next` to obtain a `Future` that resolves to the next value
    ///     let recv_future = receiver.next();
    ///     // Wrap it in a cooperative wrapper
    ///     let coop_future = cooperative(recv_future);
    ///     // And await
    ///     coop_future.await
    /// }
    /// ```
    #[inline]
    pub fn cooperative<F: Future>(fut: F) -> Coop<F> {
        Coop { fut }
    }
}

#[cfg(all(test, not(loom)))]
mod test {
    use super::*;

    #[cfg(all(target_family = "wasm", not(target_os = "wasi")))]
    use wasm_bindgen_test::wasm_bindgen_test as test;

    fn get() -> Budget {
        context::budget(|cell| cell.get()).unwrap_or(Budget::unconstrained())
    }

    #[test]
    fn budgeting() {
        use std::future::poll_fn;
        use tokio_test::*;

        assert!(get().0.is_none());

        let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx)));

        assert!(get().0.is_none());

        drop(coop);

        assert!(get().0.is_none());

        budget(|| {
            assert_eq!(get().0, Budget::initial().0);

            let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx)));
            assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 1);
            drop(coop);
            // we didn't make progress
            assert_eq!(get().0, Budget::initial().0);

            let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx)));
            assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 1);
            coop.made_progress();
            drop(coop);
            // we _did_ make progress
            assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 1);

            let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx)));
            assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 2);
            coop.made_progress();
            drop(coop);
            assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 2);

            budget(|| {
                assert_eq!(get().0, Budget::initial().0);

                let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx)));
                assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 1);
                coop.made_progress();
                drop(coop);
                assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 1);
            });

            assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 2);
        });

        assert!(get().0.is_none());

        budget(|| {
            let n = get().0.unwrap();

            for _ in 0..n {
                let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx)));
                coop.made_progress();
            }

            let mut task = task::spawn(poll_fn(|cx| {
                let coop = std::task::ready!(poll_proceed(cx));
                coop.made_progress();
                Poll::Ready(())
            }));

            assert_pending!(task.poll());
        });
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/task/coop/consume_budget.rs
tokio/src/task/coop/consume_budget.rs
/// Consumes a unit of budget and returns the execution back to the Tokio /// runtime *if* the task's coop budget was exhausted. /// /// The task will only yield if its entire coop budget has been exhausted. /// This function can be used in order to insert optional yield points into long /// computations that do not use Tokio resources like sockets or semaphores, /// without redundantly yielding to the runtime each time. /// /// # Examples /// /// Make sure that a function which returns a sum of (potentially lots of) /// iterated values is cooperative. /// /// ``` /// async fn sum_iterator(input: &mut impl std::iter::Iterator<Item=i64>) -> i64 { /// let mut sum: i64 = 0; /// while let Some(i) = input.next() { /// sum += i; /// tokio::task::consume_budget().await /// } /// sum /// } /// ``` #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] pub async fn consume_budget() { let mut status = std::task::Poll::Pending; std::future::poll_fn(move |cx| { std::task::ready!(crate::trace::trace_leaf(cx)); if status.is_ready() { return status; } status = crate::task::coop::poll_proceed(cx).map(|restore| { restore.made_progress(); }); status }) .await }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/task/coop/unconstrained.rs
tokio/src/task/coop/unconstrained.rs
use pin_project_lite::pin_project;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

pin_project! {
    /// Future for the [`unconstrained`](unconstrained) method.
    #[cfg_attr(docsrs, doc(cfg(feature = "rt")))]
    #[must_use = "Unconstrained does nothing unless polled"]
    pub struct Unconstrained<F> {
        #[pin]
        inner: F,
    }
}

impl<F> Future for Unconstrained<F>
where
    F: Future,
{
    type Output = <F as Future>::Output;

    cfg_coop! {
        // Coop is compiled in: poll the inner future with an unconstrained
        // budget so coop-aware leaf futures inside it are never forced to
        // yield because the task's budget ran out.
        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
            let inner = self.project().inner;
            crate::task::coop::with_unconstrained(|| inner.poll(cx))
        }
    }

    cfg_not_coop! {
        // Coop is compiled out: there is no budget to lift, so simply
        // forward the poll to the inner future.
        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
            let inner = self.project().inner;
            inner.poll(cx)
        }
    }
}

/// Turn off cooperative scheduling for a future. The future will never be forced to yield by
/// Tokio. Using this exposes your service to starvation if the unconstrained future never yields
/// otherwise.
///
/// See also the usage example in the [task module](index.html#unconstrained).
#[cfg_attr(docsrs, doc(cfg(feature = "rt")))]
pub fn unconstrained<F>(inner: F) -> Unconstrained<F> {
    Unconstrained { inner }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/doc/os.rs
tokio/src/doc/os.rs
//! See [`std::os`](https://doc.rust-lang.org/std/os/index.html).

// NOTE: Every item in this module is a documentation-only stand-in aliased to
// the uninhabitable `crate::doc::NotDefinedHere`; nothing here is usable at
// runtime.

/// Platform-specific extensions to `std` for Windows.
///
/// See [`std::os::windows`](https://doc.rust-lang.org/std/os/windows/index.html).
pub mod windows {
    /// Windows-specific extensions to general I/O primitives.
    ///
    /// See [`std::os::windows::io`](https://doc.rust-lang.org/std/os/windows/io/index.html).
    pub mod io {
        /// See [`std::os::windows::io::RawHandle`](https://doc.rust-lang.org/std/os/windows/io/type.RawHandle.html)
        pub type RawHandle = crate::doc::NotDefinedHere;

        /// See [`std::os::windows::io::OwnedHandle`](https://doc.rust-lang.org/std/os/windows/io/struct.OwnedHandle.html)
        pub type OwnedHandle = crate::doc::NotDefinedHere;

        /// See [`std::os::windows::io::AsRawHandle`](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawHandle.html)
        pub trait AsRawHandle {
            /// See [`std::os::windows::io::AsRawHandle::as_raw_handle`](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawHandle.html#tymethod.as_raw_handle)
            fn as_raw_handle(&self) -> RawHandle;
        }

        /// See [`std::os::windows::io::FromRawHandle`](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawHandle.html)
        pub trait FromRawHandle {
            /// See [`std::os::windows::io::FromRawHandle::from_raw_handle`](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawHandle.html#tymethod.from_raw_handle)
            unsafe fn from_raw_handle(handle: RawHandle) -> Self;
        }

        /// See [`std::os::windows::io::RawSocket`](https://doc.rust-lang.org/std/os/windows/io/type.RawSocket.html)
        pub type RawSocket = crate::doc::NotDefinedHere;

        /// See [`std::os::windows::io::AsRawSocket`](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawSocket.html)
        pub trait AsRawSocket {
            /// See [`std::os::windows::io::AsRawSocket::as_raw_socket`](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawSocket.html#tymethod.as_raw_socket)
            fn as_raw_socket(&self) -> RawSocket;
        }

        /// See [`std::os::windows::io::FromRawSocket`](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawSocket.html)
        pub trait FromRawSocket {
            /// See [`std::os::windows::io::FromRawSocket::from_raw_socket`](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawSocket.html#tymethod.from_raw_socket)
            unsafe fn from_raw_socket(sock: RawSocket) -> Self;
        }

        /// See [`std::os::windows::io::IntoRawSocket`](https://doc.rust-lang.org/std/os/windows/io/trait.IntoRawSocket.html)
        pub trait IntoRawSocket {
            /// See [`std::os::windows::io::IntoRawSocket::into_raw_socket`](https://doc.rust-lang.org/std/os/windows/io/trait.IntoRawSocket.html#tymethod.into_raw_socket)
            fn into_raw_socket(self) -> RawSocket;
        }

        /// See [`std::os::windows::io::BorrowedHandle`](https://doc.rust-lang.org/std/os/windows/io/struct.BorrowedHandle.html)
        pub type BorrowedHandle<'handle> = crate::doc::NotDefinedHere;

        /// See [`std::os::windows::io::AsHandle`](https://doc.rust-lang.org/std/os/windows/io/trait.AsHandle.html)
        pub trait AsHandle {
            /// See [`std::os::windows::io::AsHandle::as_handle`](https://doc.rust-lang.org/std/os/windows/io/trait.AsHandle.html#tymethod.as_handle)
            fn as_handle(&self) -> BorrowedHandle<'_>;
        }

        /// See [`std::os::windows::io::BorrowedSocket`](https://doc.rust-lang.org/std/os/windows/io/struct.BorrowedSocket.html)
        pub type BorrowedSocket<'socket> = crate::doc::NotDefinedHere;

        /// See [`std::os::windows::io::AsSocket`](https://doc.rust-lang.org/std/os/windows/io/trait.AsSocket.html)
        pub trait AsSocket {
            /// See [`std::os::windows::io::AsSocket::as_socket`](https://doc.rust-lang.org/std/os/windows/io/trait.AsSocket.html#tymethod.as_socket)
            fn as_socket(&self) -> BorrowedSocket<'_>;
        }
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/doc/mod.rs
tokio/src/doc/mod.rs
//! Types which are documented locally in the Tokio crate, but does not actually //! live here. //! //! **Note** this module is only visible on docs.rs, you cannot use it directly //! in your own code. /// The name of a type which is not defined here. /// /// This is typically used as an alias for another type, like so: /// /// ```rust,ignore /// /// See [some::other::location](https://example.com). /// type DEFINED_ELSEWHERE = crate::doc::NotDefinedHere; /// ``` /// /// This type is uninhabitable like the [`never` type] to ensure that no one /// will ever accidentally use it. /// /// [`never` type]: https://doc.rust-lang.org/std/primitive.never.html #[derive(Debug)] pub enum NotDefinedHere {} #[cfg(feature = "net")] impl mio::event::Source for NotDefinedHere { fn register( &mut self, _registry: &mio::Registry, _token: mio::Token, _interests: mio::Interest, ) -> std::io::Result<()> { Ok(()) } fn reregister( &mut self, _registry: &mio::Registry, _token: mio::Token, _interests: mio::Interest, ) -> std::io::Result<()> { Ok(()) } fn deregister(&mut self, _registry: &mio::Registry) -> std::io::Result<()> { Ok(()) } } #[cfg(any(feature = "net", feature = "fs"))] pub mod os;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/stdout.rs
tokio/src/io/stdout.rs
use crate::io::blocking::Blocking;
use crate::io::stdio_common::SplitByUtf8BoundaryIfWindows;
use crate::io::AsyncWrite;
use std::io;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;

cfg_io_std! {
    /// A handle to the standard output stream of a process.
    ///
    /// Concurrent writes to stdout must be executed with care: Only individual
    /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular
    /// you should be aware that writes using [`write_all`] are not guaranteed
    /// to occur as a single write, so multiple threads writing data with
    /// [`write_all`] may result in interleaved output.
    ///
    /// Created by the [`stdout`] function.
    ///
    /// [`stdout`]: stdout()
    /// [`AsyncWrite`]: AsyncWrite
    /// [`write_all`]: crate::io::AsyncWriteExt::write_all()
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::{self, AsyncWriteExt};
    ///
    /// #[tokio::main]
    /// async fn main() -> io::Result<()> {
    ///     let mut stdout = io::stdout();
    ///     stdout.write_all(b"Hello world!").await?;
    ///     Ok(())
    /// }
    /// ```
    ///
    /// The following is an example of using `stdio` with a loop.
    ///
    /// ```
    /// use tokio::io::{self, AsyncWriteExt};
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let messages = vec!["hello", " world\n"];
    ///
    ///     // When you use `stdio` in a loop, it is recommended to create
    ///     // a single `stdio` instance outside the loop and call a write
    ///     // operation against that instance on each loop.
    ///     //
    ///     // Repeatedly creating `stdout` instances inside the loop and
    ///     // writing to that handle could result in mangled output since
    ///     // each write operation is handled by a different blocking thread.
    ///     let mut stdout = io::stdout();
    ///
    ///     for message in &messages {
    ///         stdout.write_all(message.as_bytes()).await.unwrap();
    ///         stdout.flush().await.unwrap();
    ///     }
    /// }
    /// ```
    #[derive(Debug)]
    pub struct Stdout {
        // Writes go through the `Blocking` adapter (stdout has no async OS
        // interface); on Windows the outer wrapper additionally splits writes
        // on UTF-8 boundaries — see `crate::io::stdio_common`.
        std: SplitByUtf8BoundaryIfWindows<Blocking<std::io::Stdout>>,
    }

    /// Constructs a new handle to the standard output of the current process.
    ///
    /// The returned handle allows writing to standard out from within the
    /// Tokio runtime.
    ///
    /// Concurrent writes to stdout must be executed with care: Only individual
    /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular
    /// you should be aware that writes using [`write_all`] are not guaranteed
    /// to occur as a single write, so multiple threads writing data with
    /// [`write_all`] may result in interleaved output.
    ///
    /// [`AsyncWrite`]: AsyncWrite
    /// [`write_all`]: crate::io::AsyncWriteExt::write_all()
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::{self, AsyncWriteExt};
    ///
    /// #[tokio::main]
    /// async fn main() -> io::Result<()> {
    ///     let mut stdout = io::stdout();
    ///     stdout.write_all(b"Hello world!").await?;
    ///     Ok(())
    /// }
    /// ```
    ///
    /// The following is an example of using `stdio` with a loop.
    ///
    /// ```
    /// use tokio::io::{self, AsyncWriteExt};
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let messages = vec!["hello", " world\n"];
    ///
    ///     // When you use `stdio` in a loop, it is recommended to create
    ///     // a single `stdio` instance outside the loop and call a write
    ///     // operation against that instance on each loop.
    ///     //
    ///     // Repeatedly creating `stdout` instances inside the loop and
    ///     // writing to that handle could result in mangled output since
    ///     // each write operation is handled by a different blocking thread.
    ///     let mut stdout = io::stdout();
    ///
    ///     for message in &messages {
    ///         stdout.write_all(message.as_bytes()).await.unwrap();
    ///         stdout.flush().await.unwrap();
    ///     }
    /// }
    /// ```
    pub fn stdout() -> Stdout {
        let std = io::stdout();
        // SAFETY: The `Read` implementation of `std` does not read from the
        // buffer it is borrowing and correctly reports the length of the data
        // written into the buffer.
        let blocking = unsafe { Blocking::new(std) };
        Stdout {
            std: SplitByUtf8BoundaryIfWindows::new(blocking),
        }
    }
}

#[cfg(unix)]
mod sys {
    use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, RawFd};

    use super::Stdout;

    impl AsRawFd for Stdout {
        fn as_raw_fd(&self) -> RawFd {
            // Delegates to the process-wide stdout descriptor.
            std::io::stdout().as_raw_fd()
        }
    }

    impl AsFd for Stdout {
        fn as_fd(&self) -> BorrowedFd<'_> {
            unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) }
        }
    }
}

cfg_windows! {
    use crate::os::windows::io::{AsHandle, BorrowedHandle, AsRawHandle, RawHandle};

    impl AsRawHandle for Stdout {
        fn as_raw_handle(&self) -> RawHandle {
            std::io::stdout().as_raw_handle()
        }
    }

    impl AsHandle for Stdout {
        fn as_handle(&self) -> BorrowedHandle<'_> {
            unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) }
        }
    }
}

// All `AsyncWrite` operations simply delegate to the wrapped
// `SplitByUtf8BoundaryIfWindows<Blocking<...>>` field.
impl AsyncWrite for Stdout {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Pin::new(&mut self.std).poll_write(cx, buf)
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        Pin::new(&mut self.std).poll_flush(cx)
    }

    fn poll_shutdown(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), io::Error>> {
        Pin::new(&mut self.std).poll_shutdown(cx)
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/poll_evented.rs
tokio/src/io/poll_evented.rs
use crate::io::interest::Interest;
use crate::runtime::io::Registration;
use crate::runtime::scheduler;

use mio::event::Source;
use std::fmt;
use std::io;
use std::ops::Deref;
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::task::ready;

cfg_io_driver! {
    /// Associates an I/O resource that implements the [`std::io::Read`] and/or
    /// [`std::io::Write`] traits with the reactor that drives it.
    ///
    /// `PollEvented` uses [`Registration`] internally to take a type that
    /// implements [`mio::event::Source`] as well as [`std::io::Read`] and/or
    /// [`std::io::Write`] and associate it with a reactor that will drive it.
    ///
    /// Once the [`mio::event::Source`] type is wrapped by `PollEvented`, it can be
    /// used from within the future's execution model. As such, the
    /// `PollEvented` type provides [`AsyncRead`] and [`AsyncWrite`]
    /// implementations using the underlying I/O resource as well as readiness
    /// events provided by the reactor.
    ///
    /// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is
    /// `Sync`), the caller must ensure that there are at most two tasks that
    /// use a `PollEvented` instance concurrently. One for reading and one for
    /// writing. While violating this requirement is "safe" from a Rust memory
    /// model point of view, it will result in unexpected behavior in the form
    /// of lost notifications and tasks hanging.
    ///
    /// ## Readiness events
    ///
    /// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations,
    /// this type also supports access to the underlying readiness event stream.
    /// While similar in function to what [`Registration`] provides, the
    /// semantics are a bit different.
    ///
    /// Two functions are provided to access the readiness events:
    /// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the
    /// current readiness state of the `PollEvented` instance. If
    /// [`poll_read_ready`] indicates read readiness, immediately calling
    /// [`poll_read_ready`] again will also indicate read readiness.
    ///
    /// When the operation is attempted and is unable to succeed due to the I/O
    /// resource not being ready, the caller must call [`clear_readiness`].
    /// This clears the readiness state until a new readiness event is received.
    ///
    /// This allows the caller to implement additional functions. For example,
    /// [`TcpListener`] implements `poll_accept` by using [`poll_read_ready`] and
    /// [`clear_readiness`].
    ///
    /// ## Platform-specific events
    ///
    /// `PollEvented` also allows receiving platform-specific `mio::Ready` events.
    /// These events are included as part of the read readiness event stream. The
    /// write readiness event stream is only for `Ready::writable()` events.
    ///
    /// [`AsyncRead`]: crate::io::AsyncRead
    /// [`AsyncWrite`]: crate::io::AsyncWrite
    /// [`TcpListener`]: crate::net::TcpListener
    /// [`clear_readiness`]: Registration::clear_readiness
    /// [`poll_read_ready`]: Registration::poll_read_ready
    /// [`poll_write_ready`]: Registration::poll_write_ready
    pub(crate) struct PollEvented<E: Source> {
        // `Option` so the resource can be taken out (and deregistered) in
        // `into_inner`/`Drop`; it is `Some` at all other times.
        io: Option<E>,
        registration: Registration,
    }
}

// ===== impl PollEvented =====

impl<E: Source> PollEvented<E> {
    /// Creates a new `PollEvented` associated with the default reactor.
    ///
    /// The returned `PollEvented` has readable and writable interests. For more control, use
    /// [`Self::new_with_interest`].
    ///
    /// # Panics
    ///
    /// This function panics if thread-local runtime is not set.
    ///
    /// The runtime is usually set implicitly when this function is called
    /// from a future driven by a tokio runtime, otherwise runtime can be set
    /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function.
    #[track_caller]
    #[cfg_attr(feature = "signal", allow(unused))]
    pub(crate) fn new(io: E) -> io::Result<Self> {
        PollEvented::new_with_interest(io, Interest::READABLE | Interest::WRITABLE)
    }

    /// Creates a new `PollEvented` associated with the default reactor, for
    /// specific `Interest` state. `new_with_interest` should be used over `new`
    /// when you need control over the readiness state, such as when a file
    /// descriptor only allows reads. This does not add `hup` or `error` so if
    /// you are interested in those states, you will need to add them to the
    /// readiness state passed to this function.
    ///
    /// # Panics
    ///
    /// This function panics if thread-local runtime is not set.
    ///
    /// The runtime is usually set implicitly when this function is called from
    /// a future driven by a tokio runtime, otherwise runtime can be set
    /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter)
    /// function.
    #[track_caller]
    #[cfg_attr(feature = "signal", allow(unused))]
    pub(crate) fn new_with_interest(io: E, interest: Interest) -> io::Result<Self> {
        Self::new_with_interest_and_handle(io, interest, scheduler::Handle::current())
    }

    #[track_caller]
    pub(crate) fn new_with_interest_and_handle(
        mut io: E,
        interest: Interest,
        handle: scheduler::Handle,
    ) -> io::Result<Self> {
        let registration = Registration::new_with_interest_and_handle(&mut io, interest, handle)?;
        Ok(Self {
            io: Some(io),
            registration,
        })
    }

    /// Returns a reference to the registration.
    #[cfg(any(feature = "net", all(feature = "process", target_os = "linux")))]
    pub(crate) fn registration(&self) -> &Registration {
        &self.registration
    }

    /// Deregisters the inner io from the registration and returns a Result containing the inner io.
    #[cfg(any(feature = "net", feature = "process"))]
    pub(crate) fn into_inner(mut self) -> io::Result<E> {
        let mut inner = self.io.take().unwrap(); // As io shouldn't ever be None, just unwrap here.
        self.registration.deregister(&mut inner)?;
        Ok(inner)
    }

    /// Re-register under new runtime with `interest`.
    #[cfg(all(feature = "process", target_os = "linux"))]
    pub(crate) fn reregister(&mut self, interest: Interest) -> io::Result<()> {
        let io = self.io.as_mut().unwrap(); // As io shouldn't ever be None, just unwrap here.
        // Best-effort deregistration from the old runtime before registering
        // with the current one; the error is intentionally ignored.
        let _ = self.registration.deregister(io);
        self.registration =
            Registration::new_with_interest_and_handle(io, interest, scheduler::Handle::current())?;

        Ok(())
    }
}

feature! {
    #![any(feature = "net", all(unix, feature = "process"))]

    use crate::io::ReadBuf;
    use std::task::{Context, Poll};

    impl<E: Source> PollEvented<E> {
        // Safety: The caller must ensure that `E` can read into uninitialized memory
        pub(crate) unsafe fn poll_read<'a>(
            &'a self,
            cx: &mut Context<'_>,
            buf: &mut ReadBuf<'_>,
        ) -> Poll<io::Result<()>>
        where
            &'a E: io::Read + 'a,
        {
            use std::io::Read;

            loop {
                let evt = ready!(self.registration.poll_read_ready(cx))?;

                let b = unsafe {
                    &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit<u8>] as *mut [u8])
                };

                // used only when the cfgs below apply
                #[allow(unused_variables)]
                let len = b.len();

                match self.io.as_ref().unwrap().read(b) {
                    Ok(n) => {
                        // When mio is using the epoll or kqueue selector, reading a partially full
                        // buffer is sufficient to show that the socket buffer has been drained.
                        //
                        // This optimization does not work for level-triggered selectors such as
                        // windows or when poll is used.
                        //
                        // Read more:
                        // https://github.com/tokio-rs/tokio/issues/5866
                        #[cfg(all(
                            not(mio_unsupported_force_poll_poll),
                            any(
                                // epoll
                                target_os = "android",
                                target_os = "illumos",
                                target_os = "linux",
                                target_os = "redox",
                                // kqueue
                                target_os = "dragonfly",
                                target_os = "freebsd",
                                target_os = "ios",
                                target_os = "macos",
                                target_os = "netbsd",
                                target_os = "openbsd",
                                target_os = "tvos",
                                target_os = "visionos",
                                target_os = "watchos",
                            )
                        ))]
                        if 0 < n && n < len {
                            self.registration.clear_readiness(evt);
                        }

                        // Safety: We trust `TcpStream::read` to have filled up `n` bytes in the
                        // buffer.
                        unsafe { buf.assume_init(n) };
                        buf.advance(n);
                        return Poll::Ready(Ok(()));
                    },
                    Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
                        self.registration.clear_readiness(evt);
                    }
                    Err(e) => return Poll::Ready(Err(e)),
                }
            }
        }

        pub(crate) fn poll_write<'a>(&'a self, cx: &mut Context<'_>, buf: &[u8]) -> Poll<io::Result<usize>>
        where
            &'a E: io::Write + 'a,
        {
            use std::io::Write;

            loop {
                let evt = ready!(self.registration.poll_write_ready(cx))?;

                match self.io.as_ref().unwrap().write(buf) {
                    Ok(n) => {
                        // if we write only part of our buffer, this is sufficient on unix to show
                        // that the socket buffer is full.  Unfortunately this assumption
                        // fails for level-triggered selectors (like on Windows or poll even for
                        // UNIX): https://github.com/tokio-rs/tokio/issues/5866
                        if n > 0 && (!cfg!(windows) && !cfg!(mio_unsupported_force_poll_poll) && n < buf.len()) {
                            self.registration.clear_readiness(evt);
                        }

                        return Poll::Ready(Ok(n));
                    },
                    Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
                        self.registration.clear_readiness(evt);
                    }
                    Err(e) => return Poll::Ready(Err(e)),
                }
            }
        }

        #[cfg(any(feature = "net", feature = "process"))]
        pub(crate) fn poll_write_vectored<'a>(
            &'a self,
            cx: &mut Context<'_>,
            bufs: &[io::IoSlice<'_>],
        ) -> Poll<io::Result<usize>>
        where
            &'a E: io::Write + 'a,
        {
            use std::io::Write;
            self.registration.poll_write_io(cx, || self.io.as_ref().unwrap().write_vectored(bufs))
        }
    }
}

impl<E: Source> UnwindSafe for PollEvented<E> {}

impl<E: Source> RefUnwindSafe for PollEvented<E> {}

impl<E: Source> Deref for PollEvented<E> {
    type Target = E;

    fn deref(&self) -> &E {
        self.io.as_ref().unwrap()
    }
}

impl<E: Source + fmt::Debug> fmt::Debug for PollEvented<E> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("PollEvented").field("io", &self.io).finish()
    }
}

impl<E: Source> Drop for PollEvented<E> {
    fn drop(&mut self) {
        if let Some(mut io) = self.io.take() {
            // Ignore errors
            let _ = self.registration.deregister(&mut io);
        }
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/async_fd.rs
tokio/src/io/async_fd.rs
use crate::io::{Interest, Ready}; use crate::runtime::io::{ReadyEvent, Registration}; use crate::runtime::scheduler; use mio::unix::SourceFd; use std::error::Error; use std::fmt; use std::io; use std::os::unix::io::{AsRawFd, RawFd}; use std::task::{ready, Context, Poll}; /// Associates an IO object backed by a Unix file descriptor with the tokio /// reactor, allowing for readiness to be polled. The file descriptor must be of /// a type that can be used with the OS polling facilities (ie, `poll`, `epoll`, /// `kqueue`, etc), such as a network socket or pipe, and the file descriptor /// must have the nonblocking mode set to true. /// /// Creating an [`AsyncFd`] registers the file descriptor with the current tokio /// Reactor, allowing you to directly await the file descriptor being readable /// or writable. Once registered, the file descriptor remains registered until /// the [`AsyncFd`] is dropped. /// /// The [`AsyncFd`] takes ownership of an arbitrary object to represent the IO /// object. It is intended that the inner object will handle closing the file /// descriptor when it is dropped, avoiding resource leaks and ensuring that the /// [`AsyncFd`] can clean up the registration before closing the file descriptor. /// The [`AsyncFd::into_inner`] function can be used to extract the inner object /// to retake control from the tokio IO reactor. The [`OwnedFd`] type is often /// used as the inner object, as it is the simplest type that closes the fd on /// drop. /// /// The inner object is required to implement [`AsRawFd`]. This file descriptor /// must not change while [`AsyncFd`] owns the inner object, i.e. the /// [`AsRawFd::as_raw_fd`] method on the inner type must always return the same /// file descriptor when called multiple times. Failure to uphold this results /// in unspecified behavior in the IO driver, which may include breaking /// notifications for other sockets/etc. 
/// /// Polling for readiness is done by calling the async functions [`readable`] /// and [`writable`]. These functions complete when the associated readiness /// condition is observed. Any number of tasks can query the same `AsyncFd` in /// parallel, on the same or different conditions. /// /// On some platforms, the readiness detecting mechanism relies on /// edge-triggered notifications. This means that the OS will only notify Tokio /// when the file descriptor transitions from not-ready to ready. For this to /// work you should first try to read or write and only poll for readiness /// if that fails with an error of [`std::io::ErrorKind::WouldBlock`]. /// /// Tokio internally tracks when it has received a ready notification, and when /// readiness checking functions like [`readable`] and [`writable`] are called, /// if the readiness flag is set, these async functions will complete /// immediately. This however does mean that it is critical to ensure that this /// ready flag is cleared when (and only when) the file descriptor ceases to be /// ready. The [`AsyncFdReadyGuard`] returned from readiness checking functions /// serves this function; after calling a readiness-checking async function, /// you must use this [`AsyncFdReadyGuard`] to signal to tokio whether the file /// descriptor is no longer in a ready state. /// /// ## Use with to a poll-based API /// /// In some cases it may be desirable to use `AsyncFd` from APIs similar to /// [`TcpStream::poll_read_ready`]. The [`AsyncFd::poll_read_ready`] and /// [`AsyncFd::poll_write_ready`] functions are provided for this purpose. /// Because these functions don't create a future to hold their state, they have /// the limitation that only one task can wait on each direction (read or write) /// at a time. /// /// # Examples /// /// This example shows how to turn [`std::net::TcpStream`] asynchronous using /// `AsyncFd`. 
It implements the read/write operations both as an `async fn` /// and using the IO traits [`AsyncRead`] and [`AsyncWrite`]. /// /// ```no_run /// use std::io::{self, Read, Write}; /// use std::net::TcpStream; /// use std::pin::Pin; /// use std::task::{ready, Context, Poll}; /// use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; /// use tokio::io::unix::AsyncFd; /// /// pub struct AsyncTcpStream { /// inner: AsyncFd<TcpStream>, /// } /// /// impl AsyncTcpStream { /// pub fn new(tcp: TcpStream) -> io::Result<Self> { /// tcp.set_nonblocking(true)?; /// Ok(Self { /// inner: AsyncFd::new(tcp)?, /// }) /// } /// /// pub async fn read(&self, out: &mut [u8]) -> io::Result<usize> { /// loop { /// let mut guard = self.inner.readable().await?; /// /// match guard.try_io(|inner| inner.get_ref().read(out)) { /// Ok(result) => return result, /// Err(_would_block) => continue, /// } /// } /// } /// /// pub async fn write(&self, buf: &[u8]) -> io::Result<usize> { /// loop { /// let mut guard = self.inner.writable().await?; /// /// match guard.try_io(|inner| inner.get_ref().write(buf)) { /// Ok(result) => return result, /// Err(_would_block) => continue, /// } /// } /// } /// } /// /// impl AsyncRead for AsyncTcpStream { /// fn poll_read( /// self: Pin<&mut Self>, /// cx: &mut Context<'_>, /// buf: &mut ReadBuf<'_> /// ) -> Poll<io::Result<()>> { /// loop { /// let mut guard = ready!(self.inner.poll_read_ready(cx))?; /// /// let unfilled = buf.initialize_unfilled(); /// match guard.try_io(|inner| inner.get_ref().read(unfilled)) { /// Ok(Ok(len)) => { /// buf.advance(len); /// return Poll::Ready(Ok(())); /// }, /// Ok(Err(err)) => return Poll::Ready(Err(err)), /// Err(_would_block) => continue, /// } /// } /// } /// } /// /// impl AsyncWrite for AsyncTcpStream { /// fn poll_write( /// self: Pin<&mut Self>, /// cx: &mut Context<'_>, /// buf: &[u8] /// ) -> Poll<io::Result<usize>> { /// loop { /// let mut guard = ready!(self.inner.poll_write_ready(cx))?; /// /// match 
guard.try_io(|inner| inner.get_ref().write(buf)) { /// Ok(result) => return Poll::Ready(result), /// Err(_would_block) => continue, /// } /// } /// } /// /// fn poll_flush( /// self: Pin<&mut Self>, /// cx: &mut Context<'_>, /// ) -> Poll<io::Result<()>> { /// // tcp flush is a no-op /// Poll::Ready(Ok(())) /// } /// /// fn poll_shutdown( /// self: Pin<&mut Self>, /// cx: &mut Context<'_>, /// ) -> Poll<io::Result<()>> { /// self.inner.get_ref().shutdown(std::net::Shutdown::Write)?; /// Poll::Ready(Ok(())) /// } /// } /// ``` /// /// [`readable`]: method@Self::readable /// [`writable`]: method@Self::writable /// [`AsyncFdReadyGuard`]: struct@self::AsyncFdReadyGuard /// [`TcpStream::poll_read_ready`]: struct@crate::net::TcpStream /// [`AsyncRead`]: trait@crate::io::AsyncRead /// [`AsyncWrite`]: trait@crate::io::AsyncWrite /// [`OwnedFd`]: struct@std::os::fd::OwnedFd pub struct AsyncFd<T: AsRawFd> { registration: Registration, // The inner value is always present. the Option is required for `drop` and `into_inner`. // In all other methods `unwrap` is valid, and will never panic. inner: Option<T>, } /// Represents an IO-ready event detected on a particular file descriptor that /// has not yet been acknowledged. This is a `must_use` structure to help ensure /// that you do not forget to explicitly clear (or not clear) the event. /// /// This type exposes an immutable reference to the underlying IO object. #[must_use = "You must explicitly choose whether to clear the readiness state by calling a method on ReadyGuard"] pub struct AsyncFdReadyGuard<'a, T: AsRawFd> { async_fd: &'a AsyncFd<T>, event: Option<ReadyEvent>, } /// Represents an IO-ready event detected on a particular file descriptor that /// has not yet been acknowledged. This is a `must_use` structure to help ensure /// that you do not forget to explicitly clear (or not clear) the event. /// /// This type exposes a mutable reference to the underlying IO object. 
#[must_use = "You must explicitly choose whether to clear the readiness state by calling a method on ReadyGuard"] pub struct AsyncFdReadyMutGuard<'a, T: AsRawFd> { async_fd: &'a mut AsyncFd<T>, event: Option<ReadyEvent>, } impl<T: AsRawFd> AsyncFd<T> { /// Creates an [`AsyncFd`] backed by (and taking ownership of) an object /// implementing [`AsRawFd`]. The backing file descriptor is cached at the /// time of creation. /// /// Only configures the [`Interest::READABLE`] and [`Interest::WRITABLE`] interests. For more /// control, use [`AsyncFd::with_interest`]. /// /// This method must be called in the context of a tokio runtime. /// /// # Panics /// /// This function panics if there is no current reactor set, or if the `rt` /// feature flag is not enabled. #[inline] #[track_caller] pub fn new(inner: T) -> io::Result<Self> where T: AsRawFd, { Self::with_interest(inner, Interest::READABLE | Interest::WRITABLE) } /// Creates an [`AsyncFd`] backed by (and taking ownership of) an object /// implementing [`AsRawFd`], with a specific [`Interest`]. The backing /// file descriptor is cached at the time of creation. /// /// # Panics /// /// This function panics if there is no current reactor set, or if the `rt` /// feature flag is not enabled. #[inline] #[track_caller] pub fn with_interest(inner: T, interest: Interest) -> io::Result<Self> where T: AsRawFd, { Self::new_with_handle_and_interest(inner, scheduler::Handle::current(), interest) } #[track_caller] pub(crate) fn new_with_handle_and_interest( inner: T, handle: scheduler::Handle, interest: Interest, ) -> io::Result<Self> { Self::try_new_with_handle_and_interest(inner, handle, interest).map_err(Into::into) } /// Creates an [`AsyncFd`] backed by (and taking ownership of) an object /// implementing [`AsRawFd`]. The backing file descriptor is cached at the /// time of creation. /// /// Only configures the [`Interest::READABLE`] and [`Interest::WRITABLE`] interests. For more /// control, use [`AsyncFd::try_with_interest`]. 
/// /// This method must be called in the context of a tokio runtime. /// /// In the case of failure, it returns [`AsyncFdTryNewError`] that contains the original object /// passed to this function. /// /// # Panics /// /// This function panics if there is no current reactor set, or if the `rt` /// feature flag is not enabled. #[inline] #[track_caller] pub fn try_new(inner: T) -> Result<Self, AsyncFdTryNewError<T>> where T: AsRawFd, { Self::try_with_interest(inner, Interest::READABLE | Interest::WRITABLE) } /// Creates an [`AsyncFd`] backed by (and taking ownership of) an object /// implementing [`AsRawFd`], with a specific [`Interest`]. The backing /// file descriptor is cached at the time of creation. /// /// In the case of failure, it returns [`AsyncFdTryNewError`] that contains the original object /// passed to this function. /// /// # Panics /// /// This function panics if there is no current reactor set, or if the `rt` /// feature flag is not enabled. #[inline] #[track_caller] pub fn try_with_interest(inner: T, interest: Interest) -> Result<Self, AsyncFdTryNewError<T>> where T: AsRawFd, { Self::try_new_with_handle_and_interest(inner, scheduler::Handle::current(), interest) } #[track_caller] pub(crate) fn try_new_with_handle_and_interest( inner: T, handle: scheduler::Handle, interest: Interest, ) -> Result<Self, AsyncFdTryNewError<T>> { let fd = inner.as_raw_fd(); match Registration::new_with_interest_and_handle(&mut SourceFd(&fd), interest, handle) { Ok(registration) => Ok(AsyncFd { registration, inner: Some(inner), }), Err(cause) => Err(AsyncFdTryNewError { inner, cause }), } } /// Returns a shared reference to the backing object of this [`AsyncFd`]. #[inline] pub fn get_ref(&self) -> &T { self.inner.as_ref().unwrap() } /// Returns a mutable reference to the backing object of this [`AsyncFd`]. 
#[inline] pub fn get_mut(&mut self) -> &mut T { self.inner.as_mut().unwrap() } fn take_inner(&mut self) -> Option<T> { let inner = self.inner.take()?; let fd = inner.as_raw_fd(); let _ = self.registration.deregister(&mut SourceFd(&fd)); Some(inner) } /// Deregisters this file descriptor and returns ownership of the backing /// object. pub fn into_inner(mut self) -> T { self.take_inner().unwrap() } /// Polls for read readiness. /// /// If the file descriptor is not currently ready for reading, this method /// will store a clone of the [`Waker`] from the provided [`Context`]. When the /// file descriptor becomes ready for reading, [`Waker::wake`] will be called. /// /// Note that on multiple calls to [`poll_read_ready`] or /// [`poll_read_ready_mut`], only the `Waker` from the `Context` passed to the /// most recent call is scheduled to receive a wakeup. (However, /// [`poll_write_ready`] retains a second, independent waker). /// /// This method is intended for cases where creating and pinning a future /// via [`readable`] is not feasible. Where possible, using [`readable`] is /// preferred, as this supports polling from multiple tasks at once. /// /// This method takes `&self`, so it is possible to call this method /// concurrently with other methods on this struct. This method only /// provides shared access to the inner IO resource when handling the /// [`AsyncFdReadyGuard`]. 
/// /// [`poll_read_ready`]: method@Self::poll_read_ready /// [`poll_read_ready_mut`]: method@Self::poll_read_ready_mut /// [`poll_write_ready`]: method@Self::poll_write_ready /// [`readable`]: method@Self::readable /// [`Context`]: struct@std::task::Context /// [`Waker`]: struct@std::task::Waker /// [`Waker::wake`]: method@std::task::Waker::wake pub fn poll_read_ready<'a>( &'a self, cx: &mut Context<'_>, ) -> Poll<io::Result<AsyncFdReadyGuard<'a, T>>> { let event = ready!(self.registration.poll_read_ready(cx))?; Poll::Ready(Ok(AsyncFdReadyGuard { async_fd: self, event: Some(event), })) } /// Polls for read readiness. /// /// If the file descriptor is not currently ready for reading, this method /// will store a clone of the [`Waker`] from the provided [`Context`]. When the /// file descriptor becomes ready for reading, [`Waker::wake`] will be called. /// /// Note that on multiple calls to [`poll_read_ready`] or /// [`poll_read_ready_mut`], only the `Waker` from the `Context` passed to the /// most recent call is scheduled to receive a wakeup. (However, /// [`poll_write_ready`] retains a second, independent waker). /// /// This method is intended for cases where creating and pinning a future /// via [`readable`] is not feasible. Where possible, using [`readable`] is /// preferred, as this supports polling from multiple tasks at once. /// /// This method takes `&mut self`, so it is possible to access the inner IO /// resource mutably when handling the [`AsyncFdReadyMutGuard`]. 
/// /// [`poll_read_ready`]: method@Self::poll_read_ready /// [`poll_read_ready_mut`]: method@Self::poll_read_ready_mut /// [`poll_write_ready`]: method@Self::poll_write_ready /// [`readable`]: method@Self::readable /// [`Context`]: struct@std::task::Context /// [`Waker`]: struct@std::task::Waker /// [`Waker::wake`]: method@std::task::Waker::wake pub fn poll_read_ready_mut<'a>( &'a mut self, cx: &mut Context<'_>, ) -> Poll<io::Result<AsyncFdReadyMutGuard<'a, T>>> { let event = ready!(self.registration.poll_read_ready(cx))?; Poll::Ready(Ok(AsyncFdReadyMutGuard { async_fd: self, event: Some(event), })) } /// Polls for write readiness. /// /// If the file descriptor is not currently ready for writing, this method /// will store a clone of the [`Waker`] from the provided [`Context`]. When the /// file descriptor becomes ready for writing, [`Waker::wake`] will be called. /// /// Note that on multiple calls to [`poll_write_ready`] or /// [`poll_write_ready_mut`], only the `Waker` from the `Context` passed to the /// most recent call is scheduled to receive a wakeup. (However, /// [`poll_read_ready`] retains a second, independent waker). /// /// This method is intended for cases where creating and pinning a future /// via [`writable`] is not feasible. Where possible, using [`writable`] is /// preferred, as this supports polling from multiple tasks at once. /// /// This method takes `&self`, so it is possible to call this method /// concurrently with other methods on this struct. This method only /// provides shared access to the inner IO resource when handling the /// [`AsyncFdReadyGuard`]. 
/// /// [`poll_read_ready`]: method@Self::poll_read_ready /// [`poll_write_ready`]: method@Self::poll_write_ready /// [`poll_write_ready_mut`]: method@Self::poll_write_ready_mut /// [`writable`]: method@Self::readable /// [`Context`]: struct@std::task::Context /// [`Waker`]: struct@std::task::Waker /// [`Waker::wake`]: method@std::task::Waker::wake pub fn poll_write_ready<'a>( &'a self, cx: &mut Context<'_>, ) -> Poll<io::Result<AsyncFdReadyGuard<'a, T>>> { let event = ready!(self.registration.poll_write_ready(cx))?; Poll::Ready(Ok(AsyncFdReadyGuard { async_fd: self, event: Some(event), })) } /// Polls for write readiness. /// /// If the file descriptor is not currently ready for writing, this method /// will store a clone of the [`Waker`] from the provided [`Context`]. When the /// file descriptor becomes ready for writing, [`Waker::wake`] will be called. /// /// Note that on multiple calls to [`poll_write_ready`] or /// [`poll_write_ready_mut`], only the `Waker` from the `Context` passed to the /// most recent call is scheduled to receive a wakeup. (However, /// [`poll_read_ready`] retains a second, independent waker). /// /// This method is intended for cases where creating and pinning a future /// via [`writable`] is not feasible. Where possible, using [`writable`] is /// preferred, as this supports polling from multiple tasks at once. /// /// This method takes `&mut self`, so it is possible to access the inner IO /// resource mutably when handling the [`AsyncFdReadyMutGuard`]. 
/// /// [`poll_read_ready`]: method@Self::poll_read_ready /// [`poll_write_ready`]: method@Self::poll_write_ready /// [`poll_write_ready_mut`]: method@Self::poll_write_ready_mut /// [`writable`]: method@Self::readable /// [`Context`]: struct@std::task::Context /// [`Waker`]: struct@std::task::Waker /// [`Waker::wake`]: method@std::task::Waker::wake pub fn poll_write_ready_mut<'a>( &'a mut self, cx: &mut Context<'_>, ) -> Poll<io::Result<AsyncFdReadyMutGuard<'a, T>>> { let event = ready!(self.registration.poll_write_ready(cx))?; Poll::Ready(Ok(AsyncFdReadyMutGuard { async_fd: self, event: Some(event), })) } /// Waits for any of the requested ready states, returning a /// [`AsyncFdReadyGuard`] that must be dropped to resume /// polling for the requested ready states. /// /// The function may complete without the file descriptor being ready. This is a /// false-positive and attempting an operation will return with /// `io::ErrorKind::WouldBlock`. The function can also return with an empty /// [`Ready`] set, so you should always check the returned value and possibly /// wait again if the requested states are not set. /// /// When an IO operation does return `io::ErrorKind::WouldBlock`, the readiness must be cleared. /// When a combined interest is used, it is important to clear only the readiness /// that is actually observed to block. For instance when the combined /// interest `Interest::READABLE | Interest::WRITABLE` is used, and a read blocks, only /// read readiness should be cleared using the [`AsyncFdReadyGuard::clear_ready_matching`] method: /// `guard.clear_ready_matching(Ready::READABLE)`. /// Also clearing the write readiness in this case would be incorrect. The [`AsyncFdReadyGuard::clear_ready`] /// method clears all readiness flags. /// /// This method takes `&self`, so it is possible to call this method /// concurrently with other methods on this struct. 
This method only /// provides shared access to the inner IO resource when handling the /// [`AsyncFdReadyGuard`]. /// /// # Examples /// /// Concurrently read and write to a [`std::net::TcpStream`] on the same task without /// splitting. /// /// ```no_run /// use std::error::Error; /// use std::io; /// use std::io::{Read, Write}; /// use std::net::TcpStream; /// use tokio::io::unix::AsyncFd; /// use tokio::io::{Interest, Ready}; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let stream = TcpStream::connect("127.0.0.1:8080")?; /// stream.set_nonblocking(true)?; /// let stream = AsyncFd::new(stream)?; /// /// loop { /// let mut guard = stream /// .ready(Interest::READABLE | Interest::WRITABLE) /// .await?; /// /// if guard.ready().is_readable() { /// let mut data = vec![0; 1024]; /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.get_ref().read(&mut data) { /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// // a read has blocked, but a write might still succeed. /// // clear only the read readiness. /// guard.clear_ready_matching(Ready::READABLE); /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// if guard.ready().is_writable() { /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.get_ref().write(b"hello world") { /// Ok(n) => { /// println!("write {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// // a write has blocked, but a read might still succeed. /// // clear only the write readiness. 
/// guard.clear_ready_matching(Ready::WRITABLE); /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// } /// } /// ``` pub async fn ready(&self, interest: Interest) -> io::Result<AsyncFdReadyGuard<'_, T>> { let event = self.registration.readiness(interest).await?; Ok(AsyncFdReadyGuard { async_fd: self, event: Some(event), }) } /// Waits for any of the requested ready states, returning a /// [`AsyncFdReadyMutGuard`] that must be dropped to resume /// polling for the requested ready states. /// /// The function may complete without the file descriptor being ready. This is a /// false-positive and attempting an operation will return with /// `io::ErrorKind::WouldBlock`. The function can also return with an empty /// [`Ready`] set, so you should always check the returned value and possibly /// wait again if the requested states are not set. /// /// When an IO operation does return `io::ErrorKind::WouldBlock`, the readiness must be cleared. /// When a combined interest is used, it is important to clear only the readiness /// that is actually observed to block. For instance when the combined /// interest `Interest::READABLE | Interest::WRITABLE` is used, and a read blocks, only /// read readiness should be cleared using the [`AsyncFdReadyMutGuard::clear_ready_matching`] method: /// `guard.clear_ready_matching(Ready::READABLE)`. /// Also clearing the write readiness in this case would be incorrect. /// The [`AsyncFdReadyMutGuard::clear_ready`] method clears all readiness flags. /// /// This method takes `&mut self`, so it is possible to access the inner IO /// resource mutably when handling the [`AsyncFdReadyMutGuard`]. /// /// # Examples /// /// Concurrently read and write to a [`std::net::TcpStream`] on the same task without /// splitting. 
/// /// ```no_run /// use std::error::Error; /// use std::io; /// use std::io::{Read, Write}; /// use std::net::TcpStream; /// use tokio::io::unix::AsyncFd; /// use tokio::io::{Interest, Ready}; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let stream = TcpStream::connect("127.0.0.1:8080")?; /// stream.set_nonblocking(true)?; /// let mut stream = AsyncFd::new(stream)?; /// /// loop { /// let mut guard = stream /// .ready_mut(Interest::READABLE | Interest::WRITABLE) /// .await?; /// /// if guard.ready().is_readable() { /// let mut data = vec![0; 1024]; /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match guard.get_inner_mut().read(&mut data) { /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// // a read has blocked, but a write might still succeed. /// // clear only the read readiness. /// guard.clear_ready_matching(Ready::READABLE); /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// if guard.ready().is_writable() { /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match guard.get_inner_mut().write(b"hello world") { /// Ok(n) => { /// println!("write {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// // a write has blocked, but a read might still succeed. /// // clear only the write readiness. 
/// guard.clear_ready_matching(Ready::WRITABLE); /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// } /// } /// ``` pub async fn ready_mut( &mut self, interest: Interest, ) -> io::Result<AsyncFdReadyMutGuard<'_, T>> { let event = self.registration.readiness(interest).await?; Ok(AsyncFdReadyMutGuard { async_fd: self, event: Some(event), }) } /// Waits for the file descriptor to become readable, returning a /// [`AsyncFdReadyGuard`] that must be dropped to resume read-readiness /// polling. /// /// This method takes `&self`, so it is possible to call this method /// concurrently with other methods on this struct. This method only /// provides shared access to the inner IO resource when handling the /// [`AsyncFdReadyGuard`]. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read or write that fails with `WouldBlock` or /// `Poll::Pending`. #[allow(clippy::needless_lifetimes)] // The lifetime improves rustdoc rendering. pub async fn readable<'a>(&'a self) -> io::Result<AsyncFdReadyGuard<'a, T>> { self.ready(Interest::READABLE).await } /// Waits for the file descriptor to become readable, returning a /// [`AsyncFdReadyMutGuard`] that must be dropped to resume read-readiness /// polling. /// /// This method takes `&mut self`, so it is possible to access the inner IO /// resource mutably when handling the [`AsyncFdReadyMutGuard`]. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read or write that fails with `WouldBlock` or /// `Poll::Pending`. #[allow(clippy::needless_lifetimes)] // The lifetime improves rustdoc rendering. 
pub async fn readable_mut<'a>(&'a mut self) -> io::Result<AsyncFdReadyMutGuard<'a, T>> { self.ready_mut(Interest::READABLE).await } /// Waits for the file descriptor to become writable, returning a /// [`AsyncFdReadyGuard`] that must be dropped to resume write-readiness /// polling. /// /// This method takes `&self`, so it is possible to call this method /// concurrently with other methods on this struct. This method only /// provides shared access to the inner IO resource when handling the /// [`AsyncFdReadyGuard`]. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read or write that fails with `WouldBlock` or /// `Poll::Pending`. #[allow(clippy::needless_lifetimes)] // The lifetime improves rustdoc rendering. pub async fn writable<'a>(&'a self) -> io::Result<AsyncFdReadyGuard<'a, T>> { self.ready(Interest::WRITABLE).await } /// Waits for the file descriptor to become writable, returning a /// [`AsyncFdReadyMutGuard`] that must be dropped to resume write-readiness /// polling. /// /// This method takes `&mut self`, so it is possible to access the inner IO /// resource mutably when handling the [`AsyncFdReadyMutGuard`]. /// /// # Cancel safety /// /// This method is cancel safe. Once a readiness event occurs, the method /// will continue to return immediately until the readiness event is /// consumed by an attempt to read or write that fails with `WouldBlock` or /// `Poll::Pending`. #[allow(clippy::needless_lifetimes)] // The lifetime improves rustdoc rendering. pub async fn writable_mut<'a>(&'a mut self) -> io::Result<AsyncFdReadyMutGuard<'a, T>> { self.ready_mut(Interest::WRITABLE).await } /// Reads or writes from the file descriptor using a user-provided IO operation. ///
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
true
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/blocking.rs
tokio/src/io/blocking.rs
use crate::io::sys; use crate::io::{AsyncRead, AsyncWrite, ReadBuf}; use std::cmp; use std::future::Future; use std::io; use std::io::prelude::*; use std::mem::MaybeUninit; use std::pin::Pin; use std::task::{ready, Context, Poll}; /// `T` should not implement _both_ Read and Write. #[derive(Debug)] pub(crate) struct Blocking<T> { inner: Option<T>, state: State<T>, /// `true` if the lower IO layer needs flushing. need_flush: bool, } #[derive(Debug)] pub(crate) struct Buf { buf: Vec<u8>, pos: usize, } pub(crate) const DEFAULT_MAX_BUF_SIZE: usize = 2 * 1024 * 1024; #[derive(Debug)] enum State<T> { Idle(Option<Buf>), Busy(sys::Blocking<(io::Result<usize>, Buf, T)>), } cfg_io_blocking! { impl<T> Blocking<T> { /// # Safety /// /// The `Read` implementation of `inner` must never read from the buffer /// it is borrowing and must correctly report the length of the data /// written into the buffer. #[cfg_attr(feature = "fs", allow(dead_code))] pub(crate) unsafe fn new(inner: T) -> Blocking<T> { Blocking { inner: Some(inner), state: State::Idle(Some(Buf::with_capacity(0))), need_flush: false, } } } } impl<T> AsyncRead for Blocking<T> where T: Read + Unpin + Send + 'static, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, dst: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { loop { match self.state { State::Idle(ref mut buf_cell) => { let mut buf = buf_cell.take().unwrap(); if !buf.is_empty() { buf.copy_to(dst); *buf_cell = Some(buf); return Poll::Ready(Ok(())); } let mut inner = self.inner.take().unwrap(); let max_buf_size = cmp::min(dst.remaining(), DEFAULT_MAX_BUF_SIZE); self.state = State::Busy(sys::run(move || { // SAFETY: the requirements are satisfied by `Blocking::new`. 
let res = unsafe { buf.read_from(&mut inner, max_buf_size) }; (res, buf, inner) })); } State::Busy(ref mut rx) => { let (res, mut buf, inner) = ready!(Pin::new(rx).poll(cx))?; self.inner = Some(inner); match res { Ok(_) => { buf.copy_to(dst); self.state = State::Idle(Some(buf)); return Poll::Ready(Ok(())); } Err(e) => { assert!(buf.is_empty()); self.state = State::Idle(Some(buf)); return Poll::Ready(Err(e)); } } } } } } } impl<T> AsyncWrite for Blocking<T> where T: Write + Unpin + Send + 'static, { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, src: &[u8], ) -> Poll<io::Result<usize>> { loop { match self.state { State::Idle(ref mut buf_cell) => { let mut buf = buf_cell.take().unwrap(); assert!(buf.is_empty()); let n = buf.copy_from(src, DEFAULT_MAX_BUF_SIZE); let mut inner = self.inner.take().unwrap(); self.state = State::Busy(sys::run(move || { let n = buf.len(); let res = buf.write_to(&mut inner).map(|()| n); (res, buf, inner) })); self.need_flush = true; return Poll::Ready(Ok(n)); } State::Busy(ref mut rx) => { let (res, buf, inner) = ready!(Pin::new(rx).poll(cx))?; self.state = State::Idle(Some(buf)); self.inner = Some(inner); // If error, return res?; } } } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> { loop { let need_flush = self.need_flush; match self.state { // The buffer is not used here State::Idle(ref mut buf_cell) => { if need_flush { let buf = buf_cell.take().unwrap(); let mut inner = self.inner.take().unwrap(); self.state = State::Busy(sys::run(move || { let res = inner.flush().map(|()| 0); (res, buf, inner) })); self.need_flush = false; } else { return Poll::Ready(Ok(())); } } State::Busy(ref mut rx) => { let (res, buf, inner) = ready!(Pin::new(rx).poll(cx))?; self.state = State::Idle(Some(buf)); self.inner = Some(inner); // If error, return res?; } } } } fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> { Poll::Ready(Ok(())) } } /// Repeats 
operations that are interrupted. macro_rules! uninterruptibly { ($e:expr) => {{ loop { match $e { Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} res => break res, } } }}; } impl Buf { pub(crate) fn with_capacity(n: usize) -> Buf { Buf { buf: Vec::with_capacity(n), pos: 0, } } pub(crate) fn is_empty(&self) -> bool { self.len() == 0 } pub(crate) fn len(&self) -> usize { self.buf.len() - self.pos } pub(crate) fn copy_to(&mut self, dst: &mut ReadBuf<'_>) -> usize { let n = cmp::min(self.len(), dst.remaining()); dst.put_slice(&self.bytes()[..n]); self.pos += n; if self.pos == self.buf.len() { self.buf.truncate(0); self.pos = 0; } n } pub(crate) fn copy_from(&mut self, src: &[u8], max_buf_size: usize) -> usize { assert!(self.is_empty()); let n = cmp::min(src.len(), max_buf_size); self.buf.extend_from_slice(&src[..n]); n } pub(crate) fn bytes(&self) -> &[u8] { &self.buf[self.pos..] } /// # Safety /// /// `rd` must not read from the buffer `read` is borrowing and must correctly /// report the length of the data written into the buffer. pub(crate) unsafe fn read_from<T: Read>( &mut self, rd: &mut T, max_buf_size: usize, ) -> io::Result<usize> { assert!(self.is_empty()); self.buf.reserve(max_buf_size); let buf = &mut self.buf.spare_capacity_mut()[..max_buf_size]; // SAFETY: The memory may be uninitialized, but `rd.read` will only write to the buffer. let buf = unsafe { &mut *(buf as *mut [MaybeUninit<u8>] as *mut [u8]) }; let res = uninterruptibly!(rd.read(buf)); if let Ok(n) = res { // SAFETY: the caller promises that `rd.read` initializes // a section of `buf` and correctly reports that length. // The `self.is_empty()` assertion verifies that `n` // equals the length of the `buf` capacity that was written // to (and that `buf` isn't being shrunk). 
unsafe { self.buf.set_len(n) } } else { self.buf.clear(); } assert_eq!(self.pos, 0); res } pub(crate) fn write_to<T: Write>(&mut self, wr: &mut T) -> io::Result<()> { assert_eq!(self.pos, 0); // `write_all` already ignores interrupts let res = wr.write_all(&self.buf); self.buf.clear(); res } } cfg_fs! { impl Buf { pub(crate) fn discard_read(&mut self) -> i64 { let ret = -(self.bytes().len() as i64); self.pos = 0; self.buf.truncate(0); ret } pub(crate) fn copy_from_bufs(&mut self, bufs: &[io::IoSlice<'_>], max_buf_size: usize) -> usize { assert!(self.is_empty()); let mut rem = max_buf_size; for buf in bufs { if rem == 0 { break } let len = buf.len().min(rem); self.buf.extend_from_slice(&buf[..len]); rem -= len; } max_buf_size - rem } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/stdin.rs
tokio/src/io/stdin.rs
use crate::io::blocking::Blocking; use crate::io::{AsyncRead, ReadBuf}; use std::io; use std::pin::Pin; use std::task::Context; use std::task::Poll; cfg_io_std! { /// A handle to the standard input stream of a process. /// /// The handle implements the [`AsyncRead`] trait, but beware that concurrent /// reads of `Stdin` must be executed with care. /// /// This handle is best used for non-interactive uses, such as when a file /// is piped into the application. For technical reasons, `stdin` is /// implemented by using an ordinary blocking read on a separate thread, and /// it is impossible to cancel that read. This can make shutdown of the /// runtime hang until the user presses enter. /// /// For interactive uses, it is recommended to spawn a thread dedicated to /// user input and use blocking IO directly in that thread. /// /// Created by the [`stdin`] function. /// /// [`stdin`]: fn@stdin /// [`AsyncRead`]: trait@AsyncRead #[derive(Debug)] pub struct Stdin { std: Blocking<std::io::Stdin>, } /// Constructs a new handle to the standard input of the current process. /// /// This handle is best used for non-interactive uses, such as when a file /// is piped into the application. For technical reasons, `stdin` is /// implemented by using an ordinary blocking read on a separate thread, and /// it is impossible to cancel that read. This can make shutdown of the /// runtime hang until the user presses enter. /// /// For interactive uses, it is recommended to spawn a thread dedicated to /// user input and use blocking IO directly in that thread. pub fn stdin() -> Stdin { let std = io::stdin(); // SAFETY: The `Read` implementation of `std` does not read from the // buffer it is borrowing and correctly reports the length of the data // written into the buffer. 
let std = unsafe { Blocking::new(std) }; Stdin { std, } } } #[cfg(unix)] mod sys { use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, RawFd}; use super::Stdin; impl AsRawFd for Stdin { fn as_raw_fd(&self) -> RawFd { std::io::stdin().as_raw_fd() } } impl AsFd for Stdin { fn as_fd(&self) -> BorrowedFd<'_> { unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } } } } cfg_windows! { use crate::os::windows::io::{AsHandle, BorrowedHandle, AsRawHandle, RawHandle}; impl AsRawHandle for Stdin { fn as_raw_handle(&self) -> RawHandle { std::io::stdin().as_raw_handle() } } impl AsHandle for Stdin { fn as_handle(&self) -> BorrowedHandle<'_> { unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) } } } } impl AsyncRead for Stdin { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { Pin::new(&mut self.std).poll_read(cx, buf) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/async_write.rs
tokio/src/io/async_write.rs
use std::io::{self, IoSlice}; use std::ops::DerefMut; use std::pin::Pin; use std::task::{Context, Poll}; /// Writes bytes asynchronously. /// /// This trait is analogous to the [`std::io::Write`] trait, but integrates with /// the asynchronous task system. In particular, the [`poll_write`] method, /// unlike [`Write::write`], will automatically queue the current task for wakeup /// and return if data is not yet available, rather than blocking the calling /// thread. /// /// Specifically, this means that the [`poll_write`] function will return one of /// the following: /// /// * `Poll::Ready(Ok(n))` means that `n` bytes of data was immediately /// written. /// /// * `Poll::Pending` means that no data was written from the buffer /// provided. The I/O object is not currently writable but may become writable /// in the future. Most importantly, **the current future's task is scheduled /// to get unparked when the object is writable**. This means that like /// `Future::poll` you'll receive a notification when the I/O object is /// writable again. /// /// * `Poll::Ready(Err(e))` for other errors are standard I/O errors coming from the /// underlying object. /// /// This trait importantly means that the `write` method only works in the /// context of a future's task. The object may panic if used outside of a task. /// /// Utilities for working with `AsyncWrite` values are provided by /// [`AsyncWriteExt`]. /// /// [`std::io::Write`]: std::io::Write /// [`Write::write`]: std::io::Write::write() /// [`poll_write`]: AsyncWrite::poll_write() /// [`AsyncWriteExt`]: crate::io::AsyncWriteExt pub trait AsyncWrite { /// Attempt to write bytes from `buf` into the object. /// /// On success, returns `Poll::Ready(Ok(num_bytes_written))`. If successful, /// then it must be guaranteed that `n <= buf.len()`. 
A return value of `0` /// typically means that the underlying object is no longer able to accept /// bytes and will likely not be able to in the future as well, or that the /// buffer provided is empty. /// /// If the object is not ready for writing, the method returns /// `Poll::Pending` and arranges for the current task (via /// `cx.waker()`) to receive a notification when the object becomes /// writable or is closed. fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>>; /// Attempts to flush the object, ensuring that any buffered data reach /// their destination. /// /// On success, returns `Poll::Ready(Ok(()))`. /// /// If flushing cannot immediately complete, this method returns /// `Poll::Pending` and arranges for the current task (via /// `cx.waker()`) to receive a notification when the object can make /// progress towards flushing. fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>>; /// Initiates or attempts to shut down this writer, returning success when /// the I/O connection has completely shut down. /// /// This method is intended to be used for asynchronous shutdown of I/O /// connections. For example this is suitable for implementing shutdown of a /// TLS connection or calling `TcpStream::shutdown` on a proxied connection. /// Protocols sometimes need to flush out final pieces of data or otherwise /// perform a graceful shutdown handshake, reading/writing more data as /// appropriate. This method is the hook for such protocols to implement the /// graceful shutdown logic. /// /// This `shutdown` method is required by implementers of the /// `AsyncWrite` trait. Wrappers typically just want to proxy this call /// through to the wrapped type, and base types will typically implement /// shutdown logic here or just return `Ok(().into())`. 
Note that if you're /// wrapping an underlying `AsyncWrite` a call to `shutdown` implies that /// transitively the entire stream has been shut down. After your wrapper's /// shutdown logic has been executed you should shut down the underlying /// stream. /// /// Invocation of a `shutdown` implies an invocation of `flush`. Once this /// method returns `Ready` it implies that a flush successfully happened /// before the shutdown happened. That is, callers don't need to call /// `flush` before calling `shutdown`. They can rely that by calling /// `shutdown` any pending buffered data will be written out. /// /// # Return value /// /// This function returns a `Poll<io::Result<()>>` classified as such: /// /// * `Poll::Ready(Ok(()))` - indicates that the connection was /// successfully shut down and is now safe to deallocate/drop/close /// resources associated with it. This method means that the current task /// will no longer receive any notifications due to this method and the /// I/O object itself is likely no longer usable. /// /// * `Poll::Pending` - indicates that shutdown is initiated but could /// not complete just yet. This may mean that more I/O needs to happen to /// continue this shutdown operation. The current task is scheduled to /// receive a notification when it's otherwise ready to continue the /// shutdown operation. When woken up this method should be called again. /// /// * `Poll::Ready(Err(e))` - indicates a fatal error has happened with shutdown, /// indicating that the shutdown operation did not complete successfully. /// This typically means that the I/O object is no longer usable. /// /// # Errors /// /// This function can return normal I/O errors through `Err`, described /// above. Additionally this method may also render the underlying /// `Write::write` method no longer usable (e.g. will return errors in the /// future). It's recommended that once `shutdown` is called the /// `write` method is no longer called. 
/// /// # Panics /// /// This function will panic if not called within the context of a future's /// task. fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>>; /// Like [`poll_write`], except that it writes from a slice of buffers. /// /// Data is copied from each buffer in order, with the final buffer /// read from possibly being only partially consumed. This method must /// behave as a call to [`write`] with the buffers concatenated would. /// /// The default implementation calls [`poll_write`] with either the first nonempty /// buffer provided, or an empty one if none exists. /// /// On success, returns `Poll::Ready(Ok(num_bytes_written))`. /// /// If the object is not ready for writing, the method returns /// `Poll::Pending` and arranges for the current task (via /// `cx.waker()`) to receive a notification when the object becomes /// writable or is closed. /// /// # Note /// /// This should be implemented as a single "atomic" write action. If any /// data has been partially written, it is wrong to return an error or /// pending. /// /// [`poll_write`]: AsyncWrite::poll_write fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll<io::Result<usize>> { let buf = bufs .iter() .find(|b| !b.is_empty()) .map_or(&[][..], |b| &**b); self.poll_write(cx, buf) } /// Determines if this writer has an efficient [`poll_write_vectored`] /// implementation. /// /// If a writer does not override the default [`poll_write_vectored`] /// implementation, code using it may want to avoid the method all together /// and coalesce writes into a single buffer for higher performance. /// /// The default implementation returns `false`. /// /// [`poll_write_vectored`]: AsyncWrite::poll_write_vectored fn is_write_vectored(&self) -> bool { false } } macro_rules! 
deref_async_write { () => { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { Pin::new(&mut **self).poll_write(cx, buf) } fn poll_write_vectored( mut self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll<io::Result<usize>> { Pin::new(&mut **self).poll_write_vectored(cx, bufs) } fn is_write_vectored(&self) -> bool { (**self).is_write_vectored() } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { Pin::new(&mut **self).poll_flush(cx) } fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { Pin::new(&mut **self).poll_shutdown(cx) } }; } impl<T: ?Sized + AsyncWrite + Unpin> AsyncWrite for Box<T> { deref_async_write!(); } impl<T: ?Sized + AsyncWrite + Unpin> AsyncWrite for &mut T { deref_async_write!(); } impl<P> AsyncWrite for Pin<P> where P: DerefMut, P::Target: AsyncWrite, { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { crate::util::pin_as_deref_mut(self).poll_write(cx, buf) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll<io::Result<usize>> { crate::util::pin_as_deref_mut(self).poll_write_vectored(cx, bufs) } fn is_write_vectored(&self) -> bool { (**self).is_write_vectored() } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { crate::util::pin_as_deref_mut(self).poll_flush(cx) } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { crate::util::pin_as_deref_mut(self).poll_shutdown(cx) } } impl AsyncWrite for Vec<u8> { fn poll_write( self: Pin<&mut Self>, _cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { self.get_mut().extend_from_slice(buf); Poll::Ready(Ok(buf.len())) } fn poll_write_vectored( mut self: Pin<&mut Self>, _: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll<io::Result<usize>> { 
Poll::Ready(io::Write::write_vectored(&mut *self, bufs)) } fn is_write_vectored(&self) -> bool { true } fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> { Poll::Ready(Ok(())) } fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> { Poll::Ready(Ok(())) } } impl AsyncWrite for io::Cursor<&mut [u8]> { fn poll_write( mut self: Pin<&mut Self>, _: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { Poll::Ready(io::Write::write(&mut *self, buf)) } fn poll_write_vectored( mut self: Pin<&mut Self>, _: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll<io::Result<usize>> { Poll::Ready(io::Write::write_vectored(&mut *self, bufs)) } fn is_write_vectored(&self) -> bool { true } fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { Poll::Ready(io::Write::flush(&mut *self)) } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.poll_flush(cx) } } impl AsyncWrite for io::Cursor<&mut Vec<u8>> { fn poll_write( mut self: Pin<&mut Self>, _: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { Poll::Ready(io::Write::write(&mut *self, buf)) } fn poll_write_vectored( mut self: Pin<&mut Self>, _: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll<io::Result<usize>> { Poll::Ready(io::Write::write_vectored(&mut *self, bufs)) } fn is_write_vectored(&self) -> bool { true } fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { Poll::Ready(io::Write::flush(&mut *self)) } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.poll_flush(cx) } } impl AsyncWrite for io::Cursor<Vec<u8>> { fn poll_write( mut self: Pin<&mut Self>, _: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { Poll::Ready(io::Write::write(&mut *self, buf)) } fn poll_write_vectored( mut self: Pin<&mut Self>, _: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll<io::Result<usize>> { 
Poll::Ready(io::Write::write_vectored(&mut *self, bufs)) } fn is_write_vectored(&self) -> bool { true } fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { Poll::Ready(io::Write::flush(&mut *self)) } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.poll_flush(cx) } } impl AsyncWrite for io::Cursor<Box<[u8]>> { fn poll_write( mut self: Pin<&mut Self>, _: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { Poll::Ready(io::Write::write(&mut *self, buf)) } fn poll_write_vectored( mut self: Pin<&mut Self>, _: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll<io::Result<usize>> { Poll::Ready(io::Write::write_vectored(&mut *self, bufs)) } fn is_write_vectored(&self) -> bool { true } fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { Poll::Ready(io::Write::flush(&mut *self)) } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.poll_flush(cx) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/stdio_common.rs
tokio/src/io/stdio_common.rs
//! Contains utilities for stdout and stderr. use crate::io::AsyncWrite; use std::pin::Pin; use std::task::{Context, Poll}; /// # Windows /// [`AsyncWrite`] adapter that finds last char boundary in given buffer and does not write the rest, /// if buffer contents seems to be `utf8`. Otherwise it only trims buffer down to `DEFAULT_MAX_BUF_SIZE`. /// That's why, wrapped writer will always receive well-formed utf-8 bytes. /// # Other platforms /// Passes data to `inner` as is. #[derive(Debug)] pub(crate) struct SplitByUtf8BoundaryIfWindows<W> { inner: W, } impl<W> SplitByUtf8BoundaryIfWindows<W> { pub(crate) fn new(inner: W) -> Self { Self { inner } } } // this constant is defined by Unicode standard. const MAX_BYTES_PER_CHAR: usize = 4; // Subject for tweaking here const MAGIC_CONST: usize = 8; impl<W> crate::io::AsyncWrite for SplitByUtf8BoundaryIfWindows<W> where W: AsyncWrite + Unpin, { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, mut buf: &[u8], ) -> Poll<Result<usize, std::io::Error>> { // just a closure to avoid repetitive code let mut call_inner = move |buf| Pin::new(&mut self.inner).poll_write(cx, buf); // 1. Only windows stdio can suffer from non-utf8. // We also check for `test` so that we can write some tests // for further code. Since `AsyncWrite` can always shrink // buffer at its discretion, excessive (i.e. in tests) shrinking // does not break correctness. // 2. If buffer is small, it will not be shrunk. // That's why, it's "textness" will not change, so we don't have // to fixup it. if cfg!(not(any(target_os = "windows", test))) || buf.len() <= crate::io::blocking::DEFAULT_MAX_BUF_SIZE { return call_inner(buf); } buf = &buf[..crate::io::blocking::DEFAULT_MAX_BUF_SIZE]; // Now there are two possibilities. // If caller gave is binary buffer, we **should not** shrink it // anymore, because excessive shrinking hits performance. 
// If caller gave as binary buffer, we **must** additionally // shrink it to strip incomplete char at the end of buffer. // that's why check we will perform now is allowed to have // false-positive. // Now let's look at the first MAX_BYTES_PER_CHAR * MAGIC_CONST bytes. // if they are (possibly incomplete) utf8, then we can be quite sure // that input buffer was utf8. let have_to_fix_up = match std::str::from_utf8(&buf[..MAX_BYTES_PER_CHAR * MAGIC_CONST]) { Ok(_) => true, Err(err) => { let incomplete_bytes = MAX_BYTES_PER_CHAR * MAGIC_CONST - err.valid_up_to(); incomplete_bytes < MAX_BYTES_PER_CHAR } }; if have_to_fix_up { // We must pop several bytes at the end which form incomplete // character. To achieve it, we exploit UTF8 encoding: // for any code point, all bytes except first start with 0b10 prefix. // see https://en.wikipedia.org/wiki/UTF-8#Encoding for details let trailing_incomplete_char_size = buf .iter() .rev() .take(MAX_BYTES_PER_CHAR) .position(|byte| *byte < 0b1000_0000 || *byte >= 0b1100_0000) .unwrap_or(0) + 1; buf = &buf[..buf.len() - trailing_incomplete_char_size]; } call_inner(buf) } fn poll_flush( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<Result<(), std::io::Error>> { Pin::new(&mut self.inner).poll_flush(cx) } fn poll_shutdown( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<Result<(), std::io::Error>> { Pin::new(&mut self.inner).poll_shutdown(cx) } } #[cfg(test)] #[cfg(not(loom))] mod tests { use crate::io::blocking::DEFAULT_MAX_BUF_SIZE; use crate::io::AsyncWriteExt; use std::io; use std::pin::Pin; use std::task::Context; use std::task::Poll; struct TextMockWriter; impl crate::io::AsyncWrite for TextMockWriter { fn poll_write( self: Pin<&mut Self>, _cx: &mut Context<'_>, buf: &[u8], ) -> Poll<Result<usize, io::Error>> { assert!(buf.len() <= DEFAULT_MAX_BUF_SIZE); assert!(std::str::from_utf8(buf).is_ok()); Poll::Ready(Ok(buf.len())) } fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), 
io::Error>> { Poll::Ready(Ok(())) } fn poll_shutdown( self: Pin<&mut Self>, _cx: &mut Context<'_>, ) -> Poll<Result<(), io::Error>> { Poll::Ready(Ok(())) } } struct LoggingMockWriter { write_history: Vec<usize>, } impl LoggingMockWriter { fn new() -> Self { LoggingMockWriter { write_history: Vec::new(), } } } impl crate::io::AsyncWrite for LoggingMockWriter { fn poll_write( mut self: Pin<&mut Self>, _cx: &mut Context<'_>, buf: &[u8], ) -> Poll<Result<usize, io::Error>> { assert!(buf.len() <= DEFAULT_MAX_BUF_SIZE); self.write_history.push(buf.len()); Poll::Ready(Ok(buf.len())) } fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> { Poll::Ready(Ok(())) } fn poll_shutdown( self: Pin<&mut Self>, _cx: &mut Context<'_>, ) -> Poll<Result<(), io::Error>> { Poll::Ready(Ok(())) } } #[test] #[cfg_attr(miri, ignore)] // takes a really long time with miri fn test_splitter() { let data = str::repeat("█", DEFAULT_MAX_BUF_SIZE); let mut wr = super::SplitByUtf8BoundaryIfWindows::new(TextMockWriter); let fut = async move { wr.write_all(data.as_bytes()).await.unwrap(); }; crate::runtime::Builder::new_current_thread() .build() .unwrap() .block_on(fut); } #[test] #[cfg_attr(miri, ignore)] // takes a really long time with miri fn test_pseudo_text() { // In this test we write a piece of binary data, whose beginning is // text though. We then validate that even in this corner case buffer // was not shrunk too much. 
let checked_count = super::MAGIC_CONST * super::MAX_BYTES_PER_CHAR; let mut data: Vec<u8> = str::repeat("a", checked_count).into(); data.extend(std::iter::repeat(0b1010_1010).take(DEFAULT_MAX_BUF_SIZE - checked_count + 1)); let mut writer = LoggingMockWriter::new(); let mut splitter = super::SplitByUtf8BoundaryIfWindows::new(&mut writer); crate::runtime::Builder::new_current_thread() .build() .unwrap() .block_on(async { splitter.write_all(&data).await.unwrap(); }); // Check that at most two writes were performed assert!(writer.write_history.len() <= 2); // Check that all has been written assert_eq!( writer.write_history.iter().copied().sum::<usize>(), data.len() ); // Check that at most MAX_BYTES_PER_CHAR + 1 (i.e. 5) bytes were shrunk // from the buffer: one because it was outside of DEFAULT_MAX_BUF_SIZE boundary, and // up to one "utf8 code point". assert!(data.len() - writer.write_history[0] <= super::MAX_BYTES_PER_CHAR + 1); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/interest.rs
tokio/src/io/interest.rs
#![cfg_attr(not(feature = "net"), allow(dead_code, unreachable_pub))] use crate::io::ready::Ready; use std::fmt; use std::ops; // These must be unique. // same as mio const READABLE: usize = 0b0001; const WRITABLE: usize = 0b0010; // The following are not available on all platforms. #[cfg(target_os = "freebsd")] const AIO: usize = 0b0100; #[cfg(target_os = "freebsd")] const LIO: usize = 0b1000; #[cfg(any(target_os = "linux", target_os = "android"))] const PRIORITY: usize = 0b0001_0000; // error is available on all platforms, but behavior is platform-specific // mio does not have this interest const ERROR: usize = 0b0010_0000; /// Readiness event interest. /// /// Specifies the readiness events the caller is interested in when awaiting on /// I/O resource readiness states. #[cfg_attr(docsrs, doc(cfg(feature = "net")))] #[derive(Clone, Copy, Eq, PartialEq)] pub struct Interest(usize); impl Interest { // The non-FreeBSD definitions in this block are active only when // building documentation. cfg_aio! { /// Interest for POSIX AIO. #[cfg(target_os = "freebsd")] pub const AIO: Interest = Interest(AIO); /// Interest for POSIX AIO. #[cfg(not(target_os = "freebsd"))] pub const AIO: Interest = Interest(READABLE); /// Interest for POSIX AIO `lio_listio` events. #[cfg(target_os = "freebsd")] pub const LIO: Interest = Interest(LIO); /// Interest for POSIX AIO `lio_listio` events. #[cfg(not(target_os = "freebsd"))] pub const LIO: Interest = Interest(READABLE); } /// Interest in all readable events. /// /// Readable interest includes read-closed events. pub const READABLE: Interest = Interest(READABLE); /// Interest in all writable events. /// /// Writable interest includes write-closed events. pub const WRITABLE: Interest = Interest(WRITABLE); /// Interest in error events. /// /// Passes error interest to the underlying OS selector. /// Behavior is platform-specific, read your platform's documentation. 
pub const ERROR: Interest = Interest(ERROR); /// Returns a `Interest` set representing priority completion interests. #[cfg(any(target_os = "linux", target_os = "android"))] #[cfg_attr(docsrs, doc(cfg(any(target_os = "linux", target_os = "android"))))] pub const PRIORITY: Interest = Interest(PRIORITY); /// Returns true if the value includes readable interest. /// /// # Examples /// /// ``` /// use tokio::io::Interest; /// /// assert!(Interest::READABLE.is_readable()); /// assert!(!Interest::WRITABLE.is_readable()); /// /// let both = Interest::READABLE | Interest::WRITABLE; /// assert!(both.is_readable()); /// ``` pub const fn is_readable(self) -> bool { self.0 & READABLE != 0 } /// Returns true if the value includes writable interest. /// /// # Examples /// /// ``` /// use tokio::io::Interest; /// /// assert!(!Interest::READABLE.is_writable()); /// assert!(Interest::WRITABLE.is_writable()); /// /// let both = Interest::READABLE | Interest::WRITABLE; /// assert!(both.is_writable()); /// ``` pub const fn is_writable(self) -> bool { self.0 & WRITABLE != 0 } /// Returns true if the value includes error interest. /// /// # Examples /// /// ``` /// use tokio::io::Interest; /// /// assert!(Interest::ERROR.is_error()); /// assert!(!Interest::WRITABLE.is_error()); /// /// let combined = Interest::READABLE | Interest::ERROR; /// assert!(combined.is_error()); /// ``` pub const fn is_error(self) -> bool { self.0 & ERROR != 0 } #[cfg(target_os = "freebsd")] const fn is_aio(self) -> bool { self.0 & AIO != 0 } #[cfg(target_os = "freebsd")] const fn is_lio(self) -> bool { self.0 & LIO != 0 } /// Returns true if the value includes priority interest. 
/// /// # Examples /// /// ``` /// use tokio::io::Interest; /// /// assert!(!Interest::READABLE.is_priority()); /// assert!(Interest::PRIORITY.is_priority()); /// /// let both = Interest::READABLE | Interest::PRIORITY; /// assert!(both.is_priority()); /// ``` #[cfg(any(target_os = "linux", target_os = "android"))] #[cfg_attr(docsrs, doc(cfg(any(target_os = "linux", target_os = "android"))))] pub const fn is_priority(self) -> bool { self.0 & PRIORITY != 0 } /// Add together two `Interest` values. /// /// This function works from a `const` context. /// /// # Examples /// /// ``` /// use tokio::io::Interest; /// /// const BOTH: Interest = Interest::READABLE.add(Interest::WRITABLE); /// /// assert!(BOTH.is_readable()); /// assert!(BOTH.is_writable()); #[must_use = "this returns the result of the operation, without modifying the original"] pub const fn add(self, other: Interest) -> Interest { Self(self.0 | other.0) } /// Remove `Interest` from `self`. /// /// Interests present in `other` but *not* in `self` are ignored. /// /// Returns `None` if the set would be empty after removing `Interest`. /// /// # Examples /// /// ``` /// use tokio::io::Interest; /// /// const RW_INTEREST: Interest = Interest::READABLE.add(Interest::WRITABLE); /// /// let w_interest = RW_INTEREST.remove(Interest::READABLE).unwrap(); /// assert!(!w_interest.is_readable()); /// assert!(w_interest.is_writable()); /// /// // Removing all interests from the set returns `None`. /// assert_eq!(w_interest.remove(Interest::WRITABLE), None); /// /// // Remove all interests at once. /// assert_eq!(RW_INTEREST.remove(RW_INTEREST), None); /// ``` #[must_use = "this returns the result of the operation, without modifying the original"] pub fn remove(self, other: Interest) -> Option<Interest> { let value = self.0 & !other.0; if value != 0 { Some(Self(value)) } else { None } } // This function must be crate-private to avoid exposing a `mio` dependency. 
pub(crate) fn to_mio(self) -> mio::Interest { fn mio_add(wrapped: &mut Option<mio::Interest>, add: mio::Interest) { match wrapped { Some(inner) => *inner |= add, None => *wrapped = Some(add), } } // mio does not allow and empty interest, so use None for empty let mut mio = None; if self.is_readable() { mio_add(&mut mio, mio::Interest::READABLE); } if self.is_writable() { mio_add(&mut mio, mio::Interest::WRITABLE); } #[cfg(any(target_os = "linux", target_os = "android"))] if self.is_priority() { mio_add(&mut mio, mio::Interest::PRIORITY); } #[cfg(target_os = "freebsd")] if self.is_aio() { mio_add(&mut mio, mio::Interest::AIO); } #[cfg(target_os = "freebsd")] if self.is_lio() { mio_add(&mut mio, mio::Interest::LIO); } if self.is_error() { // There is no error interest in mio, because error events are always reported. // But mio interests cannot be empty and an interest is needed just for the registration. // // read readiness is filtered out in `Interest::mask` or `Ready::from_interest` if // the read interest was not specified by the user. mio_add(&mut mio, mio::Interest::READABLE); } // the default `mio::Interest::READABLE` should never be used in practice. 
Either // // - at least one tokio interest with a mio counterpart was used // - only the error tokio interest was specified // // in both cases, `mio` is Some already mio.unwrap_or(mio::Interest::READABLE) } pub(crate) fn mask(self) -> Ready { match self { Interest::READABLE => Ready::READABLE | Ready::READ_CLOSED, Interest::WRITABLE => Ready::WRITABLE | Ready::WRITE_CLOSED, #[cfg(any(target_os = "linux", target_os = "android"))] Interest::PRIORITY => Ready::PRIORITY | Ready::READ_CLOSED, Interest::ERROR => Ready::ERROR, _ => Ready::EMPTY, } } } impl ops::BitOr for Interest { type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { self.add(other) } } impl ops::BitOrAssign for Interest { #[inline] fn bitor_assign(&mut self, other: Self) { *self = *self | other; } } impl fmt::Debug for Interest { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { let mut separator = false; if self.is_readable() { if separator { write!(fmt, " | ")?; } write!(fmt, "READABLE")?; separator = true; } if self.is_writable() { if separator { write!(fmt, " | ")?; } write!(fmt, "WRITABLE")?; separator = true; } #[cfg(any(target_os = "linux", target_os = "android"))] if self.is_priority() { if separator { write!(fmt, " | ")?; } write!(fmt, "PRIORITY")?; separator = true; } #[cfg(target_os = "freebsd")] if self.is_aio() { if separator { write!(fmt, " | ")?; } write!(fmt, "AIO")?; separator = true; } #[cfg(target_os = "freebsd")] if self.is_lio() { if separator { write!(fmt, " | ")?; } write!(fmt, "LIO")?; separator = true; } if self.is_error() { if separator { write!(fmt, " | ")?; } write!(fmt, "ERROR")?; separator = true; } let _ = separator; Ok(()) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/ready.rs
tokio/src/io/ready.rs
#![cfg_attr(not(feature = "net"), allow(unreachable_pub))] use crate::io::interest::Interest; use std::fmt; use std::ops; const READABLE: usize = 0b0_01; const WRITABLE: usize = 0b0_10; const READ_CLOSED: usize = 0b0_0100; const WRITE_CLOSED: usize = 0b0_1000; #[cfg(any(target_os = "linux", target_os = "android"))] const PRIORITY: usize = 0b1_0000; const ERROR: usize = 0b10_0000; /// Describes the readiness state of an I/O resources. /// /// `Ready` tracks which operation an I/O resource is ready to perform. #[cfg_attr(docsrs, doc(cfg(feature = "net")))] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub struct Ready(usize); impl Ready { /// Returns the empty `Ready` set. pub const EMPTY: Ready = Ready(0); /// Returns a `Ready` representing readable readiness. pub const READABLE: Ready = Ready(READABLE); /// Returns a `Ready` representing writable readiness. pub const WRITABLE: Ready = Ready(WRITABLE); /// Returns a `Ready` representing read closed readiness. pub const READ_CLOSED: Ready = Ready(READ_CLOSED); /// Returns a `Ready` representing write closed readiness. pub const WRITE_CLOSED: Ready = Ready(WRITE_CLOSED); /// Returns a `Ready` representing priority readiness. #[cfg(any(target_os = "linux", target_os = "android"))] #[cfg_attr(docsrs, doc(cfg(any(target_os = "linux", target_os = "android"))))] pub const PRIORITY: Ready = Ready(PRIORITY); /// Returns a `Ready` representing error readiness. pub const ERROR: Ready = Ready(ERROR); /// Returns a `Ready` representing readiness for all operations. #[cfg(any(target_os = "linux", target_os = "android"))] pub const ALL: Ready = Ready(READABLE | WRITABLE | READ_CLOSED | WRITE_CLOSED | ERROR | PRIORITY); /// Returns a `Ready` representing readiness for all operations. #[cfg(not(any(target_os = "linux", target_os = "android")))] pub const ALL: Ready = Ready(READABLE | WRITABLE | READ_CLOSED | WRITE_CLOSED | ERROR); // Must remain crate-private to avoid adding a public dependency on Mio. 
pub(crate) fn from_mio(event: &mio::event::Event) -> Ready { let mut ready = Ready::EMPTY; #[cfg(all(target_os = "freebsd", feature = "net"))] { if event.is_aio() { ready |= Ready::READABLE; } if event.is_lio() { ready |= Ready::READABLE; } } if event.is_readable() { ready |= Ready::READABLE; } if event.is_writable() { ready |= Ready::WRITABLE; } if event.is_read_closed() { ready |= Ready::READ_CLOSED; } if event.is_write_closed() { ready |= Ready::WRITE_CLOSED; } if event.is_error() { ready |= Ready::ERROR; } #[cfg(any(target_os = "linux", target_os = "android"))] { if event.is_priority() { ready |= Ready::PRIORITY; } } ready } /// Returns true if `Ready` is the empty set. /// /// # Examples /// /// ``` /// use tokio::io::Ready; /// /// assert!(Ready::EMPTY.is_empty()); /// assert!(!Ready::READABLE.is_empty()); /// ``` pub fn is_empty(self) -> bool { self == Ready::EMPTY } /// Returns `true` if the value includes `readable`. /// /// # Examples /// /// ``` /// use tokio::io::Ready; /// /// assert!(!Ready::EMPTY.is_readable()); /// assert!(Ready::READABLE.is_readable()); /// assert!(Ready::READ_CLOSED.is_readable()); /// assert!(!Ready::WRITABLE.is_readable()); /// ``` pub fn is_readable(self) -> bool { self.contains(Ready::READABLE) || self.is_read_closed() } /// Returns `true` if the value includes writable `readiness`. /// /// # Examples /// /// ``` /// use tokio::io::Ready; /// /// assert!(!Ready::EMPTY.is_writable()); /// assert!(!Ready::READABLE.is_writable()); /// assert!(Ready::WRITABLE.is_writable()); /// assert!(Ready::WRITE_CLOSED.is_writable()); /// ``` pub fn is_writable(self) -> bool { self.contains(Ready::WRITABLE) || self.is_write_closed() } /// Returns `true` if the value includes read-closed `readiness`. 
/// /// # Examples /// /// ``` /// use tokio::io::Ready; /// /// assert!(!Ready::EMPTY.is_read_closed()); /// assert!(!Ready::READABLE.is_read_closed()); /// assert!(Ready::READ_CLOSED.is_read_closed()); /// ``` pub fn is_read_closed(self) -> bool { self.contains(Ready::READ_CLOSED) } /// Returns `true` if the value includes write-closed `readiness`. /// /// # Examples /// /// ``` /// use tokio::io::Ready; /// /// assert!(!Ready::EMPTY.is_write_closed()); /// assert!(!Ready::WRITABLE.is_write_closed()); /// assert!(Ready::WRITE_CLOSED.is_write_closed()); /// ``` pub fn is_write_closed(self) -> bool { self.contains(Ready::WRITE_CLOSED) } /// Returns `true` if the value includes priority `readiness`. /// /// # Examples /// /// ``` /// use tokio::io::Ready; /// /// assert!(!Ready::EMPTY.is_priority()); /// assert!(!Ready::WRITABLE.is_priority()); /// assert!(Ready::PRIORITY.is_priority()); /// ``` #[cfg(any(target_os = "linux", target_os = "android"))] #[cfg_attr(docsrs, doc(cfg(any(target_os = "linux", target_os = "android"))))] pub fn is_priority(self) -> bool { self.contains(Ready::PRIORITY) } /// Returns `true` if the value includes error `readiness`. /// /// # Examples /// /// ``` /// use tokio::io::Ready; /// /// assert!(!Ready::EMPTY.is_error()); /// assert!(!Ready::WRITABLE.is_error()); /// assert!(Ready::ERROR.is_error()); /// ``` pub fn is_error(self) -> bool { self.contains(Ready::ERROR) } /// Returns true if `self` is a superset of `other`. /// /// `other` may represent more than one readiness operations, in which case /// the function only returns true if `self` contains all readiness /// specified in `other`. pub(crate) fn contains<T: Into<Self>>(self, other: T) -> bool { let other = other.into(); (self & other) == other } /// Creates a `Ready` instance using the given `usize` representation. /// /// The `usize` representation must have been obtained from a call to /// `Readiness::as_usize`. 
/// /// This function is mainly provided to allow the caller to get a /// readiness value from an `AtomicUsize`. pub(crate) fn from_usize(val: usize) -> Ready { Ready(val & Ready::ALL.as_usize()) } /// Returns a `usize` representation of the `Ready` value. /// /// This function is mainly provided to allow the caller to store a /// readiness value in an `AtomicUsize`. pub(crate) fn as_usize(self) -> usize { self.0 } pub(crate) fn from_interest(interest: Interest) -> Ready { let mut ready = Ready::EMPTY; if interest.is_readable() { ready |= Ready::READABLE; ready |= Ready::READ_CLOSED; } if interest.is_writable() { ready |= Ready::WRITABLE; ready |= Ready::WRITE_CLOSED; } #[cfg(any(target_os = "linux", target_os = "android"))] if interest.is_priority() { ready |= Ready::PRIORITY; ready |= Ready::READ_CLOSED; } if interest.is_error() { ready |= Ready::ERROR; } ready } pub(crate) fn intersection(self, interest: Interest) -> Ready { Ready(self.0 & Ready::from_interest(interest).0) } pub(crate) fn satisfies(self, interest: Interest) -> bool { self.0 & Ready::from_interest(interest).0 != 0 } } impl ops::BitOr<Ready> for Ready { type Output = Ready; #[inline] fn bitor(self, other: Ready) -> Ready { Ready(self.0 | other.0) } } impl ops::BitOrAssign<Ready> for Ready { #[inline] fn bitor_assign(&mut self, other: Ready) { self.0 |= other.0; } } impl ops::BitAnd<Ready> for Ready { type Output = Ready; #[inline] fn bitand(self, other: Ready) -> Ready { Ready(self.0 & other.0) } } impl ops::Sub<Ready> for Ready { type Output = Ready; #[inline] fn sub(self, other: Ready) -> Ready { Ready(self.0 & !other.0) } } impl fmt::Debug for Ready { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { let mut fmt = fmt.debug_struct("Ready"); fmt.field("is_readable", &self.is_readable()) .field("is_writable", &self.is_writable()) .field("is_read_closed", &self.is_read_closed()) .field("is_write_closed", &self.is_write_closed()) .field("is_error", &self.is_error()); #[cfg(any(target_os 
= "linux", target_os = "android"))] fmt.field("is_priority", &self.is_priority()); fmt.finish() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/stderr.rs
tokio/src/io/stderr.rs
use crate::io::blocking::Blocking; use crate::io::stdio_common::SplitByUtf8BoundaryIfWindows; use crate::io::AsyncWrite; use std::io; use std::pin::Pin; use std::task::Context; use std::task::Poll; cfg_io_std! { /// A handle to the standard error stream of a process. /// /// Concurrent writes to stderr must be executed with care: Only individual /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular /// you should be aware that writes using [`write_all`] are not guaranteed /// to occur as a single write, so multiple threads writing data with /// [`write_all`] may result in interleaved output. /// /// Created by the [`stderr`] function. /// /// [`stderr`]: stderr() /// [`AsyncWrite`]: AsyncWrite /// [`write_all`]: crate::io::AsyncWriteExt::write_all() /// /// # Examples /// /// ``` /// use tokio::io::{self, AsyncWriteExt}; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut stderr = io::stdout(); /// stderr.write_all(b"Print some error here.").await?; /// Ok(()) /// } /// ``` #[derive(Debug)] pub struct Stderr { std: SplitByUtf8BoundaryIfWindows<Blocking<std::io::Stderr>>, } /// Constructs a new handle to the standard error of the current process. /// /// The returned handle allows writing to standard error from the within the /// Tokio runtime. /// /// Concurrent writes to stderr must be executed with care: Only individual /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular /// you should be aware that writes using [`write_all`] are not guaranteed /// to occur as a single write, so multiple threads writing data with /// [`write_all`] may result in interleaved output. 
/// /// [`AsyncWrite`]: AsyncWrite /// [`write_all`]: crate::io::AsyncWriteExt::write_all() /// /// # Examples /// /// ``` /// use tokio::io::{self, AsyncWriteExt}; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut stderr = io::stderr(); /// stderr.write_all(b"Print some error here.").await?; /// Ok(()) /// } /// ``` pub fn stderr() -> Stderr { let std = io::stderr(); // SAFETY: The `Read` implementation of `std` does not read from the // buffer it is borrowing and correctly reports the length of the data // written into the buffer. let blocking = unsafe { Blocking::new(std) }; Stderr { std: SplitByUtf8BoundaryIfWindows::new(blocking), } } } #[cfg(unix)] mod sys { use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, RawFd}; use super::Stderr; impl AsRawFd for Stderr { fn as_raw_fd(&self) -> RawFd { std::io::stderr().as_raw_fd() } } impl AsFd for Stderr { fn as_fd(&self) -> BorrowedFd<'_> { unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } } } } cfg_windows! { use crate::os::windows::io::{AsHandle, BorrowedHandle, AsRawHandle, RawHandle}; impl AsRawHandle for Stderr { fn as_raw_handle(&self) -> RawHandle { std::io::stderr().as_raw_handle() } } impl AsHandle for Stderr { fn as_handle(&self) -> BorrowedHandle<'_> { unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) } } } } impl AsyncWrite for Stderr { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { Pin::new(&mut self.std).poll_write(cx, buf) } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> { Pin::new(&mut self.std).poll_flush(cx) } fn poll_shutdown( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<Result<(), io::Error>> { Pin::new(&mut self.std).poll_shutdown(cx) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/mod.rs
tokio/src/io/mod.rs
//! Traits, helpers, and type definitions for asynchronous I/O functionality. //! //! This module is the asynchronous version of `std::io`. Primarily, it //! defines two traits, [`AsyncRead`] and [`AsyncWrite`], which are asynchronous //! versions of the [`Read`] and [`Write`] traits in the standard library. //! //! # `AsyncRead` and `AsyncWrite` //! //! Like the standard library's [`Read`] and [`Write`] traits, [`AsyncRead`] and //! [`AsyncWrite`] provide the most general interface for reading and writing //! input and output. Unlike the standard library's traits, however, they are //! _asynchronous_ &mdash; meaning that reading from or writing to a `tokio::io` //! type will _yield_ to the Tokio scheduler when IO is not ready, rather than //! blocking. This allows other tasks to run while waiting on IO. //! //! Another difference is that `AsyncRead` and `AsyncWrite` only contain //! core methods needed to provide asynchronous reading and writing //! functionality. Instead, utility methods are defined in the [`AsyncReadExt`] //! and [`AsyncWriteExt`] extension traits. These traits are automatically //! implemented for all values that implement `AsyncRead` and `AsyncWrite` //! respectively. //! //! End users will rarely interact directly with `AsyncRead` and //! `AsyncWrite`. Instead, they will use the async functions defined in the //! extension traits. Library authors are expected to implement `AsyncRead` //! and `AsyncWrite` in order to provide types that behave like byte streams. //! //! Even with these differences, Tokio's `AsyncRead` and `AsyncWrite` traits //! can be used in almost exactly the same manner as the standard library's //! `Read` and `Write`. Most types in the standard library that implement `Read` //! and `Write` have asynchronous equivalents in `tokio` that implement //! `AsyncRead` and `AsyncWrite`, such as [`File`] and [`TcpStream`]. //! //! For example, the standard library documentation introduces `Read` by //! 
[demonstrating][std_example] reading some bytes from a [`std::fs::File`]. We //! can do the same with [`tokio::fs::File`][`File`]: //! //! ```no_run //! # #[cfg(not(target_family = "wasm"))] //! # { //! use tokio::io::{self, AsyncReadExt}; //! use tokio::fs::File; //! //! #[tokio::main] //! async fn main() -> io::Result<()> { //! let mut f = File::open("foo.txt").await?; //! let mut buffer = [0; 10]; //! //! // read up to 10 bytes //! let n = f.read(&mut buffer).await?; //! //! println!("The bytes: {:?}", &buffer[..n]); //! Ok(()) //! } //! # } //! ``` //! //! [`File`]: crate::fs::File //! [`TcpStream`]: crate::net::TcpStream //! [`std::fs::File`]: std::fs::File //! [std_example]: std::io#read-and-write //! //! ## Buffered Readers and Writers //! //! Byte-based interfaces are unwieldy and can be inefficient, as we'd need to be //! making near-constant calls to the operating system. To help with this, //! `std::io` comes with [support for _buffered_ readers and writers][stdbuf], //! and therefore, `tokio::io` does as well. //! //! Tokio provides an async version of the [`std::io::BufRead`] trait, //! [`AsyncBufRead`]; and async [`BufReader`] and [`BufWriter`] structs, which //! wrap readers and writers. These wrappers use a buffer, reducing the number //! of calls and providing nicer methods for accessing exactly what you want. //! //! For example, [`BufReader`] works with the [`AsyncBufRead`] trait to add //! extra methods to any async reader: //! //! ```no_run //! # #[cfg(not(target_family = "wasm"))] //! # { //! use tokio::io::{self, BufReader, AsyncBufReadExt}; //! use tokio::fs::File; //! //! #[tokio::main] //! async fn main() -> io::Result<()> { //! let f = File::open("foo.txt").await?; //! let mut reader = BufReader::new(f); //! let mut buffer = String::new(); //! //! // read a line into buffer //! reader.read_line(&mut buffer).await?; //! //! println!("{}", buffer); //! Ok(()) //! } //! # } //! ``` //! //! 
[`BufWriter`] doesn't add any new ways of writing; it just buffers every call //! to [`write`](crate::io::AsyncWriteExt::write). However, you **must** flush //! [`BufWriter`] to ensure that any buffered data is written. //! //! ```no_run //! # #[cfg(not(target_family = "wasm"))] //! # { //! use tokio::io::{self, BufWriter, AsyncWriteExt}; //! use tokio::fs::File; //! //! #[tokio::main] //! async fn main() -> io::Result<()> { //! let f = File::create("foo.txt").await?; //! { //! let mut writer = BufWriter::new(f); //! //! // Write a byte to the buffer. //! writer.write(&[42u8]).await?; //! //! // Flush the buffer before it goes out of scope. //! writer.flush().await?; //! //! } // Unless flushed or shut down, the contents of the buffer is discarded on drop. //! //! Ok(()) //! } //! # } //! ``` //! //! [stdbuf]: std::io#bufreader-and-bufwriter //! [`std::io::BufRead`]: std::io::BufRead //! [`AsyncBufRead`]: crate::io::AsyncBufRead //! [`BufReader`]: crate::io::BufReader //! [`BufWriter`]: crate::io::BufWriter //! //! ## Implementing `AsyncRead` and `AsyncWrite` //! //! Because they are traits, we can implement [`AsyncRead`] and [`AsyncWrite`] for //! our own types, as well. Note that these traits must only be implemented for //! non-blocking I/O types that integrate with the futures type system. In //! other words, these types must never block the thread, and instead the //! current task is notified when the I/O resource is ready. //! //! ## Conversion to and from Stream/Sink //! //! It is often convenient to encapsulate the reading and writing of bytes in a //! [`Stream`] or [`Sink`] of data. //! //! Tokio provides simple wrappers for converting [`AsyncRead`] to [`Stream`] //! and vice-versa in the [tokio-util] crate, see [`ReaderStream`] and //! [`StreamReader`]. //! //! There are also utility traits that abstract the asynchronous buffering //! necessary to write your own adaptors for encoding and decoding bytes to/from //! 
your structured data, allowing to transform something that implements //! [`AsyncRead`]/[`AsyncWrite`] into a [`Stream`]/[`Sink`], see [`Decoder`] and //! [`Encoder`] in the [tokio-util::codec] module. //! //! [tokio-util]: https://docs.rs/tokio-util //! [tokio-util::codec]: https://docs.rs/tokio-util/latest/tokio_util/codec/index.html //! //! # Standard input and output //! //! Tokio provides asynchronous APIs to standard [input], [output], and [error]. //! These APIs are very similar to the ones provided by `std`, but they also //! implement [`AsyncRead`] and [`AsyncWrite`]. //! //! Note that the standard input / output APIs **must** be used from the //! context of the Tokio runtime, as they require Tokio-specific features to //! function. Calling these functions outside of a Tokio runtime will panic. //! //! [input]: fn@stdin //! [output]: fn@stdout //! [error]: fn@stderr //! //! # `std` re-exports //! //! Additionally, [`Error`], [`ErrorKind`], [`Result`], and [`SeekFrom`] are //! re-exported from `std::io` for ease of use. //! //! [`AsyncRead`]: trait@AsyncRead //! [`AsyncWrite`]: trait@AsyncWrite //! [`AsyncReadExt`]: trait@AsyncReadExt //! [`AsyncWriteExt`]: trait@AsyncWriteExt //! ["codec"]: https://docs.rs/tokio-util/latest/tokio_util/codec/index.html //! [`Encoder`]: https://docs.rs/tokio-util/latest/tokio_util/codec/trait.Encoder.html //! [`Decoder`]: https://docs.rs/tokio-util/latest/tokio_util/codec/trait.Decoder.html //! [`ReaderStream`]: https://docs.rs/tokio-util/latest/tokio_util/io/struct.ReaderStream.html //! [`StreamReader`]: https://docs.rs/tokio-util/latest/tokio_util/io/struct.StreamReader.html //! [`Error`]: struct@Error //! [`ErrorKind`]: enum@ErrorKind //! [`Result`]: type@Result //! [`Read`]: std::io::Read //! [`SeekFrom`]: enum@SeekFrom //! [`Sink`]: https://docs.rs/futures/0.3/futures/sink/trait.Sink.html //! [`Stream`]: https://docs.rs/futures/0.3/futures/stream/trait.Stream.html //! 
[`Write`]: std::io::Write #![cfg_attr( not(all(feature = "rt", feature = "net")), allow(dead_code, unused_imports) )] cfg_io_blocking! { pub(crate) mod blocking; } mod async_buf_read; pub use self::async_buf_read::AsyncBufRead; mod async_read; pub use self::async_read::AsyncRead; mod async_seek; pub use self::async_seek::AsyncSeek; mod async_write; pub use self::async_write::AsyncWrite; mod read_buf; pub use self::read_buf::ReadBuf; // Re-export some types from `std::io` so that users don't have to deal // with conflicts when `use`ing `tokio::io` and `std::io`. #[doc(no_inline)] pub use std::io::{Error, ErrorKind, Result, SeekFrom}; cfg_io_driver_impl! { pub(crate) mod interest; pub(crate) mod ready; cfg_net_or_uring! { pub use interest::Interest; pub use ready::Ready; } #[cfg_attr(target_os = "wasi", allow(unused_imports))] mod poll_evented; #[cfg(not(loom))] #[cfg_attr(target_os = "wasi", allow(unused_imports))] pub(crate) use poll_evented::PollEvented; } // The bsd module can't be build on Windows, so we completely ignore it, even // when building documentation. #[cfg(unix)] cfg_aio! { /// BSD-specific I/O types. pub mod bsd { mod poll_aio; pub use poll_aio::{Aio, AioEvent, AioSource}; } } cfg_net_unix! { mod async_fd; pub mod unix { //! Asynchronous IO structures specific to Unix-like operating systems. pub use super::async_fd::{AsyncFd, AsyncFdTryNewError, AsyncFdReadyGuard, AsyncFdReadyMutGuard, TryIoError}; } } cfg_io_std! { mod stdio_common; mod stderr; pub use stderr::{stderr, Stderr}; mod stdin; pub use stdin::{stdin, Stdin}; mod stdout; pub use stdout::{stdout, Stdout}; } cfg_io_util! 
{ mod split; pub use split::{split, ReadHalf, WriteHalf}; mod join; pub use join::{join, Join}; pub(crate) mod seek; pub(crate) mod util; pub use util::{ copy, copy_bidirectional, copy_bidirectional_with_sizes, copy_buf, duplex, empty, repeat, sink, simplex, AsyncBufReadExt, AsyncReadExt, AsyncSeekExt, AsyncWriteExt, BufReader, BufStream, BufWriter, Chain, DuplexStream, Empty, Lines, Repeat, Sink, Split, Take, SimplexStream, }; } cfg_not_io_util! { cfg_process! { pub(crate) mod util; } } cfg_io_blocking! { /// Types in this module can be mocked out in tests. mod sys { // TODO: don't rename pub(crate) use crate::blocking::spawn_blocking as run; pub(crate) use crate::blocking::JoinHandle as Blocking; } } cfg_io_uring! { pub(crate) mod uring; }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/join.rs
tokio/src/io/join.rs
//! Join two values implementing `AsyncRead` and `AsyncWrite` into a single one. use crate::io::{AsyncBufRead, AsyncRead, AsyncWrite, ReadBuf}; use std::io; use std::pin::Pin; use std::task::{Context, Poll}; /// Join two values implementing `AsyncRead` and `AsyncWrite` into a /// single handle. pub fn join<R, W>(reader: R, writer: W) -> Join<R, W> where R: AsyncRead, W: AsyncWrite, { Join { reader, writer } } pin_project_lite::pin_project! { /// Joins two values implementing `AsyncRead` and `AsyncWrite` into a /// single handle. #[derive(Debug)] pub struct Join<R, W> { #[pin] reader: R, #[pin] writer: W, } } impl<R, W> Join<R, W> where R: AsyncRead, W: AsyncWrite, { /// Splits this `Join` back into its `AsyncRead` and `AsyncWrite` /// components. pub fn into_inner(self) -> (R, W) { (self.reader, self.writer) } /// Returns a reference to the inner reader. pub fn reader(&self) -> &R { &self.reader } /// Returns a reference to the inner writer. pub fn writer(&self) -> &W { &self.writer } /// Returns a mutable reference to the inner reader. pub fn reader_mut(&mut self) -> &mut R { &mut self.reader } /// Returns a mutable reference to the inner writer. pub fn writer_mut(&mut self) -> &mut W { &mut self.writer } /// Returns a pinned mutable reference to the inner reader. pub fn reader_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { self.project().reader } /// Returns a pinned mutable reference to the inner writer. 
pub fn writer_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { self.project().writer } } impl<R, W> AsyncRead for Join<R, W> where R: AsyncRead, { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<Result<(), io::Error>> { self.project().reader.poll_read(cx, buf) } } impl<R, W> AsyncWrite for Join<R, W> where W: AsyncWrite, { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<Result<usize, io::Error>> { self.project().writer.poll_write(cx, buf) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> { self.project().writer.poll_flush(cx) } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> { self.project().writer.poll_shutdown(cx) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll<Result<usize, io::Error>> { self.project().writer.poll_write_vectored(cx, bufs) } fn is_write_vectored(&self) -> bool { self.writer.is_write_vectored() } } impl<R, W> AsyncBufRead for Join<R, W> where R: AsyncBufRead, { fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { self.project().reader.poll_fill_buf(cx) } fn consume(self: Pin<&mut Self>, amt: usize) { self.project().reader.consume(amt) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/async_seek.rs
tokio/src/io/async_seek.rs
use std::io::{self, SeekFrom}; use std::ops::DerefMut; use std::pin::Pin; use std::task::{Context, Poll}; /// Seek bytes asynchronously. /// /// This trait is analogous to the [`std::io::Seek`] trait, but integrates /// with the asynchronous task system. In particular, the `start_seek` /// method, unlike [`Seek::seek`], will not block the calling thread. /// /// Utilities for working with `AsyncSeek` values are provided by /// [`AsyncSeekExt`]. /// /// [`std::io::Seek`]: std::io::Seek /// [`Seek::seek`]: std::io::Seek::seek() /// [`AsyncSeekExt`]: crate::io::AsyncSeekExt pub trait AsyncSeek { /// Attempts to seek to an offset, in bytes, in a stream. /// /// A seek beyond the end of a stream is allowed, but behavior is defined /// by the implementation. /// /// If this function returns successfully, then the job has been submitted. /// To find out when it completes, call `poll_complete`. /// /// # Errors /// /// This function can return [`io::ErrorKind::Other`] in case there is /// another seek in progress. To avoid this, it is advisable that any call /// to `start_seek` is preceded by a call to `poll_complete` to ensure all /// pending seeks have completed. fn start_seek(self: Pin<&mut Self>, position: SeekFrom) -> io::Result<()>; /// Waits for a seek operation to complete. /// /// If the seek operation completed successfully, this method returns the /// new position from the start of the stream. That position can be used /// later with [`SeekFrom::Start`]. /// /// The position returned by calling this method can only be relied on right /// after `start_seek`. If you have changed the position by e.g. reading or /// writing since calling `start_seek`, then it is unspecified whether the /// returned position takes that position change into account. Similarly, if /// `start_seek` has never been called, then it is unspecified whether /// `poll_complete` returns the actual position or some other placeholder /// value (such as 0). 
/// /// # Errors /// /// Seeking to a negative offset is considered an error. fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>>; } macro_rules! deref_async_seek { () => { fn start_seek(mut self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> { Pin::new(&mut **self).start_seek(pos) } fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> { Pin::new(&mut **self).poll_complete(cx) } }; } impl<T: ?Sized + AsyncSeek + Unpin> AsyncSeek for Box<T> { deref_async_seek!(); } impl<T: ?Sized + AsyncSeek + Unpin> AsyncSeek for &mut T { deref_async_seek!(); } impl<P> AsyncSeek for Pin<P> where P: DerefMut, P::Target: AsyncSeek, { fn start_seek(self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> { crate::util::pin_as_deref_mut(self).start_seek(pos) } fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> { crate::util::pin_as_deref_mut(self).poll_complete(cx) } } impl<T: AsRef<[u8]> + Unpin> AsyncSeek for io::Cursor<T> { fn start_seek(mut self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> { io::Seek::seek(&mut *self, pos).map(drop) } fn poll_complete(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<u64>> { Poll::Ready(Ok(self.get_mut().position())) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/async_buf_read.rs
tokio/src/io/async_buf_read.rs
use crate::io::AsyncRead; use std::io; use std::ops::DerefMut; use std::pin::Pin; use std::task::{Context, Poll}; /// Reads bytes asynchronously. /// /// This trait is analogous to [`std::io::BufRead`], but integrates with /// the asynchronous task system. In particular, the [`poll_fill_buf`] method, /// unlike [`BufRead::fill_buf`], will automatically queue the current task for wakeup /// and return if data is not yet available, rather than blocking the calling /// thread. /// /// Utilities for working with `AsyncBufRead` values are provided by /// [`AsyncBufReadExt`]. /// /// [`std::io::BufRead`]: std::io::BufRead /// [`poll_fill_buf`]: AsyncBufRead::poll_fill_buf /// [`BufRead::fill_buf`]: std::io::BufRead::fill_buf /// [`AsyncBufReadExt`]: crate::io::AsyncBufReadExt pub trait AsyncBufRead: AsyncRead { /// Attempts to return the contents of the internal buffer, filling it with more data /// from the inner reader if it is empty. /// /// On success, returns `Poll::Ready(Ok(buf))`. /// /// If no data is available for reading, the method returns /// `Poll::Pending` and arranges for the current task (via /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes /// readable or is closed. /// /// This function is a lower-level call. It needs to be paired with the /// [`consume`] method to function properly. When calling this /// method, none of the contents will be "read" in the sense that later /// calling [`poll_read`] may return the same contents. As such, [`consume`] must /// be called with the number of bytes that are consumed from this buffer to /// ensure that the bytes are never returned twice. /// /// An empty buffer returned indicates that the stream has reached EOF. 
/// /// [`poll_read`]: AsyncRead::poll_read /// [`consume`]: AsyncBufRead::consume fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>>; /// Tells this buffer that `amt` bytes have been consumed from the buffer, /// so they should no longer be returned in calls to [`poll_read`]. /// /// This function is a lower-level call. It needs to be paired with the /// [`poll_fill_buf`] method to function properly. This function does /// not perform any I/O, it simply informs this object that some amount of /// its buffer, returned from [`poll_fill_buf`], has been consumed and should /// no longer be returned. As such, this function may do odd things if /// [`poll_fill_buf`] isn't called before calling it. /// /// The `amt` must be `<=` the number of bytes in the buffer returned by /// [`poll_fill_buf`]. /// /// [`poll_read`]: AsyncRead::poll_read /// [`poll_fill_buf`]: AsyncBufRead::poll_fill_buf fn consume(self: Pin<&mut Self>, amt: usize); } macro_rules! deref_async_buf_read { () => { fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { Pin::new(&mut **self.get_mut()).poll_fill_buf(cx) } fn consume(mut self: Pin<&mut Self>, amt: usize) { Pin::new(&mut **self).consume(amt) } }; } impl<T: ?Sized + AsyncBufRead + Unpin> AsyncBufRead for Box<T> { deref_async_buf_read!(); } impl<T: ?Sized + AsyncBufRead + Unpin> AsyncBufRead for &mut T { deref_async_buf_read!(); } impl<P> AsyncBufRead for Pin<P> where P: DerefMut, P::Target: AsyncBufRead, { fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { crate::util::pin_as_deref_mut(self).poll_fill_buf(cx) } fn consume(self: Pin<&mut Self>, amt: usize) { crate::util::pin_as_deref_mut(self).consume(amt); } } impl AsyncBufRead for &[u8] { fn poll_fill_buf(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { Poll::Ready(Ok(*self)) } fn consume(mut self: Pin<&mut Self>, amt: usize) { *self = &self[amt..]; } } 
impl<T: AsRef<[u8]> + Unpin> AsyncBufRead for io::Cursor<T> { fn poll_fill_buf(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { Poll::Ready(io::BufRead::fill_buf(self.get_mut())) } fn consume(self: Pin<&mut Self>, amt: usize) { io::BufRead::consume(self.get_mut(), amt); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/async_read.rs
tokio/src/io/async_read.rs
use super::ReadBuf; use std::io; use std::ops::DerefMut; use std::pin::Pin; use std::task::{Context, Poll}; /// Reads bytes from a source. /// /// This trait is analogous to the [`std::io::Read`] trait, but integrates with /// the asynchronous task system. In particular, the [`poll_read`] method, /// unlike [`Read::read`], will automatically queue the current task for wakeup /// and return if data is not yet available, rather than blocking the calling /// thread. /// /// Specifically, this means that the `poll_read` function will return one of /// the following: /// /// * `Poll::Ready(Ok(()))` means that data was immediately read and placed into /// the output buffer. The amount of data read can be determined by the /// increase in the length of the slice returned by `ReadBuf::filled`. If the /// difference is 0, either EOF has been reached, or the output buffer had zero /// capacity (i.e. `buf.remaining()` == 0). /// /// * `Poll::Pending` means that no data was read into the buffer /// provided. The I/O object is not currently readable but may become readable /// in the future. Most importantly, **the current future's task is scheduled /// to get unparked when the object is readable**. This means that like /// `Future::poll` you'll receive a notification when the I/O object is /// readable again. /// /// * `Poll::Ready(Err(e))` for other errors are standard I/O errors coming from the /// underlying object. /// /// This trait importantly means that the `read` method only works in the /// context of a future's task. The object may panic if used outside of a task. /// /// Utilities for working with `AsyncRead` values are provided by /// [`AsyncReadExt`]. /// /// [`poll_read`]: AsyncRead::poll_read /// [`std::io::Read`]: std::io::Read /// [`Read::read`]: std::io::Read::read /// [`AsyncReadExt`]: crate::io::AsyncReadExt pub trait AsyncRead { /// Attempts to read from the `AsyncRead` into `buf`. 
/// /// On success, returns `Poll::Ready(Ok(()))` and places data in the /// unfilled portion of `buf`. If no data was read (`buf.filled().len()` is /// unchanged), it implies that EOF has been reached, or the output buffer /// had zero capacity (i.e. `buf.remaining()` == 0). /// /// If no data is available for reading, the method returns `Poll::Pending` /// and arranges for the current task (via `cx.waker()`) to receive a /// notification when the object becomes readable or is closed. fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>>; } macro_rules! deref_async_read { () => { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { Pin::new(&mut **self).poll_read(cx, buf) } }; } impl<T: ?Sized + AsyncRead + Unpin> AsyncRead for Box<T> { deref_async_read!(); } impl<T: ?Sized + AsyncRead + Unpin> AsyncRead for &mut T { deref_async_read!(); } impl<P> AsyncRead for Pin<P> where P: DerefMut, P::Target: AsyncRead, { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { crate::util::pin_as_deref_mut(self).poll_read(cx, buf) } } impl AsyncRead for &[u8] { fn poll_read( mut self: Pin<&mut Self>, _cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { let amt = std::cmp::min(self.len(), buf.remaining()); let (a, b) = self.split_at(amt); buf.put_slice(a); *self = b; Poll::Ready(Ok(())) } } impl<T: AsRef<[u8]> + Unpin> AsyncRead for io::Cursor<T> { fn poll_read( mut self: Pin<&mut Self>, _cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { let pos = self.position(); let slice: &[u8] = (*self).get_ref().as_ref(); // The position could technically be out of bounds, so don't panic... 
if pos > slice.len() as u64 { return Poll::Ready(Ok(())); } let start = pos as usize; let amt = std::cmp::min(slice.len() - start, buf.remaining()); // Add won't overflow because of pos check above. let end = start + amt; buf.put_slice(&slice[start..end]); self.set_position(end as u64); Poll::Ready(Ok(())) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/read_buf.rs
tokio/src/io/read_buf.rs
use std::fmt; use std::mem::MaybeUninit; /// A wrapper around a byte buffer that is incrementally filled and initialized. /// /// This type is a sort of "double cursor". It tracks three regions in the /// buffer: a region at the beginning of the buffer that has been logically /// filled with data, a region that has been initialized at some point but not /// yet logically filled, and a region at the end that may be uninitialized. /// The filled region is guaranteed to be a subset of the initialized region. /// /// In summary, the contents of the buffer can be visualized as: /// /// ```not_rust /// [ capacity ] /// [ filled | unfilled ] /// [ initialized | uninitialized ] /// ``` /// /// It is undefined behavior to de-initialize any bytes from the uninitialized /// region, since it is merely unknown whether this region is uninitialized or /// not, and if part of it turns out to be initialized, it must stay initialized. pub struct ReadBuf<'a> { buf: &'a mut [MaybeUninit<u8>], filled: usize, initialized: usize, } impl<'a> ReadBuf<'a> { /// Creates a new `ReadBuf` from a fully initialized buffer. #[inline] pub fn new(buf: &'a mut [u8]) -> ReadBuf<'a> { let initialized = buf.len(); let buf = unsafe { slice_to_uninit_mut(buf) }; ReadBuf { buf, filled: 0, initialized, } } /// Creates a new `ReadBuf` from a buffer that may be uninitialized. /// /// The internal cursor will mark the entire buffer as uninitialized. If /// the buffer is known to be partially initialized, then use `assume_init` /// to move the internal cursor. #[inline] pub fn uninit(buf: &'a mut [MaybeUninit<u8>]) -> ReadBuf<'a> { ReadBuf { buf, filled: 0, initialized: 0, } } /// Returns the total capacity of the buffer. #[inline] pub fn capacity(&self) -> usize { self.buf.len() } /// Returns a shared reference to the filled portion of the buffer. 
#[inline] pub fn filled(&self) -> &[u8] { let slice = &self.buf[..self.filled]; // safety: filled describes how far into the buffer that the // user has filled with bytes, so it's been initialized. unsafe { slice_assume_init(slice) } } /// Returns a mutable reference to the filled portion of the buffer. #[inline] pub fn filled_mut(&mut self) -> &mut [u8] { let slice = &mut self.buf[..self.filled]; // safety: filled describes how far into the buffer that the // user has filled with bytes, so it's been initialized. unsafe { slice_assume_init_mut(slice) } } /// Returns a new `ReadBuf` comprised of the unfilled section up to `n`. #[inline] pub fn take(&mut self, n: usize) -> ReadBuf<'_> { let max = std::cmp::min(self.remaining(), n); // Safety: We don't set any of the `unfilled_mut` with `MaybeUninit::uninit`. unsafe { ReadBuf::uninit(&mut self.unfilled_mut()[..max]) } } /// Returns a shared reference to the initialized portion of the buffer. /// /// This includes the filled portion. #[inline] pub fn initialized(&self) -> &[u8] { let slice = &self.buf[..self.initialized]; // safety: initialized describes how far into the buffer that the // user has at some point initialized with bytes. unsafe { slice_assume_init(slice) } } /// Returns a mutable reference to the initialized portion of the buffer. /// /// This includes the filled portion. #[inline] pub fn initialized_mut(&mut self) -> &mut [u8] { let slice = &mut self.buf[..self.initialized]; // safety: initialized describes how far into the buffer that the // user has at some point initialized with bytes. unsafe { slice_assume_init_mut(slice) } } /// Returns a mutable reference to the entire buffer, without ensuring that it has been fully /// initialized. /// /// The elements between 0 and `self.filled().len()` are filled, and those between 0 and /// `self.initialized().len()` are initialized (and so can be converted to a `&mut [u8]`). /// /// The caller of this method must ensure that these invariants are upheld. 
For example, if the /// caller initializes some of the uninitialized section of the buffer, it must call /// [`assume_init`](Self::assume_init) with the number of bytes initialized. /// /// # Safety /// /// The caller must not de-initialize portions of the buffer that have already been initialized. /// This includes any bytes in the region marked as uninitialized by `ReadBuf`. #[inline] pub unsafe fn inner_mut(&mut self) -> &mut [MaybeUninit<u8>] { self.buf } /// Returns a mutable reference to the unfilled part of the buffer without ensuring that it has been fully /// initialized. /// /// # Safety /// /// The caller must not de-initialize portions of the buffer that have already been initialized. /// This includes any bytes in the region marked as uninitialized by `ReadBuf`. #[inline] pub unsafe fn unfilled_mut(&mut self) -> &mut [MaybeUninit<u8>] { &mut self.buf[self.filled..] } /// Returns a mutable reference to the unfilled part of the buffer, ensuring it is fully initialized. /// /// Since `ReadBuf` tracks the region of the buffer that has been initialized, this is effectively "free" after /// the first use. #[inline] pub fn initialize_unfilled(&mut self) -> &mut [u8] { self.initialize_unfilled_to(self.remaining()) } /// Returns a mutable reference to the first `n` bytes of the unfilled part of the buffer, ensuring it is /// fully initialized. /// /// # Panics /// /// Panics if `self.remaining()` is less than `n`. #[inline] #[track_caller] pub fn initialize_unfilled_to(&mut self, n: usize) -> &mut [u8] { assert!(self.remaining() >= n, "n overflows remaining"); // This can't overflow, otherwise the assert above would have failed. let end = self.filled + n; if self.initialized < end { unsafe { self.buf[self.initialized..end] .as_mut_ptr() .write_bytes(0, end - self.initialized); } self.initialized = end; } let slice = &mut self.buf[self.filled..end]; // safety: just above, we checked that the end of the buf has // been initialized to some value. 
unsafe { slice_assume_init_mut(slice) } } /// Returns the number of bytes at the end of the slice that have not yet been filled. #[inline] pub fn remaining(&self) -> usize { self.capacity() - self.filled } /// Clears the buffer, resetting the filled region to empty. /// /// The number of initialized bytes is not changed, and the contents of the buffer are not modified. #[inline] pub fn clear(&mut self) { self.filled = 0; } /// Advances the size of the filled region of the buffer. /// /// The number of initialized bytes is not changed. /// /// # Panics /// /// Panics if the filled region of the buffer would become larger than the initialized region. #[inline] #[track_caller] pub fn advance(&mut self, n: usize) { let new = self.filled.checked_add(n).expect("filled overflow"); self.set_filled(new); } /// Sets the size of the filled region of the buffer. /// /// The number of initialized bytes is not changed. /// /// Note that this can be used to *shrink* the filled region of the buffer in addition to growing it (for /// example, by a `AsyncRead` implementation that compresses data in-place). /// /// # Panics /// /// Panics if the filled region of the buffer would become larger than the initialized region. #[inline] #[track_caller] pub fn set_filled(&mut self, n: usize) { assert!( n <= self.initialized, "filled must not become larger than initialized" ); self.filled = n; } /// Asserts that the first `n` unfilled bytes of the buffer are initialized. /// /// `ReadBuf` assumes that bytes are never de-initialized, so this method does nothing when called with fewer /// bytes than are already known to be initialized. /// /// # Safety /// /// The caller must ensure that `n` unfilled bytes of the buffer have already been initialized. 
#[inline] pub unsafe fn assume_init(&mut self, n: usize) { let new = self.filled + n; if new > self.initialized { self.initialized = new; } } /// Appends data to the buffer, advancing the written position and possibly also the initialized position. /// /// # Panics /// /// Panics if `self.remaining()` is less than `buf.len()`. #[inline] #[track_caller] pub fn put_slice(&mut self, buf: &[u8]) { assert!( self.remaining() >= buf.len(), "buf.len() must fit in remaining(); buf.len() = {}, remaining() = {}", buf.len(), self.remaining() ); let amt = buf.len(); // Cannot overflow, asserted above let end = self.filled + amt; // Safety: the length is asserted above unsafe { self.buf[self.filled..end] .as_mut_ptr() .cast::<u8>() .copy_from_nonoverlapping(buf.as_ptr(), amt); } if self.initialized < end { self.initialized = end; } self.filled = end; } } #[cfg(feature = "io-util")] #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] unsafe impl<'a> bytes::BufMut for ReadBuf<'a> { fn remaining_mut(&self) -> usize { self.remaining() } // SAFETY: The caller guarantees that at least `cnt` unfilled bytes have been initialized. unsafe fn advance_mut(&mut self, cnt: usize) { unsafe { self.assume_init(cnt); } self.advance(cnt); } fn chunk_mut(&mut self) -> &mut bytes::buf::UninitSlice { // SAFETY: No region of `unfilled` will be deinitialized because it is // exposed as an `UninitSlice`, whose API guarantees that the memory is // never deinitialized. let unfilled = unsafe { self.unfilled_mut() }; let len = unfilled.len(); let ptr = unfilled.as_mut_ptr() as *mut u8; // SAFETY: The pointer is valid for `len` bytes because it comes from a // slice of that length. 
unsafe { bytes::buf::UninitSlice::from_raw_parts_mut(ptr, len) } } } impl fmt::Debug for ReadBuf<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ReadBuf") .field("filled", &self.filled) .field("initialized", &self.initialized) .field("capacity", &self.capacity()) .finish() } } /// # Safety /// /// The caller must ensure that `slice` is fully initialized /// and never writes uninitialized bytes to the returned slice. unsafe fn slice_to_uninit_mut(slice: &mut [u8]) -> &mut [MaybeUninit<u8>] { // SAFETY: `MaybeUninit<u8>` has the same memory layout as u8, and the caller // promises to not write uninitialized bytes to the returned slice. unsafe { &mut *(slice as *mut [u8] as *mut [MaybeUninit<u8>]) } } /// # Safety /// /// The caller must ensure that `slice` is fully initialized. // TODO: This could use `MaybeUninit::slice_assume_init` when it is stable. unsafe fn slice_assume_init(slice: &[MaybeUninit<u8>]) -> &[u8] { // SAFETY: `MaybeUninit<u8>` has the same memory layout as u8, and the caller // promises that `slice` is fully initialized. unsafe { &*(slice as *const [MaybeUninit<u8>] as *const [u8]) } } /// # Safety /// /// The caller must ensure that `slice` is fully initialized. // TODO: This could use `MaybeUninit::slice_assume_init_mut` when it is stable. unsafe fn slice_assume_init_mut(slice: &mut [MaybeUninit<u8>]) -> &mut [u8] { // SAFETY: `MaybeUninit<u8>` has the same memory layout as `u8`, and the caller // promises that `slice` is fully initialized. unsafe { &mut *(slice as *mut [MaybeUninit<u8>] as *mut [u8]) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/seek.rs
tokio/src/io/seek.rs
use crate::io::AsyncSeek; use pin_project_lite::pin_project; use std::future::Future; use std::io::{self, SeekFrom}; use std::marker::PhantomPinned; use std::pin::Pin; use std::task::{ready, Context, Poll}; pin_project! { /// Future for the [`seek`](crate::io::AsyncSeekExt::seek) method. #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct Seek<'a, S: ?Sized> { seek: &'a mut S, pos: Option<SeekFrom>, // Make this future `!Unpin` for compatibility with async trait methods. #[pin] _pin: PhantomPinned, } } pub(crate) fn seek<S>(seek: &mut S, pos: SeekFrom) -> Seek<'_, S> where S: AsyncSeek + ?Sized + Unpin, { Seek { seek, pos: Some(pos), _pin: PhantomPinned, } } impl<S> Future for Seek<'_, S> where S: AsyncSeek + ?Sized + Unpin, { type Output = io::Result<u64>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let me = self.project(); match me.pos { Some(pos) => { // ensure no seek in progress ready!(Pin::new(&mut *me.seek).poll_complete(cx))?; match Pin::new(&mut *me.seek).start_seek(*pos) { Ok(()) => { *me.pos = None; Pin::new(&mut *me.seek).poll_complete(cx) } Err(e) => Poll::Ready(Err(e)), } } None => Pin::new(&mut *me.seek).poll_complete(cx), } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/split.rs
tokio/src/io/split.rs
//! Split a single value implementing `AsyncRead + AsyncWrite` into separate //! `AsyncRead` and `AsyncWrite` handles. //! //! To restore this read/write object from its `split::ReadHalf` and //! `split::WriteHalf` use `unsplit`. use crate::io::{AsyncRead, AsyncWrite, ReadBuf}; use std::fmt; use std::io; use std::pin::Pin; use std::sync::Arc; use std::sync::Mutex; use std::task::{Context, Poll}; cfg_io_util! { /// The readable half of a value returned from [`split`](split()). pub struct ReadHalf<T> { inner: Arc<Inner<T>>, } /// The writable half of a value returned from [`split`](split()). pub struct WriteHalf<T> { inner: Arc<Inner<T>>, } /// Splits a single value implementing `AsyncRead + AsyncWrite` into separate /// `AsyncRead` and `AsyncWrite` handles. /// /// To restore this read/write object from its `ReadHalf` and /// `WriteHalf` use [`unsplit`](ReadHalf::unsplit()). pub fn split<T>(stream: T) -> (ReadHalf<T>, WriteHalf<T>) where T: AsyncRead + AsyncWrite, { let is_write_vectored = stream.is_write_vectored(); let inner = Arc::new(Inner { stream: Mutex::new(stream), is_write_vectored, }); let rd = ReadHalf { inner: inner.clone(), }; let wr = WriteHalf { inner }; (rd, wr) } } struct Inner<T> { stream: Mutex<T>, is_write_vectored: bool, } impl<T> Inner<T> { fn with_lock<R>(&self, f: impl FnOnce(Pin<&mut T>) -> R) -> R { let mut guard = self.stream.lock().unwrap(); // safety: we do not move the stream. let stream = unsafe { Pin::new_unchecked(&mut *guard) }; f(stream) } } impl<T> ReadHalf<T> { /// Checks if this `ReadHalf` and some `WriteHalf` were split from the same /// stream. pub fn is_pair_of(&self, other: &WriteHalf<T>) -> bool { other.is_pair_of(self) } /// Reunites with a previously split `WriteHalf`. /// /// # Panics /// /// If this `ReadHalf` and the given `WriteHalf` do not originate from the /// same `split` operation this method will panic. /// This can be checked ahead of time by calling [`is_pair_of()`](Self::is_pair_of). 
#[track_caller] pub fn unsplit(self, wr: WriteHalf<T>) -> T where T: Unpin, { if self.is_pair_of(&wr) { drop(wr); let inner = Arc::try_unwrap(self.inner) .ok() .expect("`Arc::try_unwrap` failed"); inner.stream.into_inner().unwrap() } else { panic!("Unrelated `split::Write` passed to `split::Read::unsplit`.") } } } impl<T> WriteHalf<T> { /// Checks if this `WriteHalf` and some `ReadHalf` were split from the same /// stream. pub fn is_pair_of(&self, other: &ReadHalf<T>) -> bool { Arc::ptr_eq(&self.inner, &other.inner) } } impl<T: AsyncRead> AsyncRead for ReadHalf<T> { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { self.inner.with_lock(|stream| stream.poll_read(cx, buf)) } } impl<T: AsyncWrite> AsyncWrite for WriteHalf<T> { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<Result<usize, io::Error>> { self.inner.with_lock(|stream| stream.poll_write(cx, buf)) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> { self.inner.with_lock(|stream| stream.poll_flush(cx)) } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> { self.inner.with_lock(|stream| stream.poll_shutdown(cx)) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll<Result<usize, io::Error>> { self.inner .with_lock(|stream| stream.poll_write_vectored(cx, bufs)) } fn is_write_vectored(&self) -> bool { self.inner.is_write_vectored } } unsafe impl<T: Send> Send for ReadHalf<T> {} unsafe impl<T: Send> Send for WriteHalf<T> {} unsafe impl<T: Sync> Sync for ReadHalf<T> {} unsafe impl<T: Sync> Sync for WriteHalf<T> {} impl<T: fmt::Debug> fmt::Debug for ReadHalf<T> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("split::ReadHalf").finish() } } impl<T: fmt::Debug> fmt::Debug for WriteHalf<T> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { 
fmt.debug_struct("split::WriteHalf").finish() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/write_int.rs
tokio/src/io/util/write_int.rs
use crate::io::AsyncWrite; use bytes::BufMut; use pin_project_lite::pin_project; use std::future::Future; use std::io; use std::marker::PhantomPinned; use std::pin::Pin; use std::task::{Context, Poll}; macro_rules! writer { ($name:ident, $ty:ty, $writer:ident) => { writer!($name, $ty, $writer, std::mem::size_of::<$ty>()); }; ($name:ident, $ty:ty, $writer:ident, $bytes:expr) => { pin_project! { #[doc(hidden)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct $name<W> { #[pin] dst: W, buf: [u8; $bytes], written: u8, // Make this future `!Unpin` for compatibility with async trait methods. #[pin] _pin: PhantomPinned, } } impl<W> $name<W> { pub(crate) fn new(w: W, value: $ty) -> Self { let mut writer = Self { buf: [0; $bytes], written: 0, dst: w, _pin: PhantomPinned, }; BufMut::$writer(&mut &mut writer.buf[..], value); writer } } impl<W> Future for $name<W> where W: AsyncWrite, { type Output = io::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let mut me = self.project(); if *me.written == $bytes as u8 { return Poll::Ready(Ok(())); } while *me.written < $bytes as u8 { *me.written += match me .dst .as_mut() .poll_write(cx, &me.buf[*me.written as usize..]) { Poll::Pending => return Poll::Pending, Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())), Poll::Ready(Ok(0)) => { return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); } Poll::Ready(Ok(n)) => n as u8, }; } Poll::Ready(Ok(())) } } }; } macro_rules! writer8 { ($name:ident, $ty:ty) => { pin_project! { #[doc(hidden)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct $name<W> { #[pin] dst: W, byte: $ty, // Make this future `!Unpin` for compatibility with async trait methods. 
#[pin] _pin: PhantomPinned, } } impl<W> $name<W> { pub(crate) fn new(dst: W, byte: $ty) -> Self { Self { dst, byte, _pin: PhantomPinned, } } } impl<W> Future for $name<W> where W: AsyncWrite, { type Output = io::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let me = self.project(); let buf = [*me.byte as u8]; match me.dst.poll_write(cx, &buf[..]) { Poll::Pending => Poll::Pending, Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())), Poll::Ready(Ok(0)) => Poll::Ready(Err(io::ErrorKind::WriteZero.into())), Poll::Ready(Ok(1)) => Poll::Ready(Ok(())), Poll::Ready(Ok(_)) => unreachable!(), } } } }; } writer8!(WriteU8, u8); writer8!(WriteI8, i8); writer!(WriteU16, u16, put_u16); writer!(WriteU32, u32, put_u32); writer!(WriteU64, u64, put_u64); writer!(WriteU128, u128, put_u128); writer!(WriteI16, i16, put_i16); writer!(WriteI32, i32, put_i32); writer!(WriteI64, i64, put_i64); writer!(WriteI128, i128, put_i128); writer!(WriteF32, f32, put_f32); writer!(WriteF64, f64, put_f64); writer!(WriteU16Le, u16, put_u16_le); writer!(WriteU32Le, u32, put_u32_le); writer!(WriteU64Le, u64, put_u64_le); writer!(WriteU128Le, u128, put_u128_le); writer!(WriteI16Le, i16, put_i16_le); writer!(WriteI32Le, i32, put_i32_le); writer!(WriteI64Le, i64, put_i64_le); writer!(WriteI128Le, i128, put_i128_le); writer!(WriteF32Le, f32, put_f32_le); writer!(WriteF64Le, f64, put_f64_le);
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/copy_bidirectional.rs
tokio/src/io/util/copy_bidirectional.rs
use super::copy::CopyBuffer; use crate::io::{AsyncRead, AsyncWrite}; use std::future::poll_fn; use std::io; use std::pin::Pin; use std::task::{ready, Context, Poll}; enum TransferState { Running(CopyBuffer), ShuttingDown(u64), Done(u64), } fn transfer_one_direction<A, B>( cx: &mut Context<'_>, state: &mut TransferState, r: &mut A, w: &mut B, ) -> Poll<io::Result<u64>> where A: AsyncRead + AsyncWrite + Unpin + ?Sized, B: AsyncRead + AsyncWrite + Unpin + ?Sized, { let mut r = Pin::new(r); let mut w = Pin::new(w); loop { match state { TransferState::Running(buf) => { let count = ready!(buf.poll_copy(cx, r.as_mut(), w.as_mut()))?; *state = TransferState::ShuttingDown(count); } TransferState::ShuttingDown(count) => { ready!(w.as_mut().poll_shutdown(cx))?; *state = TransferState::Done(*count); } TransferState::Done(count) => return Poll::Ready(Ok(*count)), } } } /// Copies data in both directions between `a` and `b`. /// /// This function returns a future that will read from both streams, /// writing any data read to the opposing stream. /// This happens in both directions concurrently. /// /// If an EOF is observed on one stream, [`shutdown()`] will be invoked on /// the other, and reading from that stream will stop. Copying of data in /// the other direction will continue. /// /// The future will complete successfully once both directions of communication has been shut down. /// A direction is shut down when the reader reports EOF, /// at which point [`shutdown()`] is called on the corresponding writer. When finished, /// it will return a tuple of the number of bytes copied from a to b /// and the number of bytes copied from b to a, in that order. /// /// It uses two 8 KB buffers for transferring bytes between `a` and `b` by default. /// To set your own buffers sizes use [`copy_bidirectional_with_sizes()`]. 
/// /// [`shutdown()`]: crate::io::AsyncWriteExt::shutdown /// /// # Errors /// /// The future will immediately return an error if any IO operation on `a` /// or `b` returns an error. Some data read from either stream may be lost (not /// written to the other stream) in this case. /// /// # Return value /// /// Returns a tuple of bytes copied `a` to `b` and bytes copied `b` to `a`. #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] pub async fn copy_bidirectional<A, B>(a: &mut A, b: &mut B) -> io::Result<(u64, u64)> where A: AsyncRead + AsyncWrite + Unpin + ?Sized, B: AsyncRead + AsyncWrite + Unpin + ?Sized, { copy_bidirectional_impl( a, b, CopyBuffer::new(super::DEFAULT_BUF_SIZE), CopyBuffer::new(super::DEFAULT_BUF_SIZE), ) .await } /// Copies data in both directions between `a` and `b` using buffers of the specified size. /// /// This method is the same as the [`copy_bidirectional()`], except that it allows you to set the /// size of the internal buffers used when copying data. #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] pub async fn copy_bidirectional_with_sizes<A, B>( a: &mut A, b: &mut B, a_to_b_buf_size: usize, b_to_a_buf_size: usize, ) -> io::Result<(u64, u64)> where A: AsyncRead + AsyncWrite + Unpin + ?Sized, B: AsyncRead + AsyncWrite + Unpin + ?Sized, { copy_bidirectional_impl( a, b, CopyBuffer::new(a_to_b_buf_size), CopyBuffer::new(b_to_a_buf_size), ) .await } async fn copy_bidirectional_impl<A, B>( a: &mut A, b: &mut B, a_to_b_buffer: CopyBuffer, b_to_a_buffer: CopyBuffer, ) -> io::Result<(u64, u64)> where A: AsyncRead + AsyncWrite + Unpin + ?Sized, B: AsyncRead + AsyncWrite + Unpin + ?Sized, { let mut a_to_b = TransferState::Running(a_to_b_buffer); let mut b_to_a = TransferState::Running(b_to_a_buffer); poll_fn(|cx| { let a_to_b = transfer_one_direction(cx, &mut a_to_b, a, b)?; let b_to_a = transfer_one_direction(cx, &mut b_to_a, b, a)?; // It is not a problem if ready! 
returns early because transfer_one_direction for the // other direction will keep returning TransferState::Done(count) in future calls to poll let a_to_b = ready!(a_to_b); let b_to_a = ready!(b_to_a); Poll::Ready(Ok((a_to_b, b_to_a))) }) .await }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/read_until.rs
tokio/src/io/util/read_until.rs
use crate::io::AsyncBufRead; use crate::util::memchr; use pin_project_lite::pin_project; use std::future::Future; use std::io; use std::marker::PhantomPinned; use std::mem; use std::pin::Pin; use std::task::{ready, Context, Poll}; pin_project! { /// Future for the [`read_until`](crate::io::AsyncBufReadExt::read_until) method. /// The delimiter is included in the resulting vector. #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct ReadUntil<'a, R: ?Sized> { reader: &'a mut R, delimiter: u8, buf: &'a mut Vec<u8>, // The number of bytes appended to buf. This can be less than buf.len() if // the buffer was not empty when the operation was started. read: usize, // Make this future `!Unpin` for compatibility with async trait methods. #[pin] _pin: PhantomPinned, } } pub(crate) fn read_until<'a, R>( reader: &'a mut R, delimiter: u8, buf: &'a mut Vec<u8>, ) -> ReadUntil<'a, R> where R: AsyncBufRead + ?Sized + Unpin, { ReadUntil { reader, delimiter, buf, read: 0, _pin: PhantomPinned, } } pub(super) fn read_until_internal<R: AsyncBufRead + ?Sized>( mut reader: Pin<&mut R>, cx: &mut Context<'_>, delimiter: u8, buf: &mut Vec<u8>, read: &mut usize, ) -> Poll<io::Result<usize>> { loop { let (done, used) = { let available = ready!(reader.as_mut().poll_fill_buf(cx))?; if let Some(i) = memchr::memchr(delimiter, available) { buf.extend_from_slice(&available[..=i]); (true, i + 1) } else { buf.extend_from_slice(available); (false, available.len()) } }; reader.as_mut().consume(used); *read += used; if done || used == 0 { return Poll::Ready(Ok(mem::replace(read, 0))); } } } impl<R: AsyncBufRead + ?Sized + Unpin> Future for ReadUntil<'_, R> { type Output = io::Result<usize>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let me = self.project(); read_until_internal(Pin::new(*me.reader), cx, *me.delimiter, me.buf, me.read) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/repeat.rs
tokio/src/io/util/repeat.rs
use bytes::BufMut; use crate::io::util::poll_proceed_and_make_progress; use crate::io::{AsyncRead, ReadBuf}; use std::io; use std::pin::Pin; use std::task::{ready, Context, Poll}; cfg_io_util! { /// An async reader which yields one byte over and over and over and over and /// over and... /// /// This struct is generally created by calling [`repeat`][repeat]. Please /// see the documentation of `repeat()` for more details. /// /// This is an asynchronous version of [`std::io::Repeat`][std]. /// /// [repeat]: fn@repeat /// [std]: std::io::Repeat #[derive(Debug)] pub struct Repeat { byte: u8, } /// Creates an instance of an async reader that infinitely repeats one byte. /// /// All reads from this reader will succeed by filling the specified buffer with /// the given byte. /// /// This is an asynchronous version of [`std::io::repeat`][std]. /// /// [std]: std::io::repeat /// /// # Examples /// /// ``` /// use tokio::io::{self, AsyncReadExt}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut buffer = [0; 3]; /// io::repeat(0b101).read_exact(&mut buffer).await.unwrap(); /// assert_eq!(buffer, [0b101, 0b101, 0b101]); /// # } /// ``` pub fn repeat(byte: u8) -> Repeat { Repeat { byte } } } impl AsyncRead for Repeat { #[inline] fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { ready!(crate::trace::trace_leaf(cx)); ready!(poll_proceed_and_make_progress(cx)); buf.put_bytes(self.byte, buf.remaining()); Poll::Ready(Ok(())) } } #[cfg(test)] mod tests { use super::*; #[test] fn assert_unpin() { crate::is_unpin::<Repeat>(); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/flush.rs
tokio/src/io/util/flush.rs
use crate::io::AsyncWrite; use pin_project_lite::pin_project; use std::future::Future; use std::io; use std::marker::PhantomPinned; use std::pin::Pin; use std::task::{Context, Poll}; pin_project! { /// A future used to fully flush an I/O object. /// /// Created by the [`AsyncWriteExt::flush`][flush] function. /// /// [flush]: crate::io::AsyncWriteExt::flush #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct Flush<'a, A: ?Sized> { a: &'a mut A, // Make this future `!Unpin` for compatibility with async trait methods. #[pin] _pin: PhantomPinned, } } /// Creates a future which will entirely flush an I/O object. pub(super) fn flush<A>(a: &mut A) -> Flush<'_, A> where A: AsyncWrite + Unpin + ?Sized, { Flush { a, _pin: PhantomPinned, } } impl<A> Future for Flush<'_, A> where A: AsyncWrite + Unpin + ?Sized, { type Output = io::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let me = self.project(); Pin::new(&mut *me.a).poll_flush(cx) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/shutdown.rs
tokio/src/io/util/shutdown.rs
use crate::io::AsyncWrite; use pin_project_lite::pin_project; use std::future::Future; use std::io; use std::marker::PhantomPinned; use std::pin::Pin; use std::task::{Context, Poll}; pin_project! { /// A future used to shutdown an I/O object. /// /// Created by the [`AsyncWriteExt::shutdown`][shutdown] function. /// [shutdown]: [`crate::io::AsyncWriteExt::shutdown`] #[must_use = "futures do nothing unless you `.await` or poll them"] #[derive(Debug)] pub struct Shutdown<'a, A: ?Sized> { a: &'a mut A, // Make this future `!Unpin` for compatibility with async trait methods. #[pin] _pin: PhantomPinned, } } /// Creates a future which will shutdown an I/O object. pub(super) fn shutdown<A>(a: &mut A) -> Shutdown<'_, A> where A: AsyncWrite + Unpin + ?Sized, { Shutdown { a, _pin: PhantomPinned, } } impl<A> Future for Shutdown<'_, A> where A: AsyncWrite + Unpin + ?Sized, { type Output = io::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let me = self.project(); Pin::new(me.a).poll_shutdown(cx) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/async_buf_read_ext.rs
tokio/src/io/util/async_buf_read_ext.rs
use crate::io::util::fill_buf::{fill_buf, FillBuf}; use crate::io::util::lines::{lines, Lines}; use crate::io::util::read_line::{read_line, ReadLine}; use crate::io::util::read_until::{read_until, ReadUntil}; use crate::io::util::split::{split, Split}; use crate::io::AsyncBufRead; cfg_io_util! { /// An extension trait which adds utility methods to [`AsyncBufRead`] types. /// /// [`AsyncBufRead`]: crate::io::AsyncBufRead pub trait AsyncBufReadExt: AsyncBufRead { /// Reads all bytes into `buf` until the delimiter `byte` or EOF is reached. /// /// Equivalent to: /// /// ```ignore /// async fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> io::Result<usize>; /// ``` /// /// This function will read bytes from the underlying stream until the /// delimiter or EOF is found. Once found, all bytes up to, and including, /// the delimiter (if found) will be appended to `buf`. /// /// If successful, this function will return the total number of bytes read. /// /// If this function returns `Ok(0)`, the stream has reached EOF. /// /// # Errors /// /// This function will ignore all instances of [`ErrorKind::Interrupted`] and /// will otherwise return any errors returned by [`fill_buf`]. /// /// If an I/O error is encountered then all bytes read so far will be /// present in `buf` and its length will have been adjusted appropriately. /// /// [`fill_buf`]: AsyncBufRead::poll_fill_buf /// [`ErrorKind::Interrupted`]: std::io::ErrorKind::Interrupted /// /// # Cancel safety /// /// If the method is used as the event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, then some data may have been partially read. Any /// partially read bytes are appended to `buf`, and the method can be /// called again to continue reading until `byte`. /// /// This method returns the total number of bytes read. If you cancel /// the call to `read_until` and then call it again to continue reading, /// the counter is reset. 
/// /// # Examples /// /// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. In /// this example, we use [`Cursor`] to read all the bytes in a byte slice /// in hyphen delimited segments: /// /// [`Cursor`]: std::io::Cursor /// /// ``` /// use tokio::io::AsyncBufReadExt; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut cursor = Cursor::new(b"lorem-ipsum"); /// let mut buf = vec![]; /// /// // cursor is at 'l' /// let num_bytes = cursor.read_until(b'-', &mut buf) /// .await /// .expect("reading from cursor won't fail"); /// /// assert_eq!(num_bytes, 6); /// assert_eq!(buf, b"lorem-"); /// buf.clear(); /// /// // cursor is at 'i' /// let num_bytes = cursor.read_until(b'-', &mut buf) /// .await /// .expect("reading from cursor won't fail"); /// /// assert_eq!(num_bytes, 5); /// assert_eq!(buf, b"ipsum"); /// buf.clear(); /// /// // cursor is at EOF /// let num_bytes = cursor.read_until(b'-', &mut buf) /// .await /// .expect("reading from cursor won't fail"); /// assert_eq!(num_bytes, 0); /// assert_eq!(buf, b""); /// # } /// ``` fn read_until<'a>(&'a mut self, byte: u8, buf: &'a mut Vec<u8>) -> ReadUntil<'a, Self> where Self: Unpin, { read_until(self, byte, buf) } /// Reads all bytes until a newline (the 0xA byte) is reached, and append /// them to the provided buffer. /// /// Equivalent to: /// /// ```ignore /// async fn read_line(&mut self, buf: &mut String) -> io::Result<usize>; /// ``` /// /// This function will read bytes from the underlying stream until the /// newline delimiter (the 0xA byte) or EOF is found. Once found, all bytes /// up to, and including, the delimiter (if found) will be appended to /// `buf`. /// /// If successful, this function will return the total number of bytes read. /// /// If this function returns `Ok(0)`, the stream has reached EOF. 
/// /// # Errors /// /// This function has the same error semantics as [`read_until`] and will /// also return an error if the read bytes are not valid UTF-8. If an I/O /// error is encountered then `buf` may contain some bytes already read in /// the event that all data read so far was valid UTF-8. /// /// [`read_until`]: AsyncBufReadExt::read_until /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may have been partially /// read, and this data is lost. There are no guarantees regarding the /// contents of `buf` when the call is cancelled. The current /// implementation replaces `buf` with the empty string, but this may /// change in the future. /// /// This function does not behave like [`read_until`] because of the /// requirement that a string contains only valid utf-8. If you need a /// cancellation safe `read_line`, there are three options: /// /// * Call [`read_until`] with a newline character and manually perform the utf-8 check. /// * The stream returned by [`lines`] has a cancellation safe /// [`next_line`] method. /// * Use [`tokio_util::codec::LinesCodec`][LinesCodec]. /// /// [LinesCodec]: https://docs.rs/tokio-util/latest/tokio_util/codec/struct.LinesCodec.html /// [`read_until`]: Self::read_until /// [`lines`]: Self::lines /// [`next_line`]: crate::io::Lines::next_line /// /// # Examples /// /// [`std::io::Cursor`][`Cursor`] is a type that implements /// `AsyncBufRead`. 
In this example, we use [`Cursor`] to read all the /// lines in a byte slice: /// /// [`Cursor`]: std::io::Cursor /// /// ``` /// use tokio::io::AsyncBufReadExt; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut cursor = Cursor::new(b"foo\nbar"); /// let mut buf = String::new(); /// /// // cursor is at 'f' /// let num_bytes = cursor.read_line(&mut buf) /// .await /// .expect("reading from cursor won't fail"); /// /// assert_eq!(num_bytes, 4); /// assert_eq!(buf, "foo\n"); /// buf.clear(); /// /// // cursor is at 'b' /// let num_bytes = cursor.read_line(&mut buf) /// .await /// .expect("reading from cursor won't fail"); /// /// assert_eq!(num_bytes, 3); /// assert_eq!(buf, "bar"); /// buf.clear(); /// /// // cursor is at EOF /// let num_bytes = cursor.read_line(&mut buf) /// .await /// .expect("reading from cursor won't fail"); /// /// assert_eq!(num_bytes, 0); /// assert_eq!(buf, ""); /// # } /// ``` fn read_line<'a>(&'a mut self, buf: &'a mut String) -> ReadLine<'a, Self> where Self: Unpin, { read_line(self, buf) } /// Returns a stream of the contents of this reader split on the byte /// `byte`. /// /// This method is the asynchronous equivalent to /// [`BufRead::split`](std::io::BufRead::split). /// /// The stream returned from this function will yield instances of /// [`io::Result`]`<`[`Option`]`<`[`Vec<u8>`]`>>`. Each vector returned will *not* have /// the delimiter byte at the end. /// /// [`io::Result`]: std::io::Result /// [`Option`]: core::option::Option /// [`Vec<u8>`]: std::vec::Vec /// /// # Errors /// /// Each item of the stream has the same error semantics as /// [`AsyncBufReadExt::read_until`](AsyncBufReadExt::read_until). 
/// /// # Examples /// /// ``` /// # use tokio::io::AsyncBufRead; /// use tokio::io::AsyncBufReadExt; /// /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> { /// let mut segments = my_buf_read.split(b'f'); /// /// while let Some(segment) = segments.next_segment().await? { /// println!("length = {}", segment.len()) /// } /// # Ok(()) /// # } /// ``` fn split(self, byte: u8) -> Split<Self> where Self: Sized + Unpin, { split(self, byte) } /// Returns the contents of the internal buffer, filling it with more /// data from the inner reader if it is empty. /// /// This function is a lower-level call. It needs to be paired with the /// [`consume`] method to function properly. When calling this method, /// none of the contents will be "read" in the sense that later calling /// `read` may return the same contents. As such, [`consume`] must be /// called with the number of bytes that are consumed from this buffer /// to ensure that the bytes are never returned twice. /// /// An empty buffer returned indicates that the stream has reached EOF. /// /// Equivalent to: /// /// ```ignore /// async fn fill_buf(&mut self) -> io::Result<&[u8]>; /// ``` /// /// # Errors /// /// This function will return an I/O error if the underlying reader was /// read, but returned an error. /// /// # Cancel safety /// /// This method is cancel safe. If you use it as the event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, then it is guaranteed that no data was read. /// /// [`consume`]: crate::io::AsyncBufReadExt::consume fn fill_buf(&mut self) -> FillBuf<'_, Self> where Self: Unpin, { fill_buf(self) } /// Tells this buffer that `amt` bytes have been consumed from the /// buffer, so they should no longer be returned in calls to [`read`]. /// /// This function is a lower-level call. It needs to be paired with the /// [`fill_buf`] method to function properly. 
This function does not /// perform any I/O, it simply informs this object that some amount of /// its buffer, returned from [`fill_buf`], has been consumed and should /// no longer be returned. As such, this function may do odd things if /// [`fill_buf`] isn't called before calling it. /// /// The `amt` must be less than the number of bytes in the buffer /// returned by [`fill_buf`]. /// /// [`read`]: crate::io::AsyncReadExt::read /// [`fill_buf`]: crate::io::AsyncBufReadExt::fill_buf fn consume(&mut self, amt: usize) where Self: Unpin, { std::pin::Pin::new(self).consume(amt); } /// Returns a stream over the lines of this reader. /// This method is the async equivalent to [`BufRead::lines`](std::io::BufRead::lines). /// /// The stream returned from this function will yield instances of /// [`io::Result`]`<`[`Option`]`<`[`String`]`>>`. Each string returned will *not* have a newline /// byte (the 0xA byte) or `CRLF` (0xD, 0xA bytes) at the end. /// /// [`io::Result`]: std::io::Result /// [`Option`]: core::option::Option /// [`String`]: String /// /// # Errors /// /// Each line of the stream has the same error semantics as [`AsyncBufReadExt::read_line`]. /// /// # Examples /// /// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. In /// this example, we use [`Cursor`] to iterate over all the lines in a byte /// slice. 
/// /// [`Cursor`]: std::io::Cursor /// /// ``` /// use tokio::io::AsyncBufReadExt; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let cursor = Cursor::new(b"lorem\nipsum\r\ndolor"); /// /// let mut lines = cursor.lines(); /// /// assert_eq!(lines.next_line().await.unwrap(), Some(String::from("lorem"))); /// assert_eq!(lines.next_line().await.unwrap(), Some(String::from("ipsum"))); /// assert_eq!(lines.next_line().await.unwrap(), Some(String::from("dolor"))); /// assert_eq!(lines.next_line().await.unwrap(), None); /// # } /// ``` /// /// [`AsyncBufReadExt::read_line`]: AsyncBufReadExt::read_line fn lines(self) -> Lines<Self> where Self: Sized, { lines(self) } } } impl<R: AsyncBufRead + ?Sized> AsyncBufReadExt for R {}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/async_write_ext.rs
tokio/src/io/util/async_write_ext.rs
use crate::io::util::flush::{flush, Flush}; use crate::io::util::shutdown::{shutdown, Shutdown}; use crate::io::util::write::{write, Write}; use crate::io::util::write_all::{write_all, WriteAll}; use crate::io::util::write_all_buf::{write_all_buf, WriteAllBuf}; use crate::io::util::write_buf::{write_buf, WriteBuf}; use crate::io::util::write_int::{WriteF32, WriteF32Le, WriteF64, WriteF64Le}; use crate::io::util::write_int::{ WriteI128, WriteI128Le, WriteI16, WriteI16Le, WriteI32, WriteI32Le, WriteI64, WriteI64Le, WriteI8, }; use crate::io::util::write_int::{ WriteU128, WriteU128Le, WriteU16, WriteU16Le, WriteU32, WriteU32Le, WriteU64, WriteU64Le, WriteU8, }; use crate::io::util::write_vectored::{write_vectored, WriteVectored}; use crate::io::AsyncWrite; use std::io::IoSlice; use bytes::Buf; cfg_io_util! { /// Defines numeric writer. macro_rules! write_impl { ( $( $(#[$outer:meta])* fn $name:ident(&mut self, n: $ty:ty) -> $($fut:ident)*; )* ) => { $( $(#[$outer])* fn $name(&mut self, n: $ty) -> $($fut)*<&mut Self> where Self: Unpin { $($fut)*::new(self, n) } )* } } /// Writes bytes to a sink. /// /// Implemented as an extension trait, adding utility methods to all /// [`AsyncWrite`] types. Callers will tend to import this trait instead of /// [`AsyncWrite`]. /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::io::{self, AsyncWriteExt}; /// use tokio::fs::File; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let data = b"some bytes"; /// /// let mut pos = 0; /// let mut buffer = File::create("foo.txt").await?; /// /// while pos < data.len() { /// let bytes_written = buffer.write(&data[pos..]).await?; /// pos += bytes_written; /// } /// /// Ok(()) /// } /// # } /// ``` /// /// See [module][crate::io] documentation for more details. /// /// [`AsyncWrite`]: AsyncWrite pub trait AsyncWriteExt: AsyncWrite { /// Writes a buffer into this writer, returning how many bytes were /// written. 
/// /// Equivalent to: /// /// ```ignore /// async fn write(&mut self, buf: &[u8]) -> io::Result<usize>; /// ``` /// /// This function will attempt to write the entire contents of `buf`, but /// the entire write may not succeed, or the write may also generate an /// error. A call to `write` represents *at most one* attempt to write to /// any wrapped object. /// /// # Return /// /// If the return value is `Ok(n)` then it must be guaranteed that `n <= /// buf.len()`. A return value of `0` typically means that the /// underlying object is no longer able to accept bytes and will likely /// not be able to in the future as well, or that the buffer provided is /// empty. /// /// # Errors /// /// Each call to `write` may generate an I/O error indicating that the /// operation could not be completed. If an error is returned then no bytes /// in the buffer were written to this writer. /// /// It is **not** considered an error if the entire buffer could not be /// written to this writer. /// /// # Cancel safety /// /// This method is cancellation safe in the sense that if it is used as /// the event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then it is guaranteed that no data was /// written to this `AsyncWrite`. /// /// # Examples /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::io::{self, AsyncWriteExt}; /// use tokio::fs::File; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut file = File::create("foo.txt").await?; /// /// // Writes some prefix of the byte string, not necessarily all of it. /// file.write(b"some bytes").await?; /// file.flush().await?; /// Ok(()) /// } /// # } /// ``` fn write<'a>(&'a mut self, src: &'a [u8]) -> Write<'a, Self> where Self: Unpin, { write(self, src) } /// Like [`write`], except that it writes from a slice of buffers. 
/// /// Equivalent to: /// /// ```ignore /// async fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize>; /// ``` /// /// See [`AsyncWrite::poll_write_vectored`] for more details. /// /// # Cancel safety /// /// This method is cancellation safe in the sense that if it is used as /// the event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then it is guaranteed that no data was /// written to this `AsyncWrite`. /// /// # Examples /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::io::{self, AsyncWriteExt}; /// use tokio::fs::File; /// use std::io::IoSlice; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut file = File::create("foo.txt").await?; /// /// let bufs: &[_] = &[ /// IoSlice::new(b"hello"), /// IoSlice::new(b" "), /// IoSlice::new(b"world"), /// ]; /// /// file.write_vectored(&bufs).await?; /// file.flush().await?; /// /// Ok(()) /// } /// # } /// ``` /// /// [`write`]: AsyncWriteExt::write fn write_vectored<'a, 'b>(&'a mut self, bufs: &'a [IoSlice<'b>]) -> WriteVectored<'a, 'b, Self> where Self: Unpin, { write_vectored(self, bufs) } /// Writes a buffer into this writer, advancing the buffer's internal /// cursor. /// /// Equivalent to: /// /// ```ignore /// async fn write_buf<B: Buf>(&mut self, buf: &mut B) -> io::Result<usize>; /// ``` /// /// This function will attempt to write the entire contents of `buf`, but /// the entire write may not succeed, or the write may also generate an /// error. After the operation completes, the buffer's /// internal cursor is advanced by the number of bytes written. A /// subsequent call to `write_buf` using the **same** `buf` value will /// resume from the point that the first call to `write_buf` completed. /// A call to `write_buf` represents *at most one* attempt to write to any /// wrapped object. 
/// /// # Return /// /// If the return value is `Ok(n)` then it must be guaranteed that `n <= /// buf.len()`. A return value of `0` typically means that the /// underlying object is no longer able to accept bytes and will likely /// not be able to in the future as well, or that the buffer provided is /// empty. /// /// # Errors /// /// Each call to `write` may generate an I/O error indicating that the /// operation could not be completed. If an error is returned then no bytes /// in the buffer were written to this writer. /// /// It is **not** considered an error if the entire buffer could not be /// written to this writer. /// /// # Cancel safety /// /// This method is cancellation safe in the sense that if it is used as /// the event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then it is guaranteed that no data was /// written to this `AsyncWrite`. /// /// # Examples /// /// [`File`] implements [`AsyncWrite`] and [`Cursor`]`<&[u8]>` implements [`Buf`]: /// /// [`File`]: crate::fs::File /// [`Buf`]: bytes::Buf /// [`Cursor`]: std::io::Cursor /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::io::{self, AsyncWriteExt}; /// use tokio::fs::File; /// /// use bytes::Buf; /// use std::io::Cursor; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut file = File::create("foo.txt").await?; /// let mut buffer = Cursor::new(b"data to write"); /// /// // Loop until the entire contents of the buffer are written to /// // the file. /// while buffer.has_remaining() { /// // Writes some prefix of the byte string, not necessarily /// // all of it. /// file.write_buf(&mut buffer).await?; /// } /// file.flush().await?; /// /// Ok(()) /// } /// # } /// ``` fn write_buf<'a, B>(&'a mut self, src: &'a mut B) -> WriteBuf<'a, Self, B> where Self: Sized + Unpin, B: Buf, { write_buf(self, src) } /// Attempts to write an entire buffer into this writer. 
/// /// Equivalent to: /// /// ```ignore /// async fn write_all_buf(&mut self, buf: impl Buf) -> Result<(), io::Error> { /// while buf.has_remaining() { /// self.write_buf(&mut buf).await?; /// } /// Ok(()) /// } /// ``` /// /// This method will continuously call [`write`] until /// [`buf.has_remaining()`](bytes::Buf::has_remaining) returns false. This method will not /// return until the entire buffer has been successfully written or an error occurs. The /// first error generated will be returned. /// /// The buffer is advanced after each chunk is successfully written. After failure, /// `src.chunk()` will return the chunk that failed to write. /// /// # Cancel safety /// /// If `write_all_buf` is used as the event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, then the data in the provided buffer may have been /// partially written. However, it is guaranteed that the provided /// buffer has been [advanced] by the amount of bytes that have been /// partially written. /// /// # Examples /// /// [`File`] implements [`AsyncWrite`] and [`Cursor`]`<&[u8]>` implements [`Buf`]: /// /// [`File`]: crate::fs::File /// [`Buf`]: bytes::Buf /// [`Cursor`]: std::io::Cursor /// [advanced]: bytes::Buf::advance /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::io::{self, AsyncWriteExt}; /// use tokio::fs::File; /// /// use std::io::Cursor; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut file = File::create("foo.txt").await?; /// let mut buffer = Cursor::new(b"data to write"); /// /// file.write_all_buf(&mut buffer).await?; /// file.flush().await?; /// Ok(()) /// } /// # } /// ``` /// /// [`write`]: AsyncWriteExt::write fn write_all_buf<'a, B>(&'a mut self, src: &'a mut B) -> WriteAllBuf<'a, Self, B> where Self: Sized + Unpin, B: Buf, { write_all_buf(self, src) } /// Attempts to write an entire buffer into this writer. 
/// /// Equivalent to: /// /// ```ignore /// async fn write_all(&mut self, buf: &[u8]) -> io::Result<()>; /// ``` /// /// This method will continuously call [`write`] until there is no more data /// to be written. This method will not return until the entire buffer /// has been successfully written or such an error occurs. The first /// error generated from this method will be returned. /// /// # Cancel safety /// /// This method is not cancellation safe. If it is used as the event /// in a [`tokio::select!`](crate::select) statement and some other /// branch completes first, then the provided buffer may have been /// partially written, but future calls to `write_all` will start over /// from the beginning of the buffer. /// /// # Errors /// /// This function will return the first error that [`write`] returns. /// /// # Examples /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::io::{self, AsyncWriteExt}; /// use tokio::fs::File; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut file = File::create("foo.txt").await?; /// /// file.write_all(b"some bytes").await?; /// file.flush().await?; /// Ok(()) /// } /// # } /// ``` /// /// [`write`]: AsyncWriteExt::write fn write_all<'a>(&'a mut self, src: &'a [u8]) -> WriteAll<'a, Self> where Self: Unpin, { write_all(self, src) } write_impl! { /// Writes an unsigned 8-bit integer to the underlying writer. /// /// Equivalent to: /// /// ```ignore /// async fn write_u8(&mut self, n: u8) -> io::Result<()>; /// ``` /// /// It is recommended to use a buffered writer to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncWriteExt::write_all`]. 
/// /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all /// /// # Examples /// /// Write unsigned 8 bit integers to a `AsyncWrite`: /// /// ```rust /// use tokio::io::{self, AsyncWriteExt}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut writer = Vec::new(); /// /// writer.write_u8(2).await?; /// writer.write_u8(5).await?; /// /// assert_eq!(writer, b"\x02\x05"); /// Ok(()) /// # } /// ``` fn write_u8(&mut self, n: u8) -> WriteU8; /// Writes a signed 8-bit integer to the underlying writer. /// /// Equivalent to: /// /// ```ignore /// async fn write_i8(&mut self, n: i8) -> io::Result<()>; /// ``` /// /// It is recommended to use a buffered writer to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncWriteExt::write_all`]. /// /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all /// /// # Examples /// /// Write signed 8 bit integers to a `AsyncWrite`: /// /// ```rust /// use tokio::io::{self, AsyncWriteExt}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut writer = Vec::new(); /// /// writer.write_i8(-2).await?; /// writer.write_i8(126).await?; /// /// assert_eq!(writer, b"\xFE\x7E"); /// Ok(()) /// # } /// ``` fn write_i8(&mut self, n: i8) -> WriteI8; /// Writes an unsigned 16-bit integer in big-endian order to the /// underlying writer. /// /// Equivalent to: /// /// ```ignore /// async fn write_u16(&mut self, n: u16) -> io::Result<()>; /// ``` /// /// It is recommended to use a buffered writer to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncWriteExt::write_all`]. 
/// /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all /// /// # Examples /// /// Write unsigned 16-bit integers to a `AsyncWrite`: /// /// ```rust /// use tokio::io::{self, AsyncWriteExt}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut writer = Vec::new(); /// /// writer.write_u16(517).await?; /// writer.write_u16(768).await?; /// /// assert_eq!(writer, b"\x02\x05\x03\x00"); /// Ok(()) /// # } /// ``` fn write_u16(&mut self, n: u16) -> WriteU16; /// Writes a signed 16-bit integer in big-endian order to the /// underlying writer. /// /// Equivalent to: /// /// ```ignore /// async fn write_i16(&mut self, n: i16) -> io::Result<()>; /// ``` /// /// It is recommended to use a buffered writer to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncWriteExt::write_all`]. /// /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all /// /// # Examples /// /// Write signed 16-bit integers to a `AsyncWrite`: /// /// ```rust /// use tokio::io::{self, AsyncWriteExt}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut writer = Vec::new(); /// /// writer.write_i16(193).await?; /// writer.write_i16(-132).await?; /// /// assert_eq!(writer, b"\x00\xc1\xff\x7c"); /// Ok(()) /// # } /// ``` fn write_i16(&mut self, n: i16) -> WriteI16; /// Writes an unsigned 32-bit integer in big-endian order to the /// underlying writer. /// /// Equivalent to: /// /// ```ignore /// async fn write_u32(&mut self, n: u32) -> io::Result<()>; /// ``` /// /// It is recommended to use a buffered writer to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncWriteExt::write_all`]. 
/// /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all /// /// # Examples /// /// Write unsigned 32-bit integers to a `AsyncWrite`: /// /// ```rust /// use tokio::io::{self, AsyncWriteExt}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut writer = Vec::new(); /// /// writer.write_u32(267).await?; /// writer.write_u32(1205419366).await?; /// /// assert_eq!(writer, b"\x00\x00\x01\x0b\x47\xd9\x3d\x66"); /// Ok(()) /// # } /// ``` fn write_u32(&mut self, n: u32) -> WriteU32; /// Writes a signed 32-bit integer in big-endian order to the /// underlying writer. /// /// Equivalent to: /// /// ```ignore /// async fn write_i32(&mut self, n: i32) -> io::Result<()>; /// ``` /// /// It is recommended to use a buffered writer to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncWriteExt::write_all`]. /// /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all /// /// # Examples /// /// Write signed 32-bit integers to a `AsyncWrite`: /// /// ```rust /// use tokio::io::{self, AsyncWriteExt}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut writer = Vec::new(); /// /// writer.write_i32(267).await?; /// writer.write_i32(1205419366).await?; /// /// assert_eq!(writer, b"\x00\x00\x01\x0b\x47\xd9\x3d\x66"); /// Ok(()) /// # } /// ``` fn write_i32(&mut self, n: i32) -> WriteI32; /// Writes an unsigned 64-bit integer in big-endian order to the /// underlying writer. /// /// Equivalent to: /// /// ```ignore /// async fn write_u64(&mut self, n: u64) -> io::Result<()>; /// ``` /// /// It is recommended to use a buffered writer to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncWriteExt::write_all`]. 
/// /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all /// /// # Examples /// /// Write unsigned 64-bit integers to a `AsyncWrite`: /// /// ```rust /// use tokio::io::{self, AsyncWriteExt}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut writer = Vec::new(); /// /// writer.write_u64(918733457491587).await?; /// writer.write_u64(143).await?; /// /// assert_eq!(writer, b"\x00\x03\x43\x95\x4d\x60\x86\x83\x00\x00\x00\x00\x00\x00\x00\x8f"); /// Ok(()) /// # } /// ``` fn write_u64(&mut self, n: u64) -> WriteU64; /// Writes an signed 64-bit integer in big-endian order to the /// underlying writer. /// /// Equivalent to: /// /// ```ignore /// async fn write_i64(&mut self, n: i64) -> io::Result<()>; /// ``` /// /// It is recommended to use a buffered writer to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncWriteExt::write_all`]. /// /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all /// /// # Examples /// /// Write signed 64-bit integers to a `AsyncWrite`: /// /// ```rust /// use tokio::io::{self, AsyncWriteExt}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut writer = Vec::new(); /// /// writer.write_i64(i64::MIN).await?; /// writer.write_i64(i64::MAX).await?; /// /// assert_eq!(writer, b"\x80\x00\x00\x00\x00\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff"); /// Ok(()) /// # } /// ``` fn write_i64(&mut self, n: i64) -> WriteI64; /// Writes an unsigned 128-bit integer in big-endian order to the /// underlying writer. /// /// Equivalent to: /// /// ```ignore /// async fn write_u128(&mut self, n: u128) -> io::Result<()>; /// ``` /// /// It is recommended to use a buffered writer to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncWriteExt::write_all`]. 
/// /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all /// /// # Examples /// /// Write unsigned 128-bit integers to a `AsyncWrite`: /// /// ```rust /// use tokio::io::{self, AsyncWriteExt}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut writer = Vec::new(); /// /// writer.write_u128(16947640962301618749969007319746179).await?; /// /// assert_eq!(writer, vec![ /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83, /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 /// ]); /// Ok(()) /// # } /// ``` fn write_u128(&mut self, n: u128) -> WriteU128; /// Writes an signed 128-bit integer in big-endian order to the /// underlying writer. /// /// Equivalent to: /// /// ```ignore /// async fn write_i128(&mut self, n: i128) -> io::Result<()>; /// ``` /// /// It is recommended to use a buffered writer to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncWriteExt::write_all`]. /// /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all /// /// # Examples /// /// Write signed 128-bit integers to a `AsyncWrite`: /// /// ```rust /// use tokio::io::{self, AsyncWriteExt}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut writer = Vec::new(); /// /// writer.write_i128(i128::MIN).await?; /// /// assert_eq!(writer, vec![ /// 0x80, 0, 0, 0, 0, 0, 0, 0, /// 0, 0, 0, 0, 0, 0, 0, 0 /// ]); /// Ok(()) /// # } /// ``` fn write_i128(&mut self, n: i128) -> WriteI128; /// Writes an 32-bit floating point type in big-endian order to the /// underlying writer. /// /// Equivalent to: /// /// ```ignore /// async fn write_f32(&mut self, n: f32) -> io::Result<()>; /// ``` /// /// It is recommended to use a buffered writer to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncWriteExt::write_all`]. 
/// /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all /// /// # Examples /// /// Write 32-bit floating point type to a `AsyncWrite`: /// /// ```rust /// use tokio::io::{self, AsyncWriteExt}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut writer = Vec::new(); /// /// writer.write_f32(f32::MIN).await?; /// /// assert_eq!(writer, vec![0xff, 0x7f, 0xff, 0xff]); /// Ok(()) /// # } /// ``` fn write_f32(&mut self, n: f32) -> WriteF32; /// Writes an 64-bit floating point type in big-endian order to the /// underlying writer. /// /// Equivalent to: /// /// ```ignore /// async fn write_f64(&mut self, n: f64) -> io::Result<()>; /// ``` /// /// It is recommended to use a buffered writer to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncWriteExt::write_all`]. /// /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all /// /// # Examples /// /// Write 64-bit floating point type to a `AsyncWrite`: /// /// ```rust /// use tokio::io::{self, AsyncWriteExt}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut writer = Vec::new(); /// /// writer.write_f64(f64::MIN).await?; /// /// assert_eq!(writer, vec![ /// 0xff, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff /// ]); /// Ok(()) /// # } /// ``` fn write_f64(&mut self, n: f64) -> WriteF64; /// Writes an unsigned 16-bit integer in little-endian order to the /// underlying writer. /// /// Equivalent to: /// /// ```ignore /// async fn write_u16_le(&mut self, n: u16) -> io::Result<()>; /// ``` /// /// It is recommended to use a buffered writer to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncWriteExt::write_all`]. 
/// /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all /// /// # Examples /// /// Write unsigned 16-bit integers to a `AsyncWrite`: /// /// ```rust /// use tokio::io::{self, AsyncWriteExt}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut writer = Vec::new(); /// /// writer.write_u16_le(517).await?; /// writer.write_u16_le(768).await?; /// /// assert_eq!(writer, b"\x05\x02\x00\x03"); /// Ok(()) /// # } /// ``` fn write_u16_le(&mut self, n: u16) -> WriteU16Le; /// Writes a signed 16-bit integer in little-endian order to the /// underlying writer. /// /// Equivalent to: /// /// ```ignore /// async fn write_i16_le(&mut self, n: i16) -> io::Result<()>; /// ``` /// /// It is recommended to use a buffered writer to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncWriteExt::write_all`]. /// /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all /// /// # Examples /// /// Write signed 16-bit integers to a `AsyncWrite`: /// /// ```rust /// use tokio::io::{self, AsyncWriteExt}; /// /// # #[tokio::main(flavor = "current_thread")]
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
true
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/write.rs
tokio/src/io/util/write.rs
use crate::io::AsyncWrite;

use pin_project_lite::pin_project;
use std::future::Future;
use std::io;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{Context, Poll};

pin_project! {
    /// A future to write some of the buffer to an `AsyncWrite`.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct Write<'a, W: ?Sized> {
        // Destination writer. `W: Unpin` is required by the impls below, so a
        // plain mutable reference suffices.
        writer: &'a mut W,
        // The bytes to write; a single poll may accept only a prefix of them.
        buf: &'a [u8],
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}

/// Tries to write some bytes from the given `buf` to the writer in an
/// asynchronous manner, returning a future.
///
/// The future resolves with the result of a single `poll_write` call, i.e. the
/// number of bytes accepted, which may be less than `buf.len()`.
pub(crate) fn write<'a, W>(writer: &'a mut W, buf: &'a [u8]) -> Write<'a, W>
where
    W: AsyncWrite + Unpin + ?Sized,
{
    Write {
        writer,
        buf,
        _pin: PhantomPinned,
    }
}

impl<W> Future for Write<'_, W>
where
    W: AsyncWrite + Unpin + ?Sized,
{
    type Output = io::Result<usize>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
        let me = self.project();
        // Delegate straight to the writer; `W: Unpin` makes re-pinning the
        // mutable reference sound.
        Pin::new(&mut *me.writer).poll_write(cx, me.buf)
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/read_int.rs
tokio/src/io/util/read_int.rs
use crate::io::{AsyncRead, ReadBuf};

use bytes::Buf;
use pin_project_lite::pin_project;
use std::future::Future;
use std::io;
use std::io::ErrorKind::UnexpectedEof;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{Context, Poll};

// Generates a future that reads a fixed number of bytes and decodes them into
// an integer/float via the given `bytes::Buf` getter (e.g. `get_u16`).
macro_rules! reader {
    ($name:ident, $ty:ty, $reader:ident) => {
        // Default the byte count to the size of the target type.
        reader!($name, $ty, $reader, std::mem::size_of::<$ty>());
    };
    ($name:ident, $ty:ty, $reader:ident, $bytes:expr) => {
        pin_project! {
            #[doc(hidden)]
            #[must_use = "futures do nothing unless you `.await` or poll them"]
            pub struct $name<R> {
                #[pin]
                src: R,
                // Accumulates the raw bytes across possibly-partial reads.
                buf: [u8; $bytes],
                // Number of bytes of `buf` filled so far. All invocations
                // below use at most 16 bytes (u128/i128), so `u8` suffices.
                read: u8,
                // Make this future `!Unpin` for compatibility with async trait methods.
                #[pin]
                _pin: PhantomPinned,
            }
        }

        impl<R> $name<R> {
            pub(crate) fn new(src: R) -> Self {
                $name {
                    src,
                    buf: [0; $bytes],
                    read: 0,
                    _pin: PhantomPinned,
                }
            }
        }

        impl<R> Future for $name<R>
        where
            R: AsyncRead,
        {
            type Output = io::Result<$ty>;

            fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
                let mut me = self.project();

                // Already complete (e.g. polled again after finishing):
                // decode the buffered bytes immediately.
                if *me.read == $bytes as u8 {
                    return Poll::Ready(Ok(Buf::$reader(&mut &me.buf[..])));
                }

                // Keep issuing reads into the remaining tail of `buf` until
                // it is full; a zero-byte read means premature EOF.
                while *me.read < $bytes as u8 {
                    let mut buf = ReadBuf::new(&mut me.buf[*me.read as usize..]);

                    *me.read += match me.src.as_mut().poll_read(cx, &mut buf) {
                        Poll::Pending => return Poll::Pending,
                        Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())),
                        Poll::Ready(Ok(())) => {
                            let n = buf.filled().len();
                            if n == 0 {
                                return Poll::Ready(Err(UnexpectedEof.into()));
                            }

                            n as u8
                        }
                    };
                }

                let num = Buf::$reader(&mut &me.buf[..]);

                Poll::Ready(Ok(num))
            }
        }
    };
}

// Specialized single-byte variant: no accumulation state is needed because a
// one-byte read either completes or makes no progress.
macro_rules! reader8 {
    ($name:ident, $ty:ty) => {
        pin_project! {
            /// Future returned from `read_u8`
            #[doc(hidden)]
            #[must_use = "futures do nothing unless you `.await` or poll them"]
            pub struct $name<R> {
                #[pin]
                reader: R,
                // Make this future `!Unpin` for compatibility with async trait methods.
                #[pin]
                _pin: PhantomPinned,
            }
        }

        impl<R> $name<R> {
            pub(crate) fn new(reader: R) -> $name<R> {
                $name {
                    reader,
                    _pin: PhantomPinned,
                }
            }
        }

        impl<R> Future for $name<R>
        where
            R: AsyncRead,
        {
            type Output = io::Result<$ty>;

            fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
                let me = self.project();

                // A fresh one-byte stack buffer per poll; nothing to carry over.
                let mut buf = [0; 1];
                let mut buf = ReadBuf::new(&mut buf);

                match me.reader.poll_read(cx, &mut buf) {
                    Poll::Pending => Poll::Pending,
                    Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())),
                    Poll::Ready(Ok(())) => {
                        // A successful read of zero bytes is EOF.
                        if buf.filled().len() == 0 {
                            return Poll::Ready(Err(UnexpectedEof.into()));
                        }

                        Poll::Ready(Ok(buf.filled()[0] as $ty))
                    }
                }
            }
        }
    };
}

reader8!(ReadU8, u8);
reader8!(ReadI8, i8);

reader!(ReadU16, u16, get_u16);
reader!(ReadU32, u32, get_u32);
reader!(ReadU64, u64, get_u64);
reader!(ReadU128, u128, get_u128);

reader!(ReadI16, i16, get_i16);
reader!(ReadI32, i32, get_i32);
reader!(ReadI64, i64, get_i64);
reader!(ReadI128, i128, get_i128);

reader!(ReadF32, f32, get_f32);
reader!(ReadF64, f64, get_f64);

reader!(ReadU16Le, u16, get_u16_le);
reader!(ReadU32Le, u32, get_u32_le);
reader!(ReadU64Le, u64, get_u64_le);
reader!(ReadU128Le, u128, get_u128_le);

reader!(ReadI16Le, i16, get_i16_le);
reader!(ReadI32Le, i32, get_i32_le);
reader!(ReadI64Le, i64, get_i64_le);
reader!(ReadI128Le, i128, get_i128_le);

reader!(ReadF32Le, f32, get_f32_le);
reader!(ReadF64Le, f64, get_f64_le);
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/empty.rs
tokio/src/io/util/empty.rs
use crate::io::util::poll_proceed_and_make_progress;
use crate::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, ReadBuf};

use std::fmt;
use std::io::{self, SeekFrom};
use std::pin::Pin;
use std::task::{ready, Context, Poll};

cfg_io_util! {
    /// `Empty` ignores any data written via [`AsyncWrite`], and will always be empty
    /// (returning zero bytes) when read via [`AsyncRead`].
    ///
    /// This struct is generally created by calling [`empty`]. Please see
    /// the documentation of [`empty()`][`empty`] for more details.
    ///
    /// This is an asynchronous version of [`std::io::empty`][std].
    ///
    /// [`empty`]: fn@empty
    /// [std]: std::io::empty
    pub struct Empty {
        // Zero-sized marker field; keeps the struct non-constructible outside
        // this module while carrying no data.
        _p: (),
    }

    /// Creates a value that is always at EOF for reads, and ignores all data written.
    ///
    /// All writes on the returned instance will return `Poll::Ready(Ok(buf.len()))`
    /// and the contents of the buffer will not be inspected.
    ///
    /// All reads from the returned instance will return `Poll::Ready(Ok(0))`.
    ///
    /// This is an asynchronous version of [`std::io::empty`][std].
    ///
    /// [std]: std::io::empty
    ///
    /// # Examples
    ///
    /// A slightly sad example of not reading anything into a buffer:
    ///
    /// ```
    /// use tokio::io::{self, AsyncReadExt};
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let mut buffer = String::new();
    /// io::empty().read_to_string(&mut buffer).await.unwrap();
    /// assert!(buffer.is_empty());
    /// # }
    /// ```
    ///
    /// A convoluted way of getting the length of a buffer:
    ///
    /// ```
    /// use tokio::io::{self, AsyncWriteExt};
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let buffer = vec![1, 2, 3, 5, 8];
    /// let num_bytes = io::empty().write(&buffer).await.unwrap();
    /// assert_eq!(num_bytes, 5);
    /// # }
    /// ```
    pub fn empty() -> Empty {
        Empty { _p: () }
    }
}

impl AsyncRead for Empty {
    #[inline]
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        _: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // Trace/coop hooks run first; either may return `Pending` to yield.
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        // Leaving the buffer untouched signals EOF (zero bytes read).
        Poll::Ready(Ok(()))
    }
}

impl AsyncBufRead for Empty {
    #[inline]
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx))
        ;
        // Always at EOF: the readable slice is empty.
        Poll::Ready(Ok(&[]))
    }

    #[inline]
    fn consume(self: Pin<&mut Self>, _: usize) {}
}

impl AsyncWrite for Empty {
    #[inline]
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        // Pretend the whole buffer was written without inspecting it.
        Poll::Ready(Ok(buf.len()))
    }

    #[inline]
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        Poll::Ready(Ok(()))
    }

    #[inline]
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        Poll::Ready(Ok(()))
    }

    #[inline]
    fn is_write_vectored(&self) -> bool {
        true
    }

    #[inline]
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[io::IoSlice<'_>],
    ) -> Poll<Result<usize, io::Error>> {
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        // Report every slice as fully written.
        let num_bytes = bufs.iter().map(|b| b.len()).sum();
        Poll::Ready(Ok(num_bytes))
    }
}

impl AsyncSeek for Empty {
    #[inline]
    fn start_seek(self: Pin<&mut Self>, _position: SeekFrom) -> io::Result<()> {
        Ok(())
    }

    #[inline]
    fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        // The stream is empty, so every seek lands at position 0.
        Poll::Ready(Ok(0))
    }
}

impl fmt::Debug for Empty {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("Empty { .. }")
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn assert_unpin() {
        crate::is_unpin::<Empty>();
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/take.rs
tokio/src/io/util/take.rs
use crate::io::{AsyncBufRead, AsyncRead, ReadBuf};

use pin_project_lite::pin_project;
use std::convert::TryFrom;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
use std::{cmp, io};

pin_project! {
    /// Stream for the [`take`](super::AsyncReadExt::take) method.
    #[derive(Debug)]
    #[must_use = "streams do nothing unless you `.await` or poll them"]
    #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
    pub struct Take<R> {
        #[pin]
        inner: R,
        // Add '_' to avoid conflicts with `limit` method.
        limit_: u64,
    }
}

pub(super) fn take<R: AsyncRead>(inner: R, limit: u64) -> Take<R> {
    Take {
        inner,
        limit_: limit,
    }
}

impl<R: AsyncRead> Take<R> {
    /// Returns the remaining number of bytes that can be
    /// read before this instance will return EOF.
    ///
    /// # Note
    ///
    /// This instance may reach `EOF` after reading fewer bytes than indicated by
    /// this method if the underlying [`AsyncRead`] instance reaches EOF.
    pub fn limit(&self) -> u64 {
        self.limit_
    }

    /// Sets the number of bytes that can be read before this instance will
    /// return EOF. This is the same as constructing a new `Take` instance, so
    /// the amount of bytes read and the previous limit value don't matter when
    /// calling this method.
    pub fn set_limit(&mut self, limit: u64) {
        self.limit_ = limit;
    }

    /// Gets a reference to the underlying reader.
    pub fn get_ref(&self) -> &R {
        &self.inner
    }

    /// Gets a mutable reference to the underlying reader.
    ///
    /// Care should be taken to avoid modifying the internal I/O state of the
    /// underlying reader as doing so may corrupt the internal limit of this
    /// `Take`.
    pub fn get_mut(&mut self) -> &mut R {
        &mut self.inner
    }

    /// Gets a pinned mutable reference to the underlying reader.
    ///
    /// Care should be taken to avoid modifying the internal I/O state of the
    /// underlying reader as doing so may corrupt the internal limit of this
    /// `Take`.
    pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> {
        self.project().inner
    }

    /// Consumes the `Take`, returning the wrapped reader.
    pub fn into_inner(self) -> R {
        self.inner
    }
}

impl<R: AsyncRead> AsyncRead for Take<R> {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<Result<(), io::Error>> {
        // Limit exhausted: report EOF without touching the inner reader.
        if self.limit_ == 0 {
            return Poll::Ready(Ok(()));
        }

        let me = self.project();
        // Read through a sub-buffer capped at the remaining limit so the
        // inner reader cannot overshoot it.
        let mut b = buf.take(usize::try_from(*me.limit_).unwrap_or(usize::MAX));

        let buf_ptr = b.filled().as_ptr();
        ready!(me.inner.poll_read(cx, &mut b))?;
        // Guard against a misbehaving reader swapping out the buffer.
        assert_eq!(b.filled().as_ptr(), buf_ptr);

        let n = b.filled().len();

        // We need to update the original ReadBuf
        // SAFETY: `b` was created from `buf` via `take`, so it aliases `buf`'s
        // unfilled region; the inner reader filled (and thus initialized) `n`
        // bytes of it, and the pointer assertion above confirms it wrote into
        // that same storage.
        unsafe {
            buf.assume_init(n);
        }
        buf.advance(n);
        *me.limit_ -= n as u64;

        Poll::Ready(Ok(()))
    }
}

impl<R: AsyncBufRead> AsyncBufRead for Take<R> {
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        let me = self.project();

        // Don't call into inner reader at all at EOF because it may still block
        if *me.limit_ == 0 {
            return Poll::Ready(Ok(&[]));
        }

        let buf = ready!(me.inner.poll_fill_buf(cx)?);
        // Expose at most the remaining limit's worth of buffered bytes.
        let cap = cmp::min(buf.len() as u64, *me.limit_) as usize;
        Poll::Ready(Ok(&buf[..cap]))
    }

    fn consume(self: Pin<&mut Self>, amt: usize) {
        let me = self.project();
        // Don't let callers reset the limit by passing an overlarge value
        let amt = cmp::min(amt as u64, *me.limit_) as usize;
        *me.limit_ -= amt as u64;
        me.inner.consume(amt);
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn assert_unpin() {
        crate::is_unpin::<Take<()>>();
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/read_to_string.rs
tokio/src/io/util/read_to_string.rs
use crate::io::util::read_line::finish_string_read;
use crate::io::util::read_to_end::read_to_end_internal;
use crate::io::util::vec_with_initialized::VecWithInitialized;
use crate::io::AsyncRead;

use pin_project_lite::pin_project;
use std::future::Future;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
use std::{io, mem};

pin_project! {
    /// Future for the [`read_to_string`](super::AsyncReadExt::read_to_string) method.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct ReadToString<'a, R: ?Sized> {
        reader: &'a mut R,
        // This is the buffer we were provided. It will be replaced with an empty string
        // while reading to postpone utf-8 handling until after reading.
        output: &'a mut String,
        // The actual allocation of the string is moved into this vector instead.
        buf: VecWithInitialized<Vec<u8>>,
        // The number of bytes appended to buf. This can be less than buf.len() if
        // the buffer was not empty when the operation was started.
        read: usize,
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}

pub(crate) fn read_to_string<'a, R>(
    reader: &'a mut R,
    string: &'a mut String,
) -> ReadToString<'a, R>
where
    R: AsyncRead + ?Sized + Unpin,
{
    // Steal the caller's allocation (leaving `string` empty) so reads can
    // append raw bytes without re-validating UTF-8 on every poll.
    let buf = mem::take(string).into_bytes();
    ReadToString {
        reader,
        buf: VecWithInitialized::new(buf),
        output: string,
        read: 0,
        _pin: PhantomPinned,
    }
}

/// Runs the byte-level read to completion, then performs a single UTF-8
/// validation and hands both results to `finish_string_read`, which restores
/// `output` appropriately on success or failure.
fn read_to_string_internal<R: AsyncRead + ?Sized>(
    reader: Pin<&mut R>,
    output: &mut String,
    buf: &mut VecWithInitialized<Vec<u8>>,
    read: &mut usize,
    cx: &mut Context<'_>,
) -> Poll<io::Result<usize>> {
    let io_res = ready!(read_to_end_internal(buf, reader, read, cx));
    let utf8_res = String::from_utf8(buf.take());

    // At this point both buf and output are empty. The allocation is in utf8_res.
    debug_assert!(buf.is_empty());
    debug_assert!(output.is_empty());
    finish_string_read(io_res, utf8_res, *read, output, true)
}

impl<A> Future for ReadToString<'_, A>
where
    A: AsyncRead + ?Sized + Unpin,
{
    type Output = io::Result<usize>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let me = self.project();

        read_to_string_internal(Pin::new(*me.reader), me.output, me.buf, me.read, cx)
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/write_vectored.rs
tokio/src/io/util/write_vectored.rs
use crate::io::AsyncWrite;

use pin_project_lite::pin_project;
use std::io;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{future::Future, io::IoSlice};

pin_project! {
    /// A future to write a slice of buffers to an `AsyncWrite`.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct WriteVectored<'a, 'b, W: ?Sized> {
        writer: &'a mut W,
        // Borrowed I/O slices; `'b` is the lifetime of the underlying byte
        // buffers, independent of the borrow of the writer.
        bufs: &'a [IoSlice<'b>],
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}

/// Returns a future that writes the given slices to the writer via a single
/// `poll_write_vectored` call; it resolves with the number of bytes accepted.
pub(crate) fn write_vectored<'a, 'b, W>(
    writer: &'a mut W,
    bufs: &'a [IoSlice<'b>],
) -> WriteVectored<'a, 'b, W>
where
    W: AsyncWrite + Unpin + ?Sized,
{
    WriteVectored {
        writer,
        bufs,
        _pin: PhantomPinned,
    }
}

impl<W> Future for WriteVectored<'_, '_, W>
where
    W: AsyncWrite + Unpin + ?Sized,
{
    type Output = io::Result<usize>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
        let me = self.project();
        // Delegate directly; `W: Unpin` makes re-pinning the reference sound.
        Pin::new(&mut *me.writer).poll_write_vectored(cx, me.bufs)
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/read_line.rs
tokio/src/io/util/read_line.rs
use crate::io::util::read_until::read_until_internal;
use crate::io::AsyncBufRead;

use pin_project_lite::pin_project;
use std::future::Future;
use std::io;
use std::marker::PhantomPinned;
use std::mem;
use std::pin::Pin;
use std::string::FromUtf8Error;
use std::task::{ready, Context, Poll};

pin_project! {
    /// Future for the [`read_line`](crate::io::AsyncBufReadExt::read_line) method.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct ReadLine<'a, R: ?Sized> {
        reader: &'a mut R,
        // This is the buffer we were provided. It will be replaced with an empty string
        // while reading to postpone utf-8 handling until after reading.
        output: &'a mut String,
        // The actual allocation of the string is moved into this vector instead.
        buf: Vec<u8>,
        // The number of bytes appended to buf. This can be less than buf.len() if
        // the buffer was not empty when the operation was started.
        read: usize,
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}

pub(crate) fn read_line<'a, R>(reader: &'a mut R, string: &'a mut String) -> ReadLine<'a, R>
where
    R: AsyncBufRead + ?Sized + Unpin,
{
    ReadLine {
        reader,
        // Steal the caller's allocation (leaving `string` empty) so bytes can
        // be appended without per-poll UTF-8 validation.
        buf: mem::take(string).into_bytes(),
        output: string,
        read: 0,
        _pin: PhantomPinned,
    }
}

// Restores `output` to exactly the contents it had before the operation by
// truncating away the `num_bytes_read` bytes that this read appended.
fn put_back_original_data(output: &mut String, mut vector: Vec<u8>, num_bytes_read: usize) {
    let original_len = vector.len() - num_bytes_read;
    vector.truncate(original_len);
    *output = String::from_utf8(vector).expect("The original data must be valid utf-8.");
}

/// This handles the various failure cases and puts the string back into `output`.
///
/// The `truncate_on_io_error` `bool` is necessary because `read_to_string` and `read_line`
/// disagree on what should happen when an IO error occurs.
pub(super) fn finish_string_read(
    io_res: io::Result<usize>,
    utf8_res: Result<String, FromUtf8Error>,
    read: usize,
    output: &mut String,
    truncate_on_io_error: bool,
) -> Poll<io::Result<usize>> {
    match (io_res, utf8_res) {
        // Clean read of valid UTF-8: hand the whole string back.
        (Ok(num_bytes), Ok(string)) => {
            debug_assert_eq!(read, 0);
            *output = string;
            Poll::Ready(Ok(num_bytes))
        }
        // IO error but the bytes so far are valid UTF-8: keep them, optionally
        // trimming what this operation appended.
        (Err(io_err), Ok(string)) => {
            *output = string;
            if truncate_on_io_error {
                let original_len = output.len() - read;
                output.truncate(original_len);
            }
            Poll::Ready(Err(io_err))
        }
        // Clean read of invalid UTF-8: restore the original contents and
        // report the encoding failure.
        (Ok(num_bytes), Err(utf8_err)) => {
            debug_assert_eq!(read, 0);
            put_back_original_data(output, utf8_err.into_bytes(), num_bytes);

            Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "stream did not contain valid UTF-8",
            )))
        }
        // Both failed: restore the original contents and surface the IO error.
        (Err(io_err), Err(utf8_err)) => {
            put_back_original_data(output, utf8_err.into_bytes(), read);

            Poll::Ready(Err(io_err))
        }
    }
}

pub(super) fn read_line_internal<R: AsyncBufRead + ?Sized>(
    reader: Pin<&mut R>,
    cx: &mut Context<'_>,
    output: &mut String,
    buf: &mut Vec<u8>,
    read: &mut usize,
) -> Poll<io::Result<usize>> {
    // Read bytes up to and including the newline, then validate UTF-8 once.
    let io_res = ready!(read_until_internal(reader, cx, b'\n', buf, read));
    let utf8_res = String::from_utf8(mem::take(buf));

    // At this point both buf and output are empty. The allocation is in utf8_res.
    debug_assert!(buf.is_empty());
    debug_assert!(output.is_empty());
    finish_string_read(io_res, utf8_res, *read, output, false)
}

impl<R: AsyncBufRead + ?Sized + Unpin> Future for ReadLine<'_, R> {
    type Output = io::Result<usize>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let me = self.project();

        read_line_internal(Pin::new(*me.reader), cx, me.output, me.buf, me.read)
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/chain.rs
tokio/src/io/util/chain.rs
use crate::io::{AsyncBufRead, AsyncRead, ReadBuf};

use pin_project_lite::pin_project;
use std::fmt;
use std::io;
use std::pin::Pin;
use std::task::{ready, Context, Poll};

pin_project! {
    /// Stream for the [`chain`](super::AsyncReadExt::chain) method.
    #[must_use = "streams do nothing unless polled"]
    #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
    pub struct Chain<T, U> {
        #[pin]
        first: T,
        #[pin]
        second: U,
        // Set once `first` reports EOF; all subsequent reads go to `second`.
        done_first: bool,
    }
}

/// Builds a `Chain` that reads `first` to exhaustion, then `second`.
pub(super) fn chain<T, U>(first: T, second: U) -> Chain<T, U>
where
    T: AsyncRead,
    U: AsyncRead,
{
    Chain {
        first,
        second,
        done_first: false,
    }
}

impl<T, U> Chain<T, U>
where
    T: AsyncRead,
    U: AsyncRead,
{
    /// Gets references to the underlying readers in this `Chain`.
    pub fn get_ref(&self) -> (&T, &U) {
        (&self.first, &self.second)
    }

    /// Gets mutable references to the underlying readers in this `Chain`.
    ///
    /// Care should be taken to avoid modifying the internal I/O state of the
    /// underlying readers as doing so may corrupt the internal state of this
    /// `Chain`.
    pub fn get_mut(&mut self) -> (&mut T, &mut U) {
        (&mut self.first, &mut self.second)
    }

    /// Gets pinned mutable references to the underlying readers in this `Chain`.
    ///
    /// Care should be taken to avoid modifying the internal I/O state of the
    /// underlying readers as doing so may corrupt the internal state of this
    /// `Chain`.
    pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut T>, Pin<&mut U>) {
        let me = self.project();
        (me.first, me.second)
    }

    /// Consumes the `Chain`, returning the wrapped readers.
    pub fn into_inner(self) -> (T, U) {
        (self.first, self.second)
    }
}

impl<T, U> fmt::Debug for Chain<T, U>
where
    T: fmt::Debug,
    U: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Chain")
            .field("t", &self.first)
            .field("u", &self.second)
            .finish()
    }
}

impl<T, U> AsyncRead for Chain<T, U>
where
    T: AsyncRead,
    U: AsyncRead,
{
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        let me = self.project();

        if !*me.done_first {
            let rem = buf.remaining();
            ready!(me.first.poll_read(cx, buf))?;
            // A successful read that added no bytes means `first` hit EOF;
            // fall through and try `second` on this same poll.
            if buf.remaining() == rem {
                *me.done_first = true;
            } else {
                return Poll::Ready(Ok(()));
            }
        }
        me.second.poll_read(cx, buf)
    }
}

impl<T, U> AsyncBufRead for Chain<T, U>
where
    T: AsyncBufRead,
    U: AsyncBufRead,
{
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        let me = self.project();

        if !*me.done_first {
            match ready!(me.first.poll_fill_buf(cx)?) {
                // An empty buffer from `first` means EOF; switch to `second`.
                [] => {
                    *me.done_first = true;
                }
                buf => return Poll::Ready(Ok(buf)),
            }
        }
        me.second.poll_fill_buf(cx)
    }

    fn consume(self: Pin<&mut Self>, amt: usize) {
        let me = self.project();
        // Route the consume to whichever reader produced the current buffer.
        if !*me.done_first {
            me.first.consume(amt)
        } else {
            me.second.consume(amt)
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn assert_unpin() {
        crate::is_unpin::<Chain<(), ()>>();
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/buf_writer.rs
tokio/src/io/util/buf_writer.rs
use crate::io::util::DEFAULT_BUF_SIZE;
use crate::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, ReadBuf};

use pin_project_lite::pin_project;
use std::fmt;
use std::io::{self, IoSlice, SeekFrom, Write};
use std::pin::Pin;
use std::task::{ready, Context, Poll};

pin_project! {
    /// Wraps a writer and buffers its output.
    ///
    /// It can be excessively inefficient to work directly with something that
    /// implements [`AsyncWrite`]. A `BufWriter` keeps an in-memory buffer of data and
    /// writes it to an underlying writer in large, infrequent batches.
    ///
    /// `BufWriter` can improve the speed of programs that make *small* and
    /// *repeated* write calls to the same file or network socket. It does not
    /// help when writing very large amounts at once, or writing just one or a few
    /// times. It also provides no advantage when writing to a destination that is
    /// in memory, like a `Vec<u8>`.
    ///
    /// When the `BufWriter` is dropped, the contents of its buffer will be
    /// discarded. Creating multiple instances of a `BufWriter` on the same
    /// stream can cause data loss. If you need to write out the contents of its
    /// buffer, you must manually call flush before the writer is dropped.
    ///
    /// [`AsyncWrite`]: AsyncWrite
    /// [`flush`]: super::AsyncWriteExt::flush
    ///
    #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
    pub struct BufWriter<W> {
        #[pin]
        pub(super) inner: W,
        // Pending bytes not yet written to `inner`.
        pub(super) buf: Vec<u8>,
        // How much of `buf` a partially-completed flush has already written.
        pub(super) written: usize,
        pub(super) seek_state: SeekState,
    }
}

impl<W: AsyncWrite> BufWriter<W> {
    /// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB,
    /// but may change in the future.
    pub fn new(inner: W) -> Self {
        Self::with_capacity(DEFAULT_BUF_SIZE, inner)
    }

    /// Creates a new `BufWriter` with the specified buffer capacity.
    pub fn with_capacity(cap: usize, inner: W) -> Self {
        Self {
            inner,
            buf: Vec::with_capacity(cap),
            written: 0,
            seek_state: SeekState::Init,
        }
    }

    /// Writes the buffered bytes to the underlying writer.
    ///
    /// `written` persists across polls so a flush interrupted by `Pending`
    /// resumes where it left off rather than re-writing bytes.
    fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        let mut me = self.project();

        let len = me.buf.len();
        let mut ret = Ok(());
        while *me.written < len {
            match ready!(me.inner.as_mut().poll_write(cx, &me.buf[*me.written..])) {
                // A zero-byte write means the writer can make no progress.
                Ok(0) => {
                    ret = Err(io::Error::new(
                        io::ErrorKind::WriteZero,
                        "failed to write the buffered data",
                    ));
                    break;
                }
                Ok(n) => *me.written += n,
                Err(e) => {
                    ret = Err(e);
                    break;
                }
            }
        }
        // Drop whatever made it out; on error the unwritten tail stays
        // buffered so a later flush can retry it.
        if *me.written > 0 {
            me.buf.drain(..*me.written);
        }
        *me.written = 0;
        Poll::Ready(ret)
    }

    /// Gets a reference to the underlying writer.
    pub fn get_ref(&self) -> &W {
        &self.inner
    }

    /// Gets a mutable reference to the underlying writer.
    ///
    /// It is inadvisable to directly write to the underlying writer.
    pub fn get_mut(&mut self) -> &mut W {
        &mut self.inner
    }

    /// Gets a pinned mutable reference to the underlying writer.
    ///
    /// It is inadvisable to directly write to the underlying writer.
    pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> {
        self.project().inner
    }

    /// Consumes this `BufWriter`, returning the underlying writer.
    ///
    /// Note that any leftover data in the internal buffer is lost.
    pub fn into_inner(self) -> W {
        self.inner
    }

    /// Returns a reference to the internally buffered data.
    pub fn buffer(&self) -> &[u8] {
        &self.buf
    }
}

impl<W: AsyncWrite> AsyncWrite for BufWriter<W> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        // Flush first if appending `buf` would overflow the buffer.
        if self.buf.len() + buf.len() > self.buf.capacity() {
            ready!(self.as_mut().flush_buf(cx))?;
        }

        let me = self.project();
        if buf.len() >= me.buf.capacity() {
            // Oversized writes bypass the buffer entirely.
            me.inner.poll_write(cx, buf)
        } else {
            // Writing into a Vec cannot fail, so this never returns Err.
            Poll::Ready(me.buf.write(buf))
        }
    }

    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        mut bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        if self.inner.is_write_vectored() {
            let total_len = bufs
                .iter()
                .fold(0usize, |acc, b| acc.saturating_add(b.len()));
            if total_len > self.buf.capacity() - self.buf.len() {
                ready!(self.as_mut().flush_buf(cx))?;
            }
            let me = self.as_mut().project();
            if total_len >= me.buf.capacity() {
                // It's more efficient to pass the slices directly to the
                // underlying writer than to buffer them.
                // The case when the total_len calculation saturates at
                // usize::MAX is also handled here.
                me.inner.poll_write_vectored(cx, bufs)
            } else {
                bufs.iter().for_each(|b| me.buf.extend_from_slice(b));
                Poll::Ready(Ok(total_len))
            }
        } else {
            // Remove empty buffers at the beginning of bufs.
            while bufs.first().map(|buf| buf.len()) == Some(0) {
                bufs = &bufs[1..];
            }
            if bufs.is_empty() {
                return Poll::Ready(Ok(0));
            }
            // Flush if the first buffer doesn't fit.
            let first_len = bufs[0].len();
            if first_len > self.buf.capacity() - self.buf.len() {
                ready!(self.as_mut().flush_buf(cx))?;
                debug_assert!(self.buf.is_empty());
            }
            let me = self.as_mut().project();
            if first_len >= me.buf.capacity() {
                // The slice is at least as large as the buffering capacity,
                // so it's better to write it directly, bypassing the buffer.
                debug_assert!(me.buf.is_empty());
                return me.inner.poll_write(cx, &bufs[0]);
            } else {
                me.buf.extend_from_slice(&bufs[0]);
                bufs = &bufs[1..];
            }
            let mut total_written = first_len;
            debug_assert!(total_written != 0);
            // Append the buffers that fit in the internal buffer.
            for buf in bufs {
                if buf.len() > me.buf.capacity() - me.buf.len() {
                    break;
                } else {
                    me.buf.extend_from_slice(buf);
                    total_written += buf.len();
                }
            }
            Poll::Ready(Ok(total_written))
        }
    }

    fn is_write_vectored(&self) -> bool {
        // Always true: the non-vectored inner-writer path above still
        // coalesces multiple slices into the internal buffer.
        true
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        ready!(self.as_mut().flush_buf(cx))?;
        self.get_pin_mut().poll_flush(cx)
    }

    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        ready!(self.as_mut().flush_buf(cx))?;
        self.get_pin_mut().poll_shutdown(cx)
    }
}

#[derive(Debug, Clone, Copy)]
pub(super) enum SeekState {
    /// `start_seek` has not been called.
    Init,
    /// `start_seek` has been called, but `poll_complete` has not yet been called.
    Start(SeekFrom),
    /// Waiting for completion of `poll_complete`.
    Pending,
}

/// Seek to the offset, in bytes, in the underlying writer.
///
/// Seeking always writes out the internal buffer before seeking.
impl<W: AsyncWrite + AsyncSeek> AsyncSeek for BufWriter<W> {
    fn start_seek(self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> {
        // We need to flush the internal buffer before seeking.
        // It receives a `Context` and returns a `Poll`, so it cannot be called
        // inside `start_seek`.
        *self.project().seek_state = SeekState::Start(pos);
        Ok(())
    }

    fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
        let pos = match self.seek_state {
            SeekState::Init => {
                // No seek of ours is in flight; delegate to the inner writer.
                return self.project().inner.poll_complete(cx);
            }
            SeekState::Start(pos) => Some(pos),
            SeekState::Pending => None,
        };

        // Flush the internal buffer before seeking.
        ready!(self.as_mut().flush_buf(cx))?;

        let mut me = self.project();
        if let Some(pos) = pos {
            // Ensure previous seeks have finished before starting a new one
            ready!(me.inner.as_mut().poll_complete(cx))?;
            if let Err(e) = me.inner.as_mut().start_seek(pos) {
                *me.seek_state = SeekState::Init;
                return Poll::Ready(Err(e));
            }
        }
        match me.inner.poll_complete(cx) {
            Poll::Ready(res) => {
                *me.seek_state = SeekState::Init;
                Poll::Ready(res)
            }
            Poll::Pending => {
                *me.seek_state = SeekState::Pending;
                Poll::Pending
            }
        }
    }
}

impl<W: AsyncWrite + AsyncRead> AsyncRead for BufWriter<W> {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // Reads are unbuffered; pass straight through to the inner object.
        self.get_pin_mut().poll_read(cx, buf)
    }
}

impl<W: AsyncWrite + AsyncBufRead> AsyncBufRead for BufWriter<W> {
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        self.get_pin_mut().poll_fill_buf(cx)
    }

    fn consume(self: Pin<&mut Self>, amt: usize) {
        self.get_pin_mut().consume(amt);
    }
}

impl<W: fmt::Debug> fmt::Debug for BufWriter<W> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("BufWriter")
            .field("writer", &self.inner)
            .field(
                "buffer",
                &format_args!("{}/{}", self.buf.len(), self.buf.capacity()),
            )
            .field("written", &self.written)
            .finish()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn assert_unpin() {
        crate::is_unpin::<BufWriter<()>>();
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/vec_with_initialized.rs
tokio/src/io/util/vec_with_initialized.rs
use crate::io::ReadBuf;
use std::mem::MaybeUninit;

/// Something that looks like a `Vec<u8>`.
///
/// # Safety
///
/// The implementor must guarantee that the vector returned by the
/// `as_ref` and `as_mut` methods do not change from one call to
/// another.
pub(crate) unsafe trait VecU8: AsRef<Vec<u8>> + AsMut<Vec<u8>> {}

unsafe impl VecU8 for Vec<u8> {}
unsafe impl VecU8 for &mut Vec<u8> {}

/// This struct wraps a `Vec<u8>` or `&mut Vec<u8>`, combining it with a
/// `num_initialized`, which keeps track of the number of initialized bytes
/// in the unused capacity.
///
/// The purpose of this struct is to remember how many bytes were initialized
/// through a `ReadBuf` from call to call.
///
/// This struct has the safety invariant that the first `num_initialized` of the
/// vector's allocation must be initialized at any time.
#[derive(Debug)]
pub(crate) struct VecWithInitialized<V> {
    vec: V,
    // The number of initialized bytes in the vector.
    // Always between `vec.len()` and `vec.capacity()`.
    num_initialized: usize,
    // Capacity the vector had when this wrapper was created; used by
    // `try_small_read_first` to detect "never grown" vectors.
    starting_capacity: usize,
}

impl VecWithInitialized<Vec<u8>> {
    #[cfg(feature = "io-util")]
    pub(crate) fn take(&mut self) -> Vec<u8> {
        // Resetting to zero is sound: an empty vector trivially satisfies the
        // "first num_initialized bytes are initialized" invariant.
        self.num_initialized = 0;
        std::mem::take(&mut self.vec)
    }
}

impl<V> VecWithInitialized<V>
where
    V: VecU8,
{
    pub(crate) fn new(mut vec: V) -> Self {
        // SAFETY: The safety invariants of vector guarantee that the bytes up
        // to its length are initialized.
        Self {
            num_initialized: vec.as_mut().len(),
            starting_capacity: vec.as_ref().capacity(),
            vec,
        }
    }

    pub(crate) fn reserve(&mut self, num_bytes: usize) {
        let vec = self.vec.as_mut();
        if vec.capacity() - vec.len() >= num_bytes {
            return;
        }
        // SAFETY: Setting num_initialized to `vec.len()` is correct as
        // `reserve` does not change the length of the vector.
        // (Reallocation may leave the spare capacity uninitialized, so the
        // previous, larger num_initialized would no longer be valid.)
        self.num_initialized = vec.len();
        vec.reserve(num_bytes);
    }

    #[cfg(feature = "io-util")]
    pub(crate) fn is_empty(&self) -> bool {
        self.vec.as_ref().is_empty()
    }

    pub(crate) fn get_read_buf<'a>(&'a mut self) -> ReadBuf<'a> {
        let num_initialized = self.num_initialized;

        // SAFETY: Creating the slice is safe because of the safety invariants
        // on Vec<u8>. The safety invariants of `ReadBuf` will further guarantee
        // that no bytes in the slice are de-initialized.
        let vec = self.vec.as_mut();
        let len = vec.len();
        let cap = vec.capacity();
        let ptr = vec.as_mut_ptr().cast::<MaybeUninit<u8>>();
        let slice = unsafe { std::slice::from_raw_parts_mut::<'a, MaybeUninit<u8>>(ptr, cap) };

        // SAFETY: This is safe because the safety invariants of
        // VecWithInitialized say that the first num_initialized bytes must be
        // initialized.
        let mut read_buf = ReadBuf::uninit(slice);
        unsafe {
            read_buf.assume_init(num_initialized);
        }
        read_buf.set_filled(len);

        read_buf
    }

    pub(crate) fn apply_read_buf(&mut self, parts: ReadBufParts) {
        let vec = self.vec.as_mut();
        // Guards against applying a ReadBuf created from a different vector.
        assert_eq!(vec.as_ptr(), parts.ptr);

        // SAFETY:
        // The ReadBufParts really does point inside `self.vec` due to the above
        // check, and the safety invariants of `ReadBuf` guarantee that the
        // first `parts.initialized` bytes of `self.vec` really have been
        // initialized. Additionally, `ReadBuf` guarantees that `parts.len` is
        // at most `parts.initialized`, so the first `parts.len` bytes are also
        // initialized.
        //
        // Note that this relies on the fact that `V` is either `Vec<u8>` or
        // `&mut Vec<u8>`, so the vector returned by `self.vec.as_mut()` cannot
        // change from call to call.
        unsafe {
            self.num_initialized = parts.initialized;
            vec.set_len(parts.len);
        }
    }

    // Returns a boolean telling the caller to try reading into a small local buffer first if true.
    // Doing so would avoid overallocating when vec is filled to capacity and we reached EOF.
    pub(crate) fn try_small_read_first(&self, num_bytes: usize) -> bool {
        let vec = self.vec.as_ref();
        vec.capacity() - vec.len() < num_bytes
            && self.starting_capacity == vec.capacity()
            && self.starting_capacity >= num_bytes
    }
}

pub(crate) struct ReadBufParts {
    // Pointer is only used to check that the ReadBuf actually came from the
    // right VecWithInitialized.
    ptr: *const u8,
    len: usize,
    initialized: usize,
}

// This is needed to release the borrow on `VecWithInitialized<V>`.
pub(crate) fn into_read_buf_parts(rb: ReadBuf<'_>) -> ReadBufParts {
    ReadBufParts {
        ptr: rb.filled().as_ptr(),
        len: rb.filled().len(),
        initialized: rb.initialized().len(),
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/write_all_buf.rs
tokio/src/io/util/write_all_buf.rs
use crate::io::AsyncWrite; use bytes::Buf; use pin_project_lite::pin_project; use std::future::Future; use std::io::{self, IoSlice}; use std::marker::PhantomPinned; use std::pin::Pin; use std::task::{ready, Context, Poll}; pin_project! { /// A future to write some of the buffer to an `AsyncWrite`. #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct WriteAllBuf<'a, W, B> { writer: &'a mut W, buf: &'a mut B, #[pin] _pin: PhantomPinned, } } /// Tries to write some bytes from the given `buf` to the writer in an /// asynchronous manner, returning a future. pub(crate) fn write_all_buf<'a, W, B>(writer: &'a mut W, buf: &'a mut B) -> WriteAllBuf<'a, W, B> where W: AsyncWrite + Unpin, B: Buf, { WriteAllBuf { writer, buf, _pin: PhantomPinned, } } impl<W, B> Future for WriteAllBuf<'_, W, B> where W: AsyncWrite + Unpin, B: Buf, { type Output = io::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { const MAX_VECTOR_ELEMENTS: usize = 64; let me = self.project(); while me.buf.has_remaining() { let n = if me.writer.is_write_vectored() { let mut slices = [IoSlice::new(&[]); MAX_VECTOR_ELEMENTS]; let cnt = me.buf.chunks_vectored(&mut slices); ready!(Pin::new(&mut *me.writer).poll_write_vectored(cx, &slices[..cnt]))? } else { ready!(Pin::new(&mut *me.writer).poll_write(cx, me.buf.chunk())?) }; me.buf.advance(n); if n == 0 { return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); } } Poll::Ready(Ok(())) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/copy.rs
tokio/src/io/util/copy.rs
use crate::io::{AsyncRead, AsyncWrite, ReadBuf};

use std::future::Future;
use std::io;
use std::pin::Pin;
use std::task::{ready, Context, Poll};

/// Reusable staging buffer that shuttles bytes from a reader to a writer.
/// Shared by `copy` and `copy_bidirectional`.
#[derive(Debug)]
pub(super) struct CopyBuffer {
    // Reader returned EOF; no further fills will be attempted.
    read_done: bool,
    // Data was written since the last flush, so a flush may be required.
    need_flush: bool,
    // Start of the unwritten region within `buf`.
    pos: usize,
    // End of the valid (filled) region within `buf`.
    cap: usize,
    // Total bytes copied so far; returned to the caller on completion.
    amt: u64,
    buf: Box<[u8]>,
}

impl CopyBuffer {
    pub(super) fn new(buf_size: usize) -> Self {
        Self {
            read_done: false,
            need_flush: false,
            pos: 0,
            cap: 0,
            amt: 0,
            buf: vec![0; buf_size].into_boxed_slice(),
        }
    }

    /// Reads more bytes into the unfilled tail of `buf`, updating `cap` and
    /// detecting EOF (a successful read that adds no bytes).
    fn poll_fill_buf<R>(
        &mut self,
        cx: &mut Context<'_>,
        reader: Pin<&mut R>,
    ) -> Poll<io::Result<()>>
    where
        R: AsyncRead + ?Sized,
    {
        let me = &mut *self;
        let mut buf = ReadBuf::new(&mut me.buf);
        buf.set_filled(me.cap);

        let res = reader.poll_read(cx, &mut buf);
        if let Poll::Ready(Ok(())) = res {
            let filled_len = buf.filled().len();
            // No growth on a successful read means the reader hit EOF.
            me.read_done = me.cap == filled_len;
            me.cap = filled_len;
        }
        res
    }

    /// Writes the `pos..cap` region to the writer; if the writer is not ready,
    /// opportunistically tops up the buffer from the reader instead.
    fn poll_write_buf<R, W>(
        &mut self,
        cx: &mut Context<'_>,
        mut reader: Pin<&mut R>,
        mut writer: Pin<&mut W>,
    ) -> Poll<io::Result<usize>>
    where
        R: AsyncRead + ?Sized,
        W: AsyncWrite + ?Sized,
    {
        let me = &mut *self;
        match writer.as_mut().poll_write(cx, &me.buf[me.pos..me.cap]) {
            Poll::Pending => {
                // Top up the buffer towards full if we can read a bit more
                // data - this should improve the chances of a large write
                if !me.read_done && me.cap < me.buf.len() {
                    ready!(me.poll_fill_buf(cx, reader.as_mut()))?;
                }
                Poll::Pending
            }
            res => res,
        }
    }

    /// Drives the copy to completion: fill, drain, and finally flush once the
    /// reader reports EOF. Returns the total number of bytes copied.
    pub(super) fn poll_copy<R, W>(
        &mut self,
        cx: &mut Context<'_>,
        mut reader: Pin<&mut R>,
        mut writer: Pin<&mut W>,
    ) -> Poll<io::Result<u64>>
    where
        R: AsyncRead + ?Sized,
        W: AsyncWrite + ?Sized,
    {
        ready!(crate::trace::trace_leaf(cx));
        #[cfg(any(
            feature = "fs",
            feature = "io-std",
            feature = "net",
            feature = "process",
            feature = "rt",
            feature = "signal",
            feature = "sync",
            feature = "time",
        ))]
        // Keep track of task budget
        let coop = ready!(crate::task::coop::poll_proceed(cx));
        loop {
            // If there is some space left in our buffer, then we try to read some
            // data to continue, thus maximizing the chances of a large write.
            if self.cap < self.buf.len() && !self.read_done {
                match self.poll_fill_buf(cx, reader.as_mut()) {
                    Poll::Ready(Ok(())) => {
                        #[cfg(any(
                            feature = "fs",
                            feature = "io-std",
                            feature = "net",
                            feature = "process",
                            feature = "rt",
                            feature = "signal",
                            feature = "sync",
                            feature = "time",
                        ))]
                        coop.made_progress();
                    }
                    Poll::Ready(Err(err)) => {
                        #[cfg(any(
                            feature = "fs",
                            feature = "io-std",
                            feature = "net",
                            feature = "process",
                            feature = "rt",
                            feature = "signal",
                            feature = "sync",
                            feature = "time",
                        ))]
                        coop.made_progress();
                        return Poll::Ready(Err(err));
                    }
                    Poll::Pending => {
                        // Ignore pending reads when our buffer is not empty, because
                        // we can try to write data immediately.
                        if self.pos == self.cap {
                            // Try flushing when the reader has no progress to avoid deadlock
                            // when the reader depends on buffered writer.
                            if self.need_flush {
                                ready!(writer.as_mut().poll_flush(cx))?;
                                #[cfg(any(
                                    feature = "fs",
                                    feature = "io-std",
                                    feature = "net",
                                    feature = "process",
                                    feature = "rt",
                                    feature = "signal",
                                    feature = "sync",
                                    feature = "time",
                                ))]
                                coop.made_progress();
                                self.need_flush = false;
                            }

                            return Poll::Pending;
                        }
                    }
                }
            }

            // If our buffer has some data, let's write it out!
            while self.pos < self.cap {
                let i = ready!(self.poll_write_buf(cx, reader.as_mut(), writer.as_mut()))?;
                #[cfg(any(
                    feature = "fs",
                    feature = "io-std",
                    feature = "net",
                    feature = "process",
                    feature = "rt",
                    feature = "signal",
                    feature = "sync",
                    feature = "time",
                ))]
                coop.made_progress();
                if i == 0 {
                    return Poll::Ready(Err(io::Error::new(
                        io::ErrorKind::WriteZero,
                        "write zero byte into writer",
                    )));
                } else {
                    self.pos += i;
                    self.amt += i as u64;
                    self.need_flush = true;
                }
            }

            // If pos larger than cap, this loop will never stop.
            // In particular, user's wrong poll_write implementation returning
            // incorrect written length may lead to thread blocking.
            debug_assert!(
                self.pos <= self.cap,
                "writer returned length larger than input slice"
            );

            // All data has been written, the buffer can be considered empty again
            self.pos = 0;
            self.cap = 0;

            // If we've written all the data and we've seen EOF, flush out the
            // data and finish the transfer.
            if self.read_done {
                ready!(writer.as_mut().poll_flush(cx))?;
                #[cfg(any(
                    feature = "fs",
                    feature = "io-std",
                    feature = "net",
                    feature = "process",
                    feature = "rt",
                    feature = "signal",
                    feature = "sync",
                    feature = "time",
                ))]
                coop.made_progress();
                return Poll::Ready(Ok(self.amt));
            }
        }
    }
}

/// A future that asynchronously copies the entire contents of a reader into a
/// writer.
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
struct Copy<'a, R: ?Sized, W: ?Sized> {
    reader: &'a mut R,
    writer: &'a mut W,
    buf: CopyBuffer,
}

cfg_io_util! {
    /// Asynchronously copies the entire contents of a reader into a writer.
    ///
    /// This function returns a future that will continuously read data from
    /// `reader` and then write it into `writer` in a streaming fashion until
    /// `reader` returns EOF or fails.
    ///
    /// On success, the total number of bytes that were copied from `reader` to
    /// `writer` is returned.
    ///
    /// This is an asynchronous version of [`std::io::copy`][std].
    ///
    /// A heap-allocated copy buffer with 8 KB is created to take data from the
    /// reader to the writer, check [`copy_buf`] if you want an alternative for
    /// [`AsyncBufRead`]. You can use `copy_buf` with [`BufReader`] to change the
    /// buffer capacity.
    ///
    /// # When to use async alternatives instead of `SyncIoBridge`
    ///
    /// If you are looking to use [`std::io::copy`] with a synchronous consumer
    /// (like a `hasher` or compressor), consider using async alternatives instead of
    /// wrapping the reader with [`SyncIoBridge`].
    /// See the [`SyncIoBridge`] documentation for detailed examples and guidance.
    ///
    /// [std]: std::io::copy
    /// [`copy_buf`]: crate::io::copy_buf
    /// [`AsyncBufRead`]: crate::io::AsyncBufRead
    /// [`BufReader`]: crate::io::BufReader
    /// [`SyncIoBridge`]: https://docs.rs/tokio-util/latest/tokio_util/io/struct.SyncIoBridge.html
    ///
    /// # Errors
    ///
    /// The returned future will return an error immediately if any call to
    /// `poll_read` or `poll_write` returns an error.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io;
    ///
    /// # async fn dox() -> std::io::Result<()> {
    /// let mut reader: &[u8] = b"hello";
    /// let mut writer: Vec<u8> = vec![];
    ///
    /// io::copy(&mut reader, &mut writer).await?;
    ///
    /// assert_eq!(&b"hello"[..], &writer[..]);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn copy<'a, R, W>(reader: &'a mut R, writer: &'a mut W) -> io::Result<u64>
    where
        R: AsyncRead + Unpin + ?Sized,
        W: AsyncWrite + Unpin + ?Sized,
    {
        Copy {
            reader,
            writer,
            buf: CopyBuffer::new(super::DEFAULT_BUF_SIZE)
        }.await
    }
}

impl<R, W> Future for Copy<'_, R, W>
where
    R: AsyncRead + Unpin + ?Sized,
    W: AsyncWrite + Unpin + ?Sized,
{
    type Output = io::Result<u64>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
        let me = &mut *self;

        me.buf
            .poll_copy(cx, Pin::new(&mut *me.reader), Pin::new(&mut *me.writer))
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/read_exact.rs
tokio/src/io/util/read_exact.rs
use crate::io::{AsyncRead, ReadBuf}; use pin_project_lite::pin_project; use std::future::Future; use std::io; use std::marker::PhantomPinned; use std::marker::Unpin; use std::pin::Pin; use std::task::{ready, Context, Poll}; /// A future which can be used to easily read exactly enough bytes to fill /// a buffer. /// /// Created by the [`AsyncReadExt::read_exact`][read_exact]. /// [`read_exact`]: [`crate::io::AsyncReadExt::read_exact`] pub(crate) fn read_exact<'a, A>(reader: &'a mut A, buf: &'a mut [u8]) -> ReadExact<'a, A> where A: AsyncRead + Unpin + ?Sized, { ReadExact { reader, buf: ReadBuf::new(buf), _pin: PhantomPinned, } } pin_project! { /// Creates a future which will read exactly enough bytes to fill `buf`, /// returning an error if EOF is hit sooner. /// /// On success the number of bytes is returned #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct ReadExact<'a, A: ?Sized> { reader: &'a mut A, buf: ReadBuf<'a>, // Make this future `!Unpin` for compatibility with async trait methods. #[pin] _pin: PhantomPinned, } } fn eof() -> io::Error { io::Error::new(io::ErrorKind::UnexpectedEof, "early eof") } impl<A> Future for ReadExact<'_, A> where A: AsyncRead + Unpin + ?Sized, { type Output = io::Result<usize>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> { let me = self.project(); loop { // if our buffer is empty, then we need to read some data to continue. let rem = me.buf.remaining(); if rem != 0 { ready!(Pin::new(&mut *me.reader).poll_read(cx, me.buf))?; if me.buf.remaining() == rem { return Err(eof()).into(); } } else { return Poll::Ready(Ok(me.buf.capacity())); } } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/sink.rs
tokio/src/io/util/sink.rs
use crate::io::util::poll_proceed_and_make_progress;
use crate::io::AsyncWrite;

use std::fmt;
use std::io;
use std::pin::Pin;
use std::task::{ready, Context, Poll};

cfg_io_util! {
    /// An async writer which will move data into the void.
    ///
    /// This struct is generally created by calling [`sink`][sink]. Please
    /// see the documentation of `sink()` for more details.
    ///
    /// This is an asynchronous version of [`std::io::Sink`][std].
    ///
    /// [sink]: sink()
    /// [std]: std::io::Sink
    pub struct Sink {
        _p: (),
    }

    /// Creates an instance of an async writer which will successfully consume all
    /// data.
    ///
    /// All calls to [`poll_write`] on the returned instance will return
    /// `Poll::Ready(Ok(buf.len()))` and the contents of the buffer will not be
    /// inspected.
    ///
    /// This is an asynchronous version of [`std::io::sink`][std].
    ///
    /// [`poll_write`]: crate::io::AsyncWrite::poll_write()
    /// [std]: std::io::sink
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::{self, AsyncWriteExt};
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() -> io::Result<()> {
    /// let buffer = vec![1, 2, 3, 5, 8];
    /// let num_bytes = io::sink().write(&buffer).await?;
    /// assert_eq!(num_bytes, 5);
    /// Ok(())
    /// # }
    /// ```
    pub fn sink() -> Sink {
        Sink { _p: () }
    }
}

impl AsyncWrite for Sink {
    #[inline]
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        // Tracing/coop bookkeeping so the sink still cooperates with the
        // task budget even though it does no real work.
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        // Claim the whole buffer was written without looking at it.
        Poll::Ready(Ok(buf.len()))
    }

    #[inline]
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        Poll::Ready(Ok(()))
    }

    #[inline]
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        Poll::Ready(Ok(()))
    }
}

impl fmt::Debug for Sink {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("Sink { .. }")
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn assert_unpin() {
        crate::is_unpin::<Sink>();
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/buf_stream.rs
tokio/src/io/util/buf_stream.rs
use crate::io::util::{BufReader, BufWriter}; use crate::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, ReadBuf}; use pin_project_lite::pin_project; use std::io::{self, IoSlice, SeekFrom}; use std::pin::Pin; use std::task::{Context, Poll}; pin_project! { /// Wraps a type that is [`AsyncWrite`] and [`AsyncRead`], and buffers its input and output. /// /// It can be excessively inefficient to work directly with something that implements [`AsyncWrite`] /// and [`AsyncRead`]. For example, every `write`, however small, has to traverse the syscall /// interface, and similarly, every read has to do the same. The [`BufWriter`] and [`BufReader`] /// types aid with these problems respectively, but do so in only one direction. `BufStream` wraps /// one in the other so that both directions are buffered. See their documentation for details. #[derive(Debug)] #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] pub struct BufStream<RW> { #[pin] inner: BufReader<BufWriter<RW>>, } } impl<RW: AsyncRead + AsyncWrite> BufStream<RW> { /// Wraps a type in both [`BufWriter`] and [`BufReader`]. /// /// See the documentation for those types and [`BufStream`] for details. pub fn new(stream: RW) -> BufStream<RW> { BufStream { inner: BufReader::new(BufWriter::new(stream)), } } /// Creates a `BufStream` with the specified [`BufReader`] capacity and [`BufWriter`] /// capacity. /// /// See the documentation for those types and [`BufStream`] for details. pub fn with_capacity( reader_capacity: usize, writer_capacity: usize, stream: RW, ) -> BufStream<RW> { BufStream { inner: BufReader::with_capacity( reader_capacity, BufWriter::with_capacity(writer_capacity, stream), ), } } /// Gets a reference to the underlying I/O object. /// /// It is inadvisable to directly read from the underlying I/O object. pub fn get_ref(&self) -> &RW { self.inner.get_ref().get_ref() } /// Gets a mutable reference to the underlying I/O object. /// /// It is inadvisable to directly read from the underlying I/O object. 
pub fn get_mut(&mut self) -> &mut RW { self.inner.get_mut().get_mut() } /// Gets a pinned mutable reference to the underlying I/O object. /// /// It is inadvisable to directly read from the underlying I/O object. pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut RW> { self.project().inner.get_pin_mut().get_pin_mut() } /// Consumes this `BufStream`, returning the underlying I/O object. /// /// Note that any leftover data in the internal buffer is lost. pub fn into_inner(self) -> RW { self.inner.into_inner().into_inner() } } impl<RW> From<BufReader<BufWriter<RW>>> for BufStream<RW> { fn from(b: BufReader<BufWriter<RW>>) -> Self { BufStream { inner: b } } } impl<RW> From<BufWriter<BufReader<RW>>> for BufStream<RW> { fn from(b: BufWriter<BufReader<RW>>) -> Self { // we need to "invert" the reader and writer let BufWriter { inner: BufReader { inner, buf: rbuf, pos, cap, seek_state: rseek_state, }, buf: wbuf, written, seek_state: wseek_state, } = b; BufStream { inner: BufReader { inner: BufWriter { inner, buf: wbuf, written, seek_state: wseek_state, }, buf: rbuf, pos, cap, seek_state: rseek_state, }, } } } impl<RW: AsyncRead + AsyncWrite> AsyncWrite for BufStream<RW> { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { self.project().inner.poll_write(cx, buf) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll<io::Result<usize>> { self.project().inner.poll_write_vectored(cx, bufs) } fn is_write_vectored(&self) -> bool { self.inner.is_write_vectored() } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.project().inner.poll_flush(cx) } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.project().inner.poll_shutdown(cx) } } impl<RW: AsyncRead + AsyncWrite> AsyncRead for BufStream<RW> { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { 
self.project().inner.poll_read(cx, buf) } } /// Seek to an offset, in bytes, in the underlying stream. /// /// The position used for seeking with `SeekFrom::Current(_)` is the /// position the underlying stream would be at if the `BufStream` had no /// internal buffer. /// /// Seeking always discards the internal buffer, even if the seek position /// would otherwise fall within it. This guarantees that calling /// `.into_inner()` immediately after a seek yields the underlying reader /// at the same position. /// /// See [`AsyncSeek`] for more details. /// /// Note: In the edge case where you're seeking with `SeekFrom::Current(n)` /// where `n` minus the internal buffer length overflows an `i64`, two /// seeks will be performed instead of one. If the second seek returns /// `Err`, the underlying reader will be left at the same position it would /// have if you called `seek` with `SeekFrom::Current(0)`. impl<RW: AsyncRead + AsyncWrite + AsyncSeek> AsyncSeek for BufStream<RW> { fn start_seek(self: Pin<&mut Self>, position: SeekFrom) -> io::Result<()> { self.project().inner.start_seek(position) } fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> { self.project().inner.poll_complete(cx) } } impl<RW: AsyncRead + AsyncWrite> AsyncBufRead for BufStream<RW> { fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { self.project().inner.poll_fill_buf(cx) } fn consume(self: Pin<&mut Self>, amt: usize) { self.project().inner.consume(amt); } } #[cfg(test)] mod tests { use super::*; #[test] fn assert_unpin() { crate::is_unpin::<BufStream<()>>(); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/write_buf.rs
tokio/src/io/util/write_buf.rs
use crate::io::AsyncWrite; use bytes::Buf; use pin_project_lite::pin_project; use std::future::Future; use std::io; use std::marker::PhantomPinned; use std::pin::Pin; use std::task::{ready, Context, Poll}; pin_project! { /// A future to write some of the buffer to an `AsyncWrite`. #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct WriteBuf<'a, W, B> { writer: &'a mut W, buf: &'a mut B, #[pin] _pin: PhantomPinned, } } /// Tries to write some bytes from the given `buf` to the writer in an /// asynchronous manner, returning a future. pub(crate) fn write_buf<'a, W, B>(writer: &'a mut W, buf: &'a mut B) -> WriteBuf<'a, W, B> where W: AsyncWrite + Unpin, B: Buf, { WriteBuf { writer, buf, _pin: PhantomPinned, } } impl<W, B> Future for WriteBuf<'_, W, B> where W: AsyncWrite + Unpin, B: Buf, { type Output = io::Result<usize>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> { let me = self.project(); if !me.buf.has_remaining() { return Poll::Ready(Ok(0)); } let n = ready!(Pin::new(me.writer).poll_write(cx, me.buf.chunk()))?; me.buf.advance(n); Poll::Ready(Ok(n)) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/copy_buf.rs
tokio/src/io/util/copy_buf.rs
use crate::io::{AsyncBufRead, AsyncWrite}; use std::future::Future; use std::io; use std::pin::Pin; use std::task::{ready, Context, Poll}; cfg_io_util! { /// A future that asynchronously copies the entire contents of a reader into a /// writer. /// /// This struct is generally created by calling [`copy_buf`][copy_buf]. Please /// see the documentation of `copy_buf()` for more details. /// /// [copy_buf]: copy_buf() #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] struct CopyBuf<'a, R: ?Sized, W: ?Sized> { reader: &'a mut R, writer: &'a mut W, amt: u64, } /// Asynchronously copies the entire contents of a reader into a writer. /// /// This function returns a future that will continuously read data from /// `reader` and then write it into `writer` in a streaming fashion until /// `reader` returns EOF or fails. /// /// On success, the total number of bytes that were copied from `reader` to /// `writer` is returned. /// /// This is a [`tokio::io::copy`] alternative for [`AsyncBufRead`] readers /// with no extra buffer allocation, since [`AsyncBufRead`] allow access /// to the reader's inner buffer. /// /// # When to use async alternatives instead of `SyncIoBridge` /// /// If you are looking to use [`std::io::copy`] with a synchronous consumer /// (like a `hasher` or compressor), consider using async alternatives instead of /// wrapping the reader with [`SyncIoBridge`]. See the [`SyncIoBridge`] /// documentation for detailed examples and guidance on hashing, compression, /// and data parsing. /// /// [`tokio::io::copy`]: crate::io::copy /// [`AsyncBufRead`]: crate::io::AsyncBufRead /// [`SyncIoBridge`]: https://docs.rs/tokio-util/latest/tokio_util/io/struct.SyncIoBridge.html /// /// # Errors /// /// The returned future will finish with an error will return an error /// immediately if any call to `poll_fill_buf` or `poll_write` returns an /// error. 
/// /// # Examples /// /// ``` /// use tokio::io; /// /// # async fn dox() -> std::io::Result<()> { /// let mut reader: &[u8] = b"hello"; /// let mut writer: Vec<u8> = vec![]; /// /// io::copy_buf(&mut reader, &mut writer).await?; /// /// assert_eq!(b"hello", &writer[..]); /// # Ok(()) /// # } /// ``` pub async fn copy_buf<'a, R, W>(reader: &'a mut R, writer: &'a mut W) -> io::Result<u64> where R: AsyncBufRead + Unpin + ?Sized, W: AsyncWrite + Unpin + ?Sized, { CopyBuf { reader, writer, amt: 0, }.await } } impl<R, W> Future for CopyBuf<'_, R, W> where R: AsyncBufRead + Unpin + ?Sized, W: AsyncWrite + Unpin + ?Sized, { type Output = io::Result<u64>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { loop { let me = &mut *self; let buffer = ready!(Pin::new(&mut *me.reader).poll_fill_buf(cx))?; if buffer.is_empty() { ready!(Pin::new(&mut self.writer).poll_flush(cx))?; return Poll::Ready(Ok(self.amt)); } let i = ready!(Pin::new(&mut *me.writer).poll_write(cx, buffer))?; if i == 0 { return Poll::Ready(Err(std::io::ErrorKind::WriteZero.into())); } self.amt += i as u64; Pin::new(&mut *self.reader).consume(i); } } } #[cfg(test)] mod tests { use super::*; #[test] fn assert_unpin() { use std::marker::PhantomPinned; crate::is_unpin::<CopyBuf<'_, PhantomPinned, PhantomPinned>>(); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/buf_reader.rs
tokio/src/io/util/buf_reader.rs
use crate::io::util::DEFAULT_BUF_SIZE; use crate::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, ReadBuf}; use pin_project_lite::pin_project; use std::io::{self, IoSlice, SeekFrom}; use std::pin::Pin; use std::task::{ready, Context, Poll}; use std::{cmp, fmt, mem}; pin_project! { /// The `BufReader` struct adds buffering to any reader. /// /// It can be excessively inefficient to work directly with a [`AsyncRead`] /// instance. A `BufReader` performs large, infrequent reads on the underlying /// [`AsyncRead`] and maintains an in-memory buffer of the results. /// /// `BufReader` can improve the speed of programs that make *small* and /// *repeated* read calls to the same file or network socket. It does not /// help when reading very large amounts at once, or reading just one or a few /// times. It also provides no advantage when reading from a source that is /// already in memory, like a `Vec<u8>`. /// /// When the `BufReader` is dropped, the contents of its buffer will be /// discarded. Creating multiple instances of a `BufReader` on the same /// stream can cause data loss. #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] pub struct BufReader<R> { #[pin] pub(super) inner: R, pub(super) buf: Box<[u8]>, pub(super) pos: usize, pub(super) cap: usize, pub(super) seek_state: SeekState, } } impl<R: AsyncRead> BufReader<R> { /// Creates a new `BufReader` with a default buffer capacity. The default is currently 8 KB, /// but may change in the future. pub fn new(inner: R) -> Self { Self::with_capacity(DEFAULT_BUF_SIZE, inner) } /// Creates a new `BufReader` with the specified buffer capacity. pub fn with_capacity(capacity: usize, inner: R) -> Self { let buffer = vec![0; capacity]; Self { inner, buf: buffer.into_boxed_slice(), pos: 0, cap: 0, seek_state: SeekState::Init, } } /// Gets a reference to the underlying reader. /// /// It is inadvisable to directly read from the underlying reader. 
pub fn get_ref(&self) -> &R { &self.inner } /// Gets a mutable reference to the underlying reader. /// /// It is inadvisable to directly read from the underlying reader. pub fn get_mut(&mut self) -> &mut R { &mut self.inner } /// Gets a pinned mutable reference to the underlying reader. /// /// It is inadvisable to directly read from the underlying reader. pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { self.project().inner } /// Consumes this `BufReader`, returning the underlying reader. /// /// Note that any leftover data in the internal buffer is lost. pub fn into_inner(self) -> R { self.inner } /// Returns a reference to the internally buffered data. /// /// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty. pub fn buffer(&self) -> &[u8] { &self.buf[self.pos..self.cap] } /// Invalidates all data in the internal buffer. #[inline] fn discard_buffer(self: Pin<&mut Self>) { let me = self.project(); *me.pos = 0; *me.cap = 0; } } impl<R: AsyncRead> AsyncRead for BufReader<R> { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { // If we don't have any buffered data and we're doing a massive read // (larger than our internal buffer), bypass our internal buffer // entirely. if self.pos == self.cap && buf.remaining() >= self.buf.len() { let res = ready!(self.as_mut().get_pin_mut().poll_read(cx, buf)); self.discard_buffer(); return Poll::Ready(res); } let rem = ready!(self.as_mut().poll_fill_buf(cx))?; let amt = std::cmp::min(rem.len(), buf.remaining()); buf.put_slice(&rem[..amt]); self.consume(amt); Poll::Ready(Ok(())) } } impl<R: AsyncRead> AsyncBufRead for BufReader<R> { fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { let me = self.project(); // If we've reached the end of our internal buffer then we need to fetch // some more data from the underlying reader. 
// Branch using `>=` instead of the more correct `==` // to tell the compiler that the pos..cap slice is always valid. if *me.pos >= *me.cap { debug_assert!(*me.pos == *me.cap); let mut buf = ReadBuf::new(me.buf); ready!(me.inner.poll_read(cx, &mut buf))?; *me.cap = buf.filled().len(); *me.pos = 0; } Poll::Ready(Ok(&me.buf[*me.pos..*me.cap])) } fn consume(self: Pin<&mut Self>, amt: usize) { let me = self.project(); *me.pos = cmp::min(*me.pos + amt, *me.cap); } } #[derive(Debug, Clone, Copy)] pub(super) enum SeekState { /// `start_seek` has not been called. Init, /// `start_seek` has been called, but `poll_complete` has not yet been called. Start(SeekFrom), /// Waiting for completion of the first `poll_complete` in the `n.checked_sub(remainder).is_none()` branch. PendingOverflowed(i64), /// Waiting for completion of `poll_complete`. Pending, } /// Seeks to an offset, in bytes, in the underlying reader. /// /// The position used for seeking with `SeekFrom::Current(_)` is the /// position the underlying reader would be at if the `BufReader` had no /// internal buffer. /// /// Seeking always discards the internal buffer, even if the seek position /// would otherwise fall within it. This guarantees that calling /// `.into_inner()` immediately after a seek yields the underlying reader /// at the same position. /// /// See [`AsyncSeek`] for more details. /// /// Note: In the edge case where you're seeking with `SeekFrom::Current(n)` /// where `n` minus the internal buffer length overflows an `i64`, two /// seeks will be performed instead of one. If the second seek returns /// `Err`, the underlying reader will be left at the same position it would /// have if you called `seek` with `SeekFrom::Current(0)`. impl<R: AsyncRead + AsyncSeek> AsyncSeek for BufReader<R> { fn start_seek(self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> { // We needs to call seek operation multiple times. 
// And we should always call both start_seek and poll_complete, // as start_seek alone cannot guarantee that the operation will be completed. // poll_complete receives a Context and returns a Poll, so it cannot be called // inside start_seek. *self.project().seek_state = SeekState::Start(pos); Ok(()) } fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> { let res = match mem::replace(self.as_mut().project().seek_state, SeekState::Init) { SeekState::Init => { // 1.x AsyncSeek recommends calling poll_complete before start_seek. // We don't have to guarantee that the value returned by // poll_complete called without start_seek is correct, // so we'll return 0. return Poll::Ready(Ok(0)); } SeekState::Start(SeekFrom::Current(n)) => { let remainder = (self.cap - self.pos) as i64; // it should be safe to assume that remainder fits within an i64 as the alternative // means we managed to allocate 8 exbibytes and that's absurd. // But it's not out of the realm of possibility for some weird underlying reader to // support seeking by i64::MIN so we need to handle underflow when subtracting // remainder. if let Some(offset) = n.checked_sub(remainder) { self.as_mut() .get_pin_mut() .start_seek(SeekFrom::Current(offset))?; } else { // seek backwards by our remainder, and then by the offset self.as_mut() .get_pin_mut() .start_seek(SeekFrom::Current(-remainder))?; if self.as_mut().get_pin_mut().poll_complete(cx)?.is_pending() { *self.as_mut().project().seek_state = SeekState::PendingOverflowed(n); return Poll::Pending; } // https://github.com/rust-lang/rust/pull/61157#issuecomment-495932676 self.as_mut().discard_buffer(); self.as_mut() .get_pin_mut() .start_seek(SeekFrom::Current(n))?; } self.as_mut().get_pin_mut().poll_complete(cx)? 
} SeekState::PendingOverflowed(n) => { if self.as_mut().get_pin_mut().poll_complete(cx)?.is_pending() { *self.as_mut().project().seek_state = SeekState::PendingOverflowed(n); return Poll::Pending; } // https://github.com/rust-lang/rust/pull/61157#issuecomment-495932676 self.as_mut().discard_buffer(); self.as_mut() .get_pin_mut() .start_seek(SeekFrom::Current(n))?; self.as_mut().get_pin_mut().poll_complete(cx)? } SeekState::Start(pos) => { // Seeking with Start/End doesn't care about our buffer length. self.as_mut().get_pin_mut().start_seek(pos)?; self.as_mut().get_pin_mut().poll_complete(cx)? } SeekState::Pending => self.as_mut().get_pin_mut().poll_complete(cx)?, }; match res { Poll::Ready(res) => { self.discard_buffer(); Poll::Ready(Ok(res)) } Poll::Pending => { *self.as_mut().project().seek_state = SeekState::Pending; Poll::Pending } } } } impl<R: AsyncRead + AsyncWrite> AsyncWrite for BufReader<R> { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { self.get_pin_mut().poll_write(cx, buf) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll<io::Result<usize>> { self.get_pin_mut().poll_write_vectored(cx, bufs) } fn is_write_vectored(&self) -> bool { self.get_ref().is_write_vectored() } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.get_pin_mut().poll_flush(cx) } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.get_pin_mut().poll_shutdown(cx) } } impl<R: fmt::Debug> fmt::Debug for BufReader<R> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("BufReader") .field("reader", &self.inner) .field( "buffer", &format_args!("{}/{}", self.cap - self.pos, self.buf.len()), ) .finish() } } #[cfg(test)] mod tests { use super::*; #[test] fn assert_unpin() { crate::is_unpin::<BufReader<()>>(); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/mod.rs
tokio/src/io/util/mod.rs
#![allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 cfg_io_util! { mod async_buf_read_ext; pub use async_buf_read_ext::AsyncBufReadExt; mod async_read_ext; pub use async_read_ext::AsyncReadExt; mod async_seek_ext; pub use async_seek_ext::AsyncSeekExt; mod async_write_ext; pub use async_write_ext::AsyncWriteExt; mod buf_reader; pub use buf_reader::BufReader; mod buf_stream; pub use buf_stream::BufStream; mod buf_writer; pub use buf_writer::BufWriter; mod chain; pub use chain::Chain; mod copy; pub use copy::copy; mod copy_bidirectional; pub use copy_bidirectional::{copy_bidirectional, copy_bidirectional_with_sizes}; mod copy_buf; pub use copy_buf::copy_buf; mod empty; pub use empty::{empty, Empty}; mod flush; mod lines; pub use lines::Lines; mod mem; pub use mem::{duplex, simplex, DuplexStream, SimplexStream}; mod read; mod read_buf; mod read_exact; mod read_int; mod read_line; mod fill_buf; mod read_to_end; mod vec_with_initialized; cfg_process! { pub(crate) use read_to_end::read_to_end; } mod read_to_string; mod read_until; mod repeat; pub use repeat::{repeat, Repeat}; mod shutdown; mod sink; pub use sink::{sink, Sink}; mod split; pub use split::Split; mod take; pub use take::Take; mod write; mod write_vectored; mod write_all; mod write_buf; mod write_all_buf; mod write_int; // used by `BufReader` and `BufWriter` // https://github.com/rust-lang/rust/blob/master/library/std/src/sys_common/io.rs#L1 const DEFAULT_BUF_SIZE: usize = 8 * 1024; cfg_coop! { fn poll_proceed_and_make_progress(cx: &mut std::task::Context<'_>) -> std::task::Poll<()> { let coop = std::task::ready!(crate::task::coop::poll_proceed(cx)); coop.made_progress(); std::task::Poll::Ready(()) } } cfg_not_coop! { fn poll_proceed_and_make_progress(_: &mut std::task::Context<'_>) -> std::task::Poll<()> { std::task::Poll::Ready(()) } } } cfg_not_io_util! { cfg_process! { mod vec_with_initialized; mod read_to_end; // Used by process pub(crate) use read_to_end::read_to_end; } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/async_read_ext.rs
tokio/src/io/util/async_read_ext.rs
use crate::io::util::chain::{chain, Chain}; use crate::io::util::read::{read, Read}; use crate::io::util::read_buf::{read_buf, ReadBuf}; use crate::io::util::read_exact::{read_exact, ReadExact}; use crate::io::util::read_int::{ReadF32, ReadF32Le, ReadF64, ReadF64Le}; use crate::io::util::read_int::{ ReadI128, ReadI128Le, ReadI16, ReadI16Le, ReadI32, ReadI32Le, ReadI64, ReadI64Le, ReadI8, }; use crate::io::util::read_int::{ ReadU128, ReadU128Le, ReadU16, ReadU16Le, ReadU32, ReadU32Le, ReadU64, ReadU64Le, ReadU8, }; use crate::io::util::read_to_end::{read_to_end, ReadToEnd}; use crate::io::util::read_to_string::{read_to_string, ReadToString}; use crate::io::util::take::{take, Take}; use crate::io::AsyncRead; use bytes::BufMut; cfg_io_util! { /// Defines numeric reader macro_rules! read_impl { ( $( $(#[$outer:meta])* fn $name:ident(&mut self) -> $($fut:ident)*; )* ) => { $( $(#[$outer])* fn $name(&mut self) -> $($fut)*<&mut Self> where Self: Unpin { $($fut)*::new(self) } )* } } /// Reads bytes from a source. /// /// Implemented as an extension trait, adding utility methods to all /// [`AsyncRead`] types. Callers will tend to import this trait instead of /// [`AsyncRead`]. /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::fs::File; /// use tokio::io::{self, AsyncReadExt}; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut f = File::open("foo.txt").await?; /// let mut buffer = [0; 10]; /// /// // The `read` method is defined by this trait. /// let n = f.read(&mut buffer[..]).await?; /// /// Ok(()) /// } /// # } /// ``` /// /// See [module][crate::io] documentation for more details. /// /// [`AsyncRead`]: AsyncRead pub trait AsyncReadExt: AsyncRead { /// Creates a new `AsyncRead` instance that chains this stream with /// `next`. /// /// The returned `AsyncRead` instance will first read all bytes from this object /// until EOF is encountered. Afterwards the output is equivalent to the /// output of `next`. 
/// /// # Examples /// /// [`File`][crate::fs::File]s implement `AsyncRead`: /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::fs::File; /// use tokio::io::{self, AsyncReadExt}; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let f1 = File::open("foo.txt").await?; /// let f2 = File::open("bar.txt").await?; /// /// let mut handle = f1.chain(f2); /// let mut buffer = String::new(); /// /// // read the value into a String. We could use any AsyncRead /// // method here, this is just one example. /// handle.read_to_string(&mut buffer).await?; /// Ok(()) /// } /// # } /// ``` fn chain<R>(self, next: R) -> Chain<Self, R> where Self: Sized, R: AsyncRead, { chain(self, next) } /// Pulls some bytes from this source into the specified buffer, /// returning how many bytes were read. /// /// Equivalent to: /// /// ```ignore /// async fn read(&mut self, buf: &mut [u8]) -> io::Result<usize>; /// ``` /// /// This method does not provide any guarantees about whether it /// completes immediately or asynchronously. /// /// # Return /// /// If the return value of this method is `Ok(n)`, then it must be /// guaranteed that `0 <= n <= buf.len()`. A nonzero `n` value indicates /// that the buffer `buf` has been filled in with `n` bytes of data from /// this source. If `n` is `0`, then it can indicate one of two /// scenarios: /// /// 1. This reader has reached its "end of file" and will likely no longer /// be able to produce bytes. Note that this does not mean that the /// reader will *always* no longer be able to produce bytes. /// 2. The buffer specified was 0 bytes in length. /// /// No guarantees are provided about the contents of `buf` when this /// function is called, implementations cannot rely on any property of the /// contents of `buf` being true. It is recommended that *implementations* /// only write data to `buf` instead of reading its contents. 
/// /// Correspondingly, however, *callers* of this method may not assume /// any guarantees about how the implementation uses `buf`. It is /// possible that the code that's supposed to write to the buffer might /// also read from it. It is your responsibility to make sure that `buf` /// is initialized before calling `read`. /// /// # Errors /// /// If this function encounters any form of I/O or other error, an error /// variant will be returned. If an error is returned then it must be /// guaranteed that no bytes were read. /// /// # Cancel safety /// /// This method is cancel safe. If you use it as the event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, then it is guaranteed that no data was read. /// /// # Examples /// /// [`File`][crate::fs::File]s implement `Read`: /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::fs::File; /// use tokio::io::{self, AsyncReadExt}; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut f = File::open("foo.txt").await?; /// let mut buffer = [0; 10]; /// /// // read up to 10 bytes /// let n = f.read(&mut buffer[..]).await?; /// /// println!("The bytes: {:?}", &buffer[..n]); /// Ok(()) /// } /// # } /// ``` fn read<'a>(&'a mut self, buf: &'a mut [u8]) -> Read<'a, Self> where Self: Unpin, { read(self, buf) } /// Pulls some bytes from this source into the specified buffer, /// advancing the buffer's internal cursor. /// /// Equivalent to: /// /// ```ignore /// async fn read_buf<B: BufMut>(&mut self, buf: &mut B) -> io::Result<usize>; /// ``` /// /// Usually, only a single `read` syscall is issued, even if there is /// more space in the supplied buffer. /// /// This method does not provide any guarantees about whether it /// completes immediately or asynchronously. /// /// # Return /// /// A nonzero `n` value indicates that the buffer `buf` has been filled /// in with `n` bytes of data from this source. 
If `n` is `0`, then it /// can indicate one of two scenarios: /// /// 1. This reader has reached its "end of file" and will likely no longer /// be able to produce bytes. Note that this does not mean that the /// reader will *always* no longer be able to produce bytes. /// 2. The buffer specified had a remaining capacity of zero. /// /// # Errors /// /// If this function encounters any form of I/O or other error, an error /// variant will be returned. If an error is returned then it must be /// guaranteed that no bytes were read. /// /// # Cancel safety /// /// This method is cancel safe. If you use it as the event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, then it is guaranteed that no data was read. /// /// # Examples /// /// [`File`] implements `Read` and [`BytesMut`] implements [`BufMut`]: /// /// [`File`]: crate::fs::File /// [`BytesMut`]: bytes::BytesMut /// [`BufMut`]: bytes::BufMut /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::fs::File; /// use tokio::io::{self, AsyncReadExt}; /// /// use bytes::BytesMut; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut f = File::open("foo.txt").await?; /// let mut buffer = BytesMut::with_capacity(10); /// /// assert!(buffer.is_empty()); /// assert!(buffer.capacity() >= 10); /// /// // note that the return value is not needed to access the data /// // that was read as `buffer`'s internal cursor is updated. /// // /// // this might read more than 10 bytes if the capacity of `buffer` /// // is larger than 10. /// f.read_buf(&mut buffer).await?; /// /// println!("The bytes: {:?}", &buffer[..]); /// Ok(()) /// } /// # } /// ``` fn read_buf<'a, B>(&'a mut self, buf: &'a mut B) -> ReadBuf<'a, Self, B> where Self: Unpin, B: BufMut + ?Sized, { read_buf(self, buf) } /// Reads the exact number of bytes required to fill `buf`. 
/// /// Equivalent to: /// /// ```ignore /// async fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<usize>; /// ``` /// /// This function reads as many bytes as necessary to completely fill /// the specified buffer `buf`. /// /// # Errors /// /// If the operation encounters an "end of file" before completely /// filling the buffer, it returns an error of the kind /// [`ErrorKind::UnexpectedEof`]. The contents of `buf` are unspecified /// in this case. /// /// If any other read error is encountered then the operation /// immediately returns. The contents of `buf` are unspecified in this /// case. /// /// If this operation returns an error, it is unspecified how many bytes /// it has read, but it will never read more than would be necessary to /// completely fill the buffer. /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may already have been /// read into `buf`. /// /// # Examples /// /// [`File`][crate::fs::File]s implement `Read`: /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::fs::File; /// use tokio::io::{self, AsyncReadExt}; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut f = File::open("foo.txt").await?; /// let len = 10; /// let mut buffer = vec![0; len]; /// /// // read exactly 10 bytes /// f.read_exact(&mut buffer).await?; /// Ok(()) /// } /// # } /// ``` /// /// [`ErrorKind::UnexpectedEof`]: std::io::ErrorKind::UnexpectedEof fn read_exact<'a>(&'a mut self, buf: &'a mut [u8]) -> ReadExact<'a, Self> where Self: Unpin, { read_exact(self, buf) } read_impl! { /// Reads an unsigned 8 bit integer from the underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_u8(&mut self) -> io::Result<u8>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. 
/// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is cancel safe. If this method is used as an event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, it is guaranteed that no data were read. /// /// # Examples /// /// Read unsigned 8 bit integers from an `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![2, 5]); /// /// assert_eq!(2, reader.read_u8().await?); /// assert_eq!(5, reader.read_u8().await?); /// /// Ok(()) /// # } /// ``` fn read_u8(&mut self) -> ReadU8; /// Reads a signed 8 bit integer from the underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_i8(&mut self) -> io::Result<i8>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is cancel safe. If this method is used as an event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, it is guaranteed that no data were read. 
/// /// # Examples /// /// Read unsigned 8 bit integers from an `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0x02, 0xfb]); /// /// assert_eq!(2, reader.read_i8().await?); /// assert_eq!(-5, reader.read_i8().await?); /// /// Ok(()) /// # } /// ``` fn read_i8(&mut self) -> ReadI8; /// Reads an unsigned 16-bit integer in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_u16(&mut self) -> io::Result<u16>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read unsigned 16 bit big-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![2, 5, 3, 0]); /// /// assert_eq!(517, reader.read_u16().await?); /// assert_eq!(768, reader.read_u16().await?); /// Ok(()) /// # } /// ``` fn read_u16(&mut self) -> ReadU16; /// Reads a signed 16-bit integer in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_i16(&mut self) -> io::Result<i16>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
/// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read signed 16 bit big-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0x00, 0xc1, 0xff, 0x7c]); /// /// assert_eq!(193, reader.read_i16().await?); /// assert_eq!(-132, reader.read_i16().await?); /// Ok(()) /// # } /// ``` fn read_i16(&mut self) -> ReadI16; /// Reads an unsigned 32-bit integer in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_u32(&mut self) -> io::Result<u32>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read unsigned 32-bit big-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0x00, 0x00, 0x01, 0x0b]); /// /// assert_eq!(267, reader.read_u32().await?); /// Ok(()) /// # } /// ``` fn read_u32(&mut self) -> ReadU32; /// Reads a signed 32-bit integer in big-endian order from the /// underlying reader. 
/// /// /// Equivalent to: /// /// ```ignore /// async fn read_i32(&mut self) -> io::Result<i32>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read signed 32-bit big-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0xff, 0xff, 0x7a, 0x33]); /// /// assert_eq!(-34253, reader.read_i32().await?); /// Ok(()) /// # } /// ``` fn read_i32(&mut self) -> ReadI32; /// Reads an unsigned 64-bit integer in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_u64(&mut self) -> io::Result<u64>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. 
/// /// # Examples /// /// Read unsigned 64-bit big-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![ /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 /// ]); /// /// assert_eq!(918733457491587, reader.read_u64().await?); /// Ok(()) /// # } /// ``` fn read_u64(&mut self) -> ReadU64; /// Reads an signed 64-bit integer in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_i64(&mut self) -> io::Result<i64>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read signed 64-bit big-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0x80, 0, 0, 0, 0, 0, 0, 0]); /// /// assert_eq!(i64::MIN, reader.read_i64().await?); /// Ok(()) /// # } /// ``` fn read_i64(&mut self) -> ReadI64; /// Reads an unsigned 128-bit integer in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_u128(&mut self) -> io::Result<u128>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
/// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read unsigned 128-bit big-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![ /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83, /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 /// ]); /// /// assert_eq!(16947640962301618749969007319746179, reader.read_u128().await?); /// Ok(()) /// # } /// ``` fn read_u128(&mut self) -> ReadU128; /// Reads an signed 128-bit integer in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_i128(&mut self) -> io::Result<i128>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. 
/// /// # Examples /// /// Read signed 128-bit big-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![ /// 0x80, 0, 0, 0, 0, 0, 0, 0, /// 0, 0, 0, 0, 0, 0, 0, 0 /// ]); /// /// assert_eq!(i128::MIN, reader.read_i128().await?); /// Ok(()) /// # } /// ``` fn read_i128(&mut self) -> ReadI128; /// Reads an 32-bit floating point type in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_f32(&mut self) -> io::Result<f32>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read 32-bit floating point type from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0xff, 0x7f, 0xff, 0xff]); /// /// assert_eq!(f32::MIN, reader.read_f32().await?); /// Ok(()) /// # } /// ``` fn read_f32(&mut self) -> ReadF32; /// Reads an 64-bit floating point type in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_f64(&mut self) -> io::Result<f64>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
/// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read 64-bit floating point type from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![ /// 0xff, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff /// ]); /// /// assert_eq!(f64::MIN, reader.read_f64().await?); /// Ok(()) /// # } /// ``` fn read_f64(&mut self) -> ReadF64; /// Reads an unsigned 16-bit integer in little-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_u16_le(&mut self) -> io::Result<u16>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
true
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/mem.rs
tokio/src/io/util/mem.rs
//! In-process memory IO types. use crate::io::{split, AsyncRead, AsyncWrite, ReadBuf, ReadHalf, WriteHalf}; use crate::loom::sync::Mutex; use bytes::{Buf, BytesMut}; use std::{ pin::Pin, sync::Arc, task::{self, ready, Poll, Waker}, }; /// A bidirectional pipe to read and write bytes in memory. /// /// A pair of `DuplexStream`s are created together, and they act as a "channel" /// that can be used as in-memory IO types. Writing to one of the pairs will /// allow that data to be read from the other, and vice versa. /// /// # Closing a `DuplexStream` /// /// If one end of the `DuplexStream` channel is dropped, any pending reads on /// the other side will continue to read data until the buffer is drained, then /// they will signal EOF by returning 0 bytes. Any writes to the other side, /// including pending ones (that are waiting for free space in the buffer) will /// return `Err(BrokenPipe)` immediately. /// /// # Example /// /// ``` /// # async fn ex() -> std::io::Result<()> { /// # use tokio::io::{AsyncReadExt, AsyncWriteExt}; /// let (mut client, mut server) = tokio::io::duplex(64); /// /// client.write_all(b"ping").await?; /// /// let mut buf = [0u8; 4]; /// server.read_exact(&mut buf).await?; /// assert_eq!(&buf, b"ping"); /// /// server.write_all(b"pong").await?; /// /// client.read_exact(&mut buf).await?; /// assert_eq!(&buf, b"pong"); /// # Ok(()) /// # } /// ``` #[derive(Debug)] #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] pub struct DuplexStream { read: Arc<Mutex<SimplexStream>>, write: Arc<Mutex<SimplexStream>>, } /// A unidirectional pipe to read and write bytes in memory. /// /// It can be constructed by [`simplex`] function which will create a pair of /// reader and writer or by calling [`SimplexStream::new_unsplit`] that will /// create a handle for both reading and writing. 
/// /// # Example /// /// ``` /// # async fn ex() -> std::io::Result<()> { /// # use tokio::io::{AsyncReadExt, AsyncWriteExt}; /// let (mut receiver, mut sender) = tokio::io::simplex(64); /// /// sender.write_all(b"ping").await?; /// /// let mut buf = [0u8; 4]; /// receiver.read_exact(&mut buf).await?; /// assert_eq!(&buf, b"ping"); /// # Ok(()) /// # } /// ``` #[derive(Debug)] #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] pub struct SimplexStream { /// The buffer storing the bytes written, also read from. /// /// Using a `BytesMut` because it has efficient `Buf` and `BufMut` /// functionality already. Additionally, it can try to copy data in the /// same buffer if there read index has advanced far enough. buffer: BytesMut, /// Determines if the write side has been closed. is_closed: bool, /// The maximum amount of bytes that can be written before returning /// `Poll::Pending`. max_buf_size: usize, /// If the `read` side has been polled and is pending, this is the waker /// for that parked task. read_waker: Option<Waker>, /// If the `write` side has filled the `max_buf_size` and returned /// `Poll::Pending`, this is the waker for that parked task. write_waker: Option<Waker>, } // ===== impl DuplexStream ===== /// Create a new pair of `DuplexStream`s that act like a pair of connected sockets. /// /// The `max_buf_size` argument is the maximum amount of bytes that can be /// written to a side before the write returns `Poll::Pending`. #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] pub fn duplex(max_buf_size: usize) -> (DuplexStream, DuplexStream) { let one = Arc::new(Mutex::new(SimplexStream::new_unsplit(max_buf_size))); let two = Arc::new(Mutex::new(SimplexStream::new_unsplit(max_buf_size))); ( DuplexStream { read: one.clone(), write: two.clone(), }, DuplexStream { read: two, write: one, }, ) } impl AsyncRead for DuplexStream { // Previous rustc required this `self` to be `mut`, even though newer // versions recognize it isn't needed to call `lock()`. 
So for // compatibility, we include the `mut` and `allow` the lint. // // See https://github.com/rust-lang/rust/issues/73592 #[allow(unused_mut)] fn poll_read( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<std::io::Result<()>> { Pin::new(&mut *self.read.lock()).poll_read(cx, buf) } } impl AsyncWrite for DuplexStream { #[allow(unused_mut)] fn poll_write( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &[u8], ) -> Poll<std::io::Result<usize>> { Pin::new(&mut *self.write.lock()).poll_write(cx, buf) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut task::Context<'_>, bufs: &[std::io::IoSlice<'_>], ) -> Poll<Result<usize, std::io::Error>> { Pin::new(&mut *self.write.lock()).poll_write_vectored(cx, bufs) } fn is_write_vectored(&self) -> bool { true } #[allow(unused_mut)] fn poll_flush( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, ) -> Poll<std::io::Result<()>> { Pin::new(&mut *self.write.lock()).poll_flush(cx) } #[allow(unused_mut)] fn poll_shutdown( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, ) -> Poll<std::io::Result<()>> { Pin::new(&mut *self.write.lock()).poll_shutdown(cx) } } impl Drop for DuplexStream { fn drop(&mut self) { // notify the other side of the closure self.write.lock().close_write(); self.read.lock().close_read(); } } // ===== impl SimplexStream ===== /// Creates unidirectional buffer that acts like in memory pipe. /// /// The `max_buf_size` argument is the maximum amount of bytes that can be /// written to a buffer before the it returns `Poll::Pending`. /// /// # Unify reader and writer /// /// The reader and writer half can be unified into a single structure /// of `SimplexStream` that supports both reading and writing or /// the `SimplexStream` can be already created as unified structure /// using [`SimplexStream::new_unsplit()`]. 
/// /// ``` /// # async fn ex() -> std::io::Result<()> { /// # use tokio::io::{AsyncReadExt, AsyncWriteExt}; /// let (reader, writer) = tokio::io::simplex(64); /// let mut simplex_stream = reader.unsplit(writer); /// simplex_stream.write_all(b"hello").await?; /// /// let mut buf = [0u8; 5]; /// simplex_stream.read_exact(&mut buf).await?; /// assert_eq!(&buf, b"hello"); /// # Ok(()) /// # } /// ``` #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] pub fn simplex(max_buf_size: usize) -> (ReadHalf<SimplexStream>, WriteHalf<SimplexStream>) { split(SimplexStream::new_unsplit(max_buf_size)) } impl SimplexStream { /// Creates unidirectional buffer that acts like in memory pipe. To create split /// version with separate reader and writer you can use [`simplex`] function. /// /// The `max_buf_size` argument is the maximum amount of bytes that can be /// written to a buffer before the it returns `Poll::Pending`. #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] pub fn new_unsplit(max_buf_size: usize) -> SimplexStream { SimplexStream { buffer: BytesMut::new(), is_closed: false, max_buf_size, read_waker: None, write_waker: None, } } fn close_write(&mut self) { self.is_closed = true; // needs to notify any readers that no more data will come if let Some(waker) = self.read_waker.take() { waker.wake(); } } fn close_read(&mut self) { self.is_closed = true; // needs to notify any writers that they have to abort if let Some(waker) = self.write_waker.take() { waker.wake(); } } fn poll_read_internal( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<std::io::Result<()>> { if self.buffer.has_remaining() { let max = self.buffer.remaining().min(buf.remaining()); buf.put_slice(&self.buffer[..max]); self.buffer.advance(max); if max > 0 { // The passed `buf` might have been empty, don't wake up if // no bytes have been moved. 
if let Some(waker) = self.write_waker.take() { waker.wake(); } } Poll::Ready(Ok(())) } else if self.is_closed { Poll::Ready(Ok(())) } else { self.read_waker = Some(cx.waker().clone()); Poll::Pending } } fn poll_write_internal( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &[u8], ) -> Poll<std::io::Result<usize>> { if self.is_closed { return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into())); } let avail = self.max_buf_size - self.buffer.len(); if avail == 0 { self.write_waker = Some(cx.waker().clone()); return Poll::Pending; } let len = buf.len().min(avail); self.buffer.extend_from_slice(&buf[..len]); if let Some(waker) = self.read_waker.take() { waker.wake(); } Poll::Ready(Ok(len)) } fn poll_write_vectored_internal( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, bufs: &[std::io::IoSlice<'_>], ) -> Poll<Result<usize, std::io::Error>> { if self.is_closed { return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into())); } let avail = self.max_buf_size - self.buffer.len(); if avail == 0 { self.write_waker = Some(cx.waker().clone()); return Poll::Pending; } let mut rem = avail; for buf in bufs { if rem == 0 { break; } let len = buf.len().min(rem); self.buffer.extend_from_slice(&buf[..len]); rem -= len; } if let Some(waker) = self.read_waker.take() { waker.wake(); } Poll::Ready(Ok(avail - rem)) } } impl AsyncRead for SimplexStream { cfg_coop! { fn poll_read( self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<std::io::Result<()>> { ready!(crate::trace::trace_leaf(cx)); let coop = ready!(crate::task::coop::poll_proceed(cx)); let ret = self.poll_read_internal(cx, buf); if ret.is_ready() { coop.made_progress(); } ret } } cfg_not_coop! { fn poll_read( self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<std::io::Result<()>> { ready!(crate::trace::trace_leaf(cx)); self.poll_read_internal(cx, buf) } } } impl AsyncWrite for SimplexStream { cfg_coop! 
{ fn poll_write( self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &[u8], ) -> Poll<std::io::Result<usize>> { ready!(crate::trace::trace_leaf(cx)); let coop = ready!(crate::task::coop::poll_proceed(cx)); let ret = self.poll_write_internal(cx, buf); if ret.is_ready() { coop.made_progress(); } ret } } cfg_not_coop! { fn poll_write( self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &[u8], ) -> Poll<std::io::Result<usize>> { ready!(crate::trace::trace_leaf(cx)); self.poll_write_internal(cx, buf) } } cfg_coop! { fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut task::Context<'_>, bufs: &[std::io::IoSlice<'_>], ) -> Poll<Result<usize, std::io::Error>> { ready!(crate::trace::trace_leaf(cx)); let coop = ready!(crate::task::coop::poll_proceed(cx)); let ret = self.poll_write_vectored_internal(cx, bufs); if ret.is_ready() { coop.made_progress(); } ret } } cfg_not_coop! { fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut task::Context<'_>, bufs: &[std::io::IoSlice<'_>], ) -> Poll<Result<usize, std::io::Error>> { ready!(crate::trace::trace_leaf(cx)); self.poll_write_vectored_internal(cx, bufs) } } fn is_write_vectored(&self) -> bool { true } fn poll_flush(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<std::io::Result<()>> { Poll::Ready(Ok(())) } fn poll_shutdown( mut self: Pin<&mut Self>, _: &mut task::Context<'_>, ) -> Poll<std::io::Result<()>> { self.close_write(); Poll::Ready(Ok(())) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/lines.rs
tokio/src/io/util/lines.rs
use crate::io::util::read_line::read_line_internal; use crate::io::AsyncBufRead; use pin_project_lite::pin_project; use std::io; use std::mem; use std::pin::Pin; use std::task::{ready, Context, Poll}; pin_project! { /// Reads lines from an [`AsyncBufRead`]. /// /// A `Lines` can be turned into a `Stream` with [`LinesStream`]. /// /// This type is usually created using the [`lines`] method. /// /// [`AsyncBufRead`]: crate::io::AsyncBufRead /// [`LinesStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.LinesStream.html /// [`lines`]: crate::io::AsyncBufReadExt::lines #[derive(Debug)] #[must_use = "streams do nothing unless polled"] #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] pub struct Lines<R> { #[pin] reader: R, buf: String, bytes: Vec<u8>, read: usize, } } pub(crate) fn lines<R>(reader: R) -> Lines<R> where R: AsyncBufRead, { Lines { reader, buf: String::new(), bytes: Vec::new(), read: 0, } } impl<R> Lines<R> where R: AsyncBufRead + Unpin, { /// Returns the next line in the stream. /// /// # Cancel safety /// /// This method is cancellation safe. /// /// # Examples /// /// ``` /// # use tokio::io::AsyncBufRead; /// use tokio::io::AsyncBufReadExt; /// /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> { /// let mut lines = my_buf_read.lines(); /// /// while let Some(line) = lines.next_line().await? { /// println!("length = {}", line.len()) /// } /// # Ok(()) /// # } /// ``` pub async fn next_line(&mut self) -> io::Result<Option<String>> { use std::future::poll_fn; poll_fn(|cx| Pin::new(&mut *self).poll_next_line(cx)).await } /// Obtains a mutable reference to the underlying reader. pub fn get_mut(&mut self) -> &mut R { &mut self.reader } /// Obtains a reference to the underlying reader. pub fn get_ref(&mut self) -> &R { &self.reader } /// Unwraps this `Lines<R>`, returning the underlying reader. /// /// Note that any leftover data in the internal buffer is lost. 
/// Therefore, a following read from the underlying reader may lead to data loss. pub fn into_inner(self) -> R { self.reader } } impl<R> Lines<R> where R: AsyncBufRead, { /// Polls for the next line in the stream. /// /// This method returns: /// /// * `Poll::Pending` if the next line is not yet available. /// * `Poll::Ready(Ok(Some(line)))` if the next line is available. /// * `Poll::Ready(Ok(None))` if there are no more lines in this stream. /// * `Poll::Ready(Err(err))` if an IO error occurred while reading the next line. /// /// When the method returns `Poll::Pending`, the `Waker` in the provided /// `Context` is scheduled to receive a wakeup when more bytes become /// available on the underlying IO resource. Note that on multiple calls to /// `poll_next_line`, only the `Waker` from the `Context` passed to the most /// recent call is scheduled to receive a wakeup. pub fn poll_next_line( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<io::Result<Option<String>>> { let me = self.project(); let n = ready!(read_line_internal(me.reader, cx, me.buf, me.bytes, me.read))?; debug_assert_eq!(*me.read, 0); if n == 0 && me.buf.is_empty() { return Poll::Ready(Ok(None)); } if me.buf.ends_with('\n') { me.buf.pop(); if me.buf.ends_with('\r') { me.buf.pop(); } } Poll::Ready(Ok(Some(mem::take(me.buf)))) } } #[cfg(test)] mod tests { use super::*; #[test] fn assert_unpin() { crate::is_unpin::<Lines<()>>(); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/read.rs
tokio/src/io/util/read.rs
use crate::io::{AsyncRead, ReadBuf}; use pin_project_lite::pin_project; use std::future::Future; use std::io; use std::marker::PhantomPinned; use std::marker::Unpin; use std::pin::Pin; use std::task::{ready, Context, Poll}; /// Tries to read some bytes directly into the given `buf` in asynchronous /// manner, returning a future type. /// /// The returned future will resolve to both the I/O stream and the buffer /// as well as the number of bytes read once the read operation is completed. pub(crate) fn read<'a, R>(reader: &'a mut R, buf: &'a mut [u8]) -> Read<'a, R> where R: AsyncRead + Unpin + ?Sized, { Read { reader, buf, _pin: PhantomPinned, } } pin_project! { /// A future which can be used to easily read available number of bytes to fill /// a buffer. /// /// Created by the [`read`] function. #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct Read<'a, R: ?Sized> { reader: &'a mut R, buf: &'a mut [u8], // Make this future `!Unpin` for compatibility with async trait methods. #[pin] _pin: PhantomPinned, } } impl<R> Future for Read<'_, R> where R: AsyncRead + Unpin + ?Sized, { type Output = io::Result<usize>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> { let me = self.project(); let mut buf = ReadBuf::new(me.buf); ready!(Pin::new(me.reader).poll_read(cx, &mut buf))?; Poll::Ready(Ok(buf.filled().len())) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/read_to_end.rs
tokio/src/io/util/read_to_end.rs
use crate::io::util::vec_with_initialized::{into_read_buf_parts, VecU8, VecWithInitialized}; use crate::io::{AsyncRead, ReadBuf}; use pin_project_lite::pin_project; use std::future::Future; use std::io; use std::marker::PhantomPinned; use std::mem::{self, MaybeUninit}; use std::pin::Pin; use std::task::{ready, Context, Poll}; pin_project! { #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct ReadToEnd<'a, R: ?Sized> { reader: &'a mut R, buf: VecWithInitialized<&'a mut Vec<u8>>, // The number of bytes appended to buf. This can be less than buf.len() if // the buffer was not empty when the operation was started. read: usize, // Make this future `!Unpin` for compatibility with async trait methods. #[pin] _pin: PhantomPinned, } } pub(crate) fn read_to_end<'a, R>(reader: &'a mut R, buffer: &'a mut Vec<u8>) -> ReadToEnd<'a, R> where R: AsyncRead + Unpin + ?Sized, { ReadToEnd { reader, buf: VecWithInitialized::new(buffer), read: 0, _pin: PhantomPinned, } } pub(super) fn read_to_end_internal<V: VecU8, R: AsyncRead + ?Sized>( buf: &mut VecWithInitialized<V>, mut reader: Pin<&mut R>, num_read: &mut usize, cx: &mut Context<'_>, ) -> Poll<io::Result<usize>> { loop { let ret = ready!(poll_read_to_end(buf, reader.as_mut(), cx)); match ret { Err(err) => return Poll::Ready(Err(err)), Ok(0) => return Poll::Ready(Ok(mem::replace(num_read, 0))), Ok(num) => { *num_read += num; } } } } /// Tries to read from the provided [`AsyncRead`]. /// /// The length of the buffer is increased by the number of bytes read. fn poll_read_to_end<V: VecU8, R: AsyncRead + ?Sized>( buf: &mut VecWithInitialized<V>, read: Pin<&mut R>, cx: &mut Context<'_>, ) -> Poll<io::Result<usize>> { // This uses an adaptive system to extend the vector when it fills. We want to // avoid paying to allocate and zero a huge chunk of memory if the reader only // has 4 bytes while still making large reads if the reader does have a ton // of data to return. 
Simply tacking on an extra DEFAULT_BUF_SIZE space every // time is 4,500 times (!) slower than this if the reader has a very small // amount of data to return. When the vector is full with its starting // capacity, we first try to read into a small buffer to see if we reached // an EOF. This only happens when the starting capacity is >= NUM_BYTES, since // we allocate at least NUM_BYTES each time. This avoids the unnecessary // allocation that we attempt before reading into the vector. const NUM_BYTES: usize = 32; let try_small_read = buf.try_small_read_first(NUM_BYTES); // Get a ReadBuf into the vector. let mut read_buf; let poll_result; let n = if try_small_read { // Read some bytes using a small read. let mut small_buf: [MaybeUninit<u8>; NUM_BYTES] = [MaybeUninit::uninit(); NUM_BYTES]; let mut small_read_buf = ReadBuf::uninit(&mut small_buf); poll_result = read.poll_read(cx, &mut small_read_buf); let to_write = small_read_buf.filled(); // Ensure we have enough space to fill our vector with what we read. read_buf = buf.get_read_buf(); if to_write.len() > read_buf.remaining() { buf.reserve(NUM_BYTES); read_buf = buf.get_read_buf(); } read_buf.put_slice(to_write); to_write.len() } else { // Ensure we have enough space for reading. buf.reserve(NUM_BYTES); read_buf = buf.get_read_buf(); // Read data directly into vector. let filled_before = read_buf.filled().len(); poll_result = read.poll_read(cx, &mut read_buf); // Compute the number of bytes read. read_buf.filled().len() - filled_before }; // Update the length of the vector using the result of poll_read. let read_buf_parts = into_read_buf_parts(read_buf); buf.apply_read_buf(read_buf_parts); match poll_result { Poll::Pending => { // In this case, nothing should have been read. However we still // update the vector in case the poll_read call initialized parts of // the vector's unused capacity. 
debug_assert_eq!(n, 0); Poll::Pending } Poll::Ready(Err(err)) => { debug_assert_eq!(n, 0); Poll::Ready(Err(err)) } Poll::Ready(Ok(())) => Poll::Ready(Ok(n)), } } impl<A> Future for ReadToEnd<'_, A> where A: AsyncRead + ?Sized + Unpin, { type Output = io::Result<usize>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let me = self.project(); read_to_end_internal(me.buf, Pin::new(*me.reader), me.read, cx) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/fill_buf.rs
tokio/src/io/util/fill_buf.rs
use crate::io::AsyncBufRead; use pin_project_lite::pin_project; use std::future::Future; use std::io; use std::marker::PhantomPinned; use std::pin::Pin; use std::task::{Context, Poll}; pin_project! { /// Future for the [`fill_buf`](crate::io::AsyncBufReadExt::fill_buf) method. #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct FillBuf<'a, R: ?Sized> { reader: Option<&'a mut R>, #[pin] _pin: PhantomPinned, } } pub(crate) fn fill_buf<R>(reader: &mut R) -> FillBuf<'_, R> where R: AsyncBufRead + ?Sized + Unpin, { FillBuf { reader: Some(reader), _pin: PhantomPinned, } } impl<'a, R: AsyncBufRead + ?Sized + Unpin> Future for FillBuf<'a, R> { type Output = io::Result<&'a [u8]>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let me = self.project(); let reader = me.reader.take().expect("Polled after completion."); match Pin::new(&mut *reader).poll_fill_buf(cx) { Poll::Ready(Ok(slice)) => unsafe { // Safety: This is necessary only due to a limitation in the // borrow checker. Once Rust starts using the polonius borrow // checker, this can be simplified. // // The safety of this transmute relies on the fact that the // value of `reader` is `None` when we return in this branch. // Otherwise the caller could poll us again after // completion, and access the mutable reference while the // returned immutable reference still exists. let slice = std::mem::transmute::<&[u8], &'a [u8]>(slice); Poll::Ready(Ok(slice)) }, Poll::Ready(Err(err)) => Poll::Ready(Err(err)), Poll::Pending => { *me.reader = Some(reader); Poll::Pending } } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/write_all.rs
tokio/src/io/util/write_all.rs
use crate::io::AsyncWrite; use pin_project_lite::pin_project; use std::future::Future; use std::io; use std::marker::PhantomPinned; use std::mem; use std::pin::Pin; use std::task::{ready, Context, Poll}; pin_project! { #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct WriteAll<'a, W: ?Sized> { writer: &'a mut W, buf: &'a [u8], // Make this future `!Unpin` for compatibility with async trait methods. #[pin] _pin: PhantomPinned, } } pub(crate) fn write_all<'a, W>(writer: &'a mut W, buf: &'a [u8]) -> WriteAll<'a, W> where W: AsyncWrite + Unpin + ?Sized, { WriteAll { writer, buf, _pin: PhantomPinned, } } impl<W> Future for WriteAll<'_, W> where W: AsyncWrite + Unpin + ?Sized, { type Output = io::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { let me = self.project(); while !me.buf.is_empty() { let n = ready!(Pin::new(&mut *me.writer).poll_write(cx, me.buf))?; { let (_, rest) = mem::take(&mut *me.buf).split_at(n); *me.buf = rest; } if n == 0 { return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); } } Poll::Ready(Ok(())) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/read_buf.rs
tokio/src/io/util/read_buf.rs
use crate::io::AsyncRead; use bytes::BufMut; use pin_project_lite::pin_project; use std::future::Future; use std::io; use std::marker::PhantomPinned; use std::pin::Pin; use std::task::{ready, Context, Poll}; pub(crate) fn read_buf<'a, R, B>(reader: &'a mut R, buf: &'a mut B) -> ReadBuf<'a, R, B> where R: AsyncRead + Unpin + ?Sized, B: BufMut + ?Sized, { ReadBuf { reader, buf, _pin: PhantomPinned, } } pin_project! { /// Future returned by [`read_buf`](crate::io::AsyncReadExt::read_buf). #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct ReadBuf<'a, R: ?Sized, B: ?Sized> { reader: &'a mut R, buf: &'a mut B, #[pin] _pin: PhantomPinned, } } impl<R, B> Future for ReadBuf<'_, R, B> where R: AsyncRead + Unpin + ?Sized, B: BufMut + ?Sized, { type Output = io::Result<usize>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> { use crate::io::ReadBuf; let me = self.project(); if !me.buf.has_remaining_mut() { return Poll::Ready(Ok(0)); } let n = { let dst = me.buf.chunk_mut(); let dst = unsafe { dst.as_uninit_slice_mut() }; let mut buf = ReadBuf::uninit(dst); let ptr = buf.filled().as_ptr(); ready!(Pin::new(me.reader).poll_read(cx, &mut buf)?); // Ensure the pointer does not change from under us assert_eq!(ptr, buf.filled().as_ptr()); buf.filled().len() }; // Safety: This is guaranteed to be the number of initialized (and read) // bytes due to the invariants provided by `ReadBuf::filled`. unsafe { me.buf.advance_mut(n); } Poll::Ready(Ok(n)) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/async_seek_ext.rs
tokio/src/io/util/async_seek_ext.rs
use crate::io::seek::{seek, Seek}; use crate::io::AsyncSeek; use std::io::SeekFrom; cfg_io_util! { /// An extension trait that adds utility methods to [`AsyncSeek`] types. /// /// # Examples /// /// ``` /// use std::io::{self, Cursor, SeekFrom}; /// use tokio::io::{AsyncSeekExt, AsyncReadExt}; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut cursor = Cursor::new(b"abcdefg"); /// /// // the `seek` method is defined by this trait /// cursor.seek(SeekFrom::Start(3)).await?; /// /// let mut buf = [0; 1]; /// let n = cursor.read(&mut buf).await?; /// assert_eq!(n, 1); /// assert_eq!(buf, [b'd']); /// /// Ok(()) /// # } /// ``` /// /// See [module][crate::io] documentation for more details. /// /// [`AsyncSeek`]: AsyncSeek pub trait AsyncSeekExt: AsyncSeek { /// Creates a future which will seek an IO object, and then yield the /// new position in the object and the object itself. /// /// Equivalent to: /// /// ```ignore /// async fn seek(&mut self, pos: SeekFrom) -> io::Result<u64>; /// ``` /// /// In the case of an error the buffer and the object will be discarded, with /// the error yielded. /// /// # Examples /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::fs::File; /// use tokio::io::{AsyncSeekExt, AsyncReadExt}; /// /// use std::io::SeekFrom; /// /// # async fn dox() -> std::io::Result<()> { /// let mut file = File::open("foo.txt").await?; /// file.seek(SeekFrom::Start(6)).await?; /// /// let mut contents = vec![0u8; 10]; /// file.read_exact(&mut contents).await?; /// # Ok(()) /// # } /// # } /// ``` fn seek(&mut self, pos: SeekFrom) -> Seek<'_, Self> where Self: Unpin, { seek(self, pos) } /// Creates a future which will rewind to the beginning of the stream. /// /// This is convenience method, equivalent to `self.seek(SeekFrom::Start(0))`. 
fn rewind(&mut self) -> Seek<'_, Self> where Self: Unpin, { self.seek(SeekFrom::Start(0)) } /// Creates a future which will return the current seek position from the /// start of the stream. /// /// This is equivalent to `self.seek(SeekFrom::Current(0))`. fn stream_position(&mut self) -> Seek<'_, Self> where Self: Unpin, { self.seek(SeekFrom::Current(0)) } } } impl<S: AsyncSeek + ?Sized> AsyncSeekExt for S {}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/util/split.rs
tokio/src/io/util/split.rs
use crate::io::util::read_until::read_until_internal; use crate::io::AsyncBufRead; use pin_project_lite::pin_project; use std::io; use std::mem; use std::pin::Pin; use std::task::{ready, Context, Poll}; pin_project! { /// Splitter for the [`split`](crate::io::AsyncBufReadExt::split) method. /// /// A `Split` can be turned into a `Stream` with [`SplitStream`]. /// /// [`SplitStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.SplitStream.html #[derive(Debug)] #[must_use = "streams do nothing unless polled"] #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] pub struct Split<R> { #[pin] reader: R, buf: Vec<u8>, delim: u8, read: usize, } } pub(crate) fn split<R>(reader: R, delim: u8) -> Split<R> where R: AsyncBufRead, { Split { reader, buf: Vec::new(), delim, read: 0, } } impl<R> Split<R> where R: AsyncBufRead + Unpin, { /// Returns the next segment in the stream. /// /// # Examples /// /// ``` /// # use tokio::io::AsyncBufRead; /// use tokio::io::AsyncBufReadExt; /// /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> { /// let mut segments = my_buf_read.split(b'f'); /// /// while let Some(segment) = segments.next_segment().await? { /// println!("length = {}", segment.len()) /// } /// # Ok(()) /// # } /// ``` pub async fn next_segment(&mut self) -> io::Result<Option<Vec<u8>>> { use std::future::poll_fn; poll_fn(|cx| Pin::new(&mut *self).poll_next_segment(cx)).await } } impl<R> Split<R> where R: AsyncBufRead, { /// Polls for the next segment in the stream. /// /// This method returns: /// /// * `Poll::Pending` if the next segment is not yet available. /// * `Poll::Ready(Ok(Some(segment)))` if the next segment is available. /// * `Poll::Ready(Ok(None))` if there are no more segments in this stream. /// * `Poll::Ready(Err(err))` if an IO error occurred while reading the /// next segment. 
/// /// When the method returns `Poll::Pending`, the `Waker` in the provided /// `Context` is scheduled to receive a wakeup when more bytes become /// available on the underlying IO resource. /// /// Note that on multiple calls to `poll_next_segment`, only the `Waker` /// from the `Context` passed to the most recent call is scheduled to /// receive a wakeup. pub fn poll_next_segment( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<io::Result<Option<Vec<u8>>>> { let me = self.project(); let n = ready!(read_until_internal( me.reader, cx, *me.delim, me.buf, me.read, ))?; // read_until_internal resets me.read to zero once it finds the delimiter debug_assert_eq!(*me.read, 0); if n == 0 && me.buf.is_empty() { return Poll::Ready(Ok(None)); } if me.buf.last() == Some(me.delim) { me.buf.pop(); } Poll::Ready(Ok(Some(mem::take(me.buf)))) } } #[cfg(test)] mod tests { use super::*; #[test] fn assert_unpin() { crate::is_unpin::<Split<()>>(); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/uring/open.rs
tokio/src/io/uring/open.rs
use super::utils::cstr; use crate::fs::UringOpenOptions; use crate::runtime::driver::op::{CancelData, Cancellable, Completable, CqeResult, Op}; use io_uring::{opcode, types}; use std::ffi::CString; use std::io::{self, Error}; use std::os::fd::FromRawFd; use std::path::Path; #[derive(Debug)] pub(crate) struct Open { /// This field will be read by the kernel during the operation, so we /// need to ensure it is valid for the entire duration of the operation. #[allow(dead_code)] path: CString, } impl Completable for Open { type Output = io::Result<crate::fs::File>; fn complete(self, cqe: CqeResult) -> Self::Output { cqe.result .map(|fd| unsafe { crate::fs::File::from_raw_fd(fd as i32) }) } fn complete_with_error(self, err: Error) -> Self::Output { Err(err) } } impl Cancellable for Open { fn cancel(self) -> CancelData { CancelData::Open(self) } } impl Op<Open> { /// Submit a request to open a file. pub(crate) fn open(path: &Path, options: &UringOpenOptions) -> io::Result<Op<Open>> { let inner_opt = options; let path = cstr(path)?; let custom_flags = inner_opt.custom_flags; let flags = libc::O_CLOEXEC | options.access_mode()? | options.creation_mode()? | (custom_flags & !libc::O_ACCMODE); let open_op = opcode::OpenAt::new(types::Fd(libc::AT_FDCWD), path.as_ptr()) .flags(flags) .mode(inner_opt.mode) .build(); // SAFETY: Parameters are valid for the entire duration of the operation let op = unsafe { Op::new(open_op, Open { path }) }; Ok(op) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/uring/write.rs
tokio/src/io/uring/write.rs
use crate::runtime::driver::op::{CancelData, Cancellable, Completable, CqeResult, Op}; use crate::util::as_ref::OwnedBuf; use io_uring::{opcode, types}; use std::io::{self, Error}; use std::os::fd::{AsRawFd, OwnedFd}; #[derive(Debug)] pub(crate) struct Write { buf: OwnedBuf, fd: OwnedFd, } impl Completable for Write { type Output = (io::Result<u32>, OwnedBuf, OwnedFd); fn complete(self, cqe: CqeResult) -> Self::Output { (cqe.result, self.buf, self.fd) } fn complete_with_error(self, err: Error) -> Self::Output { (Err(err), self.buf, self.fd) } } impl Cancellable for Write { fn cancel(self) -> CancelData { CancelData::Write(self) } } impl Op<Write> { /// Issue a write that starts at `buf_offset` within `buf` and writes some bytes /// into `file` at `file_offset`. pub(crate) fn write_at( fd: OwnedFd, buf: OwnedBuf, buf_offset: usize, file_offset: u64, ) -> io::Result<Self> { // There is a cap on how many bytes we can write in a single uring write operation. // ref: https://github.com/axboe/liburing/discussions/497 let len = u32::try_from(buf.as_ref().len() - buf_offset).unwrap_or(u32::MAX); let ptr = buf.as_ref()[buf_offset..buf_offset + len as usize].as_ptr(); let sqe = opcode::Write::new(types::Fd(fd.as_raw_fd()), ptr, len) .offset(file_offset) .build(); // SAFETY: parameters of the entry, such as `fd` and `buf`, are valid // until this operation completes. let op = unsafe { Op::new(sqe, Write { buf, fd }) }; Ok(op) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/uring/utils.rs
tokio/src/io/uring/utils.rs
use std::os::unix::ffi::OsStrExt; use std::{ffi::CString, io, path::Path}; pub(crate) fn cstr(p: &Path) -> io::Result<CString> { Ok(CString::new(p.as_os_str().as_bytes())?) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/uring/mod.rs
tokio/src/io/uring/mod.rs
pub(crate) mod open; pub(crate) mod read; pub(crate) mod utils; pub(crate) mod write;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/uring/read.rs
tokio/src/io/uring/read.rs
use crate::runtime::driver::op::{CancelData, Cancellable, Completable, CqeResult, Op}; use io_uring::{opcode, types}; use std::io::{self, Error}; use std::os::fd::{AsRawFd, OwnedFd}; #[derive(Debug)] pub(crate) struct Read { fd: OwnedFd, buf: Vec<u8>, } impl Completable for Read { type Output = (io::Result<u32>, OwnedFd, Vec<u8>); fn complete(self, cqe: CqeResult) -> Self::Output { let mut buf = self.buf; if let Ok(len) = cqe.result { let new_len = buf.len() + len as usize; // SAFETY: Kernel read len bytes unsafe { buf.set_len(new_len) }; } (cqe.result, self.fd, buf) } fn complete_with_error(self, err: Error) -> Self::Output { (Err(err), self.fd, self.buf) } } impl Cancellable for Read { fn cancel(self) -> CancelData { CancelData::Read(self) } } impl Op<Read> { // Submit a request to read a FD at given length and offset into a // dynamic buffer with uninitialized memory. The read happens on uninitialized // buffer and no overwriting happens. // SAFETY: The `len` of the amount to be read and the buffer that is passed // should have capacity > len. // // If `len` read is higher than vector capacity then setting its length by // the caller in terms of size_read can be unsound. pub(crate) fn read(fd: OwnedFd, mut buf: Vec<u8>, len: u32, offset: u64) -> Self { // don't overwrite on already written part assert!(buf.spare_capacity_mut().len() >= len as usize); let buf_mut_ptr = buf.spare_capacity_mut().as_mut_ptr().cast(); let read_op = opcode::Read::new(types::Fd(fd.as_raw_fd()), buf_mut_ptr, len) .offset(offset) .build(); // SAFETY: Parameters are valid for the entire duration of the operation unsafe { Op::new(read_op, Read { fd, buf }) } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/io/bsd/poll_aio.rs
tokio/src/io/bsd/poll_aio.rs
//! Use POSIX AIO futures with Tokio. use crate::io::interest::Interest; use crate::runtime::io::{ReadyEvent, Registration}; use crate::runtime::scheduler; use mio::event::Source; use mio::Registry; use mio::Token; use std::fmt; use std::io; use std::ops::{Deref, DerefMut}; use std::os::unix::io::AsRawFd; use std::os::unix::prelude::RawFd; use std::task::{ready, Context, Poll}; /// Like [`mio::event::Source`], but for POSIX AIO only. /// /// Tokio's consumer must pass an implementor of this trait to create a /// [`Aio`] object. pub trait AioSource { /// Registers this AIO event source with Tokio's reactor. fn register(&mut self, kq: RawFd, token: usize); /// Deregisters this AIO event source with Tokio's reactor. fn deregister(&mut self); } /// Wraps the user's AioSource in order to implement mio::event::Source, which /// is what the rest of the crate wants. struct MioSource<T>(T); impl<T: AioSource> Source for MioSource<T> { fn register( &mut self, registry: &Registry, token: Token, interests: mio::Interest, ) -> io::Result<()> { assert!(interests.is_aio() || interests.is_lio()); self.0.register(registry.as_raw_fd(), usize::from(token)); Ok(()) } fn deregister(&mut self, _registry: &Registry) -> io::Result<()> { self.0.deregister(); Ok(()) } fn reregister( &mut self, registry: &Registry, token: Token, interests: mio::Interest, ) -> io::Result<()> { assert!(interests.is_aio() || interests.is_lio()); self.0.register(registry.as_raw_fd(), usize::from(token)); Ok(()) } } /// Associates a POSIX AIO control block with the reactor that drives it. /// /// `Aio`'s wrapped type must implement [`AioSource`] to be driven /// by the reactor. /// /// The wrapped source may be accessed through the `Aio` via the `Deref` and /// `DerefMut` traits. /// /// ## Clearing readiness /// /// If [`Aio::poll_ready`] returns ready, but the consumer determines that the /// Source is not completely ready and must return to the Pending state, /// [`Aio::clear_ready`] may be used. 
This can be useful with /// [`lio_listio`], which may generate a kevent when only a portion of the /// operations have completed. /// /// ## Platforms /// /// Only FreeBSD implements POSIX AIO with kqueue notification, so /// `Aio` is only available for that operating system. /// /// [`lio_listio`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/lio_listio.html // Note: Unlike every other kqueue event source, POSIX AIO registers events not // via kevent(2) but when the aiocb is submitted to the kernel via aio_read, // aio_write, etc. It needs the kqueue's file descriptor to do that. So // AsyncFd can't be used for POSIX AIO. // // Note that Aio doesn't implement Drop. There's no need. Unlike other // kqueue sources, simply dropping the object effectively deregisters it. pub struct Aio<E> { io: MioSource<E>, registration: Registration, } // ===== impl Aio ===== impl<E: AioSource> Aio<E> { /// Creates a new `Aio` suitable for use with POSIX AIO functions. /// /// It will be associated with the default reactor. The runtime is usually /// set implicitly when this function is called from a future driven by a /// Tokio runtime, otherwise runtime can be set explicitly with /// [`Runtime::enter`](crate::runtime::Runtime::enter) function. pub fn new_for_aio(io: E) -> io::Result<Self> { Self::new_with_interest(io, Interest::AIO) } /// Creates a new `Aio` suitable for use with [`lio_listio`]. /// /// It will be associated with the default reactor. The runtime is usually /// set implicitly when this function is called from a future driven by a /// Tokio runtime, otherwise runtime can be set explicitly with /// [`Runtime::enter`](crate::runtime::Runtime::enter) function. 
/// /// [`lio_listio`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/lio_listio.html pub fn new_for_lio(io: E) -> io::Result<Self> { Self::new_with_interest(io, Interest::LIO) } fn new_with_interest(io: E, interest: Interest) -> io::Result<Self> { let mut io = MioSource(io); let handle = scheduler::Handle::current(); let registration = Registration::new_with_interest_and_handle(&mut io, interest, handle)?; Ok(Self { io, registration }) } /// Indicates to Tokio that the source is no longer ready. The internal /// readiness flag will be cleared, and tokio will wait for the next /// edge-triggered readiness notification from the OS. /// /// It is critical that this method not be called unless your code /// _actually observes_ that the source is _not_ ready. The OS must /// deliver a subsequent notification, or this source will block /// forever. It is equally critical that you `do` call this method if you /// resubmit the same structure to the kernel and poll it again. /// /// This method is not very useful with AIO readiness, since each `aiocb` /// structure is typically only used once. It's main use with /// [`lio_listio`], which will sometimes send notification when only a /// portion of its elements are complete. In that case, the caller must /// call `clear_ready` before resubmitting it. /// /// [`lio_listio`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/lio_listio.html pub fn clear_ready(&self, ev: AioEvent) { self.registration.clear_readiness(ev.0) } /// Destroy the [`Aio`] and return its inner source. pub fn into_inner(self) -> E { self.io.0 } /// Polls for readiness. Either AIO or LIO counts. /// /// This method returns: /// * `Poll::Pending` if the underlying operation is not complete, whether /// or not it completed successfully. This will be true if the OS is /// still processing it, or if it has not yet been submitted to the OS. /// * `Poll::Ready(Ok(_))` if the underlying operation is complete. 
/// * `Poll::Ready(Err(_))` if the reactor has been shutdown. This does /// _not_ indicate that the underlying operation encountered an error. /// /// When the method returns `Poll::Pending`, the `Waker` in the provided `Context` /// is scheduled to receive a wakeup when the underlying operation /// completes. Note that on multiple calls to `poll_ready`, only the `Waker` from the /// `Context` passed to the most recent call is scheduled to receive a wakeup. pub fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<AioEvent>> { let ev = ready!(self.registration.poll_read_ready(cx))?; Poll::Ready(Ok(AioEvent(ev))) } } impl<E: AioSource> Deref for Aio<E> { type Target = E; fn deref(&self) -> &E { &self.io.0 } } impl<E: AioSource> DerefMut for Aio<E> { fn deref_mut(&mut self) -> &mut E { &mut self.io.0 } } impl<E: AioSource + fmt::Debug> fmt::Debug for Aio<E> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Aio").field("io", &self.io.0).finish() } } /// Opaque data returned by [`Aio::poll_ready`]. /// /// It can be fed back to [`Aio::clear_ready`]. #[derive(Debug)] pub struct AioEvent(ReadyEvent);
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/future/try_join.rs
tokio/src/future/try_join.rs
use crate::future::maybe_done::{maybe_done, MaybeDone}; use pin_project_lite::pin_project; use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; pub(crate) fn try_join3<T1, F1, T2, F2, T3, F3, E>( future1: F1, future2: F2, future3: F3, ) -> TryJoin3<F1, F2, F3> where F1: Future<Output = Result<T1, E>>, F2: Future<Output = Result<T2, E>>, F3: Future<Output = Result<T3, E>>, { TryJoin3 { future1: maybe_done(future1), future2: maybe_done(future2), future3: maybe_done(future3), } } pin_project! { pub(crate) struct TryJoin3<F1, F2, F3> where F1: Future, F2: Future, F3: Future, { #[pin] future1: MaybeDone<F1>, #[pin] future2: MaybeDone<F2>, #[pin] future3: MaybeDone<F3>, } } impl<T1, F1, T2, F2, T3, F3, E> Future for TryJoin3<F1, F2, F3> where F1: Future<Output = Result<T1, E>>, F2: Future<Output = Result<T2, E>>, F3: Future<Output = Result<T3, E>>, { type Output = Result<(T1, T2, T3), E>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let mut all_done = true; let mut me = self.project(); if me.future1.as_mut().poll(cx).is_pending() { all_done = false; } else if me.future1.as_mut().output_mut().unwrap().is_err() { return Poll::Ready(Err(me.future1.take_output().unwrap().err().unwrap())); } if me.future2.as_mut().poll(cx).is_pending() { all_done = false; } else if me.future2.as_mut().output_mut().unwrap().is_err() { return Poll::Ready(Err(me.future2.take_output().unwrap().err().unwrap())); } if me.future3.as_mut().poll(cx).is_pending() { all_done = false; } else if me.future3.as_mut().output_mut().unwrap().is_err() { return Poll::Ready(Err(me.future3.take_output().unwrap().err().unwrap())); } if all_done { Poll::Ready(Ok(( me.future1.take_output().unwrap().ok().unwrap(), me.future2.take_output().unwrap().ok().unwrap(), me.future3.take_output().unwrap().ok().unwrap(), ))) } else { Poll::Pending } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/future/trace.rs
tokio/src/future/trace.rs
use std::future::Future; pub(crate) trait InstrumentedFuture: Future { fn id(&self) -> Option<tracing::Id>; } impl<F: Future> InstrumentedFuture for tracing::instrument::Instrumented<F> { fn id(&self) -> Option<tracing::Id> { self.span().id() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/future/block_on.rs
tokio/src/future/block_on.rs
use std::future::Future; cfg_rt! { #[track_caller] pub(crate) fn block_on<F: Future>(f: F) -> F::Output { let mut e = crate::runtime::context::try_enter_blocking_region().expect( "Cannot block the current thread from within a runtime. This \ happens because a function attempted to block the current \ thread while the thread is being used to drive asynchronous \ tasks." ); e.block_on(f).unwrap() } } cfg_not_rt! { #[track_caller] pub(crate) fn block_on<F: Future>(f: F) -> F::Output { let mut park = crate::runtime::park::CachedParkThread::new(); park.block_on(f).unwrap() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/future/maybe_done.rs
tokio/src/future/maybe_done.rs
//! Definition of the [`MaybeDone`] combinator. use pin_project_lite::pin_project; use std::future::{Future, IntoFuture}; use std::pin::Pin; use std::task::{ready, Context, Poll}; pin_project! { /// A future that may have completed. #[derive(Debug)] #[project = MaybeDoneProj] #[project_replace = MaybeDoneProjReplace] #[repr(C)] // https://github.com/rust-lang/miri/issues/3780 pub enum MaybeDone<Fut: Future> { /// A not-yet-completed future. Future { #[pin] future: Fut }, /// The output of the completed future. Done { output: Fut::Output }, /// The empty variant after the result of a [`MaybeDone`] has been /// taken using the [`take_output`](MaybeDone::take_output) method. Gone, } } /// Wraps a future into a `MaybeDone`. pub fn maybe_done<F: IntoFuture>(future: F) -> MaybeDone<F::IntoFuture> { MaybeDone::Future { future: future.into_future(), } } impl<Fut: Future> MaybeDone<Fut> { /// Returns an [`Option`] containing a mutable reference to the output of the future. /// The output of this method will be [`Some`] if and only if the inner /// future has been completed and [`take_output`](MaybeDone::take_output) /// has not yet been called. pub fn output_mut(self: Pin<&mut Self>) -> Option<&mut Fut::Output> { match self.project() { MaybeDoneProj::Done { output } => Some(output), _ => None, } } /// Attempts to take the output of a `MaybeDone` without driving it /// towards completion. #[inline] pub fn take_output(self: Pin<&mut Self>) -> Option<Fut::Output> { match *self { MaybeDone::Done { .. } => {} MaybeDone::Future { .. } | MaybeDone::Gone => return None, }; if let MaybeDoneProjReplace::Done { output } = self.project_replace(MaybeDone::Gone) { Some(output) } else { unreachable!() } } } impl<Fut: Future> Future for MaybeDone<Fut> { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let output = match self.as_mut().project() { MaybeDoneProj::Future { future } => ready!(future.poll(cx)), MaybeDoneProj::Done { .. 
} => return Poll::Ready(()), MaybeDoneProj::Gone => panic!("MaybeDone polled after value taken"), }; self.set(MaybeDone::Done { output }); Poll::Ready(()) } } // Test for https://github.com/tokio-rs/tokio/issues/6729 #[cfg(test)] mod miri_tests { use super::maybe_done; use std::{ future::Future, pin::Pin, sync::Arc, task::{Context, Poll, Wake}, }; struct ThingAdder<'a> { thing: &'a mut String, } impl Future for ThingAdder<'_> { type Output = (); fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> { unsafe { *self.get_unchecked_mut().thing += ", world"; } Poll::Pending } } #[test] fn maybe_done_miri() { let mut thing = "hello".to_owned(); // The async block is necessary to trigger the miri failure. #[allow(clippy::redundant_async_block)] let fut = async move { ThingAdder { thing: &mut thing }.await }; let mut fut = maybe_done(fut); let mut fut = unsafe { Pin::new_unchecked(&mut fut) }; let waker = Arc::new(DummyWaker).into(); let mut ctx = Context::from_waker(&waker); assert_eq!(fut.as_mut().poll(&mut ctx), Poll::Pending); assert_eq!(fut.as_mut().poll(&mut ctx), Poll::Pending); } struct DummyWaker; impl Wake for DummyWaker { fn wake(self: Arc<Self>) {} } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/future/mod.rs
tokio/src/future/mod.rs
#![cfg_attr(not(feature = "macros"), allow(unreachable_pub))] //! Asynchronous values. #[cfg(any(feature = "macros", feature = "process"))] pub(crate) mod maybe_done; cfg_process! { mod try_join; pub(crate) use try_join::try_join3; } cfg_sync! { mod block_on; pub(crate) use block_on::block_on; } cfg_trace! { mod trace; #[allow(unused_imports)] pub(crate) use trace::InstrumentedFuture as Future; } cfg_not_trace! { cfg_rt! { pub(crate) use std::future::Future; } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/loom/mocked.rs
tokio/src/loom/mocked.rs
pub(crate) use loom::*; pub(crate) mod sync { pub(crate) use loom::sync::{MutexGuard, RwLockReadGuard, RwLockWriteGuard}; #[derive(Debug)] pub(crate) struct Mutex<T>(loom::sync::Mutex<T>); #[allow(dead_code)] impl<T> Mutex<T> { #[inline] pub(crate) fn new(t: T) -> Mutex<T> { Mutex(loom::sync::Mutex::new(t)) } #[inline] #[track_caller] pub(crate) fn lock(&self) -> MutexGuard<'_, T> { self.0.lock().unwrap() } #[inline] pub(crate) fn try_lock(&self) -> Option<MutexGuard<'_, T>> { self.0.try_lock().ok() } } #[derive(Debug)] pub(crate) struct RwLock<T>(loom::sync::RwLock<T>); #[allow(dead_code)] impl<T> RwLock<T> { #[inline] pub(crate) fn new(t: T) -> Self { Self(loom::sync::RwLock::new(t)) } #[inline] pub(crate) fn read(&self) -> RwLockReadGuard<'_, T> { self.0.read().unwrap() } #[inline] pub(crate) fn try_read(&self) -> Option<RwLockReadGuard<'_, T>> { self.0.try_read().ok() } #[inline] pub(crate) fn write(&self) -> RwLockWriteGuard<'_, T> { self.0.write().unwrap() } #[inline] pub(crate) fn try_write(&self) -> Option<RwLockWriteGuard<'_, T>> { self.0.try_write().ok() } } pub(crate) use loom::sync::*; pub(crate) mod atomic { pub(crate) use loom::sync::atomic::*; // TODO: implement a loom version pub(crate) type StaticAtomicU64 = std::sync::atomic::AtomicU64; } } pub(crate) mod rand { pub(crate) fn seed() -> u64 { 1 } } pub(crate) mod sys { pub(crate) fn num_cpus() -> usize { 2 } } pub(crate) mod thread { pub use loom::lazy_static::AccessError; pub use loom::thread::*; }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/loom/mod.rs
tokio/src/loom/mod.rs
//! This module abstracts over `loom` and `std::sync` depending on whether we //! are running tests or not. #![allow(unused)] #[cfg(not(all(test, loom)))] mod std; #[cfg(not(all(test, loom)))] pub(crate) use self::std::*; #[cfg(all(test, loom))] mod mocked; #[cfg(all(test, loom))] pub(crate) use self::mocked::*;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/loom/std/atomic_u16.rs
tokio/src/loom/std/atomic_u16.rs
use std::cell::UnsafeCell; use std::fmt; use std::ops::Deref; use std::panic; /// `AtomicU16` providing an additional `unsync_load` function. pub(crate) struct AtomicU16 { inner: UnsafeCell<std::sync::atomic::AtomicU16>, } unsafe impl Send for AtomicU16 {} unsafe impl Sync for AtomicU16 {} impl panic::RefUnwindSafe for AtomicU16 {} impl panic::UnwindSafe for AtomicU16 {} impl AtomicU16 { pub(crate) const fn new(val: u16) -> AtomicU16 { let inner = UnsafeCell::new(std::sync::atomic::AtomicU16::new(val)); AtomicU16 { inner } } /// Performs an unsynchronized load. /// /// # Safety /// /// All mutations must have happened before the unsynchronized load. /// Additionally, there must be no concurrent mutations. pub(crate) unsafe fn unsync_load(&self) -> u16 { unsafe { core::ptr::read(self.inner.get() as *const u16) } } } impl Deref for AtomicU16 { type Target = std::sync::atomic::AtomicU16; fn deref(&self) -> &Self::Target { // safety: it is always safe to access `&self` fns on the inner value as // we never perform unsafe mutations. unsafe { &*self.inner.get() } } } impl fmt::Debug for AtomicU16 { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { self.deref().fmt(fmt) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/loom/std/parking_lot.rs
tokio/src/loom/std/parking_lot.rs
//! A minimal adaption of the `parking_lot` synchronization primitives to the //! equivalent `std::sync` types. //! //! This can be extended to additional types/methods as required. use std::fmt; use std::marker::PhantomData; use std::ops::{Deref, DerefMut}; use std::sync::{LockResult, TryLockError}; use std::time::Duration; // All types in this file are marked with PhantomData to ensure that // parking_lot's send_guard feature does not leak through and affect when Tokio // types are Send. // // See <https://github.com/tokio-rs/tokio/pull/4359> for more info. // Types that do not need wrapping pub(crate) use parking_lot::WaitTimeoutResult; #[derive(Debug)] pub(crate) struct Mutex<T: ?Sized>(PhantomData<std::sync::Mutex<T>>, parking_lot::Mutex<T>); #[derive(Debug)] pub(crate) struct RwLock<T>(PhantomData<std::sync::RwLock<T>>, parking_lot::RwLock<T>); #[derive(Debug)] pub(crate) struct Condvar(PhantomData<std::sync::Condvar>, parking_lot::Condvar); #[derive(Debug)] pub(crate) struct MutexGuard<'a, T: ?Sized>( PhantomData<std::sync::MutexGuard<'a, T>>, parking_lot::MutexGuard<'a, T>, ); #[derive(Debug)] pub(crate) struct RwLockReadGuard<'a, T: ?Sized>( PhantomData<std::sync::RwLockReadGuard<'a, T>>, parking_lot::RwLockReadGuard<'a, T>, ); #[derive(Debug)] pub(crate) struct RwLockWriteGuard<'a, T: ?Sized>( PhantomData<std::sync::RwLockWriteGuard<'a, T>>, parking_lot::RwLockWriteGuard<'a, T>, ); impl<T> Mutex<T> { #[inline] pub(crate) fn new(t: T) -> Mutex<T> { Mutex(PhantomData, parking_lot::Mutex::new(t)) } #[inline] #[cfg(not(all(loom, test)))] pub(crate) const fn const_new(t: T) -> Mutex<T> { Mutex(PhantomData, parking_lot::const_mutex(t)) } #[inline] pub(crate) fn lock(&self) -> MutexGuard<'_, T> { MutexGuard(PhantomData, self.1.lock()) } #[inline] pub(crate) fn try_lock(&self) -> Option<MutexGuard<'_, T>> { self.1 .try_lock() .map(|guard| MutexGuard(PhantomData, guard)) } #[inline] pub(crate) fn get_mut(&mut self) -> &mut T { self.1.get_mut() } // Note: 
Additional methods `is_poisoned` and `into_inner`, can be // provided here as needed. } impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> { type Target = T; fn deref(&self) -> &T { self.1.deref() } } impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> { fn deref_mut(&mut self) -> &mut T { self.1.deref_mut() } } impl<T> RwLock<T> { pub(crate) fn new(t: T) -> RwLock<T> { RwLock(PhantomData, parking_lot::RwLock::new(t)) } pub(crate) fn read(&self) -> RwLockReadGuard<'_, T> { RwLockReadGuard(PhantomData, self.1.read()) } pub(crate) fn try_read(&self) -> Option<RwLockReadGuard<'_, T>> { self.1 .try_read() .map(|guard| RwLockReadGuard(PhantomData, guard)) } pub(crate) fn write(&self) -> RwLockWriteGuard<'_, T> { RwLockWriteGuard(PhantomData, self.1.write()) } pub(crate) fn try_write(&self) -> Option<RwLockWriteGuard<'_, T>> { self.1 .try_write() .map(|guard| RwLockWriteGuard(PhantomData, guard)) } } impl<'a, T: ?Sized> Deref for RwLockReadGuard<'a, T> { type Target = T; fn deref(&self) -> &T { self.1.deref() } } impl<'a, T: ?Sized> Deref for RwLockWriteGuard<'a, T> { type Target = T; fn deref(&self) -> &T { self.1.deref() } } impl<'a, T: ?Sized> DerefMut for RwLockWriteGuard<'a, T> { fn deref_mut(&mut self) -> &mut T { self.1.deref_mut() } } impl Condvar { #[inline] pub(crate) fn new() -> Condvar { Condvar(PhantomData, parking_lot::Condvar::new()) } #[inline] pub(crate) fn notify_one(&self) { self.1.notify_one(); } #[inline] pub(crate) fn notify_all(&self) { self.1.notify_all(); } #[inline] pub(crate) fn wait<'a, T>( &self, mut guard: MutexGuard<'a, T>, ) -> LockResult<MutexGuard<'a, T>> { self.1.wait(&mut guard.1); Ok(guard) } #[inline] pub(crate) fn wait_timeout<'a, T>( &self, mut guard: MutexGuard<'a, T>, timeout: Duration, ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> { let wtr = self.1.wait_for(&mut guard.1, timeout); Ok((guard, wtr)) } // Note: Additional methods `wait_timeout_ms`, `wait_timeout_until`, // `wait_until` can be provided here as needed. 
} impl<'a, T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'a, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&self.1, f) } } impl<'a, T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'a, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&self.1, f) } } impl<'a, T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'a, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&self.1, f) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/loom/std/rwlock.rs
tokio/src/loom/std/rwlock.rs
use std::sync::{self, RwLockReadGuard, RwLockWriteGuard, TryLockError}; /// Adapter for `std::sync::RwLock` that removes the poisoning aspects /// from its api. #[derive(Debug)] pub(crate) struct RwLock<T: ?Sized>(sync::RwLock<T>); #[allow(dead_code)] impl<T> RwLock<T> { #[inline] pub(crate) fn new(t: T) -> Self { Self(sync::RwLock::new(t)) } #[inline] pub(crate) fn read(&self) -> RwLockReadGuard<'_, T> { match self.0.read() { Ok(guard) => guard, Err(p_err) => p_err.into_inner(), } } #[inline] pub(crate) fn try_read(&self) -> Option<RwLockReadGuard<'_, T>> { match self.0.try_read() { Ok(guard) => Some(guard), Err(TryLockError::Poisoned(p_err)) => Some(p_err.into_inner()), Err(TryLockError::WouldBlock) => None, } } #[inline] pub(crate) fn write(&self) -> RwLockWriteGuard<'_, T> { match self.0.write() { Ok(guard) => guard, Err(p_err) => p_err.into_inner(), } } #[inline] pub(crate) fn try_write(&self) -> Option<RwLockWriteGuard<'_, T>> { match self.0.try_write() { Ok(guard) => Some(guard), Err(TryLockError::Poisoned(p_err)) => Some(p_err.into_inner()), Err(TryLockError::WouldBlock) => None, } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/loom/std/mutex.rs
tokio/src/loom/std/mutex.rs
use std::sync::{self, MutexGuard, TryLockError}; /// Adapter for `std::Mutex` that removes the poisoning aspects /// from its API. #[derive(Debug)] pub(crate) struct Mutex<T: ?Sized>(sync::Mutex<T>); #[allow(dead_code)] impl<T> Mutex<T> { #[inline] pub(crate) fn new(t: T) -> Mutex<T> { Mutex(sync::Mutex::new(t)) } #[inline] pub(crate) const fn const_new(t: T) -> Mutex<T> { Mutex(sync::Mutex::new(t)) } #[inline] pub(crate) fn lock(&self) -> MutexGuard<'_, T> { match self.0.lock() { Ok(guard) => guard, Err(p_err) => p_err.into_inner(), } } #[inline] pub(crate) fn try_lock(&self) -> Option<MutexGuard<'_, T>> { match self.0.try_lock() { Ok(guard) => Some(guard), Err(TryLockError::Poisoned(p_err)) => Some(p_err.into_inner()), Err(TryLockError::WouldBlock) => None, } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/loom/std/unsafe_cell.rs
tokio/src/loom/std/unsafe_cell.rs
#[derive(Debug)] pub(crate) struct UnsafeCell<T>(std::cell::UnsafeCell<T>); impl<T> UnsafeCell<T> { pub(crate) const fn new(data: T) -> UnsafeCell<T> { UnsafeCell(std::cell::UnsafeCell::new(data)) } #[inline(always)] pub(crate) fn with<R>(&self, f: impl FnOnce(*const T) -> R) -> R { f(self.0.get()) } #[inline(always)] pub(crate) fn with_mut<R>(&self, f: impl FnOnce(*mut T) -> R) -> R { f(self.0.get()) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/loom/std/atomic_u64_static_const_new.rs
tokio/src/loom/std/atomic_u64_static_const_new.rs
use super::AtomicU64; use crate::loom::sync::Mutex; pub(crate) type StaticAtomicU64 = AtomicU64; impl AtomicU64 { pub(crate) const fn new(val: u64) -> Self { Self { inner: Mutex::const_new(val), } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/loom/std/atomic_u64_static_once_cell.rs
tokio/src/loom/std/atomic_u64_static_once_cell.rs
use super::AtomicU64; use crate::loom::sync::{atomic::Ordering, Mutex}; use std::sync::OnceLock; pub(crate) struct StaticAtomicU64 { init: u64, cell: OnceLock<Mutex<u64>>, } impl AtomicU64 { pub(crate) fn new(val: u64) -> Self { Self { inner: Mutex::new(val), } } } impl StaticAtomicU64 { pub(crate) const fn new(val: u64) -> StaticAtomicU64 { StaticAtomicU64 { init: val, cell: OnceLock::new(), } } pub(crate) fn load(&self, order: Ordering) -> u64 { *self.inner().lock() } pub(crate) fn fetch_add(&self, val: u64, order: Ordering) -> u64 { let mut lock = self.inner().lock(); let prev = *lock; *lock = prev + val; prev } pub(crate) fn compare_exchange_weak( &self, current: u64, new: u64, _success: Ordering, _failure: Ordering, ) -> Result<u64, u64> { let mut lock = self.inner().lock(); if *lock == current { *lock = new; Ok(current) } else { Err(*lock) } } fn inner(&self) -> &Mutex<u64> { self.cell.get_or_init(|| Mutex::new(self.init)) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/loom/std/barrier.rs
tokio/src/loom/std/barrier.rs
//! A `Barrier` that provides `wait_timeout`. //! //! This implementation mirrors that of the Rust standard library. use crate::loom::sync::{Condvar, Mutex}; use std::fmt; use std::time::{Duration, Instant}; /// A barrier enables multiple threads to synchronize the beginning /// of some computation. /// /// # Examples /// /// ``` /// # #[cfg(not(target_family = "wasm"))] /// # { /// use std::sync::{Arc, Barrier}; /// use std::thread; /// /// let mut handles = Vec::with_capacity(10); /// let barrier = Arc::new(Barrier::new(10)); /// for _ in 0..10 { /// let c = Arc::clone(&barrier); /// // The same messages will be printed together. /// // You will NOT see any interleaving. /// handles.push(thread::spawn(move|| { /// println!("before wait"); /// c.wait(); /// println!("after wait"); /// })); /// } /// // Wait for other threads to finish. /// for handle in handles { /// handle.join().unwrap(); /// } /// # } /// ``` pub(crate) struct Barrier { lock: Mutex<BarrierState>, cvar: Condvar, num_threads: usize, } // The inner state of a double barrier struct BarrierState { count: usize, generation_id: usize, } /// A `BarrierWaitResult` is returned by [`Barrier::wait()`] when all threads /// in the [`Barrier`] have rendezvoused. /// /// # Examples /// /// ``` /// use std::sync::Barrier; /// /// let barrier = Barrier::new(1); /// let barrier_wait_result = barrier.wait(); /// ``` pub(crate) struct BarrierWaitResult(bool); impl fmt::Debug for Barrier { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Barrier").finish_non_exhaustive() } } impl Barrier { /// Creates a new barrier that can block a given number of threads. /// /// A barrier will block `n`-1 threads which call [`wait()`] and then wake /// up all threads at once when the `n`th thread calls [`wait()`]. 
/// /// [`wait()`]: Barrier::wait /// /// # Examples /// /// ``` /// use std::sync::Barrier; /// /// let barrier = Barrier::new(10); /// ``` #[must_use] pub(crate) fn new(n: usize) -> Barrier { Barrier { lock: Mutex::new(BarrierState { count: 0, generation_id: 0, }), cvar: Condvar::new(), num_threads: n, } } /// Blocks the current thread until all threads have rendezvoused here. /// /// Barriers are re-usable after all threads have rendezvoused once, and can /// be used continuously. /// /// A single (arbitrary) thread will receive a [`BarrierWaitResult`] that /// returns `true` from [`BarrierWaitResult::is_leader()`] when returning /// from this function, and all other threads will receive a result that /// will return `false` from [`BarrierWaitResult::is_leader()`]. /// /// # Examples /// /// ``` /// # #[cfg(not(target_family = "wasm"))] /// # { /// use std::sync::{Arc, Barrier}; /// use std::thread; /// /// let mut handles = Vec::with_capacity(10); /// let barrier = Arc::new(Barrier::new(10)); /// for _ in 0..10 { /// let c = Arc::clone(&barrier); /// // The same messages will be printed together. /// // You will NOT see any interleaving. /// handles.push(thread::spawn(move|| { /// println!("before wait"); /// c.wait(); /// println!("after wait"); /// })); /// } /// // Wait for other threads to finish. /// for handle in handles { /// handle.join().unwrap(); /// } /// # } /// ``` pub(crate) fn wait(&self) -> BarrierWaitResult { let mut lock = self.lock.lock(); let local_gen = lock.generation_id; lock.count += 1; if lock.count < self.num_threads { // We need a while loop to guard against spurious wakeups. 
// https://en.wikipedia.org/wiki/Spurious_wakeup while local_gen == lock.generation_id { lock = self.cvar.wait(lock).unwrap(); } BarrierWaitResult(false) } else { lock.count = 0; lock.generation_id = lock.generation_id.wrapping_add(1); self.cvar.notify_all(); BarrierWaitResult(true) } } /// Blocks the current thread until all threads have rendezvoused here for /// at most `timeout` duration. pub(crate) fn wait_timeout(&self, timeout: Duration) -> Option<BarrierWaitResult> { // This implementation mirrors `wait`, but with each blocking operation // replaced by a timeout-amenable alternative. let deadline = Instant::now() + timeout; // Acquire `self.lock` with at most `timeout` duration. let mut lock = loop { if let Some(guard) = self.lock.try_lock() { break guard; } else if Instant::now() > deadline { return None; } else { std::thread::yield_now(); } }; // Shrink the `timeout` to account for the time taken to acquire `lock`. let timeout = deadline.saturating_duration_since(Instant::now()); let local_gen = lock.generation_id; lock.count += 1; if lock.count < self.num_threads { // We need a while loop to guard against spurious wakeups. // https://en.wikipedia.org/wiki/Spurious_wakeup while local_gen == lock.generation_id { let (guard, timeout_result) = self.cvar.wait_timeout(lock, timeout).unwrap(); lock = guard; if timeout_result.timed_out() { return None; } } Some(BarrierWaitResult(false)) } else { lock.count = 0; lock.generation_id = lock.generation_id.wrapping_add(1); self.cvar.notify_all(); Some(BarrierWaitResult(true)) } } } impl fmt::Debug for BarrierWaitResult { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("BarrierWaitResult") .field("is_leader", &self.is_leader()) .finish() } } impl BarrierWaitResult { /// Returns `true` if this thread is the "leader thread" for the call to /// [`Barrier::wait()`]. /// /// Only one thread will have `true` returned from their result, all other /// threads will have `false` returned. 
/// /// # Examples /// /// ``` /// use std::sync::Barrier; /// /// let barrier = Barrier::new(1); /// let barrier_wait_result = barrier.wait(); /// println!("{:?}", barrier_wait_result.is_leader()); /// ``` #[must_use] pub(crate) fn is_leader(&self) -> bool { self.0 } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/loom/std/atomic_u32.rs
tokio/src/loom/std/atomic_u32.rs
use std::cell::UnsafeCell; use std::fmt; use std::ops::Deref; use std::panic; /// `AtomicU32` providing an additional `unsync_load` function. pub(crate) struct AtomicU32 { inner: UnsafeCell<std::sync::atomic::AtomicU32>, } unsafe impl Send for AtomicU32 {} unsafe impl Sync for AtomicU32 {} impl panic::RefUnwindSafe for AtomicU32 {} impl panic::UnwindSafe for AtomicU32 {} impl AtomicU32 { pub(crate) const fn new(val: u32) -> AtomicU32 { let inner = UnsafeCell::new(std::sync::atomic::AtomicU32::new(val)); AtomicU32 { inner } } /// Performs an unsynchronized load. /// /// # Safety /// /// All mutations must have happened before the unsynchronized load. /// Additionally, there must be no concurrent mutations. pub(crate) unsafe fn unsync_load(&self) -> u32 { unsafe { core::ptr::read(self.inner.get() as *const u32) } } } impl Deref for AtomicU32 { type Target = std::sync::atomic::AtomicU32; fn deref(&self) -> &Self::Target { // safety: it is always safe to access `&self` fns on the inner value as // we never perform unsafe mutations. unsafe { &*self.inner.get() } } } impl fmt::Debug for AtomicU32 { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { self.deref().fmt(fmt) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/loom/std/mod.rs
tokio/src/loom/std/mod.rs
#![cfg_attr(any(not(feature = "full"), loom), allow(unused_imports, dead_code))] mod atomic_u16; mod atomic_u32; mod atomic_u64; mod atomic_usize; mod barrier; mod mutex; #[cfg(all(feature = "parking_lot", not(miri)))] mod parking_lot; mod rwlock; mod unsafe_cell; pub(crate) mod cell { pub(crate) use super::unsafe_cell::UnsafeCell; } #[cfg(any( feature = "net", feature = "process", feature = "signal", feature = "sync", ))] pub(crate) mod future { pub(crate) use crate::sync::AtomicWaker; } pub(crate) mod hint { pub(crate) use std::hint::spin_loop; } pub(crate) mod rand { use std::collections::hash_map::RandomState; use std::hash::{BuildHasher, Hash, Hasher}; use std::sync::atomic::AtomicU32; use std::sync::atomic::Ordering::Relaxed; static COUNTER: AtomicU32 = AtomicU32::new(1); pub(crate) fn seed() -> u64 { let rand_state = RandomState::new(); // Hash some unique-ish data to generate some new state rand_state.hash_one(COUNTER.fetch_add(1, Relaxed)) } } pub(crate) mod sync { pub(crate) use std::sync::{Arc, Weak}; // Below, make sure all the feature-influenced types are exported for // internal use. Note however that some are not _currently_ named by // consuming code. // Not using parking_lot in Miri due to <https://github.com/Amanieu/parking_lot/issues/477>. 
#[cfg(all(feature = "parking_lot", not(miri)))] #[allow(unused_imports)] pub(crate) use crate::loom::std::parking_lot::{ Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard, WaitTimeoutResult, }; #[cfg(not(all(feature = "parking_lot", not(miri))))] #[allow(unused_imports)] pub(crate) use std::sync::{Condvar, MutexGuard, RwLockReadGuard, WaitTimeoutResult}; #[cfg(not(all(feature = "parking_lot", not(miri))))] pub(crate) use crate::loom::std::mutex::Mutex; #[cfg(not(all(feature = "parking_lot", not(miri))))] pub(crate) use crate::loom::std::rwlock::RwLock; pub(crate) mod atomic { pub(crate) use crate::loom::std::atomic_u16::AtomicU16; pub(crate) use crate::loom::std::atomic_u32::AtomicU32; pub(crate) use crate::loom::std::atomic_u64::{AtomicU64, StaticAtomicU64}; pub(crate) use crate::loom::std::atomic_usize::AtomicUsize; pub(crate) use std::sync::atomic::{fence, AtomicBool, AtomicPtr, AtomicU8, Ordering}; } pub(crate) use super::barrier::Barrier; } pub(crate) mod sys { #[cfg(feature = "rt-multi-thread")] pub(crate) fn num_cpus() -> usize { use std::num::NonZeroUsize; const ENV_WORKER_THREADS: &str = "TOKIO_WORKER_THREADS"; match std::env::var(ENV_WORKER_THREADS) { Ok(s) => { let n = s.parse().unwrap_or_else(|e| { panic!("\"{ENV_WORKER_THREADS}\" must be usize, error: {e}, value: {s}") }); assert!(n > 0, "\"{ENV_WORKER_THREADS}\" cannot be set to 0"); n } Err(std::env::VarError::NotPresent) => { std::thread::available_parallelism().map_or(1, NonZeroUsize::get) } Err(std::env::VarError::NotUnicode(e)) => { panic!("\"{ENV_WORKER_THREADS}\" must be valid unicode, error: {e:?}") } } } #[cfg(not(feature = "rt-multi-thread"))] pub(crate) fn num_cpus() -> usize { 1 } } pub(crate) mod thread { #[inline] pub(crate) fn yield_now() { std::hint::spin_loop(); } #[allow(unused_imports)] pub(crate) use std::thread::{ current, panicking, park, park_timeout, sleep, spawn, AccessError, Builder, JoinHandle, LocalKey, Result, Thread, ThreadId, }; }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/loom/std/atomic_usize.rs
tokio/src/loom/std/atomic_usize.rs
use std::cell::UnsafeCell; use std::fmt; use std::ops; use std::panic; /// `AtomicUsize` providing an additional `unsync_load` function. pub(crate) struct AtomicUsize { inner: UnsafeCell<std::sync::atomic::AtomicUsize>, } unsafe impl Send for AtomicUsize {} unsafe impl Sync for AtomicUsize {} impl panic::RefUnwindSafe for AtomicUsize {} impl panic::UnwindSafe for AtomicUsize {} impl AtomicUsize { pub(crate) const fn new(val: usize) -> AtomicUsize { let inner = UnsafeCell::new(std::sync::atomic::AtomicUsize::new(val)); AtomicUsize { inner } } /// Performs an unsynchronized load. /// /// # Safety /// /// All mutations must have happened before the unsynchronized load. /// Additionally, there must be no concurrent mutations. pub(crate) unsafe fn unsync_load(&self) -> usize { unsafe { core::ptr::read(self.inner.get() as *const usize) } } pub(crate) fn with_mut<R>(&mut self, f: impl FnOnce(&mut usize) -> R) -> R { // safety: we have mutable access f(unsafe { (*self.inner.get()).get_mut() }) } } impl ops::Deref for AtomicUsize { type Target = std::sync::atomic::AtomicUsize; fn deref(&self) -> &Self::Target { // safety: it is always safe to access `&self` fns on the inner value as // we never perform unsafe mutations. unsafe { &*self.inner.get() } } } impl ops::DerefMut for AtomicUsize { fn deref_mut(&mut self) -> &mut Self::Target { // safety: we hold `&mut self` unsafe { &mut *self.inner.get() } } } impl fmt::Debug for AtomicUsize { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { (**self).fmt(fmt) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/loom/std/atomic_u64_native.rs
tokio/src/loom/std/atomic_u64_native.rs
pub(crate) use std::sync::atomic::{AtomicU64, Ordering}; /// Alias `AtomicU64` to `StaticAtomicU64` pub(crate) type StaticAtomicU64 = AtomicU64;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/loom/std/atomic_u64.rs
tokio/src/loom/std/atomic_u64.rs
//! Implementation of an atomic `u64` cell. On 64 bit platforms, this is a //! re-export of `AtomicU64`. On 32 bit platforms, this is implemented using a //! `Mutex`. // `AtomicU64` can only be used on targets with `target_has_atomic` is 64 or greater. // Once `cfg_target_has_atomic` feature is stable, we can replace it with // `#[cfg(target_has_atomic = "64")]`. // Refs: https://github.com/rust-lang/rust/tree/master/src/librustc_target cfg_has_atomic_u64! { #[path = "atomic_u64_native.rs"] mod imp; } cfg_not_has_atomic_u64! { #[path = "atomic_u64_as_mutex.rs"] mod imp; } pub(crate) use imp::{AtomicU64, StaticAtomicU64};
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/loom/std/atomic_u64_as_mutex.rs
tokio/src/loom/std/atomic_u64_as_mutex.rs
use crate::loom::sync::Mutex;
use std::sync::atomic::Ordering;

cfg_has_const_mutex_new! {
    #[path = "atomic_u64_static_const_new.rs"]
    mod static_macro;
}

cfg_not_has_const_mutex_new! {
    #[path = "atomic_u64_static_once_cell.rs"]
    mod static_macro;
}

pub(crate) use static_macro::StaticAtomicU64;

/// Emulation of `std::sync::atomic::AtomicU64` backed by a mutex, for
/// targets without native 64-bit atomics.
///
/// All `Ordering` arguments are ignored: the mutex provides stronger
/// (sequentially consistent) synchronization than any requested ordering.
#[derive(Debug)]
pub(crate) struct AtomicU64 {
    inner: Mutex<u64>,
}

impl AtomicU64 {
    /// Loads the current value.
    pub(crate) fn load(&self, _: Ordering) -> u64 {
        *self.inner.lock()
    }

    /// Stores `val`.
    pub(crate) fn store(&self, val: u64, _: Ordering) {
        *self.inner.lock() = val;
    }

    /// Adds `val`, returning the previous value.
    pub(crate) fn fetch_add(&self, val: u64, _: Ordering) -> u64 {
        let mut lock = self.inner.lock();
        let prev = *lock;
        // Match `std::sync::atomic::AtomicU64::fetch_add`, which wraps on
        // overflow. `prev + val` would panic in debug builds instead of
        // wrapping, diverging from the native atomic's semantics.
        *lock = prev.wrapping_add(val);
        prev
    }

    /// Bitwise-ors `val`, returning the previous value.
    pub(crate) fn fetch_or(&self, val: u64, _: Ordering) -> u64 {
        let mut lock = self.inner.lock();
        let prev = *lock;
        *lock = prev | val;
        prev
    }

    /// Stores `new` if the current value equals `current`.
    ///
    /// Returns `Ok(current)` on success, `Err(actual)` on failure, matching
    /// the contract of the native `compare_exchange`.
    pub(crate) fn compare_exchange(
        &self,
        current: u64,
        new: u64,
        _success: Ordering,
        _failure: Ordering,
    ) -> Result<u64, u64> {
        let mut lock = self.inner.lock();

        if *lock == current {
            *lock = new;
            Ok(current)
        } else {
            Err(*lock)
        }
    }

    /// Same as [`compare_exchange`]: the mutex-based emulation never fails
    /// spuriously, so the "weak" form can delegate to the strong one.
    ///
    /// [`compare_exchange`]: AtomicU64::compare_exchange
    pub(crate) fn compare_exchange_weak(
        &self,
        current: u64,
        new: u64,
        success: Ordering,
        failure: Ordering,
    ) -> Result<u64, u64> {
        self.compare_exchange(current, new, success, failure)
    }
}

impl Default for AtomicU64 {
    fn default() -> AtomicU64 {
        AtomicU64::new(u64::default())
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/canonicalize.rs
tokio/src/fs/canonicalize.rs
use crate::fs::asyncify; use std::io; use std::path::{Path, PathBuf}; /// Returns the canonical, absolute form of a path with all intermediate /// components normalized and symbolic links resolved. /// /// This is an async version of [`std::fs::canonicalize`]. /// /// # Platform-specific behavior /// /// This function currently corresponds to the `realpath` function on Unix /// and the `CreateFile` and `GetFinalPathNameByHandle` functions on Windows. /// Note that, this [may change in the future][changes]. /// /// On Windows, this converts the path to use [extended length path][path] /// syntax, which allows your program to use longer path names, but means you /// can only join backslash-delimited paths to it, and it may be incompatible /// with other applications (if passed to the application on the command-line, /// or written to a file another application may read). /// /// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior /// [path]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath /// /// # Errors /// /// This function will return an error in the following situations, but is not /// limited to just these cases: /// /// * `path` does not exist. /// * A non-final component in path is not a directory. /// /// # Examples /// /// ```no_run /// use tokio::fs; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let path = fs::canonicalize("../a/../foo.txt").await?; /// Ok(()) /// } /// ``` pub async fn canonicalize(path: impl AsRef<Path>) -> io::Result<PathBuf> { let path = path.as_ref().to_owned(); asyncify(move || std::fs::canonicalize(path)).await }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/create_dir_all.rs
tokio/src/fs/create_dir_all.rs
use crate::fs::asyncify; use std::io; use std::path::Path; /// Recursively creates a directory and all of its parent components if they /// are missing. /// /// This is an async version of [`std::fs::create_dir_all`]. /// /// # Platform-specific behavior /// /// This function currently corresponds to the `mkdir` function on Unix /// and the `CreateDirectory` function on Windows. /// Note that, this [may change in the future][changes]. /// /// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior /// /// # Errors /// /// This function will return an error in the following situations, but is not /// limited to just these cases: /// /// * If any directory in the path specified by `path` does not already exist /// and it could not be created otherwise. The specific error conditions for /// when a directory is being created (after it is determined to not exist) are /// outlined by [`fs::create_dir`]. /// /// Notable exception is made for situations where any of the directories /// specified in the `path` could not be created as it was being created concurrently. /// Such cases are considered to be successful. That is, calling `create_dir_all` /// concurrently from multiple threads or processes is guaranteed not to fail /// due to a race condition with itself. /// /// [`fs::create_dir`]: std::fs::create_dir /// /// # Examples /// /// ```no_run /// use tokio::fs; /// /// #[tokio::main] /// async fn main() -> std::io::Result<()> { /// fs::create_dir_all("/some/dir").await?; /// Ok(()) /// } /// ``` pub async fn create_dir_all(path: impl AsRef<Path>) -> io::Result<()> { let path = path.as_ref().to_owned(); asyncify(move || std::fs::create_dir_all(path)).await }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/remove_dir.rs
tokio/src/fs/remove_dir.rs
use crate::fs::asyncify; use std::io; use std::path::Path; /// Removes an existing, empty directory. /// /// This is an async version of [`std::fs::remove_dir`]. pub async fn remove_dir(path: impl AsRef<Path>) -> io::Result<()> { let path = path.as_ref().to_owned(); asyncify(move || std::fs::remove_dir(path)).await }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/write.rs
tokio/src/fs/write.rs
use crate::{fs::asyncify, util::as_ref::OwnedBuf};

use std::{io, path::Path};

/// Creates a future that will open a file for writing and write the entire
/// contents of `contents` to it.
///
/// This is the async equivalent of [`std::fs::write`][std].
///
/// This operation is implemented by running the equivalent blocking operation
/// on a separate thread pool using [`spawn_blocking`].
///
/// [`spawn_blocking`]: crate::task::spawn_blocking
/// [std]: fn@std::fs::write
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
///
/// # async fn dox() -> std::io::Result<()> {
/// fs::write("foo.txt", b"Hello world!").await?;
/// # Ok(())
/// # }
/// ```
pub async fn write(path: impl AsRef<Path>, contents: impl AsRef<[u8]>) -> io::Result<()> {
    let path = path.as_ref();
    // Promote the borrowed bytes into an owned buffer so they can be moved
    // into a spawned closure / io-uring operation.
    let contents = crate::util::as_ref::upgrade(contents);

    // On Linux with the unstable io-uring feature enabled, prefer submitting
    // the write through the uring driver; otherwise fall through to the
    // spawn_blocking-based path below.
    #[cfg(all(
        tokio_unstable,
        feature = "io-uring",
        feature = "rt",
        feature = "fs",
        target_os = "linux"
    ))]
    {
        let handle = crate::runtime::Handle::current();
        let driver_handle = handle.inner.driver().io();
        // Only take the uring path if the kernel supports the `Write` opcode
        // (probed and cached by `check_and_init`).
        if driver_handle.check_and_init(io_uring::opcode::Write::CODE)? {
            return write_uring(path, contents).await;
        }
    }

    write_spawn_blocking(path, contents).await
}

/// io-uring implementation of `write`: opens the file for truncating write
/// and submits `Write` operations until the whole buffer is flushed.
#[cfg(all(
    tokio_unstable,
    feature = "io-uring",
    feature = "rt",
    feature = "fs",
    target_os = "linux"
))]
async fn write_uring(path: &Path, mut buf: OwnedBuf) -> io::Result<()> {
    use crate::{fs::OpenOptions, runtime::driver::op::Op};
    use std::os::fd::OwnedFd;

    let file = OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .open(path)
        .await?;

    // Unwrap the tokio `File` back into a raw owned fd; the uring op takes
    // ownership of the fd for the duration of each submission.
    let mut fd: OwnedFd = file
        .try_into_std()
        .expect("unexpected in-flight operation detected")
        .into();

    let total: usize = buf.as_ref().len();
    let mut buf_offset: usize = 0;
    let mut file_offset: u64 = 0;

    // Short writes are possible; loop until the entire buffer is written.
    // Ownership of `buf` and `fd` moves into each op and is handed back on
    // completion, so they are rebound every iteration.
    while buf_offset < total {
        let (res, _buf, _fd) = Op::write_at(fd, buf, buf_offset, file_offset)?.await;

        let n = match res {
            // A successful zero-byte write means no progress can be made.
            Ok(0) => return Err(io::ErrorKind::WriteZero.into()),
            Ok(n) => n,
            // Treat interruption as zero bytes written and retry.
            Err(e) if e.kind() == io::ErrorKind::Interrupted => 0,
            Err(e) => return Err(e),
        };

        buf = _buf;
        fd = _fd;

        buf_offset += n as usize;
        file_offset += n as u64;
    }

    Ok(())
}

/// Fallback implementation of `write`: runs `std::fs::write` on the
/// blocking thread pool.
async fn write_spawn_blocking(path: &Path, contents: OwnedBuf) -> io::Result<()> {
    // Take ownership so the path can move into the blocking closure.
    let path = path.to_owned();

    asyncify(move || std::fs::write(path, contents)).await
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/symlink.rs
tokio/src/fs/symlink.rs
use crate::fs::asyncify; use std::io; use std::path::Path; /// Creates a new symbolic link on the filesystem. /// /// The `link` path will be a symbolic link pointing to the `original` path. /// /// This is an async version of [`std::os::unix::fs::symlink`]. pub async fn symlink(original: impl AsRef<Path>, link: impl AsRef<Path>) -> io::Result<()> { let original = original.as_ref().to_owned(); let link = link.as_ref().to_owned(); asyncify(move || std::os::unix::fs::symlink(original, link)).await }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/read_link.rs
tokio/src/fs/read_link.rs
use crate::fs::asyncify; use std::io; use std::path::{Path, PathBuf}; /// Reads a symbolic link, returning the file that the link points to. /// /// This is an async version of [`std::fs::read_link`]. pub async fn read_link(path: impl AsRef<Path>) -> io::Result<PathBuf> { let path = path.as_ref().to_owned(); asyncify(move || std::fs::read_link(path)).await }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/rename.rs
tokio/src/fs/rename.rs
use crate::fs::asyncify; use std::io; use std::path::Path; /// Renames a file or directory to a new name, replacing the original file if /// `to` already exists. /// /// This will not work if the new name is on a different mount point. /// /// This is an async version of [`std::fs::rename`]. pub async fn rename(from: impl AsRef<Path>, to: impl AsRef<Path>) -> io::Result<()> { let from = from.as_ref().to_owned(); let to = to.as_ref().to_owned(); asyncify(move || std::fs::rename(from, to)).await }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false