repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/io_read_exact.rs
tokio/tests/io_read_exact.rs
#![warn(rust_2018_idioms)] #![cfg(feature = "full")] use tokio::io::AsyncReadExt; use tokio_test::assert_ok; #[tokio::test] async fn read_exact() { let mut buf = Box::new([0; 8]); let mut rd: &[u8] = b"hello world"; let n = assert_ok!(rd.read_exact(&mut buf[..]).await); assert_eq!(n, 8); assert_eq!(buf[..], b"hello wo"[..]); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/macros_try_join.rs
tokio/tests/macros_try_join.rs
#![cfg(feature = "macros")] #![allow(clippy::disallowed_names)] use std::{convert::Infallible, sync::Arc}; use tokio::sync::{oneshot, Semaphore}; use tokio_test::{assert_pending, assert_ready, task}; #[cfg(all(target_family = "wasm", not(target_os = "wasi")))] use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test; #[cfg(not(all(target_family = "wasm", not(target_os = "wasi"))))] use tokio::test as maybe_tokio_test; #[maybe_tokio_test] async fn sync_one_lit_expr_comma() { let foo = tokio::try_join!(async { ok(1) },); assert_eq!(foo, Ok((1,))); let foo = tokio::try_join!(biased; async { ok(1) },); assert_eq!(foo, Ok((1,))); } #[maybe_tokio_test] async fn sync_one_lit_expr_no_comma() { let foo = tokio::try_join!(async { ok(1) }); assert_eq!(foo, Ok((1,))); let foo = tokio::try_join!(biased; async { ok(1) }); assert_eq!(foo, Ok((1,))); } #[maybe_tokio_test] async fn sync_two_lit_expr_comma() { let foo = tokio::try_join!(async { ok(1) }, async { ok(2) },); assert_eq!(foo, Ok((1, 2))); let foo = tokio::try_join!(biased;async { ok(1) }, async { ok(2) },); assert_eq!(foo, Ok((1, 2))); } #[maybe_tokio_test] async fn sync_two_lit_expr_no_comma() { let foo = tokio::try_join!(async { ok(1) }, async { ok(2) }); assert_eq!(foo, Ok((1, 2))); let foo = tokio::try_join!(biased; async { ok(1) }, async { ok(2) }); assert_eq!(foo, Ok((1, 2))); } #[maybe_tokio_test] async fn two_await() { let (tx1, rx1) = oneshot::channel::<&str>(); let (tx2, rx2) = oneshot::channel::<u32>(); let mut join = task::spawn(async { tokio::try_join!(rx1, rx2) }); assert_pending!(join.poll()); tx2.send(123).unwrap(); assert!(join.is_woken()); assert_pending!(join.poll()); tx1.send("hello").unwrap(); assert!(join.is_woken()); let res: Result<(&str, u32), _> = assert_ready!(join.poll()); assert_eq!(Ok(("hello", 123)), res); } #[maybe_tokio_test] async fn err_abort_early() { let (tx1, rx1) = oneshot::channel::<&str>(); let (tx2, rx2) = oneshot::channel::<u32>(); let (_tx3, rx3) = 
oneshot::channel::<u32>(); let mut join = task::spawn(async { tokio::try_join!(rx1, rx2, rx3) }); assert_pending!(join.poll()); tx2.send(123).unwrap(); assert!(join.is_woken()); assert_pending!(join.poll()); drop(tx1); assert!(join.is_woken()); let res = assert_ready!(join.poll()); assert!(res.is_err()); } #[test] #[cfg(target_pointer_width = "64")] fn try_join_size() { use futures::future; use std::mem; let fut = async { let ready = future::ready(ok(0i32)); tokio::try_join!(ready) }; assert_eq!(mem::size_of_val(&fut), 32); let fut = async { let ready1 = future::ready(ok(0i32)); let ready2 = future::ready(ok(0i32)); tokio::try_join!(ready1, ready2) }; assert_eq!(mem::size_of_val(&fut), 48); } fn ok<T>(val: T) -> Result<T, ()> { Ok(val) } async fn non_cooperative_task(permits: Arc<Semaphore>) -> Result<usize, String> { let mut exceeded_budget = 0; for _ in 0..5 { // Another task should run after this task uses its whole budget for _ in 0..128 { let _permit = permits.clone().acquire_owned().await.unwrap(); } exceeded_budget += 1; } Ok(exceeded_budget) } async fn poor_little_task(permits: Arc<Semaphore>) -> Result<usize, String> { let mut how_many_times_i_got_to_run = 0; for _ in 0..5 { let _permit = permits.clone().acquire_owned().await.unwrap(); how_many_times_i_got_to_run += 1; } Ok(how_many_times_i_got_to_run) } #[tokio::test] async fn try_join_does_not_allow_tasks_to_starve() { let permits = Arc::new(Semaphore::new(10)); // non_cooperative_task should yield after its budget is exceeded and then poor_little_task should run. 
let result = tokio::try_join!( non_cooperative_task(Arc::clone(&permits)), poor_little_task(permits) ); let (non_cooperative_result, little_task_result) = result.unwrap(); assert_eq!(5, non_cooperative_result); assert_eq!(5, little_task_result); } #[tokio::test] async fn a_different_future_is_polled_first_every_time_poll_fn_is_polled() { let poll_order = Arc::new(std::sync::Mutex::new(vec![])); let fut = |x, poll_order: Arc<std::sync::Mutex<Vec<i32>>>| async move { for _ in 0..4 { { let mut guard = poll_order.lock().unwrap(); guard.push(x); } tokio::task::yield_now().await; } Ok::<(), Infallible>(()) }; tokio::try_join!( fut(1, Arc::clone(&poll_order)), fut(2, Arc::clone(&poll_order)), fut(3, Arc::clone(&poll_order)), ) .unwrap(); // Each time the future created by join! is polled, it should start // by polling a different future first. assert_eq!( vec![1, 2, 3, 2, 3, 1, 3, 1, 2, 1, 2, 3], *poll_order.lock().unwrap() ); } #[tokio::test] async fn futures_are_polled_in_order_in_biased_mode() { let poll_order = Arc::new(std::sync::Mutex::new(vec![])); let fut = |x, poll_order: Arc<std::sync::Mutex<Vec<i32>>>| async move { for _ in 0..4 { { let mut guard = poll_order.lock().unwrap(); guard.push(x); } tokio::task::yield_now().await; } Ok::<(), Infallible>(()) }; tokio::try_join!( biased; fut(1, Arc::clone(&poll_order)), fut(2, Arc::clone(&poll_order)), fut(3, Arc::clone(&poll_order)), ) .unwrap(); // Each time the future created by join! is polled, it should start // by polling in the order as declared in the macro inputs. 
assert_eq!( vec![1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3], *poll_order.lock().unwrap() ); } #[test] #[cfg(target_pointer_width = "64")] fn try_join_size_biased() { use futures::future; use std::mem; let fut = async { let ready = future::ready(ok(0i32)); tokio::try_join!(biased; ready) }; assert_eq!(mem::size_of_val(&fut), 24); let fut = async { let ready1 = future::ready(ok(0i32)); let ready2 = future::ready(ok(0i32)); tokio::try_join!(biased; ready1, ready2) }; assert_eq!(mem::size_of_val(&fut), 40); } #[tokio::test] async fn empty_try_join() { assert_eq!(tokio::try_join!() as Result<_, ()>, Ok(())); assert_eq!(tokio::try_join!(biased;) as Result<_, ()>, Ok(())); } // Regression test for: https://github.com/tokio-rs/tokio/issues/7637 // We want to make sure that the `const COUNT: u32` declaration // inside the macro body doesn't leak to the caller to cause compiler failures // or variable shadowing. #[tokio::test] async fn caller_names_const_count() { let (tx, rx) = oneshot::channel::<u32>(); const COUNT: u32 = 2; let mut try_join = task::spawn(async { tokio::try_join!(async { tx.send(COUNT) }) }); assert_ready!(try_join.poll()).unwrap(); let res = rx.await.unwrap(); // This passing demonstrates that the const in the macro is // not shadowing the caller-specified COUNT value assert_eq!(2, res); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/fs_uring_read.rs
tokio/tests/fs_uring_read.rs
//! Uring file operations tests. #![cfg(all( tokio_unstable, feature = "io-uring", feature = "rt", feature = "fs", target_os = "linux" ))] use futures::future::Future; use std::future::poll_fn; use std::io::Write; use std::path::PathBuf; use std::sync::mpsc; use std::task::{Context, Poll, Waker}; use std::time::Duration; use tempfile::NamedTempFile; use tokio::fs::read; use tokio::runtime::{Builder, Runtime}; use tokio_test::assert_pending; use tokio_util::task::TaskTracker; fn multi_rt(n: usize) -> Box<dyn Fn() -> Runtime> { Box::new(move || { Builder::new_multi_thread() .worker_threads(n) .enable_all() .build() .unwrap() }) } fn current_rt() -> Box<dyn Fn() -> Runtime> { Box::new(|| Builder::new_current_thread().enable_all().build().unwrap()) } fn rt_combinations() -> Vec<Box<dyn Fn() -> Runtime>> { vec![ current_rt(), multi_rt(1), multi_rt(2), multi_rt(8), multi_rt(64), multi_rt(256), ] } #[test] fn shutdown_runtime_while_performing_io_uring_ops() { fn run(rt: Runtime) { let (done_tx, done_rx) = mpsc::channel(); let (_tmp, path) = create_tmp_files(1); // keep 100 permits const N: i32 = 100; rt.spawn(async move { let path = path[0].clone(); // spawning a bunch of uring operations. let mut futs = vec![]; // spawning a bunch of uring operations. 
for _ in 0..N { let path = path.clone(); let mut fut = Box::pin(read(path)); poll_fn(|cx| { assert_pending!(fut.as_mut().poll(cx)); Poll::<()>::Pending }) .await; futs.push(fut); } tokio::task::yield_now().await; }); std::thread::spawn(move || { rt.shutdown_timeout(Duration::from_millis(300)); done_tx.send(()).unwrap(); }); done_rx.recv().unwrap(); } for rt in rt_combinations() { run(rt()); } } #[test] fn read_many_files() { fn run(rt: Runtime) { const NUM_FILES: usize = 512; let (_tmp_files, paths): (Vec<NamedTempFile>, Vec<PathBuf>) = create_tmp_files(NUM_FILES); rt.block_on(async move { let tracker = TaskTracker::new(); for i in 0..10_000 { let path = paths.get(i % NUM_FILES).unwrap().clone(); tracker.spawn(async move { let bytes = read(path).await.unwrap(); assert_eq!(bytes, vec![20; 1023]); }); } tracker.close(); tracker.wait().await; }); } for rt in rt_combinations() { run(rt()); } } #[tokio::test] async fn read_small_large_files() { let (_tmp, path) = create_large_temp_file(); let bytes = read(path).await.unwrap(); assert_eq!(bytes, create_buf(5000)); let (_tmp, path) = create_small_temp_file(); let bytes = read(path).await.unwrap(); assert_eq!(bytes, create_buf(20)); } #[tokio::test] async fn cancel_op_future() { let (_tmp_file, path): (Vec<NamedTempFile>, Vec<PathBuf>) = create_tmp_files(1); let path = path[0].clone(); let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel(); let handle = tokio::spawn(async move { let fut = read(path.clone()); tokio::pin!(fut); poll_fn(move |_| { // If io_uring is enabled (and not falling back to the thread pool), // the first poll should return Pending. 
assert_pending!(fut.as_mut().poll(&mut Context::from_waker(Waker::noop()))); tx.send(true).unwrap(); Poll::<()>::Pending }) .await; }); // Wait for the first poll let val = rx.recv().await; assert!(val.unwrap()); handle.abort(); let res = handle.await.unwrap_err(); assert!(res.is_cancelled()); } fn create_tmp_files(num_files: usize) -> (Vec<NamedTempFile>, Vec<PathBuf>) { let mut files = Vec::with_capacity(num_files); for _ in 0..num_files { let mut tmp = NamedTempFile::new().unwrap(); let buf = vec![20; 1023]; tmp.write_all(&buf).unwrap(); let path = tmp.path().to_path_buf(); files.push((tmp, path)); } files.into_iter().unzip() } fn create_large_temp_file() -> (NamedTempFile, PathBuf) { let mut tmp = NamedTempFile::new().unwrap(); let buf = create_buf(5000); tmp.write_all(&buf).unwrap(); let path = tmp.path().to_path_buf(); (tmp, path) } fn create_small_temp_file() -> (NamedTempFile, PathBuf) { let mut tmp = NamedTempFile::new().unwrap(); let buf = create_buf(20); tmp.write_all(&buf).unwrap(); let path = tmp.path().to_path_buf(); (tmp, path) } fn create_buf(length: usize) -> Vec<u8> { (0..length).map(|i| i as u8).collect() }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/task_abort.rs
tokio/tests/task_abort.rs
#![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi doesn't support panic recovery use std::sync::Arc; use std::thread::sleep; use tokio::time::Duration; use tokio::runtime::Builder; #[cfg(panic = "unwind")] struct PanicOnDrop; #[cfg(panic = "unwind")] impl Drop for PanicOnDrop { fn drop(&mut self) { panic!("Well what did you expect would happen..."); } } /// Checks that a suspended task can be aborted without panicking as reported in /// issue #3157: <https://github.com/tokio-rs/tokio/issues/3157>. #[test] fn test_abort_without_panic_3157() { let rt = Builder::new_multi_thread() .enable_time() .worker_threads(1) .build() .unwrap(); rt.block_on(async move { let handle = tokio::spawn(async move { tokio::time::sleep(Duration::new(100, 0)).await }); // wait for task to sleep. tokio::time::sleep(Duration::from_millis(10)).await; handle.abort(); let _ = handle.await; }); } /// Checks that a suspended task can be aborted inside of a current_thread /// executor without panicking as reported in issue #3662: /// <https://github.com/tokio-rs/tokio/issues/3662>. #[test] fn test_abort_without_panic_3662() { use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; struct DropCheck(Arc<AtomicBool>); impl Drop for DropCheck { fn drop(&mut self) { self.0.store(true, Ordering::SeqCst); } } let rt = Builder::new_current_thread().build().unwrap(); rt.block_on(async move { let drop_flag = Arc::new(AtomicBool::new(false)); let drop_check = DropCheck(drop_flag.clone()); let j = tokio::spawn(async move { // NB: just grab the drop check here so that it becomes part of the // task. let _drop_check = drop_check; futures::future::pending::<()>().await; }); let drop_flag2 = drop_flag.clone(); let task = std::thread::spawn(move || { // This runs in a separate thread so it doesn't have immediate // thread-local access to the executor. 
It does however transition // the underlying task to be completed, which will cause it to be // dropped (but not in this thread). assert!(!drop_flag2.load(Ordering::SeqCst)); j.abort(); j }) .join() .unwrap(); let result = task.await; assert!(drop_flag.load(Ordering::SeqCst)); assert!(result.unwrap_err().is_cancelled()); // Note: We do the following to trigger a deferred task cleanup. // // The relevant piece of code you want to look at is in: // `Inner::block_on` of `scheduler/current_thread.rs`. // // We cause the cleanup to happen by having a poll return Pending once // so that the scheduler can go into the "auxiliary tasks" mode, at // which point the task is removed from the scheduler. let i = tokio::spawn(async move { tokio::task::yield_now().await; }); i.await.unwrap(); }); } /// Checks that a suspended LocalSet task can be aborted from a remote thread /// without panicking and without running the tasks destructor on the wrong thread. /// <https://github.com/tokio-rs/tokio/issues/3929> #[test] fn remote_abort_local_set_3929() { struct DropCheck { created_on: std::thread::ThreadId, not_send: std::marker::PhantomData<*const ()>, } impl DropCheck { fn new() -> Self { Self { created_on: std::thread::current().id(), not_send: std::marker::PhantomData, } } } impl Drop for DropCheck { fn drop(&mut self) { if std::thread::current().id() != self.created_on { panic!("non-Send value dropped in another thread!"); } } } let rt = Builder::new_current_thread().build().unwrap(); let local = tokio::task::LocalSet::new(); let check = DropCheck::new(); let jh = local.spawn_local(async move { futures::future::pending::<()>().await; drop(check); }); let jh2 = std::thread::spawn(move || { sleep(Duration::from_millis(10)); jh.abort(); }); rt.block_on(local); jh2.join().unwrap(); } /// Checks that a suspended task can be aborted even if the `JoinHandle` is immediately dropped. /// issue #3964: <https://github.com/tokio-rs/tokio/issues/3964>. 
#[test] fn test_abort_wakes_task_3964() { let rt = Builder::new_current_thread().enable_time().build().unwrap(); rt.block_on(async move { let notify_dropped = Arc::new(()); let weak_notify_dropped = Arc::downgrade(&notify_dropped); let handle = tokio::spawn(async move { // Make sure the Arc is moved into the task let _notify_dropped = notify_dropped; tokio::time::sleep(Duration::new(100, 0)).await }); // wait for task to sleep. tokio::time::sleep(Duration::from_millis(10)).await; handle.abort(); drop(handle); // wait for task to abort. tokio::time::sleep(Duration::from_millis(10)).await; // Check that the Arc has been dropped. assert!(weak_notify_dropped.upgrade().is_none()); }); } /// Checks that aborting a task whose destructor panics does not allow the /// panic to escape the task. #[test] #[cfg(panic = "unwind")] fn test_abort_task_that_panics_on_drop_contained() { let rt = Builder::new_current_thread().enable_time().build().unwrap(); rt.block_on(async move { let handle = tokio::spawn(async move { // Make sure the Arc is moved into the task let _panic_dropped = PanicOnDrop; tokio::time::sleep(Duration::new(100, 0)).await }); // wait for task to sleep. tokio::time::sleep(Duration::from_millis(10)).await; handle.abort(); drop(handle); // wait for task to abort. tokio::time::sleep(Duration::from_millis(10)).await; }); } /// Checks that aborting a task whose destructor panics has the expected result. #[test] #[cfg(panic = "unwind")] fn test_abort_task_that_panics_on_drop_returned() { let rt = Builder::new_current_thread().enable_time().build().unwrap(); rt.block_on(async move { let handle = tokio::spawn(async move { // Make sure the Arc is moved into the task let _panic_dropped = PanicOnDrop; tokio::time::sleep(Duration::new(100, 0)).await }); // wait for task to sleep. tokio::time::sleep(Duration::from_millis(10)).await; handle.abort(); assert!(handle.await.unwrap_err().is_panic()); }); } // It's not clear where these tests belong. 
This was the place suggested by @Darksonn: // https://github.com/tokio-rs/tokio/pull/6753#issuecomment-2271434176 /// Checks that a `JoinError` with a panic payload prints the expected text. #[test] #[cfg(panic = "unwind")] fn test_join_error_display() { let rt = Builder::new_current_thread().build().unwrap(); rt.block_on(async move { // `String` payload let join_err = tokio::spawn(async move { let value = 1234; panic!("Format-args payload: {value}") }) .await .unwrap_err(); // We can't assert the full output because the task ID can change. let join_err_str = join_err.to_string(); assert!( join_err_str.starts_with("task ") && join_err_str.ends_with(" panicked with message \"Format-args payload: 1234\""), "Unexpected join_err_str {join_err_str:?}" ); // `&'static str` payload let join_err = tokio::spawn(async move { panic!("Const payload") }) .await .unwrap_err(); let join_err_str = join_err.to_string(); assert!( join_err_str.starts_with("task ") && join_err_str.ends_with(" panicked with message \"Const payload\""), "Unexpected join_err_str {join_err_str:?}" ); // Non-string payload let join_err = tokio::spawn(async move { std::panic::panic_any(1234i32) }) .await .unwrap_err(); let join_err_str = join_err.to_string(); assert!( join_err_str.starts_with("task ") && join_err_str.ends_with(" panicked"), "Unexpected join_err_str {join_err_str:?}" ); }); } /// Checks that a `JoinError` with a panic payload prints the expected text from `Debug`. #[test] #[cfg(panic = "unwind")] fn test_join_error_debug() { let rt = Builder::new_current_thread().build().unwrap(); rt.block_on(async move { // `String` payload let join_err = tokio::spawn(async move { let value = 1234; panic!("Format-args payload: {value}") }) .await .unwrap_err(); // We can't assert the full output because the task ID can change. 
let join_err_str = format!("{join_err:?}"); assert!( join_err_str.starts_with("JoinError::Panic(Id(") && join_err_str.ends_with("), \"Format-args payload: 1234\", ...)"), "Unexpected join_err_str {join_err_str:?}" ); // `&'static str` payload let join_err = tokio::spawn(async move { panic!("Const payload") }) .await .unwrap_err(); let join_err_str = format!("{join_err:?}"); assert!( join_err_str.starts_with("JoinError::Panic(Id(") && join_err_str.ends_with("), \"Const payload\", ...)"), "Unexpected join_err_str {join_err_str:?}" ); // Non-string payload let join_err = tokio::spawn(async move { std::panic::panic_any(1234i32) }) .await .unwrap_err(); let join_err_str = format!("{join_err:?}"); assert!( join_err_str.starts_with("JoinError::Panic(Id(") && join_err_str.ends_with("), ...)"), "Unexpected join_err_str {join_err_str:?}" ); }); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/rt_basic.rs
tokio/tests/rt_basic.rs
#![warn(rust_2018_idioms)] #![cfg(feature = "full")] use tokio::runtime::Runtime; use tokio::sync::oneshot; use tokio::time::{timeout, Duration}; use tokio_test::{assert_err, assert_ok}; use std::future::Future; use std::pin::Pin; use std::sync::atomic::{AtomicBool, Ordering}; use std::task::{Context, Poll}; use std::thread; mod support { pub(crate) mod mpsc_stream; } macro_rules! cfg_metrics { ($($t:tt)*) => { #[cfg(all(tokio_unstable, target_has_atomic = "64"))] { $( $t )* } } } #[test] fn spawned_task_does_not_progress_without_block_on() { let (tx, mut rx) = oneshot::channel(); let rt = rt(); rt.spawn(async move { assert_ok!(tx.send("hello")); }); thread::sleep(Duration::from_millis(50)); assert_err!(rx.try_recv()); let out = rt.block_on(async { assert_ok!(rx.await) }); assert_eq!(out, "hello"); } #[test] fn no_extra_poll() { use pin_project_lite::pin_project; use std::pin::Pin; use std::sync::{ atomic::{AtomicUsize, Ordering::SeqCst}, Arc, }; use std::task::{Context, Poll}; use tokio_stream::{Stream, StreamExt}; pin_project! { struct TrackPolls<S> { npolls: Arc<AtomicUsize>, #[pin] s: S, } } impl<S> Stream for TrackPolls<S> where S: Stream, { type Item = S::Item; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { let this = self.project(); this.npolls.fetch_add(1, SeqCst); this.s.poll_next(cx) } } let (tx, rx) = support::mpsc_stream::unbounded_channel_stream::<()>(); let rx = TrackPolls { npolls: Arc::new(AtomicUsize::new(0)), s: rx, }; let npolls = Arc::clone(&rx.npolls); let rt = rt(); // TODO: could probably avoid this, but why not. 
let mut rx = Box::pin(rx); rt.spawn(async move { while rx.next().await.is_some() {} }); rt.block_on(async { tokio::task::yield_now().await; }); // should have been polled exactly once: the initial poll assert_eq!(npolls.load(SeqCst), 1); tx.send(()).unwrap(); rt.block_on(async { tokio::task::yield_now().await; }); // should have been polled twice more: once to yield Some(), then once to yield Pending assert_eq!(npolls.load(SeqCst), 1 + 2); drop(tx); rt.block_on(async { tokio::task::yield_now().await; }); // should have been polled once more: to yield None assert_eq!(npolls.load(SeqCst), 1 + 2 + 1); } #[test] fn acquire_mutex_in_drop() { use futures::future::pending; use tokio::task; let (tx1, rx1) = oneshot::channel(); let (tx2, rx2) = oneshot::channel(); let rt = rt(); rt.spawn(async move { let _ = rx2.await; unreachable!(); }); rt.spawn(async move { let _ = rx1.await; tx2.send(()).unwrap(); unreachable!(); }); // Spawn a task that will never notify rt.spawn(async move { pending::<()>().await; tx1.send(()).unwrap(); }); // Tick the loop rt.block_on(async { task::yield_now().await; }); // Drop the rt drop(rt); } #[test] fn drop_tasks_in_context() { static SUCCESS: AtomicBool = AtomicBool::new(false); struct ContextOnDrop; impl Future for ContextOnDrop { type Output = (); fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> { Poll::Pending } } impl Drop for ContextOnDrop { fn drop(&mut self) { if tokio::runtime::Handle::try_current().is_ok() { SUCCESS.store(true, Ordering::SeqCst); } } } let rt = rt(); rt.spawn(ContextOnDrop); drop(rt); assert!(SUCCESS.load(Ordering::SeqCst)); } #[test] #[cfg_attr(target_os = "wasi", ignore = "Wasi does not support panic recovery")] #[should_panic(expected = "boom")] fn wake_in_drop_after_panic() { struct WakeOnDrop(Option<oneshot::Sender<()>>); impl Drop for WakeOnDrop { fn drop(&mut self) { let _ = self.0.take().unwrap().send(()); } } let rt = rt(); let (tx1, rx1) = oneshot::channel::<()>(); let (tx2, rx2) = 
oneshot::channel::<()>(); // Spawn two tasks. We don't know the order in which they are dropped, so we // make both tasks identical. When the first task is dropped, we wake up the // second task. This ensures that we trigger a wakeup on a live task while // handling the "boom" panic, no matter the order in which the tasks are // dropped. rt.spawn(async move { let _wake_on_drop = WakeOnDrop(Some(tx2)); let _ = rx1.await; unreachable!() }); rt.spawn(async move { let _wake_on_drop = WakeOnDrop(Some(tx1)); let _ = rx2.await; unreachable!() }); rt.block_on(async { tokio::task::yield_now().await; panic!("boom"); }); } #[test] fn spawn_two() { let rt = rt(); let out = rt.block_on(async { let (tx, rx) = oneshot::channel(); tokio::spawn(async move { tokio::spawn(async move { tx.send("ZOMG").unwrap(); }); }); assert_ok!(rx.await) }); assert_eq!(out, "ZOMG"); cfg_metrics! { let metrics = rt.metrics(); drop(rt); assert_eq!(0, metrics.remote_schedule_count()); let mut local = 0; for i in 0..metrics.num_workers() { local += metrics.worker_local_schedule_count(i); } assert_eq!(2, local); } } #[cfg_attr(target_os = "wasi", ignore = "WASI: std::thread::spawn not supported")] #[test] fn spawn_remote() { let rt = rt(); let out = rt.block_on(async { let (tx, rx) = oneshot::channel(); let handle = tokio::spawn(async move { std::thread::spawn(move || { std::thread::sleep(Duration::from_millis(10)); tx.send("ZOMG").unwrap(); }); rx.await.unwrap() }); handle.await.unwrap() }); assert_eq!(out, "ZOMG"); cfg_metrics! { let metrics = rt.metrics(); drop(rt); assert_eq!(1, metrics.remote_schedule_count()); let mut local = 0; for i in 0..metrics.num_workers() { local += metrics.worker_local_schedule_count(i); } assert_eq!(1, local); } } #[test] #[cfg_attr(target_os = "wasi", ignore = "Wasi does not support panic recovery")] #[should_panic( expected = "A Tokio 1.x context was found, but timers are disabled. Call `enable_time` on the runtime builder to enable timers." 
)] fn timeout_panics_when_no_time_handle() { let rt = tokio::runtime::Builder::new_current_thread() .build() .unwrap(); rt.block_on(async { let (_tx, rx) = oneshot::channel::<()>(); let dur = Duration::from_millis(20); let _ = timeout(dur, rx).await; }); } #[cfg(tokio_unstable)] mod unstable { use tokio::runtime::{Builder, RngSeed, UnhandledPanic}; #[test] #[should_panic( expected = "a spawned task panicked and the runtime is configured to shut down on unhandled panic" )] fn shutdown_on_panic() { let rt = Builder::new_current_thread() .unhandled_panic(UnhandledPanic::ShutdownRuntime) .build() .unwrap(); rt.block_on(async { tokio::spawn(async { panic!("boom"); }); futures::future::pending::<()>().await; }) } #[test] #[cfg_attr(target_os = "wasi", ignore = "Wasi does not support panic recovery")] fn spawns_do_nothing() { use std::sync::Arc; let rt = Builder::new_current_thread() .unhandled_panic(UnhandledPanic::ShutdownRuntime) .build() .unwrap(); let rt1 = Arc::new(rt); let rt2 = rt1.clone(); let _ = std::thread::spawn(move || { rt2.block_on(async { tokio::spawn(async { panic!("boom"); }); futures::future::pending::<()>().await; }) }) .join(); let task = rt1.spawn(async {}); let res = futures::executor::block_on(task); assert!(res.is_err()); } #[test] #[cfg_attr(target_os = "wasi", ignore = "Wasi does not support panic recovery")] fn shutdown_all_concurrent_block_on() { const N: usize = 2; use std::sync::{mpsc, Arc}; let rt = Builder::new_current_thread() .unhandled_panic(UnhandledPanic::ShutdownRuntime) .build() .unwrap(); let rt = Arc::new(rt); let mut threads = vec![]; let (tx, rx) = mpsc::channel(); for _ in 0..N { let rt = rt.clone(); let tx = tx.clone(); threads.push(std::thread::spawn(move || { rt.block_on(async { tx.send(()).unwrap(); futures::future::pending::<()>().await; }); })); } for _ in 0..N { rx.recv().unwrap(); } rt.spawn(async { panic!("boom"); }); for thread in threads { assert!(thread.join().is_err()); } } #[test] fn rng_seed() { let seed = 
b"bytes used to generate seed"; let rt1 = tokio::runtime::Builder::new_current_thread() .rng_seed(RngSeed::from_bytes(seed)) .build() .unwrap(); let rt1_values = rt1.block_on(async { let rand_1 = tokio::macros::support::thread_rng_n(100); let rand_2 = tokio::macros::support::thread_rng_n(100); (rand_1, rand_2) }); let rt2 = tokio::runtime::Builder::new_current_thread() .rng_seed(RngSeed::from_bytes(seed)) .build() .unwrap(); let rt2_values = rt2.block_on(async { let rand_1 = tokio::macros::support::thread_rng_n(100); let rand_2 = tokio::macros::support::thread_rng_n(100); (rand_1, rand_2) }); assert_eq!(rt1_values, rt2_values); } #[test] fn rng_seed_multi_enter() { let seed = b"bytes used to generate seed"; fn two_rand_values() -> (u32, u32) { let rand_1 = tokio::macros::support::thread_rng_n(100); let rand_2 = tokio::macros::support::thread_rng_n(100); (rand_1, rand_2) } let rt1 = tokio::runtime::Builder::new_current_thread() .rng_seed(RngSeed::from_bytes(seed)) .build() .unwrap(); let rt1_values_1 = rt1.block_on(async { two_rand_values() }); let rt1_values_2 = rt1.block_on(async { two_rand_values() }); let rt2 = tokio::runtime::Builder::new_current_thread() .rng_seed(RngSeed::from_bytes(seed)) .build() .unwrap(); let rt2_values_1 = rt2.block_on(async { two_rand_values() }); let rt2_values_2 = rt2.block_on(async { two_rand_values() }); assert_eq!(rt1_values_1, rt2_values_1); assert_eq!(rt1_values_2, rt2_values_2); } } fn rt() -> Runtime { tokio::runtime::Builder::new_current_thread() .enable_all() .build() .unwrap() }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/process_kill_after_wait.rs
tokio/tests/process_kill_after_wait.rs
#![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(target_os = "wasi"), not(miri)))] // Wasi/Miri cannot run system commands use tokio::process::Command; #[tokio::test] async fn kill_after_wait() { let mut cmd; if cfg!(windows) { cmd = Command::new("cmd"); cmd.arg("/c"); } else { cmd = Command::new("sh"); cmd.arg("-c"); } let mut child = cmd.arg("exit 2").spawn().unwrap(); child.start_kill().unwrap(); child.wait().await.unwrap(); // Kill after `wait` is fine. child.start_kill().unwrap(); child.kill().await.unwrap(); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/io_copy.rs
tokio/tests/io_copy.rs
#![warn(rust_2018_idioms)] #![cfg(feature = "full")] use bytes::BytesMut; use tokio::io::{self, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf}; use tokio_test::assert_ok; use std::pin::Pin; use std::task::{ready, Context, Poll}; #[tokio::test] async fn copy() { struct Rd(bool); impl AsyncRead for Rd { fn poll_read( mut self: Pin<&mut Self>, _cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { if self.0 { buf.put_slice(b"hello world"); self.0 = false; Poll::Ready(Ok(())) } else { Poll::Ready(Ok(())) } } } let mut rd = Rd(true); let mut wr = Vec::new(); let n = assert_ok!(io::copy(&mut rd, &mut wr).await); assert_eq!(n, 11); assert_eq!(wr, b"hello world"); } #[tokio::test] async fn proxy() { struct BufferedWd { buf: BytesMut, writer: io::DuplexStream, } impl AsyncWrite for BufferedWd { fn poll_write( self: Pin<&mut Self>, _cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { self.get_mut().buf.extend_from_slice(buf); Poll::Ready(Ok(buf.len())) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { let this = self.get_mut(); while !this.buf.is_empty() { let n = ready!(Pin::new(&mut this.writer).poll_write(cx, &this.buf))?; let _ = this.buf.split_to(n); } Pin::new(&mut this.writer).poll_flush(cx) } fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { Pin::new(&mut self.writer).poll_shutdown(cx) } } let (rd, wd) = io::duplex(1024); let mut rd = rd.take(1024); let mut wd = BufferedWd { buf: BytesMut::new(), writer: wd, }; // write start bytes assert_ok!(wd.write_all(&[0x42; 512]).await); assert_ok!(wd.flush().await); let n = assert_ok!(io::copy(&mut rd, &mut wd).await); assert_eq!(n, 1024); } #[tokio::test] async fn copy_is_cooperative() { tokio::select! { biased; _ = async { loop { let mut reader: &[u8] = b"hello"; let mut writer: Vec<u8> = vec![]; let _ = io::copy(&mut reader, &mut writer).await; } } => {}, _ = tokio::task::yield_now() => {} } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/signal_no_rt.rs
tokio/tests/signal_no_rt.rs
#![warn(rust_2018_idioms)] #![cfg(feature = "full")] #![cfg(unix)] #![cfg(not(miri))] // No `sigaction` on Miri. use tokio::signal::unix::{signal, SignalKind}; #[cfg_attr(target_os = "wasi", ignore = "Wasi does not support panic recovery")] #[test] #[should_panic] fn no_runtime_panics_creating_signals() { let _ = signal(SignalKind::hangup()); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/process_kill_on_drop.rs
tokio/tests/process_kill_on_drop.rs
#![cfg(all(unix, feature = "process", not(miri)))] #![warn(rust_2018_idioms)] use std::io::ErrorKind; use std::process::Stdio; use std::time::Duration; use tokio::io::AsyncReadExt; use tokio::process::Command; use tokio::time::sleep; use tokio_test::assert_ok; #[tokio::test] async fn kill_on_drop() { let mut cmd = Command::new("bash"); cmd.args([ "-c", " # Fork another child that won't get killed sh -c 'sleep 1; echo child ran' & disown -a # Await our death sleep 5 echo hello from beyond the grave ", ]); let e = cmd.kill_on_drop(true).stdout(Stdio::piped()).spawn(); if e.is_err() && e.as_ref().unwrap_err().kind() == ErrorKind::NotFound { println!("bash not available; skipping test"); return; } let mut child = e.unwrap(); sleep(Duration::from_secs(2)).await; let mut out = child.stdout.take().unwrap(); drop(child); let mut msg = String::new(); assert_ok!(out.read_to_string(&mut msg).await); assert_eq!("child ran\n", msg); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/tcp_connect.rs
tokio/tests/tcp_connect.rs
#![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(target_os = "wasi"), not(miri)))] // Wasi doesn't support bind // No `socket` on miri. use tokio::net::{TcpListener, TcpStream}; use tokio::sync::oneshot; use tokio_test::assert_ok; use futures::join; #[tokio::test] async fn connect_v4() { let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); let addr = assert_ok!(srv.local_addr()); assert!(addr.is_ipv4()); let (tx, rx) = oneshot::channel(); tokio::spawn(async move { let (socket, addr) = assert_ok!(srv.accept().await); assert_eq!(addr, assert_ok!(socket.peer_addr())); assert_ok!(tx.send(socket)); }); let mine = assert_ok!(TcpStream::connect(&addr).await); let theirs = assert_ok!(rx.await); assert_eq!( assert_ok!(mine.local_addr()), assert_ok!(theirs.peer_addr()) ); assert_eq!( assert_ok!(theirs.local_addr()), assert_ok!(mine.peer_addr()) ); } #[tokio::test] async fn connect_v6() { let srv = assert_ok!(TcpListener::bind("[::1]:0").await); let addr = assert_ok!(srv.local_addr()); assert!(addr.is_ipv6()); let (tx, rx) = oneshot::channel(); tokio::spawn(async move { let (socket, addr) = assert_ok!(srv.accept().await); assert_eq!(addr, assert_ok!(socket.peer_addr())); assert_ok!(tx.send(socket)); }); let mine = assert_ok!(TcpStream::connect(&addr).await); let theirs = assert_ok!(rx.await); assert_eq!( assert_ok!(mine.local_addr()), assert_ok!(theirs.peer_addr()) ); assert_eq!( assert_ok!(theirs.local_addr()), assert_ok!(mine.peer_addr()) ); } #[tokio::test] async fn connect_addr_ip_string() { let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); let addr = assert_ok!(srv.local_addr()); let addr = format!("127.0.0.1:{}", addr.port()); let server = async { assert_ok!(srv.accept().await); }; let client = async { assert_ok!(TcpStream::connect(addr).await); }; join!(server, client); } #[tokio::test] async fn connect_addr_ip_str_slice() { let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); let addr = assert_ok!(srv.local_addr()); let addr = 
format!("127.0.0.1:{}", addr.port()); let server = async { assert_ok!(srv.accept().await); }; let client = async { assert_ok!(TcpStream::connect(&addr[..]).await); }; join!(server, client); } #[tokio::test] async fn connect_addr_host_string() { let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); let addr = assert_ok!(srv.local_addr()); let addr = format!("localhost:{}", addr.port()); let server = async { assert_ok!(srv.accept().await); }; let client = async { assert_ok!(TcpStream::connect(addr).await); }; join!(server, client); } #[tokio::test] async fn connect_addr_ip_port_tuple() { let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); let addr = assert_ok!(srv.local_addr()); let addr = (addr.ip(), addr.port()); let server = async { assert_ok!(srv.accept().await); }; let client = async { assert_ok!(TcpStream::connect(&addr).await); }; join!(server, client); } #[tokio::test] async fn connect_addr_ip_str_port_tuple() { let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); let addr = assert_ok!(srv.local_addr()); let addr = ("127.0.0.1", addr.port()); let server = async { assert_ok!(srv.accept().await); }; let client = async { assert_ok!(TcpStream::connect(&addr).await); }; join!(server, client); } #[tokio::test] async fn connect_addr_host_str_port_tuple() { let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); let addr = assert_ok!(srv.local_addr()); let addr = ("localhost", addr.port()); let server = async { assert_ok!(srv.accept().await); }; let client = async { assert_ok!(TcpStream::connect(&addr).await); }; join!(server, client); } /* * TODO: bring this back once TCP exposes HUP again * #[cfg(target_os = "linux")] mod linux { use tokio::net::{TcpListener, TcpStream}; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio_test::assert_ok; use mio::unix::UnixReady; use futures_util::future::poll_fn; use std::io::Write; use std::time::Duration; use std::{net, thread}; #[tokio::test] #[expect(deprecated)] // set_linger is 
deprecated fn poll_hup() { let addr = assert_ok!("127.0.0.1:0".parse()); let mut srv = assert_ok!(TcpListener::bind(&addr)); let addr = assert_ok!(srv.local_addr()); tokio::spawn(async move { let (mut client, _) = assert_ok!(srv.accept().await); assert_ok!(client.set_linger(Some(Duration::from_millis(0)))); assert_ok!(client.write_all(b"hello world").await); // TODO: Drop? }); /* let t = thread::spawn(move || { let mut client = assert_ok!(srv.accept()).0; client.set_linger(Some(Duration::from_millis(0))).unwrap(); client.write(b"hello world").unwrap(); thread::sleep(Duration::from_millis(200)); }); */ let mut stream = assert_ok!(TcpStream::connect(&addr).await); // Poll for HUP before reading. future::poll_fn(|| stream.poll_read_ready(UnixReady::hup().into())) .wait() .unwrap(); // Same for write half future::poll_fn(|| stream.poll_write_ready()) .wait() .unwrap(); let mut buf = vec![0; 11]; // Read the data future::poll_fn(|| stream.poll_read(&mut buf)) .wait() .unwrap(); assert_eq!(b"hello world", &buf[..]); t.join().unwrap(); } } */
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/tcp_accept.rs
tokio/tests/tcp_accept.rs
#![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(target_os = "wasi"), not(miri)))] // Wasi doesn't support bind // No `socket` on miri. use tokio::net::{TcpListener, TcpStream}; use tokio::sync::{mpsc, oneshot}; use tokio_test::assert_ok; use std::io; use std::net::{IpAddr, SocketAddr}; macro_rules! test_accept { ($(($ident:ident, $target:expr),)*) => { $( #[tokio::test] async fn $ident() { let listener = assert_ok!(TcpListener::bind($target).await); let addr = listener.local_addr().unwrap(); let (tx, rx) = oneshot::channel(); tokio::spawn(async move { let (socket, _) = assert_ok!(listener.accept().await); assert_ok!(tx.send(socket)); }); let cli = assert_ok!(TcpStream::connect(&addr).await); let srv = assert_ok!(rx.await); assert_eq!(cli.local_addr().unwrap(), srv.peer_addr().unwrap()); } )* } } test_accept! { (ip_str, "127.0.0.1:0"), (host_str, "localhost:0"), (socket_addr, "127.0.0.1:0".parse::<SocketAddr>().unwrap()), (str_port_tuple, ("127.0.0.1", 0)), (ip_port_tuple, ("127.0.0.1".parse::<IpAddr>().unwrap(), 0)), } use std::pin::Pin; use std::sync::{ atomic::{AtomicUsize, Ordering::SeqCst}, Arc, }; use std::task::{Context, Poll}; use tokio_stream::{Stream, StreamExt}; struct TrackPolls<'a> { npolls: Arc<AtomicUsize>, listener: &'a mut TcpListener, } impl<'a> Stream for TrackPolls<'a> { type Item = io::Result<(TcpStream, SocketAddr)>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { self.npolls.fetch_add(1, SeqCst); self.listener.poll_accept(cx).map(Some) } } #[tokio::test] async fn no_extra_poll() { let mut listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await); let addr = listener.local_addr().unwrap(); let (tx, rx) = oneshot::channel(); let (accepted_tx, mut accepted_rx) = mpsc::unbounded_channel(); tokio::spawn(async move { let mut incoming = TrackPolls { npolls: Arc::new(AtomicUsize::new(0)), listener: &mut listener, }; assert_ok!(tx.send(Arc::clone(&incoming.npolls))); while 
incoming.next().await.is_some() { accepted_tx.send(()).unwrap(); } }); let npolls = assert_ok!(rx.await); tokio::task::yield_now().await; // should have been polled exactly once: the initial poll assert_eq!(npolls.load(SeqCst), 1); let _ = assert_ok!(TcpStream::connect(&addr).await); accepted_rx.recv().await.unwrap(); // should have been polled twice more: once to yield Some(), then once to yield Pending assert_eq!(npolls.load(SeqCst), 1 + 2); } #[tokio::test] async fn accept_many() { use std::future::{poll_fn, Future}; use std::sync::atomic::AtomicBool; const N: usize = 50; let listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await); let listener = Arc::new(listener); let addr = listener.local_addr().unwrap(); let connected = Arc::new(AtomicBool::new(false)); let (pending_tx, mut pending_rx) = mpsc::unbounded_channel(); let (notified_tx, mut notified_rx) = mpsc::unbounded_channel(); for _ in 0..N { let listener = listener.clone(); let connected = connected.clone(); let pending_tx = pending_tx.clone(); let notified_tx = notified_tx.clone(); tokio::spawn(async move { let accept = listener.accept(); tokio::pin!(accept); let mut polled = false; poll_fn(|cx| { if !polled { polled = true; assert!(Pin::new(&mut accept).poll(cx).is_pending()); pending_tx.send(()).unwrap(); Poll::Pending } else if connected.load(SeqCst) { notified_tx.send(()).unwrap(); Poll::Ready(()) } else { Poll::Pending } }) .await; pending_tx.send(()).unwrap(); }); } // Wait for all tasks to have polled at least once for _ in 0..N { pending_rx.recv().await.unwrap(); } // Establish a TCP connection connected.store(true, SeqCst); let _sock = TcpStream::connect(addr).await.unwrap(); // Wait for all notifications for _ in 0..N { notified_rx.recv().await.unwrap(); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/async_send_sync.rs
tokio/tests/async_send_sync.rs
#![warn(rust_2018_idioms)] #![cfg(feature = "full")] #![allow(clippy::type_complexity, clippy::diverging_sub_expression)] use std::cell::Cell; use std::future::Future; use std::io::SeekFrom; use std::net::SocketAddr; use std::pin::Pin; use std::rc::Rc; use tokio::net::TcpStream; use tokio::time::{Duration, Instant}; // The names of these structs behaves better when sorted. // Send: Yes, Sync: Yes #[derive(Clone)] #[allow(unused)] struct YY {} // Send: Yes, Sync: No #[derive(Clone)] #[allow(unused)] struct YN { _value: Cell<u8>, } // Send: No, Sync: No #[derive(Clone)] #[allow(unused)] struct NN { _value: Rc<u8>, } #[allow(dead_code)] type BoxFutureSync<T> = std::pin::Pin<Box<dyn std::future::Future<Output = T> + Send + Sync>>; #[allow(dead_code)] type BoxFutureSend<T> = std::pin::Pin<Box<dyn std::future::Future<Output = T> + Send>>; #[allow(dead_code)] type BoxFuture<T> = std::pin::Pin<Box<dyn std::future::Future<Output = T>>>; #[allow(dead_code)] type BoxAsyncRead = std::pin::Pin<Box<dyn tokio::io::AsyncBufRead + Send + Sync>>; #[allow(dead_code)] type BoxAsyncSeek = std::pin::Pin<Box<dyn tokio::io::AsyncSeek + Send + Sync>>; #[allow(dead_code)] type BoxAsyncWrite = std::pin::Pin<Box<dyn tokio::io::AsyncWrite + Send + Sync>>; #[allow(dead_code)] fn require_send<T: Send>(_t: &T) {} #[allow(dead_code)] fn require_sync<T: Sync>(_t: &T) {} #[allow(dead_code)] fn require_unpin<T: Unpin>(_t: &T) {} #[allow(dead_code)] struct Invalid; #[allow(unused)] trait AmbiguousIfSend<A> { fn some_item(&self) {} } impl<T: ?Sized> AmbiguousIfSend<()> for T {} impl<T: ?Sized + Send> AmbiguousIfSend<Invalid> for T {} #[allow(unused)] trait AmbiguousIfSync<A> { fn some_item(&self) {} } impl<T: ?Sized> AmbiguousIfSync<()> for T {} impl<T: ?Sized + Sync> AmbiguousIfSync<Invalid> for T {} #[allow(unused)] trait AmbiguousIfUnpin<A> { fn some_item(&self) {} } impl<T: ?Sized> AmbiguousIfUnpin<()> for T {} impl<T: ?Sized + Unpin> AmbiguousIfUnpin<Invalid> for T {} macro_rules! 
into_todo { ($typ:ty) => {{ let x: $typ = todo!(); x }}; } macro_rules! async_assert_fn_send { (Send & $(!)?Sync & $(!)?Unpin, $value:expr) => { require_send(&$value); }; (!Send & $(!)?Sync & $(!)?Unpin, $value:expr) => { AmbiguousIfSend::some_item(&$value); }; } macro_rules! async_assert_fn_sync { ($(!)?Send & Sync & $(!)?Unpin, $value:expr) => { require_sync(&$value); }; ($(!)?Send & !Sync & $(!)?Unpin, $value:expr) => { AmbiguousIfSync::some_item(&$value); }; } macro_rules! async_assert_fn_unpin { ($(!)?Send & $(!)?Sync & Unpin, $value:expr) => { require_unpin(&$value); }; ($(!)?Send & $(!)?Sync & !Unpin, $value:expr) => { AmbiguousIfUnpin::some_item(&$value); }; } macro_rules! async_assert_fn { ($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): $($tok:tt)*) => { #[allow(unreachable_code)] #[allow(unused_variables)] const _: fn() = || { let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* ); async_assert_fn_send!($($tok)*, f); async_assert_fn_sync!($($tok)*, f); async_assert_fn_unpin!($($tok)*, f); }; }; } macro_rules! assert_value { ($type:ty: $($tok:tt)*) => { #[allow(unreachable_code)] #[allow(unused_variables)] const _: fn() = || { let f: $type = todo!(); async_assert_fn_send!($($tok)*, f); async_assert_fn_sync!($($tok)*, f); async_assert_fn_unpin!($($tok)*, f); }; }; } macro_rules! cfg_not_wasi { ($($item:item)*) => { $( #[cfg(not(target_os = "wasi"))] $item )* } } // Manually re-implementation of `async_assert_fn` for `poll_fn`. The macro // doesn't work for this particular case because constructing the closure // is too complicated. const _: fn() = || { let pinned = std::marker::PhantomPinned; let f = tokio::macros::support::poll_fn(move |_| { // Use `pinned` to take ownership of it. let _ = &pinned; std::task::Poll::Pending::<()> }); require_send(&f); require_sync(&f); AmbiguousIfUnpin::some_item(&f); }; cfg_not_wasi! 
{ mod fs { use super::*; assert_value!(tokio::fs::DirBuilder: Send & Sync & Unpin); assert_value!(tokio::fs::DirEntry: Send & Sync & Unpin); assert_value!(tokio::fs::File: Send & Sync & Unpin); assert_value!(tokio::fs::OpenOptions: Send & Sync & Unpin); assert_value!(tokio::fs::ReadDir: Send & Sync & Unpin); async_assert_fn!(tokio::fs::canonicalize(&str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::copy(&str, &str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::create_dir(&str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::create_dir_all(&str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::hard_link(&str, &str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::metadata(&str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::read(&str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::read_dir(&str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::read_link(&str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::read_to_string(&str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::remove_dir(&str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::remove_dir_all(&str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::remove_file(&str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::rename(&str, &str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::set_permissions(&str, std::fs::Permissions): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::symlink_metadata(&str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::write(&str, Vec<u8>): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::ReadDir::next_entry(_): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::OpenOptions::open(_, &str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::DirBuilder::create(_, &str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::DirEntry::metadata(_): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::DirEntry::file_type(_): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::File::open(&str): Send & Sync & !Unpin); 
async_assert_fn!(tokio::fs::File::create(&str): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::File::sync_all(_): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::File::sync_data(_): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::File::set_len(_, u64): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::File::metadata(_): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::File::try_clone(_): Send & Sync & !Unpin); async_assert_fn!(tokio::fs::File::into_std(_): Send & Sync & !Unpin); async_assert_fn!( tokio::fs::File::set_permissions(_, std::fs::Permissions): Send & Sync & !Unpin ); } } cfg_not_wasi! { assert_value!(tokio::net::TcpSocket: Send & Sync & Unpin); async_assert_fn!(tokio::net::TcpListener::bind(SocketAddr): Send & Sync & !Unpin); async_assert_fn!(tokio::net::TcpStream::connect(SocketAddr): Send & Sync & !Unpin); } assert_value!(tokio::net::TcpListener: Send & Sync & Unpin); assert_value!(tokio::net::TcpStream: Send & Sync & Unpin); assert_value!(tokio::net::tcp::OwnedReadHalf: Send & Sync & Unpin); assert_value!(tokio::net::tcp::OwnedWriteHalf: Send & Sync & Unpin); assert_value!(tokio::net::tcp::ReadHalf<'_>: Send & Sync & Unpin); assert_value!(tokio::net::tcp::ReuniteError: Send & Sync & Unpin); assert_value!(tokio::net::tcp::WriteHalf<'_>: Send & Sync & Unpin); async_assert_fn!(tokio::net::TcpListener::accept(_): Send & Sync & !Unpin); async_assert_fn!(tokio::net::TcpStream::peek(_, &mut [u8]): Send & Sync & !Unpin); async_assert_fn!(tokio::net::TcpStream::readable(_): Send & Sync & !Unpin); async_assert_fn!(tokio::net::TcpStream::ready(_, tokio::io::Interest): Send & Sync & !Unpin); async_assert_fn!(tokio::net::TcpStream::writable(_): Send & Sync & !Unpin); // Wasi does not support UDP cfg_not_wasi! 
{ mod udp_socket { use super::*; assert_value!(tokio::net::UdpSocket: Send & Sync & Unpin); async_assert_fn!(tokio::net::UdpSocket::bind(SocketAddr): Send & Sync & !Unpin); async_assert_fn!(tokio::net::UdpSocket::connect(_, SocketAddr): Send & Sync & !Unpin); async_assert_fn!(tokio::net::UdpSocket::peek_from(_, &mut [u8]): Send & Sync & !Unpin); async_assert_fn!(tokio::net::UdpSocket::readable(_): Send & Sync & !Unpin); async_assert_fn!(tokio::net::UdpSocket::ready(_, tokio::io::Interest): Send & Sync & !Unpin); async_assert_fn!(tokio::net::UdpSocket::recv(_, &mut [u8]): Send & Sync & !Unpin); async_assert_fn!(tokio::net::UdpSocket::recv_from(_, &mut [u8]): Send & Sync & !Unpin); async_assert_fn!(tokio::net::UdpSocket::send(_, &[u8]): Send & Sync & !Unpin); async_assert_fn!(tokio::net::UdpSocket::send_to(_, &[u8], SocketAddr): Send & Sync & !Unpin); async_assert_fn!(tokio::net::UdpSocket::writable(_): Send & Sync & !Unpin); } } async_assert_fn!(tokio::net::lookup_host(SocketAddr): Send & Sync & !Unpin); async_assert_fn!(tokio::net::tcp::ReadHalf::peek(_, &mut [u8]): Send & Sync & !Unpin); #[cfg(unix)] mod unix_datagram { use super::*; use tokio::net::*; assert_value!(UnixDatagram: Send & Sync & Unpin); assert_value!(UnixListener: Send & Sync & Unpin); assert_value!(UnixStream: Send & Sync & Unpin); assert_value!(unix::OwnedReadHalf: Send & Sync & Unpin); assert_value!(unix::OwnedWriteHalf: Send & Sync & Unpin); assert_value!(unix::ReadHalf<'_>: Send & Sync & Unpin); assert_value!(unix::ReuniteError: Send & Sync & Unpin); assert_value!(unix::SocketAddr: Send & Sync & Unpin); assert_value!(unix::UCred: Send & Sync & Unpin); assert_value!(unix::WriteHalf<'_>: Send & Sync & Unpin); async_assert_fn!(UnixDatagram::readable(_): Send & Sync & !Unpin); async_assert_fn!(UnixDatagram::ready(_, tokio::io::Interest): Send & Sync & !Unpin); async_assert_fn!(UnixDatagram::recv(_, &mut [u8]): Send & Sync & !Unpin); async_assert_fn!(UnixDatagram::recv_from(_, &mut [u8]): Send & 
Sync & !Unpin); async_assert_fn!(UnixDatagram::send(_, &[u8]): Send & Sync & !Unpin); async_assert_fn!(UnixDatagram::send_to(_, &[u8], &str): Send & Sync & !Unpin); async_assert_fn!(UnixDatagram::writable(_): Send & Sync & !Unpin); async_assert_fn!(UnixListener::accept(_): Send & Sync & !Unpin); async_assert_fn!(UnixStream::connect(&str): Send & Sync & !Unpin); async_assert_fn!(UnixStream::readable(_): Send & Sync & !Unpin); async_assert_fn!(UnixStream::ready(_, tokio::io::Interest): Send & Sync & !Unpin); async_assert_fn!(UnixStream::writable(_): Send & Sync & !Unpin); } #[cfg(unix)] mod unix_pipe { use super::*; use tokio::net::unix::pipe::*; assert_value!(OpenOptions: Send & Sync & Unpin); assert_value!(Receiver: Send & Sync & Unpin); assert_value!(Sender: Send & Sync & Unpin); async_assert_fn!(Receiver::readable(_): Send & Sync & !Unpin); async_assert_fn!(Receiver::ready(_, tokio::io::Interest): Send & Sync & !Unpin); async_assert_fn!(Sender::ready(_, tokio::io::Interest): Send & Sync & !Unpin); async_assert_fn!(Sender::writable(_): Send & Sync & !Unpin); } #[cfg(windows)] mod windows_named_pipe { use super::*; use tokio::net::windows::named_pipe::*; assert_value!(ClientOptions: Send & Sync & Unpin); assert_value!(NamedPipeClient: Send & Sync & Unpin); assert_value!(NamedPipeServer: Send & Sync & Unpin); assert_value!(PipeEnd: Send & Sync & Unpin); assert_value!(PipeInfo: Send & Sync & Unpin); assert_value!(PipeMode: Send & Sync & Unpin); assert_value!(ServerOptions: Send & Sync & Unpin); async_assert_fn!(NamedPipeClient::readable(_): Send & Sync & !Unpin); async_assert_fn!(NamedPipeClient::ready(_, tokio::io::Interest): Send & Sync & !Unpin); async_assert_fn!(NamedPipeClient::writable(_): Send & Sync & !Unpin); async_assert_fn!(NamedPipeServer::connect(_): Send & Sync & !Unpin); async_assert_fn!(NamedPipeServer::readable(_): Send & Sync & !Unpin); async_assert_fn!(NamedPipeServer::ready(_, tokio::io::Interest): Send & Sync & !Unpin); 
async_assert_fn!(NamedPipeServer::writable(_): Send & Sync & !Unpin); } cfg_not_wasi! { mod test_process { use super::*; assert_value!(tokio::process::Child: Send & Sync & Unpin); assert_value!(tokio::process::ChildStderr: Send & Sync & Unpin); assert_value!(tokio::process::ChildStdin: Send & Sync & Unpin); assert_value!(tokio::process::ChildStdout: Send & Sync & Unpin); assert_value!(tokio::process::Command: Send & Sync & Unpin); async_assert_fn!(tokio::process::Child::kill(_): Send & Sync & !Unpin); async_assert_fn!(tokio::process::Child::wait(_): Send & Sync & !Unpin); async_assert_fn!(tokio::process::Child::wait_with_output(_): Send & Sync & !Unpin); } async_assert_fn!(tokio::signal::ctrl_c(): Send & Sync & !Unpin); } #[cfg(unix)] mod unix_signal { use super::*; assert_value!(tokio::signal::unix::Signal: Send & Sync & Unpin); assert_value!(tokio::signal::unix::SignalKind: Send & Sync & Unpin); async_assert_fn!(tokio::signal::unix::Signal::recv(_): Send & Sync & !Unpin); } #[cfg(windows)] mod windows_signal { use super::*; assert_value!(tokio::signal::windows::CtrlC: Send & Sync & Unpin); assert_value!(tokio::signal::windows::CtrlBreak: Send & Sync & Unpin); async_assert_fn!(tokio::signal::windows::CtrlC::recv(_): Send & Sync & !Unpin); async_assert_fn!(tokio::signal::windows::CtrlBreak::recv(_): Send & Sync & !Unpin); } assert_value!(tokio::sync::AcquireError: Send & Sync & Unpin); assert_value!(tokio::sync::Barrier: Send & Sync & Unpin); assert_value!(tokio::sync::BarrierWaitResult: Send & Sync & Unpin); assert_value!(tokio::sync::MappedMutexGuard<'_, NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::MappedMutexGuard<'_, YN>: Send & !Sync & Unpin); assert_value!(tokio::sync::MappedMutexGuard<'_, YY>: Send & Sync & Unpin); assert_value!(tokio::sync::Mutex<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::Mutex<YN>: Send & Sync & Unpin); assert_value!(tokio::sync::Mutex<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::MutexGuard<'_, NN>: !Send & 
!Sync & Unpin); assert_value!(tokio::sync::MutexGuard<'_, YN>: Send & !Sync & Unpin); assert_value!(tokio::sync::MutexGuard<'_, YY>: Send & Sync & Unpin); assert_value!(tokio::sync::Notify: Send & Sync & Unpin); assert_value!(tokio::sync::OnceCell<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::OnceCell<YN>: Send & !Sync & Unpin); assert_value!(tokio::sync::OnceCell<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::SetOnce<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::SetOnce<YN>: Send & !Sync & Unpin); assert_value!(tokio::sync::SetOnce<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::OwnedMutexGuard<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedMutexGuard<YN>: Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedMutexGuard<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::OwnedMappedMutexGuard<NN,NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedMappedMutexGuard<NN,YN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedMappedMutexGuard<NN,YY>: !Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedMappedMutexGuard<YN,NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedMappedMutexGuard<YN,YN>: Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedMappedMutexGuard<YN,YY>: Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedMappedMutexGuard<YY,NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedMappedMutexGuard<YY,YN>: Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedMappedMutexGuard<YY,YY>: Send & Sync & Unpin); assert_value!(tokio::sync::OwnedRwLockMappedWriteGuard<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedRwLockMappedWriteGuard<YN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedRwLockMappedWriteGuard<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::OwnedRwLockReadGuard<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedRwLockReadGuard<YN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedRwLockReadGuard<YY>: Send & Sync 
& Unpin); assert_value!(tokio::sync::OwnedRwLockWriteGuard<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedRwLockWriteGuard<YN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::OwnedRwLockWriteGuard<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::OwnedSemaphorePermit: Send & Sync & Unpin); assert_value!(tokio::sync::RwLock<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::RwLock<YN>: Send & !Sync & Unpin); assert_value!(tokio::sync::RwLock<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::RwLockMappedWriteGuard<'_, NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::RwLockMappedWriteGuard<'_, YN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::RwLockMappedWriteGuard<'_, YY>: Send & Sync & Unpin); assert_value!(tokio::sync::RwLockReadGuard<'_, NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::RwLockReadGuard<'_, YN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::RwLockReadGuard<'_, YY>: Send & Sync & Unpin); assert_value!(tokio::sync::RwLockWriteGuard<'_, NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::RwLockWriteGuard<'_, YN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::RwLockWriteGuard<'_, YY>: Send & Sync & Unpin); assert_value!(tokio::sync::Semaphore: Send & Sync & Unpin); assert_value!(tokio::sync::SemaphorePermit<'_>: Send & Sync & Unpin); assert_value!(tokio::sync::TryAcquireError: Send & Sync & Unpin); assert_value!(tokio::sync::TryLockError: Send & Sync & Unpin); assert_value!(tokio::sync::broadcast::Receiver<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::broadcast::Receiver<YN>: Send & Sync & Unpin); assert_value!(tokio::sync::broadcast::Receiver<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::broadcast::Sender<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::broadcast::Sender<YN>: Send & Sync & Unpin); assert_value!(tokio::sync::broadcast::Sender<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::broadcast::WeakSender<NN>: !Send & !Sync & Unpin); 
assert_value!(tokio::sync::broadcast::WeakSender<YN>: Send & Sync & Unpin); assert_value!(tokio::sync::broadcast::WeakSender<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::futures::Notified<'_>: Send & Sync & !Unpin); assert_value!(tokio::sync::futures::OwnedNotified: Send & Sync & !Unpin); assert_value!(tokio::sync::mpsc::OwnedPermit<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::mpsc::OwnedPermit<YN>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::OwnedPermit<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::Permit<'_, NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::mpsc::Permit<'_, YN>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::Permit<'_, YY>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::Receiver<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::mpsc::Receiver<YN>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::Receiver<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::Sender<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::mpsc::Sender<YN>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::Sender<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::UnboundedReceiver<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::mpsc::UnboundedReceiver<YN>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::UnboundedReceiver<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::UnboundedSender<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::mpsc::UnboundedSender<YN>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::UnboundedSender<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::WeakSender<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::mpsc::WeakSender<YN>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::WeakSender<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::WeakUnboundedSender<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::mpsc::WeakUnboundedSender<YN>: Send & Sync & Unpin); 
assert_value!(tokio::sync::mpsc::WeakUnboundedSender<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::error::SendError<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::mpsc::error::SendError<YN>: Send & !Sync & Unpin); assert_value!(tokio::sync::mpsc::error::SendError<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::error::SendTimeoutError<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::mpsc::error::SendTimeoutError<YN>: Send & !Sync & Unpin); assert_value!(tokio::sync::mpsc::error::SendTimeoutError<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::mpsc::error::TrySendError<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::mpsc::error::TrySendError<YN>: Send & !Sync & Unpin); assert_value!(tokio::sync::mpsc::error::TrySendError<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::oneshot::Receiver<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::oneshot::Receiver<YN>: Send & Sync & Unpin); assert_value!(tokio::sync::oneshot::Receiver<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::oneshot::Sender<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::oneshot::Sender<YN>: Send & Sync & Unpin); assert_value!(tokio::sync::oneshot::Sender<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::watch::Receiver<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::watch::Receiver<YN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::watch::Receiver<YY>: Send & Sync & Unpin); assert_value!(tokio::sync::watch::Ref<'_, NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::watch::Ref<'_, YN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::watch::Ref<'_, YY>: !Send & Sync & Unpin); assert_value!(tokio::sync::watch::Sender<NN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::watch::Sender<YN>: !Send & !Sync & Unpin); assert_value!(tokio::sync::watch::Sender<YY>: Send & Sync & Unpin); assert_value!(tokio::task::JoinError: Send & Sync & Unpin); assert_value!(tokio::task::JoinHandle<NN>: !Send & !Sync & 
Unpin); assert_value!(tokio::task::JoinHandle<YN>: Send & Sync & Unpin); assert_value!(tokio::task::JoinHandle<YY>: Send & Sync & Unpin); assert_value!(tokio::task::JoinSet<NN>: !Send & !Sync & Unpin); assert_value!(tokio::task::JoinSet<YN>: Send & Sync & Unpin); assert_value!(tokio::task::JoinSet<YY>: Send & Sync & Unpin); assert_value!(tokio::task::LocalSet: !Send & !Sync & Unpin); assert_value!(tokio::task::coop::RestoreOnPending: !Send & !Sync & Unpin); async_assert_fn!(tokio::sync::Barrier::wait(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::Mutex<NN>::lock(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::Mutex<NN>::lock_owned(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::Mutex<YN>::lock(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::Mutex<YN>::lock_owned(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::Mutex<YY>::lock(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::Mutex<YY>::lock_owned(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::Notify::notified(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = NN> + Send + Sync>>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = NN> + Send>>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = NN>>>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<NN>> + Send + Sync>>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<NN>> + Send>>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<NN>>>>): !Send & !Sync & !Unpin); 
async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YN> + Send + Sync>>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YN> + Send>>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YN>>>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YN>> + Send + Sync>>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YN>> + Send>>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YN>>>>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YY> + Send + Sync>>): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YY> + Send>>): Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YY>>>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YY>> + Send + Sync>>): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YY>> + Send>>): Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YY>>>>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::SetOnce<NN>::wait(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::SetOnce<YN>::wait(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::SetOnce<YY>::wait(_): Send & Sync & !Unpin); 
async_assert_fn!(tokio::sync::RwLock<NN>::read(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::RwLock<NN>::write(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::RwLock<YN>::read(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::RwLock<YN>::write(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::RwLock<YY>::read(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::RwLock<YY>::write(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::Semaphore::acquire(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::Semaphore::acquire_many(_, u32): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::Semaphore::acquire_many_owned(_, u32): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::Semaphore::acquire_owned(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::broadcast::Receiver<NN>::recv(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::broadcast::Receiver<YN>::recv(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::broadcast::Receiver<YY>::recv(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Receiver<NN>::recv(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Receiver<YN>::recv(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Receiver<YY>::recv(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Sender<NN>::closed(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Sender<NN>::reserve(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Sender<NN>::reserve_owned(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Sender<NN>::send(_, NN): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Sender<NN>::send_timeout(_, NN, Duration): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Sender<YN>::closed(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Sender<YN>::reserve(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Sender<YN>::reserve_owned(_): Send & Sync & 
!Unpin); async_assert_fn!(tokio::sync::mpsc::Sender<YN>::send(_, YN): Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Sender<YN>::send_timeout(_, YN, Duration): Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Sender<YY>::closed(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Sender<YY>::reserve(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Sender<YY>::reserve_owned(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Sender<YY>::send(_, YY): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::Sender<YY>::send_timeout(_, YY, Duration): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<NN>::recv(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<YN>::recv(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<YY>::recv(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::UnboundedSender<NN>::closed(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::UnboundedSender<YN>::closed(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::mpsc::UnboundedSender<YY>::closed(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::oneshot::Sender<NN>::closed(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::oneshot::Sender<YN>::closed(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::oneshot::Sender<YY>::closed(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::watch::Receiver<NN>::changed(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::watch::Receiver<YN>::changed(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::watch::Receiver<YY>::changed(_): Send & Sync & !Unpin); async_assert_fn!(tokio::sync::watch::Sender<NN>::closed(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::watch::Sender<YN>::closed(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::sync::watch::Sender<YY>::closed(_): Send & Sync & !Unpin); 
async_assert_fn!(tokio::task::JoinSet<Cell<u32>>::join_next(_): Send & Sync & !Unpin); async_assert_fn!(tokio::task::JoinSet<Cell<u32>>::shutdown(_): Send & Sync & !Unpin); async_assert_fn!(tokio::task::JoinSet<Rc<u32>>::join_next(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::task::JoinSet<Rc<u32>>::shutdown(_): !Send & !Sync & !Unpin); async_assert_fn!(tokio::task::JoinSet<u32>::join_next(_): Send & Sync & !Unpin); async_assert_fn!(tokio::task::JoinSet<u32>::shutdown(_): Send & Sync & !Unpin); async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFuture<()>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFutureSend<()>): Send & !Sync & !Unpin); async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFutureSync<()>): Send & !Sync & !Unpin); async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFuture<()>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFutureSend<()>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFutureSync<()>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFuture<()>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFutureSend<()>): Send & !Sync & !Unpin); async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFutureSync<()>): Send & Sync & !Unpin); async_assert_fn!(tokio::task::LocalSet::run_until(_, BoxFutureSync<()>): !Send & !Sync & !Unpin); async_assert_fn!(tokio::task::unconstrained(BoxFuture<()>): !Send & !Sync & Unpin); async_assert_fn!(tokio::task::unconstrained(BoxFutureSend<()>): Send & !Sync & Unpin); async_assert_fn!(tokio::task::unconstrained(BoxFutureSync<()>): Send & Sync & Unpin); assert_value!(tokio::runtime::Builder: Send & Sync & Unpin); assert_value!(tokio::runtime::EnterGuard<'_>: !Send & Sync & Unpin); 
assert_value!(tokio::runtime::Handle: Send & Sync & Unpin); assert_value!(tokio::runtime::Runtime: Send & Sync & Unpin); assert_value!(tokio::time::Interval: Send & Sync & Unpin);
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
true
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/signal_drop_rt.rs
tokio/tests/signal_drop_rt.rs
#![warn(rust_2018_idioms)] #![cfg(feature = "full")] #![cfg(unix)] #![cfg(not(miri))] // No `sigaction` in miri. mod support { pub mod signal; } use support::signal::send_signal; use tokio::runtime::Runtime; use tokio::signal::unix::{signal, SignalKind}; #[test] fn dropping_loops_does_not_cause_starvation() { let kind = SignalKind::user_defined1(); let first_rt = rt(); let mut first_signal = first_rt.block_on(async { signal(kind).expect("failed to register first signal") }); let second_rt = rt(); let mut second_signal = second_rt.block_on(async { signal(kind).expect("failed to register second signal") }); send_signal(libc::SIGUSR1); first_rt .block_on(first_signal.recv()) .expect("failed to await first signal"); drop(first_rt); drop(first_signal); send_signal(libc::SIGUSR1); second_rt.block_on(second_signal.recv()); } fn rt() -> Runtime { tokio::runtime::Builder::new_current_thread() .enable_all() .build() .unwrap() }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/task_hooks.rs
tokio/tests/task_hooks.rs
#![warn(rust_2018_idioms)] #![cfg(all(feature = "full", tokio_unstable, target_has_atomic = "64"))] use std::collections::HashSet; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; use tokio::runtime::Builder; const TASKS: usize = 8; const ITERATIONS: usize = 64; /// Assert that the spawn task hook always fires when set. #[test] fn spawn_task_hook_fires() { let count = Arc::new(AtomicUsize::new(0)); let count2 = Arc::clone(&count); let ids = Arc::new(Mutex::new(HashSet::new())); let ids2 = Arc::clone(&ids); let runtime = Builder::new_current_thread() .on_task_spawn(move |data| { ids2.lock().unwrap().insert(data.id()); count2.fetch_add(1, Ordering::SeqCst); }) .build() .unwrap(); for _ in 0..TASKS { runtime.spawn(std::future::pending::<()>()); } let count_realized = count.load(Ordering::SeqCst); assert_eq!( TASKS, count_realized, "Total number of spawned task hook invocations was incorrect, expected {TASKS}, got {count_realized}" ); let count_ids_realized = ids.lock().unwrap().len(); assert_eq!( TASKS, count_ids_realized, "Total number of spawned task hook invocations was incorrect, expected {TASKS}, got {count_realized}" ); } /// Assert that the terminate task hook always fires when set. #[test] fn terminate_task_hook_fires() { let count = Arc::new(AtomicUsize::new(0)); let count2 = Arc::clone(&count); let runtime = Builder::new_current_thread() .on_task_terminate(move |_data| { count2.fetch_add(1, Ordering::SeqCst); }) .build() .unwrap(); for _ in 0..TASKS { runtime.spawn(std::future::ready(())); } runtime.block_on(async { // tick the runtime a bunch to close out tasks for _ in 0..ITERATIONS { tokio::task::yield_now().await; } }); assert_eq!(TASKS, count.load(Ordering::SeqCst)); } /// Test that the correct spawn location is provided to the task hooks on a /// current thread runtime. 
#[test] fn task_hook_spawn_location_current_thread() { let spawns = Arc::new(AtomicUsize::new(0)); let poll_starts = Arc::new(AtomicUsize::new(0)); let poll_ends = Arc::new(AtomicUsize::new(0)); let runtime = Builder::new_current_thread() .on_task_spawn(mk_spawn_location_hook( "(current_thread) on_task_spawn", &spawns, )) .on_before_task_poll(mk_spawn_location_hook( "(current_thread) on_before_task_poll", &poll_starts, )) .on_after_task_poll(mk_spawn_location_hook( "(current_thread) on_after_task_poll", &poll_ends, )) .build() .unwrap(); let task = runtime.spawn(async move { tokio::task::yield_now().await }); runtime.block_on(async move { // Spawn tasks using both `runtime.spawn(...)` and `tokio::spawn(...)` // to ensure the correct location is captured in both code paths. task.await.unwrap(); tokio::spawn(async move {}).await.unwrap(); // tick the runtime a bunch to close out tasks for _ in 0..ITERATIONS { tokio::task::yield_now().await; } }); assert_eq!(spawns.load(Ordering::SeqCst), 2); let poll_starts = poll_starts.load(Ordering::SeqCst); assert!(poll_starts > 2); assert_eq!(poll_starts, poll_ends.load(Ordering::SeqCst)); } /// Test that the correct spawn location is provided to the task hooks on a /// multi-thread runtime. /// /// Testing this separately is necessary as the spawn code paths are different /// and we should ensure that `#[track_caller]` is passed through correctly /// for both runtimes. 
#[cfg_attr( target_os = "wasi", ignore = "WASI does not support multi-threaded runtime" )] #[test] fn task_hook_spawn_location_multi_thread() { let spawns = Arc::new(AtomicUsize::new(0)); let poll_starts = Arc::new(AtomicUsize::new(0)); let poll_ends = Arc::new(AtomicUsize::new(0)); let runtime = Builder::new_multi_thread() .on_task_spawn(mk_spawn_location_hook( "(multi_thread) on_task_spawn", &spawns, )) .on_before_task_poll(mk_spawn_location_hook( "(multi_thread) on_before_task_poll", &poll_starts, )) .on_after_task_poll(mk_spawn_location_hook( "(multi_thread) on_after_task_poll", &poll_ends, )) .build() .unwrap(); let task = runtime.spawn(async move { tokio::task::yield_now().await }); runtime.block_on(async move { // Spawn tasks using both `runtime.spawn(...)` and `tokio::spawn(...)` // to ensure the correct location is captured in both code paths. task.await.unwrap(); tokio::spawn(async move {}).await.unwrap(); // tick the runtime a bunch to close out tasks for _ in 0..ITERATIONS { tokio::task::yield_now().await; } }); // Give the runtime to shut down so that we see all the expected calls to // the task hooks. runtime.shutdown_timeout(std::time::Duration::from_secs(60)); // Note: we "read" the counters using `fetch_add(0, SeqCst)` rather than // `load(SeqCst)` because read-write-modify operations are guaranteed to // observe the latest value, while the load is not. // This avoids a race that may cause test flakiness. assert_eq!(spawns.fetch_add(0, Ordering::SeqCst), 2); let poll_starts = poll_starts.fetch_add(0, Ordering::SeqCst); assert!(poll_starts > 2); assert_eq!(poll_starts, poll_ends.fetch_add(0, Ordering::SeqCst)); } fn mk_spawn_location_hook( event: &'static str, count: &Arc<AtomicUsize>, ) -> impl Fn(&tokio::runtime::TaskMeta<'_>) { let count = Arc::clone(count); move |data| { eprintln!("{event} ({:?}): {:?}", data.id(), data.spawned_at()); // Assert that the spawn location is in this file. 
// Don't make assertions about line number/column here, as these // may change as new code is added to the test file... assert_eq!( data.spawned_at().file(), file!(), "incorrect spawn location in {event} hook", ); count.fetch_add(1, Ordering::SeqCst); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/time_alt.rs
tokio/tests/time_alt.rs
#![warn(rust_2018_idioms)] #![cfg(all(tokio_unstable, feature = "time", feature = "rt-multi-thread"))] use tokio::runtime::Runtime; use tokio::time::*; fn rt_combinations() -> Vec<Runtime> { let mut rts = vec![]; let rt = tokio::runtime::Builder::new_multi_thread() .worker_threads(1) .enable_all() .build() .unwrap(); rts.push(rt); let rt = tokio::runtime::Builder::new_multi_thread() .worker_threads(4) .enable_all() .build() .unwrap(); rts.push(rt); #[cfg(tokio_unstable)] { let rt = tokio::runtime::Builder::new_multi_thread() .worker_threads(1) .enable_alt_timer() .enable_all() .build() .unwrap(); rts.push(rt); let rt = tokio::runtime::Builder::new_multi_thread() .worker_threads(4) .enable_alt_timer() .enable_all() .build() .unwrap(); rts.push(rt); } rts } #[test] fn sleep() { const N: u32 = 512; for rt in rt_combinations() { rt.block_on(async { let mut jhs = vec![]; // sleep outside of the worker threads let now = Instant::now(); tokio::time::sleep(Duration::from_millis(10)).await; assert!(now.elapsed() >= Duration::from_millis(10)); for _ in 0..N { let jh = tokio::spawn(async move { // sleep inside of the worker threads let now = Instant::now(); tokio::time::sleep(Duration::from_millis(10)).await; assert!(now.elapsed() >= Duration::from_millis(10)); }); jhs.push(jh); } for jh in jhs { jh.await.unwrap(); } }); } } #[test] fn timeout() { const N: u32 = 512; for rt in rt_combinations() { rt.block_on(async { let mut jhs = vec![]; // timeout outside of the worker threads let now = Instant::now(); tokio::time::timeout(Duration::from_millis(10), std::future::pending::<()>()) .await .expect_err("timeout should occur"); assert!(now.elapsed() >= Duration::from_millis(10)); for _ in 0..N { let jh = tokio::spawn(async move { let now = Instant::now(); // timeout inside of the worker threads tokio::time::timeout(Duration::from_millis(10), std::future::pending::<()>()) .await .expect_err("timeout should occur"); assert!(now.elapsed() >= Duration::from_millis(10)); }); 
jhs.push(jh); } for jh in jhs { jh.await.unwrap(); } }); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/sync_rwlock.rs
tokio/tests/sync_rwlock.rs
#![warn(rust_2018_idioms)] #![cfg(feature = "sync")] #[cfg(all(target_family = "wasm", not(target_os = "wasi")))] use wasm_bindgen_test::wasm_bindgen_test as test; #[cfg(all(target_family = "wasm", not(target_os = "wasi")))] use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test; #[cfg(not(all(target_family = "wasm", not(target_os = "wasi"))))] use tokio::test as maybe_tokio_test; use std::task::Poll; use futures::future::FutureExt; use tokio::sync::{RwLock, RwLockWriteGuard}; use tokio_test::task::spawn; use tokio_test::{assert_pending, assert_ready}; #[test] fn into_inner() { let rwlock = RwLock::new(42); assert_eq!(rwlock.into_inner(), 42); } // multiple reads should be Ready #[test] fn read_shared() { let rwlock = RwLock::new(100); let mut t1 = spawn(rwlock.read()); let _g1 = assert_ready!(t1.poll()); let mut t2 = spawn(rwlock.read()); let _g2 = assert_ready!(t2.poll()); } // When there is an active shared owner, exclusive access should not be possible #[test] fn write_shared_pending() { let rwlock = RwLock::new(100); let mut t1 = spawn(rwlock.read()); let _g1 = assert_ready!(t1.poll()); let mut t2 = spawn(rwlock.write()); assert_pending!(t2.poll()); } // When there is an active exclusive owner, subsequent exclusive access should not be possible #[test] fn read_exclusive_pending() { let rwlock = RwLock::new(100); let mut t1 = spawn(rwlock.write()); let _g1 = assert_ready!(t1.poll()); let mut t2 = spawn(rwlock.read()); assert_pending!(t2.poll()); } // If the max shared access is reached and subsequent shared access is pending // should be made available when one of the shared accesses is dropped #[test] fn exhaust_reading() { let rwlock = RwLock::with_max_readers(100, 1024); let mut reads = Vec::new(); loop { let mut t = spawn(rwlock.read()); match t.poll() { Poll::Ready(guard) => reads.push(guard), Poll::Pending => break, } } let mut t1 = spawn(rwlock.read()); assert_pending!(t1.poll()); let g2 = reads.pop().unwrap(); drop(g2); assert!(t1.is_woken()); let 
_g1 = assert_ready!(t1.poll()); } // When there is an active exclusive owner, subsequent exclusive access should not be possible #[test] fn write_exclusive_pending() { let rwlock = RwLock::new(100); let mut t1 = spawn(rwlock.write()); let _g1 = assert_ready!(t1.poll()); let mut t2 = spawn(rwlock.write()); assert_pending!(t2.poll()); } // When there is an active shared owner, exclusive access should be possible after shared is dropped #[test] fn write_shared_drop() { let rwlock = RwLock::new(100); let mut t1 = spawn(rwlock.read()); let g1 = assert_ready!(t1.poll()); let mut t2 = spawn(rwlock.write()); assert_pending!(t2.poll()); drop(g1); assert!(t2.is_woken()); let _g2 = assert_ready!(t2.poll()); } // when there is an active shared owner, and exclusive access is triggered, // subsequent shared access should not be possible as write gathers all the available semaphore permits #[test] fn write_read_shared_pending() { let rwlock = RwLock::new(100); let mut t1 = spawn(rwlock.read()); let _g1 = assert_ready!(t1.poll()); let mut t2 = spawn(rwlock.read()); let _g2 = assert_ready!(t2.poll()); let mut t3 = spawn(rwlock.write()); assert_pending!(t3.poll()); let mut t4 = spawn(rwlock.read()); assert_pending!(t4.poll()); } // when there is an active shared owner, and exclusive access is triggered, // reading should be possible after pending exclusive access is dropped #[test] fn write_read_shared_drop_pending() { let rwlock = RwLock::new(100); let mut t1 = spawn(rwlock.read()); let _g1 = assert_ready!(t1.poll()); let mut t2 = spawn(rwlock.write()); assert_pending!(t2.poll()); let mut t3 = spawn(rwlock.read()); assert_pending!(t3.poll()); drop(t2); assert!(t3.is_woken()); let _t3 = assert_ready!(t3.poll()); } // Acquire an RwLock nonexclusively by a single task #[maybe_tokio_test] async fn read_uncontested() { let rwlock = RwLock::new(100); let result = *rwlock.read().await; assert_eq!(result, 100); } // Acquire an uncontested RwLock in exclusive mode #[maybe_tokio_test] async 
fn write_uncontested() { let rwlock = RwLock::new(100); let mut result = rwlock.write().await; *result += 50; assert_eq!(*result, 150); } // RwLocks should be acquired in the order that their Futures are waited upon. #[maybe_tokio_test] async fn write_order() { let rwlock = RwLock::<Vec<u32>>::new(vec![]); let fut2 = rwlock.write().map(|mut guard| guard.push(2)); let fut1 = rwlock.write().map(|mut guard| guard.push(1)); fut1.await; fut2.await; let g = rwlock.read().await; assert_eq!(*g, vec![1, 2]); } // A single RwLock is contested by tasks in multiple threads #[cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi doesn't support threads #[cfg_attr(miri, ignore)] // Too slow on miri. #[tokio::test(flavor = "multi_thread", worker_threads = 8)] async fn multithreaded() { use futures::stream::{self, StreamExt}; use std::sync::Arc; use tokio::sync::Barrier; let barrier = Arc::new(Barrier::new(5)); let rwlock = Arc::new(RwLock::<u32>::new(0)); let rwclone1 = rwlock.clone(); let rwclone2 = rwlock.clone(); let rwclone3 = rwlock.clone(); let rwclone4 = rwlock.clone(); let b1 = barrier.clone(); tokio::spawn(async move { stream::iter(0..1000) .for_each(move |_| { let rwlock = rwclone1.clone(); async move { let mut guard = rwlock.write().await; *guard += 2; } }) .await; b1.wait().await; }); let b2 = barrier.clone(); tokio::spawn(async move { stream::iter(0..1000) .for_each(move |_| { let rwlock = rwclone2.clone(); async move { let mut guard = rwlock.write().await; *guard += 3; } }) .await; b2.wait().await; }); let b3 = barrier.clone(); tokio::spawn(async move { stream::iter(0..1000) .for_each(move |_| { let rwlock = rwclone3.clone(); async move { let mut guard = rwlock.write().await; *guard += 5; } }) .await; b3.wait().await; }); let b4 = barrier.clone(); tokio::spawn(async move { stream::iter(0..1000) .for_each(move |_| { let rwlock = rwclone4.clone(); async move { let mut guard = rwlock.write().await; *guard += 7; } }) .await; b4.wait().await; }); 
barrier.wait().await; let g = rwlock.read().await; assert_eq!(*g, 17_000); } #[maybe_tokio_test] async fn try_write() { let lock = RwLock::new(0); let read_guard = lock.read().await; assert!(lock.try_write().is_err()); drop(read_guard); assert!(lock.try_write().is_ok()); } #[test] fn try_read_try_write() { let lock: RwLock<usize> = RwLock::new(15); { let rg1 = lock.try_read().unwrap(); assert_eq!(*rg1, 15); assert!(lock.try_write().is_err()); let rg2 = lock.try_read().unwrap(); assert_eq!(*rg2, 15) } { let mut wg = lock.try_write().unwrap(); *wg = 1515; assert!(lock.try_read().is_err()) } assert_eq!(*lock.try_read().unwrap(), 1515); } #[maybe_tokio_test] async fn downgrade_map() { let lock = RwLock::new(0); let write_guard = lock.write().await; let mut read_t = spawn(lock.read()); // We can't create a read when a write exists assert_pending!(read_t.poll()); // During the call to `f`, `read_t` doesn't have access yet. let read_guard1 = RwLockWriteGuard::downgrade_map(write_guard, |v| { assert_pending!(read_t.poll()); v }); // After the downgrade, `read_t` got the lock let read_guard2 = assert_ready!(read_t.poll()); // Ensure they're equal, as we return the original value assert_eq!(&*read_guard1 as *const _, &*read_guard2 as *const _); } #[maybe_tokio_test] async fn try_downgrade_map() { let lock = RwLock::new(0); let write_guard = lock.write().await; let mut read_t = spawn(lock.read()); // We can't create a read when a write exists assert_pending!(read_t.poll()); // During the call to `f`, `read_t` doesn't have access yet. 
let write_guard = RwLockWriteGuard::try_downgrade_map(write_guard, |_| { assert_pending!(read_t.poll()); None::<&()> }) .expect_err("downgrade didn't fail"); // After `f` returns `None`, `read_t` doesn't have access assert_pending!(read_t.poll()); // After `f` returns `Some`, `read_t` does have access let read_guard1 = RwLockWriteGuard::try_downgrade_map(write_guard, |v| Some(v)) .expect("downgrade didn't succeed"); let read_guard2 = assert_ready!(read_t.poll()); // Ensure they're equal, as we return the original value assert_eq!(&*read_guard1 as *const _, &*read_guard2 as *const _); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/time_pause.rs
tokio/tests/time_pause.rs
#![warn(rust_2018_idioms)] #![cfg(feature = "full")] #![cfg(not(miri))] // Too slow on miri. use rand::SeedableRng; use rand::{rngs::StdRng, Rng}; use tokio::time::{self, Duration, Instant, Sleep}; use tokio_test::{assert_elapsed, assert_pending, assert_ready, assert_ready_eq, task}; #[cfg(not(target_os = "wasi"))] use tokio_test::assert_err; use std::{ future::Future, pin::Pin, task::{Context, Poll}, }; #[tokio::test] async fn pause_time_in_main() { tokio::time::pause(); } #[tokio::test] async fn pause_time_in_task() { let t = tokio::spawn(async { tokio::time::pause(); }); t.await.unwrap(); } #[cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi doesn't support threads #[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[should_panic] async fn pause_time_in_main_threads() { tokio::time::pause(); } #[cfg_attr(panic = "abort", ignore)] #[cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi doesn't support threads #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn pause_time_in_spawn_threads() { let t = tokio::spawn(async { tokio::time::pause(); }); assert_err!(t.await); } #[test] fn paused_time_is_deterministic() { let run_1 = paused_time_stress_run(); let run_2 = paused_time_stress_run(); assert_eq!(run_1, run_2); } #[tokio::main(flavor = "current_thread", start_paused = true)] async fn paused_time_stress_run() -> Vec<Duration> { let mut rng = StdRng::seed_from_u64(1); let mut times = vec![]; let start = Instant::now(); for _ in 0..10_000 { let sleep = rng.random_range(Duration::from_secs(0)..Duration::from_secs(1)); time::sleep(sleep).await; times.push(start.elapsed()); } times } #[tokio::test(start_paused = true)] async fn advance_after_poll() { time::sleep(ms(1)).await; let start = Instant::now(); let mut sleep = task::spawn(time::sleep_until(start + ms(300))); assert_pending!(sleep.poll()); let before = Instant::now(); time::advance(ms(100)).await; assert_elapsed!(before, ms(100)); assert_pending!(sleep.poll()); } 
#[tokio::test(start_paused = true)] async fn sleep_no_poll() { let start = Instant::now(); // TODO: Skip this time::advance(ms(1)).await; let mut sleep = task::spawn(time::sleep_until(start + ms(300))); let before = Instant::now(); time::advance(ms(100)).await; assert_elapsed!(before, ms(100)); assert_pending!(sleep.poll()); } enum State { Begin, AwaitingAdvance(Pin<Box<dyn Future<Output = ()>>>), AfterAdvance, } struct Tester { sleep: Pin<Box<Sleep>>, state: State, before: Option<Instant>, poll: bool, } impl Future for Tester { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { match &mut self.state { State::Begin => { if self.poll { assert_pending!(self.sleep.as_mut().poll(cx)); } self.before = Some(Instant::now()); let advance_fut = Box::pin(time::advance(ms(100))); self.state = State::AwaitingAdvance(advance_fut); self.poll(cx) } State::AwaitingAdvance(ref mut advance_fut) => match advance_fut.as_mut().poll(cx) { Poll::Pending => Poll::Pending, Poll::Ready(()) => { self.state = State::AfterAdvance; self.poll(cx) } }, State::AfterAdvance => { assert_elapsed!(self.before.unwrap(), ms(100)); assert_pending!(self.sleep.as_mut().poll(cx)); Poll::Ready(()) } } } } #[tokio::test(start_paused = true)] async fn sleep_same_task() { let start = Instant::now(); // TODO: Skip this time::advance(ms(1)).await; let sleep = Box::pin(time::sleep_until(start + ms(300))); Tester { sleep, state: State::Begin, before: None, poll: true, } .await; } #[tokio::test(start_paused = true)] async fn sleep_same_task_no_poll() { let start = Instant::now(); // TODO: Skip this time::advance(ms(1)).await; let sleep = Box::pin(time::sleep_until(start + ms(300))); Tester { sleep, state: State::Begin, before: None, poll: false, } .await; } #[tokio::test(start_paused = true)] async fn interval() { let start = Instant::now(); // TODO: Skip this time::advance(ms(1)).await; let mut i = task::spawn(time::interval_at(start, ms(300))); 
assert_ready_eq!(poll_next(&mut i), start); assert_pending!(poll_next(&mut i)); let before = Instant::now(); time::advance(ms(100)).await; assert_elapsed!(before, ms(100)); assert_pending!(poll_next(&mut i)); let before = Instant::now(); time::advance(ms(200)).await; assert_elapsed!(before, ms(200)); assert_ready_eq!(poll_next(&mut i), start + ms(300)); assert_pending!(poll_next(&mut i)); let before = Instant::now(); time::advance(ms(400)).await; assert_elapsed!(before, ms(400)); assert_ready_eq!(poll_next(&mut i), start + ms(600)); assert_pending!(poll_next(&mut i)); let before = Instant::now(); time::advance(ms(500)).await; assert_elapsed!(before, ms(500)); assert_ready_eq!(poll_next(&mut i), start + ms(900)); assert_ready_eq!(poll_next(&mut i), start + ms(1200)); assert_pending!(poll_next(&mut i)); } #[tokio::test(start_paused = true)] async fn test_time_advance_sub_ms() { let now = Instant::now(); let dur = Duration::from_micros(51_592); time::advance(dur).await; assert_eq!(now.elapsed(), dur); let now = Instant::now(); let dur = Duration::from_micros(1); time::advance(dur).await; assert_eq!(now.elapsed(), dur); } #[tokio::test(start_paused = true)] async fn test_time_advance_3ms_and_change() { let now = Instant::now(); let dur = Duration::from_micros(3_141_592); time::advance(dur).await; assert_eq!(now.elapsed(), dur); let now = Instant::now(); let dur = Duration::from_micros(3_123_456); time::advance(dur).await; assert_eq!(now.elapsed(), dur); } #[tokio::test(start_paused = true)] async fn regression_3710_with_submillis_advance() { let start = Instant::now(); time::advance(Duration::from_millis(1)).await; let mut sleep = task::spawn(time::sleep_until(start + Duration::from_secs(60))); assert_pending!(sleep.poll()); let before = Instant::now(); let dur = Duration::from_micros(51_592); time::advance(dur).await; assert_eq!(before.elapsed(), dur); assert_pending!(sleep.poll()); } #[tokio::test(start_paused = true)] async fn exact_1ms_advance() { let now = 
Instant::now(); let dur = Duration::from_millis(1); time::advance(dur).await; assert_eq!(now.elapsed(), dur); let now = Instant::now(); let dur = Duration::from_millis(1); time::advance(dur).await; assert_eq!(now.elapsed(), dur); } #[tokio::test(start_paused = true)] async fn advance_once_with_timer() { let mut sleep = task::spawn(time::sleep(Duration::from_millis(1))); assert_pending!(sleep.poll()); time::advance(Duration::from_micros(250)).await; assert_pending!(sleep.poll()); time::advance(Duration::from_micros(1500)).await; assert!(sleep.is_woken()); assert_ready!(sleep.poll()); } #[tokio::test(start_paused = true)] async fn advance_multi_with_timer() { // Round to the nearest ms // time::sleep(Duration::from_millis(1)).await; let mut sleep = task::spawn(time::sleep(Duration::from_millis(1))); assert_pending!(sleep.poll()); time::advance(Duration::from_micros(250)).await; assert_pending!(sleep.poll()); time::advance(Duration::from_micros(250)).await; assert_pending!(sleep.poll()); time::advance(Duration::from_micros(250)).await; assert_pending!(sleep.poll()); time::advance(Duration::from_micros(250)).await; assert!(sleep.is_woken()); assert_ready!(sleep.poll()); } fn poll_next(interval: &mut task::Spawn<time::Interval>) -> Poll<Instant> { interval.enter(|cx, mut interval| interval.poll_tick(cx)) } fn ms(n: u64) -> Duration { Duration::from_millis(n) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/task_local.rs
tokio/tests/task_local.rs
#![cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi doesn't support threads use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; use tokio::sync::oneshot; #[tokio::test(flavor = "multi_thread")] async fn local() { tokio::task_local! { static REQ_ID: u32; pub static FOO: bool; } let j1 = tokio::spawn(REQ_ID.scope(1, async move { assert_eq!(REQ_ID.get(), 1); assert_eq!(REQ_ID.get(), 1); })); let j2 = tokio::spawn(REQ_ID.scope(2, async move { REQ_ID.with(|v| { assert_eq!(REQ_ID.get(), 2); assert_eq!(*v, 2); }); tokio::time::sleep(std::time::Duration::from_millis(10)).await; assert_eq!(REQ_ID.get(), 2); })); let j3 = tokio::spawn(FOO.scope(true, async move { assert!(FOO.get()); })); j1.await.unwrap(); j2.await.unwrap(); j3.await.unwrap(); } #[tokio::test] async fn task_local_available_on_abort() { tokio::task_local! { static KEY: u32; } struct MyFuture { tx_poll: Option<oneshot::Sender<()>>, tx_drop: Option<oneshot::Sender<u32>>, } impl Future for MyFuture { type Output = (); fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> { if let Some(tx_poll) = self.tx_poll.take() { let _ = tx_poll.send(()); } Poll::Pending } } impl Drop for MyFuture { fn drop(&mut self) { let _ = self.tx_drop.take().unwrap().send(KEY.get()); } } let (tx_drop, rx_drop) = oneshot::channel(); let (tx_poll, rx_poll) = oneshot::channel(); let h = tokio::spawn(KEY.scope( 42, MyFuture { tx_poll: Some(tx_poll), tx_drop: Some(tx_drop), }, )); rx_poll.await.unwrap(); h.abort(); assert_eq!(rx_drop.await.unwrap(), 42); let err = h.await.unwrap_err(); if !err.is_cancelled() { if let Ok(panic) = err.try_into_panic() { std::panic::resume_unwind(panic); } else { panic!(); } } } #[tokio::test] async fn task_local_available_on_completion_drop() { tokio::task_local! 
{ static KEY: u32; } struct MyFuture { tx: Option<oneshot::Sender<u32>>, } impl Future for MyFuture { type Output = (); fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> { Poll::Ready(()) } } impl Drop for MyFuture { fn drop(&mut self) { let _ = self.tx.take().unwrap().send(KEY.get()); } } let (tx, rx) = oneshot::channel(); let h = tokio::spawn(KEY.scope(42, MyFuture { tx: Some(tx) })); assert_eq!(rx.await.unwrap(), 42); h.await.unwrap(); } #[tokio::test] async fn take_value() { tokio::task_local! { static KEY: u32 } let fut = KEY.scope(1, async {}); let mut pinned = Box::pin(fut); assert_eq!(pinned.as_mut().take_value(), Some(1)); assert_eq!(pinned.as_mut().take_value(), None); } #[tokio::test] async fn poll_after_take_value_should_fail() { tokio::task_local! { static KEY: u32 } let fut = KEY.scope(1, async { let result = KEY.try_with(|_| {}); // The task local value no longer exists. assert!(result.is_err()); }); let mut fut = Box::pin(fut); fut.as_mut().take_value(); // Poll the future after `take_value` has been called fut.await; } #[tokio::test] async fn get_value() { tokio::task_local! { static KEY: u32 } KEY.scope(1, async { assert_eq!(KEY.get(), 1); assert_eq!(KEY.try_get().unwrap(), 1); }) .await; let fut = KEY.scope(1, async { let result = KEY.try_get(); // The task local value no longer exists. assert!(result.is_err()); }); let mut fut = Box::pin(fut); fut.as_mut().take_value(); // Poll the future after `take_value` has been called fut.await; }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/tcp_stream.rs
tokio/tests/tcp_stream.rs
#![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi doesn't support bind use tokio::io::{AsyncReadExt, AsyncWriteExt, Interest}; use tokio::net::{TcpListener, TcpStream}; use tokio::try_join; use tokio_test::task; use tokio_test::{assert_ok, assert_pending, assert_ready_ok}; use std::future::poll_fn; use std::io; use std::task::Poll; use std::time::Duration; #[tokio::test] #[cfg_attr(miri, ignore)] // No `socket` on miri. #[expect(deprecated)] // set_linger is deprecated async fn set_linger() { let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let stream = TcpStream::connect(listener.local_addr().unwrap()) .await .unwrap(); assert_ok!(stream.set_linger(Some(Duration::from_secs(1)))); assert_eq!(stream.linger().unwrap().unwrap().as_secs(), 1); assert_ok!(stream.set_linger(None)); assert!(stream.linger().unwrap().is_none()); } #[tokio::test] #[cfg_attr(miri, ignore)] // No `socket` on miri. async fn try_read_write() { const DATA: &[u8] = b"this is some data to write to the socket"; // Create listener let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); // Create socket pair let client = TcpStream::connect(listener.local_addr().unwrap()) .await .unwrap(); let (server, _) = listener.accept().await.unwrap(); let mut written = DATA.to_vec(); // Track the server receiving data let mut readable = task::spawn(server.readable()); assert_pending!(readable.poll()); // Write data. 
client.writable().await.unwrap(); assert_eq!(DATA.len(), client.try_write(DATA).unwrap()); // The task should be notified while !readable.is_woken() { tokio::task::yield_now().await; } // Fill the write buffer using non-vectored I/O loop { // Still ready let mut writable = task::spawn(client.writable()); assert_ready_ok!(writable.poll()); match client.try_write(DATA) { Ok(n) => written.extend(&DATA[..n]), Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { break; } Err(e) => panic!("error = {e:?}"), } } { // Write buffer full let mut writable = task::spawn(client.writable()); assert_pending!(writable.poll()); // Drain the socket from the server end using non-vectored I/O let mut read = vec![0; written.len()]; let mut i = 0; while i < read.len() { server.readable().await.unwrap(); match server.try_read(&mut read[i..]) { Ok(n) => i += n, Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue, Err(e) => panic!("error = {e:?}"), } } assert_eq!(read, written); } written.clear(); client.writable().await.unwrap(); // Fill the write buffer using vectored I/O let data_bufs: Vec<_> = DATA.chunks(10).map(io::IoSlice::new).collect(); loop { // Still ready let mut writable = task::spawn(client.writable()); assert_ready_ok!(writable.poll()); match client.try_write_vectored(&data_bufs) { Ok(n) => written.extend(&DATA[..n]), Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { break; } Err(e) => panic!("error = {e:?}"), } } { // Write buffer full let mut writable = task::spawn(client.writable()); assert_pending!(writable.poll()); // Drain the socket from the server end using vectored I/O let mut read = vec![0; written.len()]; let mut i = 0; while i < read.len() { server.readable().await.unwrap(); let mut bufs: Vec<_> = read[i..] 
.chunks_mut(0x10000) .map(io::IoSliceMut::new) .collect(); match server.try_read_vectored(&mut bufs) { Ok(n) => i += n, Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue, Err(e) => panic!("error = {e:?}"), } } assert_eq!(read, written); } // Now, we listen for shutdown drop(client); loop { let ready = server.ready(Interest::READABLE).await.unwrap(); if ready.is_read_closed() { return; } else { tokio::task::yield_now().await; } } } #[test] fn buffer_not_included_in_future() { use std::mem; const N: usize = 4096; let fut = async { let stream = TcpStream::connect("127.0.0.1:8080").await.unwrap(); loop { stream.readable().await.unwrap(); let mut buf = [0; N]; let n = stream.try_read(&mut buf[..]).unwrap(); if n == 0 { break; } } }; let n = mem::size_of_val(&fut); assert!(n < 1000); } macro_rules! assert_readable_by_polling { ($stream:expr) => { assert_ok!(poll_fn(|cx| $stream.poll_read_ready(cx)).await); }; } macro_rules! assert_not_readable_by_polling { ($stream:expr) => { poll_fn(|cx| { assert_pending!($stream.poll_read_ready(cx)); Poll::Ready(()) }) .await; }; } macro_rules! assert_writable_by_polling { ($stream:expr) => { assert_ok!(poll_fn(|cx| $stream.poll_write_ready(cx)).await); }; } macro_rules! assert_not_writable_by_polling { ($stream:expr) => { poll_fn(|cx| { assert_pending!($stream.poll_write_ready(cx)); Poll::Ready(()) }) .await; }; } #[tokio::test] #[cfg_attr(miri, ignore)] // No `socket` on miri. async fn poll_read_ready() { let (mut client, mut server) = create_pair().await; // Initial state - not readable. assert_not_readable_by_polling!(server); // There is data in the buffer - readable. assert_ok!(client.write_all(b"ping").await); assert_readable_by_polling!(server); // Readable until calls to `poll_read` return `Poll::Pending`. 
let mut buf = [0u8; 4]; assert_ok!(server.read_exact(&mut buf).await); assert_readable_by_polling!(server); read_until_pending(&mut server); assert_not_readable_by_polling!(server); // Detect the client disconnect. drop(client); assert_readable_by_polling!(server); } #[tokio::test] #[cfg_attr(miri, ignore)] // No `socket` on miri. async fn poll_write_ready() { let (mut client, server) = create_pair().await; // Initial state - writable. assert_writable_by_polling!(client); // No space to write - not writable. write_until_pending(&mut client); assert_not_writable_by_polling!(client); // Detect the server disconnect. drop(server); assert_writable_by_polling!(client); } async fn create_pair() -> (TcpStream, TcpStream) { let listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await); let addr = assert_ok!(listener.local_addr()); let (client, (server, _)) = assert_ok!(try_join!(TcpStream::connect(&addr), listener.accept())); (client, server) } fn read_until_pending(stream: &mut TcpStream) -> usize { let mut buf = vec![0u8; 1024 * 1024]; let mut total = 0; loop { match stream.try_read(&mut buf) { Ok(n) => total += n, Err(err) => { assert_eq!(err.kind(), io::ErrorKind::WouldBlock); break; } } } total } fn write_until_pending(stream: &mut TcpStream) -> usize { let buf = vec![0u8; 1024 * 1024]; let mut total = 0; loop { match stream.try_write(&buf) { Ok(n) => total += n, Err(err) => { assert_eq!(err.kind(), io::ErrorKind::WouldBlock); break; } } } total } #[tokio::test] #[cfg_attr(miri, ignore)] // No `socket` on miri. 
async fn try_read_buf() { const DATA: &[u8] = b"this is some data to write to the socket"; // Create listener let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); // Create socket pair let client = TcpStream::connect(listener.local_addr().unwrap()) .await .unwrap(); let (server, _) = listener.accept().await.unwrap(); let mut written = DATA.to_vec(); // Track the server receiving data let mut readable = task::spawn(server.readable()); assert_pending!(readable.poll()); // Write data. client.writable().await.unwrap(); assert_eq!(DATA.len(), client.try_write(DATA).unwrap()); // The task should be notified while !readable.is_woken() { tokio::task::yield_now().await; } // Fill the write buffer loop { // Still ready let mut writable = task::spawn(client.writable()); assert_ready_ok!(writable.poll()); match client.try_write(DATA) { Ok(n) => written.extend(&DATA[..n]), Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { break; } Err(e) => panic!("error = {e:?}"), } } { // Write buffer full let mut writable = task::spawn(client.writable()); assert_pending!(writable.poll()); // Drain the socket from the server end let mut read = Vec::with_capacity(written.len()); let mut i = 0; while i < read.capacity() { server.readable().await.unwrap(); match server.try_read_buf(&mut read) { Ok(n) => i += n, Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue, Err(e) => panic!("error = {e:?}"), } } assert_eq!(read, written); } // Now, we listen for shutdown drop(client); loop { let ready = server.ready(Interest::READABLE).await.unwrap(); if ready.is_read_closed() { return; } else { tokio::task::yield_now().await; } } } // read_closed is a best effort event, so test only for no false positives. #[tokio::test] #[cfg_attr(miri, ignore)] // No `socket` on miri. 
async fn read_closed() { let (client, mut server) = create_pair().await; let mut ready_fut = task::spawn(client.ready(Interest::READABLE)); assert_pending!(ready_fut.poll()); assert_ok!(server.write_all(b"ping").await); let ready_event = assert_ok!(ready_fut.await); assert!(!ready_event.is_read_closed()); } // write_closed is a best effort event, so test only for no false positives. #[tokio::test] #[cfg_attr(miri, ignore)] // No `socket` on miri. async fn write_closed() { let (mut client, mut server) = create_pair().await; // Fill the write buffer. let write_size = write_until_pending(&mut client); let mut ready_fut = task::spawn(client.ready(Interest::WRITABLE)); assert_pending!(ready_fut.poll()); // Drain the socket to make client writable. let mut read_size = 0; while read_size < write_size { server.readable().await.unwrap(); read_size += read_until_pending(&mut server); } let ready_event = assert_ok!(ready_fut.await); assert!(!ready_event.is_write_closed()); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/io_write_all_buf.rs
tokio/tests/io_write_all_buf.rs
#![warn(rust_2018_idioms)] #![cfg(feature = "full")] use tokio::io::{AsyncWrite, AsyncWriteExt}; use tokio_test::{assert_err, assert_ok}; use bytes::{Buf, Bytes, BytesMut}; use std::cmp; use std::io; use std::pin::Pin; use std::task::{Context, Poll}; #[tokio::test] async fn write_all_buf() { struct Wr { buf: BytesMut, cnt: usize, } impl AsyncWrite for Wr { fn poll_write( mut self: Pin<&mut Self>, _cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { let n = cmp::min(4, buf.len()); dbg!(buf); let buf = &buf[0..n]; self.cnt += 1; self.buf.extend(buf); Ok(buf.len()).into() } fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> { Ok(()).into() } fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> { Ok(()).into() } } let mut wr = Wr { buf: BytesMut::with_capacity(64), cnt: 0, }; let mut buf = Bytes::from_static(b"hello").chain(Bytes::from_static(b"world")); assert_ok!(wr.write_all_buf(&mut buf).await); assert_eq!(wr.buf, b"helloworld"[..]); // expect 4 writes, [hell],[o],[worl],[d] assert_eq!(wr.cnt, 4); assert!(!buf.has_remaining()); } #[tokio::test] async fn write_buf_err() { /// Error out after writing the first 4 bytes struct Wr { cnt: usize, } impl AsyncWrite for Wr { fn poll_write( mut self: Pin<&mut Self>, _cx: &mut Context<'_>, _buf: &[u8], ) -> Poll<io::Result<usize>> { self.cnt += 1; if self.cnt == 2 { return Poll::Ready(Err(io::Error::new(io::ErrorKind::Other, "whoops"))); } Poll::Ready(Ok(4)) } fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> { Ok(()).into() } fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> { Ok(()).into() } } let mut wr = Wr { cnt: 0 }; let mut buf = Bytes::from_static(b"hello").chain(Bytes::from_static(b"world")); assert_err!(wr.write_all_buf(&mut buf).await); assert_eq!( buf.copy_to_bytes(buf.remaining()), Bytes::from_static(b"oworld") ); } #[tokio::test] async fn write_all_buf_vectored() { 
struct Wr { buf: BytesMut, } impl AsyncWrite for Wr { fn poll_write( self: Pin<&mut Self>, _cx: &mut Context<'_>, _buf: &[u8], ) -> Poll<io::Result<usize>> { // When executing `write_all_buf` with this writer, // `poll_write` is not called. panic!("shouldn't be called") } fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> { Ok(()).into() } fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> { Ok(()).into() } fn poll_write_vectored( mut self: Pin<&mut Self>, _cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll<Result<usize, io::Error>> { for buf in bufs { self.buf.extend_from_slice(buf); } let n = self.buf.len(); Ok(n).into() } fn is_write_vectored(&self) -> bool { // Enable vectored write. true } } let mut wr = Wr { buf: BytesMut::with_capacity(64), }; let mut buf = Bytes::from_static(b"hello") .chain(Bytes::from_static(b" ")) .chain(Bytes::from_static(b"world")); wr.write_all_buf(&mut buf).await.unwrap(); assert_eq!(&wr.buf[..], b"hello world"); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/signal_usr1.rs
tokio/tests/signal_usr1.rs
#![warn(rust_2018_idioms)] #![cfg(feature = "full")] #![cfg(unix)] #![cfg(not(miri))] // No `sigaction` in Miri. mod support { pub mod signal; } use support::signal::send_signal; use tokio::signal::unix::{signal, SignalKind}; use tokio_test::assert_ok; #[tokio::test] async fn signal_usr1() { let mut signal = assert_ok!( signal(SignalKind::user_defined1()), "failed to create signal" ); send_signal(libc::SIGUSR1); signal.recv().await; }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/sync_notify.rs
tokio/tests/sync_notify.rs
#![warn(rust_2018_idioms)] #![cfg(feature = "sync")] #[cfg(all(target_family = "wasm", not(target_os = "wasi")))] use wasm_bindgen_test::wasm_bindgen_test as test; use tokio::sync::Notify; use tokio_test::task::spawn; use tokio_test::*; #[allow(unused)] trait AssertSend: Send + Sync {} impl AssertSend for Notify {} #[test] fn notify_notified_one() { let notify = Notify::new(); let mut notified = spawn(async { notify.notified().await }); notify.notify_one(); assert_ready!(notified.poll()); } #[test] fn notify_multi_notified_one() { let notify = Notify::new(); let mut notified1 = spawn(async { notify.notified().await }); let mut notified2 = spawn(async { notify.notified().await }); // add two waiters into the queue assert_pending!(notified1.poll()); assert_pending!(notified2.poll()); // should wakeup the first one notify.notify_one(); assert_ready!(notified1.poll()); assert_pending!(notified2.poll()); } #[test] fn notify_multi_notified_last() { let notify = Notify::new(); let mut notified1 = spawn(async { notify.notified().await }); let mut notified2 = spawn(async { notify.notified().await }); // add two waiters into the queue assert_pending!(notified1.poll()); assert_pending!(notified2.poll()); // should wakeup the last one notify.notify_last(); assert_pending!(notified1.poll()); assert_ready!(notified2.poll()); } #[test] fn notified_one_notify() { let notify = Notify::new(); let mut notified = spawn(async { notify.notified().await }); assert_pending!(notified.poll()); notify.notify_one(); assert!(notified.is_woken()); assert_ready!(notified.poll()); } #[test] fn notified_multi_notify() { let notify = Notify::new(); let mut notified1 = spawn(async { notify.notified().await }); let mut notified2 = spawn(async { notify.notified().await }); assert_pending!(notified1.poll()); assert_pending!(notified2.poll()); notify.notify_one(); assert!(notified1.is_woken()); assert!(!notified2.is_woken()); assert_ready!(notified1.poll()); assert_pending!(notified2.poll()); } #[test] 
fn notify_notified_multi() { let notify = Notify::new(); notify.notify_one(); let mut notified1 = spawn(async { notify.notified().await }); let mut notified2 = spawn(async { notify.notified().await }); assert_ready!(notified1.poll()); assert_pending!(notified2.poll()); notify.notify_one(); assert!(notified2.is_woken()); assert_ready!(notified2.poll()); } #[test] fn notified_drop_notified_notify() { let notify = Notify::new(); let mut notified1 = spawn(async { notify.notified().await }); let mut notified2 = spawn(async { notify.notified().await }); assert_pending!(notified1.poll()); drop(notified1); assert_pending!(notified2.poll()); notify.notify_one(); assert!(notified2.is_woken()); assert_ready!(notified2.poll()); } #[test] fn notified_multi_notify_drop_one() { let notify = Notify::new(); let mut notified1 = spawn(async { notify.notified().await }); let mut notified2 = spawn(async { notify.notified().await }); assert_pending!(notified1.poll()); assert_pending!(notified2.poll()); notify.notify_one(); assert!(notified1.is_woken()); assert!(!notified2.is_woken()); drop(notified1); assert!(notified2.is_woken()); assert_ready!(notified2.poll()); } #[test] fn notified_multi_notify_one_drop() { let notify = Notify::new(); let mut notified1 = spawn(async { notify.notified().await }); let mut notified2 = spawn(async { notify.notified().await }); let mut notified3 = spawn(async { notify.notified().await }); // add waiters by order of poll execution assert_pending!(notified1.poll()); assert_pending!(notified2.poll()); assert_pending!(notified3.poll()); // by default fifo notify.notify_one(); drop(notified1); // next waiter should be the one to be to woken up assert_ready!(notified2.poll()); assert_pending!(notified3.poll()); } #[test] fn notified_multi_notify_last_drop() { let notify = Notify::new(); let mut notified1 = spawn(async { notify.notified().await }); let mut notified2 = spawn(async { notify.notified().await }); let mut notified3 = spawn(async { 
notify.notified().await }); // add waiters by order of poll execution assert_pending!(notified1.poll()); assert_pending!(notified2.poll()); assert_pending!(notified3.poll()); notify.notify_last(); drop(notified3); // latest waiter added should be the one to woken up assert_ready!(notified2.poll()); assert_pending!(notified1.poll()); } #[test] fn notify_in_drop_after_wake() { use futures::task::ArcWake; use std::future::Future; use std::sync::Arc; let notify = Arc::new(Notify::new()); struct NotifyOnDrop(Arc<Notify>); impl ArcWake for NotifyOnDrop { fn wake_by_ref(_arc_self: &Arc<Self>) {} } impl Drop for NotifyOnDrop { fn drop(&mut self) { self.0.notify_waiters(); } } let mut fut = Box::pin(async { notify.notified().await; }); { let waker = futures::task::waker(Arc::new(NotifyOnDrop(notify.clone()))); let mut cx = std::task::Context::from_waker(&waker); assert!(fut.as_mut().poll(&mut cx).is_pending()); } // Now, notifying **should not** deadlock notify.notify_waiters(); } #[test] fn notify_one_after_dropped_all() { let notify = Notify::new(); let mut notified1 = spawn(async { notify.notified().await }); assert_pending!(notified1.poll()); notify.notify_waiters(); notify.notify_one(); drop(notified1); let mut notified2 = spawn(async { notify.notified().await }); assert_ready!(notified2.poll()); } #[test] fn test_notify_one_not_enabled() { let notify = Notify::new(); let mut future = spawn(notify.notified()); notify.notify_one(); assert_ready!(future.poll()); } #[test] fn test_notify_one_after_enable() { let notify = Notify::new(); let mut future = spawn(notify.notified()); future.enter(|_, fut| assert!(!fut.enable())); notify.notify_one(); assert_ready!(future.poll()); future.enter(|_, fut| assert!(fut.enable())); } #[test] fn test_poll_after_enable() { let notify = Notify::new(); let mut future = spawn(notify.notified()); future.enter(|_, fut| assert!(!fut.enable())); assert_pending!(future.poll()); } #[test] fn test_enable_after_poll() { let notify = Notify::new(); 
let mut future = spawn(notify.notified()); assert_pending!(future.poll()); future.enter(|_, fut| assert!(!fut.enable())); } #[test] fn test_enable_consumes_permit() { let notify = Notify::new(); // Add a permit. notify.notify_one(); let mut future1 = spawn(notify.notified()); future1.enter(|_, fut| assert!(fut.enable())); let mut future2 = spawn(notify.notified()); future2.enter(|_, fut| assert!(!fut.enable())); } #[test] fn test_waker_update() { use futures::task::noop_waker; use std::future::Future; use std::task::Context; let notify = Notify::new(); let mut future = spawn(notify.notified()); let noop = noop_waker(); future.enter(|_, fut| assert_pending!(fut.poll(&mut Context::from_waker(&noop)))); assert_pending!(future.poll()); notify.notify_one(); assert!(future.is_woken()); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/fs_dir.rs
tokio/tests/fs_dir.rs
#![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(target_os = "wasi")))] // WASI does not support all fs operations use tokio::fs; use tokio_test::{assert_err, assert_ok}; use std::sync::{Arc, Mutex}; use tempfile::tempdir; #[tokio::test] async fn create_dir() { let base_dir = tempdir().unwrap(); let new_dir = base_dir.path().join("foo"); let new_dir_2 = new_dir.clone(); assert_ok!(fs::create_dir(new_dir).await); assert!(new_dir_2.is_dir()); } #[tokio::test] async fn create_all() { let base_dir = tempdir().unwrap(); let new_dir = base_dir.path().join("foo").join("bar"); let new_dir_2 = new_dir.clone(); assert_ok!(fs::create_dir_all(new_dir).await); assert!(new_dir_2.is_dir()); } #[tokio::test] async fn build_dir() { let base_dir = tempdir().unwrap(); let new_dir = base_dir.path().join("foo").join("bar"); let new_dir_2 = new_dir.clone(); assert_ok!(fs::DirBuilder::new().recursive(true).create(new_dir).await); assert!(new_dir_2.is_dir()); assert_err!( fs::DirBuilder::new() .recursive(false) .create(new_dir_2) .await ); } #[tokio::test] #[cfg(unix)] async fn build_dir_mode_read_only() { let base_dir = tempdir().unwrap(); let new_dir = base_dir.path().join("abc"); assert_ok!( fs::DirBuilder::new() .recursive(true) .mode(0o444) .create(&new_dir) .await ); assert!(fs::metadata(new_dir) .await .expect("metadata result") .permissions() .readonly()); } #[tokio::test] async fn remove() { let base_dir = tempdir().unwrap(); let new_dir = base_dir.path().join("foo"); let new_dir_2 = new_dir.clone(); std::fs::create_dir(new_dir.clone()).unwrap(); assert_ok!(fs::remove_dir(new_dir).await); assert!(!new_dir_2.exists()); } #[tokio::test] async fn read_inherent() { let base_dir = tempdir().unwrap(); let p = base_dir.path(); std::fs::create_dir(p.join("aa")).unwrap(); std::fs::create_dir(p.join("bb")).unwrap(); std::fs::create_dir(p.join("cc")).unwrap(); let files = Arc::new(Mutex::new(Vec::new())); let f = files.clone(); let p = p.to_path_buf(); let mut entries = 
fs::read_dir(p).await.unwrap(); while let Some(e) = assert_ok!(entries.next_entry().await) { let s = e.file_name().to_str().unwrap().to_string(); f.lock().unwrap().push(s); } let mut files = files.lock().unwrap(); files.sort(); // because the order is not guaranteed assert_eq!( *files, vec!["aa".to_string(), "bb".to_string(), "cc".to_string()] ); } #[tokio::test] async fn read_dir_entry_info() { let temp_dir = tempdir().unwrap(); let file_path = temp_dir.path().join("a.txt"); fs::write(&file_path, b"Hello File!").await.unwrap(); let mut dir = fs::read_dir(temp_dir.path()).await.unwrap(); let first_entry = dir.next_entry().await.unwrap().unwrap(); assert_eq!(first_entry.path(), file_path); assert_eq!(first_entry.file_name(), "a.txt"); assert!(first_entry.metadata().await.unwrap().is_file()); assert!(first_entry.file_type().await.unwrap().is_file()); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/support/mpsc_stream.rs
tokio/tests/support/mpsc_stream.rs
#![allow(dead_code)] use std::pin::Pin; use std::task::{Context, Poll}; use tokio::sync::mpsc::{self, Receiver, Sender, UnboundedReceiver, UnboundedSender}; use tokio_stream::Stream; struct UnboundedStream<T> { recv: UnboundedReceiver<T>, } impl<T> Stream for UnboundedStream<T> { type Item = T; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> { Pin::into_inner(self).recv.poll_recv(cx) } } pub fn unbounded_channel_stream<T: Unpin>() -> (UnboundedSender<T>, impl Stream<Item = T>) { let (tx, rx) = mpsc::unbounded_channel(); let stream = UnboundedStream { recv: rx }; (tx, stream) } struct BoundedStream<T> { recv: Receiver<T>, } impl<T> Stream for BoundedStream<T> { type Item = T; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> { Pin::into_inner(self).recv.poll_recv(cx) } } pub fn channel_stream<T: Unpin>(size: usize) -> (Sender<T>, impl Stream<Item = T>) { let (tx, rx) = mpsc::channel(size); let stream = BoundedStream { recv: rx }; (tx, stream) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/support/io_vec.rs
tokio/tests/support/io_vec.rs
use std::io::IoSlice; use std::ops::Deref; use std::slice; pub struct IoBufs<'a, 'b>(&'b mut [IoSlice<'a>]); impl<'a, 'b> IoBufs<'a, 'b> { pub fn new(slices: &'b mut [IoSlice<'a>]) -> Self { IoBufs(slices) } pub fn is_empty(&self) -> bool { self.0.is_empty() } pub fn advance(mut self, n: usize) -> IoBufs<'a, 'b> { let mut to_remove = 0; let mut remaining_len = n; for slice in self.0.iter() { if remaining_len < slice.len() { break; } else { remaining_len -= slice.len(); to_remove += 1; } } self.0 = self.0.split_at_mut(to_remove).1; if let Some(slice) = self.0.first_mut() { let tail = &slice[remaining_len..]; // Safety: recasts slice to the original lifetime let tail = unsafe { slice::from_raw_parts(tail.as_ptr(), tail.len()) }; *slice = IoSlice::new(tail); } else if remaining_len != 0 { panic!("advance past the end of the slice vector"); } self } } impl<'a, 'b> Deref for IoBufs<'a, 'b> { type Target = [IoSlice<'a>]; fn deref(&self) -> &[IoSlice<'a>] { self.0 } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/support/signal.rs
tokio/tests/support/signal.rs
pub fn send_signal(signal: libc::c_int) { use libc::{getpid, kill}; unsafe { let pid = getpid(); assert_eq!( kill(pid, signal), 0, "kill(pid = {}, {}) failed with error: {}", pid, signal, std::io::Error::last_os_error(), ); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/support/panic.rs
tokio/tests/support/panic.rs
use std::panic; use std::sync::{Arc, Mutex}; pub fn test_panic<Func: FnOnce() + panic::UnwindSafe>(func: Func) -> Option<String> { static PANIC_MUTEX: Mutex<()> = Mutex::new(()); { let _guard = PANIC_MUTEX.lock(); let panic_file: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None)); let prev_hook = panic::take_hook(); { let panic_file = panic_file.clone(); panic::set_hook(Box::new(move |panic_info| { let panic_location = panic_info.location().unwrap(); panic_file .lock() .unwrap() .clone_from(&Some(panic_location.file().to_string())); })); } let result = panic::catch_unwind(func); // Return to the previously set panic hook (maybe default) so that we get nice error // messages in the tests. panic::set_hook(prev_hook); if result.is_err() { panic_file.lock().unwrap().clone() } else { None } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/support/leaked_buffers.rs
tokio/tests/support/leaked_buffers.rs
/// Can create buffers of arbitrary lifetime. /// Frees created buffers when dropped. /// /// This struct is of course unsafe and the fact that /// it must outlive the created slices has to be ensured by /// the programmer. /// /// Used at certain test scenarios as a safer version of /// Vec::leak, to satisfy the address sanitizer. pub struct LeakedBuffers { leaked_vecs: Vec<Box<[u8]>>, } impl LeakedBuffers { pub fn new() -> Self { Self { leaked_vecs: vec![], } } pub unsafe fn create<'a>(&mut self, size: usize) -> &'a mut [u8] { let new_mem = vec![0u8; size].into_boxed_slice(); self.leaked_vecs.push(new_mem); let new_mem = self.leaked_vecs.last_mut().unwrap(); std::slice::from_raw_parts_mut(new_mem.as_mut_ptr(), new_mem.len()) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/fuzz/fuzz_targets/fuzz_linked_list.rs
tokio/fuzz/fuzz_targets/fuzz_linked_list.rs
#![no_main] use libfuzzer_sys::fuzz_target; fuzz_target!(|data: &[u8]| { tokio::fuzz::fuzz_linked_list(data); });
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/connect-udp.rs
examples/connect-udp.rs
//! An example of hooking up stdin/stdout to a UDP stream. //! //! This example will connect to a socket address specified in the argument list //! and then forward all data read on stdin to the server, printing out all data //! received on stdout. Each line entered on stdin will be translated to a UDP //! packet which is then sent to the remote address. //! //! Note that this is not currently optimized for performance, especially //! around buffer management. Rather it's intended to show an example of //! working with a client. //! //! This example can be quite useful when interacting with the other examples in //! this repository! Many of them recommend running this as a simple "hook up //! stdin/stdout to a server" to get up and running. #![warn(rust_2018_idioms)] use tokio::io::{stdin, stdout}; use tokio::net::UdpSocket; use tokio_util::codec::{BytesCodec, FramedRead, FramedWrite}; use bytes::Bytes; use futures::{Sink, SinkExt, Stream, StreamExt}; use std::env; use std::error::Error; use std::net::SocketAddr; #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { // Parse what address we're going to connect to let args = env::args().skip(1).collect::<Vec<_>>(); let addr = args .first() .ok_or("this program requires at least one argument")?; let addr = addr.parse::<SocketAddr>()?; let stdin = FramedRead::new(stdin(), BytesCodec::new()); let stdin = stdin.map(|i| i.map(|bytes| bytes.freeze())); let stdout = FramedWrite::new(stdout(), BytesCodec::new()); connect(&addr, stdin, stdout).await?; Ok(()) } pub async fn connect( addr: &SocketAddr, stdin: impl Stream<Item = Result<Bytes, std::io::Error>> + Unpin, stdout: impl Sink<Bytes, Error = std::io::Error> + Unpin, ) -> Result<(), Box<dyn Error>> { // We'll bind our UDP socket to a local IP/port, but for now we // basically let the OS pick both of those. 
let bind_addr = if addr.ip().is_ipv4() { "0.0.0.0:0" } else { "[::]:0" }; let socket = UdpSocket::bind(&bind_addr).await?; socket.connect(addr).await?; tokio::try_join!(send(stdin, &socket), recv(stdout, &socket))?; Ok(()) } async fn send( mut stdin: impl Stream<Item = Result<Bytes, std::io::Error>> + Unpin, writer: &UdpSocket, ) -> Result<(), std::io::Error> { while let Some(item) = stdin.next().await { let buf = item?; writer.send(&buf[..]).await?; } Ok(()) } async fn recv( mut stdout: impl Sink<Bytes, Error = std::io::Error> + Unpin, reader: &UdpSocket, ) -> Result<(), std::io::Error> { loop { let mut buf = vec![0; 1024]; let n = reader.recv(&mut buf[..]).await?; if n > 0 { stdout.send(Bytes::copy_from_slice(&buf[..n])).await?; } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/echo-tcp.rs
examples/echo-tcp.rs
//! A "hello world" echo server with Tokio //! //! This server will create a TCP listener, accept connections in a loop, and //! write back everything that's read off of each TCP connection. //! //! Because the Tokio runtime uses a thread pool, each TCP connection is //! processed concurrently with all other TCP connections across multiple //! threads. //! //! To see this server in action, you can run this in one terminal: //! //! cargo run --example echo-tcp //! //! and in another terminal you can run: //! //! cargo run --example connect-tcp 127.0.0.1:8080 //! //! Each line you type in to the `connect-tcp` terminal should be echo'd back to //! you! If you open up multiple terminals running the `connect-tcp` example you //! should be able to see them all make progress simultaneously. #![warn(rust_2018_idioms)] use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::TcpListener; use std::env; use std::error::Error; const DEFAULT_ADDR: &str = "127.0.0.1:8080"; const BUFFER_SIZE: usize = 4096; #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { // Allow passing an address to listen on as the first argument of this // program, but otherwise we'll just set up our TCP listener on // 127.0.0.1:8080 for connections. let addr = env::args() .nth(1) .unwrap_or_else(|| DEFAULT_ADDR.to_string()); // Next up we create a TCP listener which will listen for incoming // connections. This TCP listener is bound to the address we determined // above and must be associated with an event loop. let listener = TcpListener::bind(&addr).await?; println!("Listening on: {addr}"); loop { // Asynchronously wait for an inbound socket. let (mut socket, addr) = listener.accept().await?; // And this is where much of the magic of this server happens. We // crucially want all clients to make progress concurrently, rather than // blocking one on completion of another. To achieve this we use the // `tokio::spawn` function to execute the work in the background. 
// // Essentially here we're executing a new task to run concurrently, // which will allow all of our clients to be processed concurrently. tokio::spawn(async move { let mut buf = vec![0; BUFFER_SIZE]; // In a loop, read data from the socket and write the data back. loop { match socket.read(&mut buf).await { Ok(0) => { // Connection closed by peer return; } Ok(n) => { // Write the data back. If writing fails, log the error and exit. if let Err(e) = socket.write_all(&buf[0..n]).await { eprintln!("Failed to write to socket {}: {}", addr, e); return; } } Err(e) => { eprintln!("Failed to read from socket {}: {}", addr, e); return; } } } }); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/named-pipe-ready.rs
examples/named-pipe-ready.rs
use std::io; #[cfg(windows)] async fn windows_main() -> io::Result<()> { use tokio::io::Interest; use tokio::net::windows::named_pipe::{ClientOptions, ServerOptions}; const PIPE_NAME: &str = r"\\.\pipe\named-pipe-single-client"; let server = ServerOptions::new().create(PIPE_NAME)?; let server = tokio::spawn(async move { // Note: we wait for a client to connect. server.connect().await?; let buf = { let mut read_buf = [0u8; 5]; let mut read_buf_cursor = 0; loop { server.readable().await?; let buf = &mut read_buf[read_buf_cursor..]; match server.try_read(buf) { Ok(n) => { read_buf_cursor += n; if read_buf_cursor == read_buf.len() { break; } } Err(e) if e.kind() == io::ErrorKind::WouldBlock => { continue; } Err(e) => { return Err(e); } } } read_buf }; { let write_buf = b"pong\n"; let mut write_buf_cursor = 0; loop { let buf = &write_buf[write_buf_cursor..]; if buf.is_empty() { break; } server.writable().await?; match server.try_write(buf) { Ok(n) => { write_buf_cursor += n; } Err(e) if e.kind() == io::ErrorKind::WouldBlock => { continue; } Err(e) => { return Err(e); } } } } Ok::<_, io::Error>(buf) }); let client = tokio::spawn(async move { // There's no need to use a connect loop here, since we know that the // server is already up - `open` was called before spawning any of the // tasks. 
let client = ClientOptions::new().open(PIPE_NAME)?; let mut read_buf = [0u8; 5]; let mut read_buf_cursor = 0; let write_buf = b"ping\n"; let mut write_buf_cursor = 0; loop { let mut interest = Interest::READABLE; if write_buf_cursor < write_buf.len() { interest |= Interest::WRITABLE; } let ready = client.ready(interest).await?; if ready.is_readable() { let buf = &mut read_buf[read_buf_cursor..]; match client.try_read(buf) { Ok(n) => { read_buf_cursor += n; if read_buf_cursor == read_buf.len() { break; } } Err(e) if e.kind() == io::ErrorKind::WouldBlock => { continue; } Err(e) => { return Err(e); } } } if ready.is_writable() { let buf = &write_buf[write_buf_cursor..]; if buf.is_empty() { continue; } match client.try_write(buf) { Ok(n) => { write_buf_cursor += n; } Err(e) if e.kind() == io::ErrorKind::WouldBlock => { continue; } Err(e) => { return Err(e); } } } } let buf = String::from_utf8_lossy(&read_buf).into_owned(); Ok::<_, io::Error>(buf) }); let (server, client) = tokio::try_join!(server, client)?; assert_eq!(server?, *b"ping\n"); assert_eq!(client?, "pong\n"); Ok(()) } #[tokio::main] async fn main() -> io::Result<()> { #[cfg(windows)] { windows_main().await?; } #[cfg(not(windows))] { println!("Named pipes are only supported on Windows!"); } Ok(()) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/hello_world.rs
examples/hello_world.rs
//! A simple client that opens a TCP stream, writes "hello world\n", and closes //! the connection. //! //! To start a server that this client can talk to on port 6142, you can use this command: //! //! ncat -l 6142 //! //! And then in another terminal run: //! //! cargo run --example hello_world #![warn(rust_2018_idioms)] use tokio::io::AsyncWriteExt; use tokio::net::TcpStream; use std::error::Error; #[tokio::main] pub async fn main() -> Result<(), Box<dyn Error>> { // Open a TCP stream to the socket address. // // Note that this is the Tokio TcpStream, which is fully async. let mut stream = TcpStream::connect("127.0.0.1:6142").await?; println!("created stream"); let result = stream.write_all(b"hello world\n").await; println!("wrote to stream; success={:?}", result.is_ok()); Ok(()) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/udp-client.rs
examples/udp-client.rs
//! A UDP client that just sends everything it gets via `stdio` in a single datagram, and then //! waits for a reply. //! //! For the reasons of simplicity data from `stdio` is read until `EOF` in a blocking manner. //! //! You can test this out by running an echo server: //! //! ``` //! $ cargo run --example echo-udp -- 127.0.0.1:8080 //! ``` //! //! and running the client in another terminal: //! //! ``` //! $ cargo run --example udp-client //! ``` //! //! You can optionally provide any custom endpoint address for the client: //! //! ``` //! $ cargo run --example udp-client -- 127.0.0.1:8080 //! ``` //! //! Don't forget to pass `EOF` to the standard input of the client! //! //! Please mind that since the UDP protocol doesn't have any capabilities to detect a broken //! connection the server needs to be run first, otherwise the client will block forever. #![warn(rust_2018_idioms)] use std::env; use std::error::Error; use std::io::{stdin, Read}; use std::net::SocketAddr; use tokio::net::UdpSocket; fn get_stdin_data() -> Result<Vec<u8>, Box<dyn std::error::Error>> { let mut buf = Vec::new(); stdin().read_to_end(&mut buf)?; Ok(buf) } #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { let remote_addr: SocketAddr = env::args() .nth(1) .unwrap_or_else(|| "127.0.0.1:8080".into()) .parse()?; // We use port 0 to let the operating system allocate an available port for us. let local_addr: SocketAddr = if remote_addr.is_ipv4() { "0.0.0.0:0" } else { "[::]:0" } .parse()?; let socket = UdpSocket::bind(local_addr).await?; const MAX_DATAGRAM_SIZE: usize = 65_507; socket.connect(&remote_addr).await?; let data = get_stdin_data()?; socket.send(&data).await?; let mut data = vec![0u8; MAX_DATAGRAM_SIZE]; let len = socket.recv(&mut data).await?; println!( "Received {} bytes:\n{}", len, String::from_utf8_lossy(&data[..len]) ); Ok(()) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/connect-tcp.rs
examples/connect-tcp.rs
//! An example of hooking up stdin/stdout to a TCP stream. //! //! This example will connect to a socket address specified in the argument list //! and then forward all data read on stdin to the server, printing out all data //! received on stdout. Each line entered on stdin will be translated to a TCP //! packet which is then sent to the remote address. //! //! Note that this is not currently optimized for performance, especially //! around buffer management. Rather it's intended to show an example of //! working with a client. //! //! This example can be quite useful when interacting with the other examples in //! this repository! Many of them recommend running this as a simple "hook up //! stdin/stdout to a server" to get up and running. #![warn(rust_2018_idioms)] use tokio::io::{stdin, stdout}; use tokio::net::TcpStream; use tokio_util::codec::{BytesCodec, FramedRead, FramedWrite}; use bytes::Bytes; use futures::{future, Sink, SinkExt, Stream, StreamExt}; use std::env; use std::error::Error; use std::net::SocketAddr; #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { // Parse what address we're going to connect to let args = env::args().skip(1).collect::<Vec<_>>(); let addr = args .first() .ok_or("this program requires at least one argument")?; let addr = addr.parse::<SocketAddr>()?; let stdin = FramedRead::new(stdin(), BytesCodec::new()); let stdin = stdin.map(|i| i.map(|bytes| bytes.freeze())); let stdout = FramedWrite::new(stdout(), BytesCodec::new()); connect(&addr, stdin, stdout).await?; Ok(()) } pub async fn connect( addr: &SocketAddr, mut stdin: impl Stream<Item = Result<Bytes, std::io::Error>> + Unpin, mut stdout: impl Sink<Bytes, Error = std::io::Error> + Unpin, ) -> Result<(), Box<dyn Error>> { let mut stream = TcpStream::connect(addr).await?; let (r, w) = stream.split(); let mut sink = FramedWrite::new(w, BytesCodec::new()); // filter map Result<BytesMut, Error> stream into just a Bytes stream to match stdout Sink // on the event of an 
Error, log the error and end the stream let mut stream = FramedRead::new(r, BytesCodec::new()) .filter_map(|i| match i { //BytesMut into Bytes Ok(i) => future::ready(Some(i.freeze())), Err(e) => { eprintln!("failed to read from socket; error={e}"); future::ready(None) } }) .map(Ok); match future::join(sink.send_all(&mut stdin), stdout.send_all(&mut stream)).await { (Err(e), _) | (_, Err(e)) => Err(e.into()), _ => Ok(()), } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/custom-executor-tokio-context.rs
examples/custom-executor-tokio-context.rs
// This example shows how to use the tokio runtime with any other executor // //It takes advantage from RuntimeExt which provides the extension to customize your //runtime. use tokio::net::TcpListener; use tokio::runtime::Builder; use tokio::sync::oneshot; use tokio_util::context::RuntimeExt; fn main() { let (tx, rx) = oneshot::channel(); let rt1 = Builder::new_multi_thread() .worker_threads(1) // no timer! .build() .unwrap(); let rt2 = Builder::new_multi_thread() .worker_threads(1) .enable_all() .build() .unwrap(); // Without the `HandleExt.wrap()` there would be a panic because there is // no timer running, since it would be referencing runtime r1. rt1.block_on(rt2.wrap(async move { let listener = TcpListener::bind("0.0.0.0:0").await.unwrap(); println!("addr: {:?}", listener.local_addr()); tx.send(()).unwrap(); })); futures::executor::block_on(rx).unwrap(); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/tinyhttp.rs
examples/tinyhttp.rs
//! A "tiny" example of HTTP request/response handling using transports. //! //! This example is intended for *learning purposes* to see how various pieces //! hook up together and how HTTP can get up and running. Note that this example //! is written with the restriction that it *can't* use any "big" library other //! than Tokio, if you'd like a "real world" HTTP library you likely want a //! crate like Hyper. //! //! Code here is based on the `echo-threads` example and implements two paths, //! the `/plaintext` and `/json` routes to respond with some text and json, //! respectively. By default this will run I/O on all the cores your system has //! available, and it doesn't support HTTP request bodies. #![warn(rust_2018_idioms)] use bytes::BytesMut; use futures::SinkExt; use http::{header::HeaderValue, Request, Response, StatusCode}; #[macro_use] extern crate serde_derive; use std::{env, error::Error, fmt, io}; use tokio::net::{TcpListener, TcpStream}; use tokio_stream::StreamExt; use tokio_util::codec::{Decoder, Encoder, Framed}; #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { // Parse the arguments, bind the TCP socket we'll be listening to, spin up // our worker threads, and start shipping sockets to those worker threads. 
let addr = env::args() .nth(1) .unwrap_or_else(|| "127.0.0.1:8080".to_string()); let server = TcpListener::bind(&addr).await?; println!("Listening on: {addr}"); loop { let (stream, _) = server.accept().await?; tokio::spawn(async move { if let Err(e) = process(stream).await { println!("failed to process connection; error = {e}"); } }); } } async fn process(stream: TcpStream) -> Result<(), Box<dyn Error>> { let mut transport = Framed::new(stream, Http); while let Some(request) = transport.next().await { match request { Ok(request) => { let response = respond(request).await?; transport.send(response).await?; } Err(e) => return Err(e.into()), } } Ok(()) } async fn respond(req: Request<()>) -> Result<Response<String>, Box<dyn Error>> { let mut response = Response::builder(); let body = match req.uri().path() { "/plaintext" => { response = response.header("Content-Type", "text/plain"); "Hello, World!".to_string() } "/json" => { response = response.header("Content-Type", "application/json"); #[derive(Serialize)] struct Message { message: &'static str, } serde_json::to_string(&Message { message: "Hello, World!", })? } _ => { response = response.status(StatusCode::NOT_FOUND); String::new() } }; let response = response.body(body).map_err(io::Error::other)?; Ok(response) } struct Http; /// Implementation of encoding an HTTP response into a `BytesMut`, basically /// just writing out an HTTP/1.1 response. 
impl Encoder<Response<String>> for Http { type Error = io::Error; fn encode(&mut self, item: Response<String>, dst: &mut BytesMut) -> io::Result<()> { use std::fmt::Write; write!( BytesWrite(dst), "\ HTTP/1.1 {}\r\n\ Server: Example\r\n\ Content-Length: {}\r\n\ Date: {}\r\n\ ", item.status(), item.body().len(), date::now() ) .unwrap(); for (k, v) in item.headers() { dst.extend_from_slice(k.as_str().as_bytes()); dst.extend_from_slice(b": "); dst.extend_from_slice(v.as_bytes()); dst.extend_from_slice(b"\r\n"); } dst.extend_from_slice(b"\r\n"); dst.extend_from_slice(item.body().as_bytes()); return Ok(()); // Right now `write!` on `Vec<u8>` goes through io::Write and is not // super speedy, so inline a less-crufty implementation here which // doesn't go through io::Error. struct BytesWrite<'a>(&'a mut BytesMut); impl fmt::Write for BytesWrite<'_> { fn write_str(&mut self, s: &str) -> fmt::Result { self.0.extend_from_slice(s.as_bytes()); Ok(()) } fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result { fmt::write(self, args) } } } } /// Implementation of decoding an HTTP request from the bytes we've read so far. /// This leverages the `httparse` crate to do the actual parsing and then we use /// that information to construct an instance of a `http::Request` object, /// trying to avoid allocations where possible. 
impl Decoder for Http { type Item = Request<()>; type Error = io::Error; fn decode(&mut self, src: &mut BytesMut) -> io::Result<Option<Request<()>>> { // TODO: we should grow this headers array if parsing fails and asks // for more headers let mut headers = [None; 16]; let (method, path, version, amt) = { let mut parsed_headers = [httparse::EMPTY_HEADER; 16]; let mut r = httparse::Request::new(&mut parsed_headers); let status = r.parse(src).map_err(|e| { let msg = format!("failed to parse http request: {e:?}"); io::Error::other(msg) })?; let amt = match status { httparse::Status::Complete(amt) => amt, httparse::Status::Partial => return Ok(None), }; let toslice = |a: &[u8]| { let start = a.as_ptr() as usize - src.as_ptr() as usize; assert!(start < src.len()); (start, start + a.len()) }; for (i, header) in r.headers.iter().enumerate() { let k = toslice(header.name.as_bytes()); let v = toslice(header.value); headers[i] = Some((k, v)); } let method = http::Method::try_from(r.method.unwrap()).map_err(io::Error::other)?; ( method, toslice(r.path.unwrap().as_bytes()), r.version.unwrap(), amt, ) }; if version != 1 { return Err(io::Error::other("only HTTP/1.1 accepted")); } let data = src.split_to(amt).freeze(); let mut ret = Request::builder(); ret = ret.method(method); let s = data.slice(path.0..path.1); let s = unsafe { String::from_utf8_unchecked(Vec::from(s.as_ref())) }; ret = ret.uri(s); ret = ret.version(http::Version::HTTP_11); for header in headers.iter() { let (k, v) = match *header { Some((ref k, ref v)) => (k, v), None => break, }; let value = HeaderValue::from_bytes(data.slice(v.0..v.1).as_ref()) .map_err(|_| io::Error::other("header decode error"))?; ret = ret.header(&data[k.0..k.1], value); } let req = ret.body(()).map_err(io::Error::other)?; Ok(Some(req)) } } mod date { use std::cell::RefCell; use std::fmt::{self, Write}; use std::str; use std::time::SystemTime; use httpdate::HttpDate; pub struct Now(()); /// Returns a struct, which when formatted, renders 
an appropriate `Date` /// header value. pub fn now() -> Now { Now(()) } // Gee Alex, doesn't this seem like premature optimization. Well you see // there Billy, you're absolutely correct! If your server is *bottlenecked* // on rendering the `Date` header, well then boy do I have news for you, you // don't need this optimization. // // In all seriousness, though, a simple "hello world" benchmark which just // sends back literally "hello world" with standard headers actually is // bottlenecked on rendering a date into a byte buffer. Since it was at the // top of a profile, and this was done for some competitive benchmarks, this // module was written. // // Just to be clear, though, I was not intending on doing this because it // really does seem kinda absurd, but it was done by someone else [1], so I // blame them! :) // // [1]: https://github.com/rapidoid/rapidoid/blob/f1c55c0555007e986b5d069fe1086e6d09933f7b/rapidoid-commons/src/main/java/org/rapidoid/commons/Dates.java#L48-L66 struct LastRenderedNow { bytes: [u8; 128], amt: usize, unix_date: u64, } thread_local!(static LAST: RefCell<LastRenderedNow> = const { RefCell::new(LastRenderedNow { bytes: [0; 128], amt: 0, unix_date: 0, }) }); impl fmt::Display for Now { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { LAST.with(|cache| { let mut cache = cache.borrow_mut(); let now = SystemTime::now(); let now_unix = now .duration_since(SystemTime::UNIX_EPOCH) .map(|since_epoch| since_epoch.as_secs()) .unwrap_or(0); if cache.unix_date != now_unix { cache.update(now, now_unix); } f.write_str(cache.buffer()) }) } } impl LastRenderedNow { fn buffer(&self) -> &str { str::from_utf8(&self.bytes[..self.amt]).unwrap() } fn update(&mut self, now: SystemTime, now_unix: u64) { self.amt = 0; self.unix_date = now_unix; write!(LocalBuffer(self), "{}", HttpDate::from(now)).unwrap(); } } struct LocalBuffer<'a>(&'a mut LastRenderedNow); impl fmt::Write for LocalBuffer<'_> { fn write_str(&mut self, s: &str) -> fmt::Result { let 
start = self.0.amt; let end = start + s.len(); self.0.bytes[start..end].copy_from_slice(s.as_bytes()); self.0.amt += s.len(); Ok(()) } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/dump.rs
examples/dump.rs
//! This example demonstrates tokio's experimental task dumping functionality. //! This application deadlocks. Input CTRL+C to display traces of each task, or //! input CTRL+C twice within 1 second to quit. #[cfg(all( tokio_unstable, target_os = "linux", any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64") ))] #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { use std::sync::Arc; use tokio::sync::Barrier; #[inline(never)] async fn a(barrier: Arc<Barrier>) { b(barrier).await } #[inline(never)] async fn b(barrier: Arc<Barrier>) { c(barrier).await } #[inline(never)] async fn c(barrier: Arc<Barrier>) { barrier.wait().await; } // Prints a task dump upon receipt of CTRL+C, or returns if CTRL+C is // inputted twice within a second. async fn dump_or_quit() { use tokio::time::{timeout, Duration, Instant}; let handle = tokio::runtime::Handle::current(); let mut last_signal: Option<Instant> = None; // wait for CTRL+C while let Ok(_) = tokio::signal::ctrl_c().await { // exit if a CTRL+C is inputted twice within 1 second if let Some(time_since_last_signal) = last_signal.map(|i| i.elapsed()) { if time_since_last_signal < Duration::from_secs(1) { return; } } last_signal = Some(Instant::now()); // capture a dump, and print each trace println!("{:-<80}", ""); if let Ok(dump) = timeout(Duration::from_secs(2), handle.dump()).await { for task in dump.tasks().iter() { let id = task.id(); let trace = task.trace(); println!("TASK {id}:"); println!("{trace}\n"); } } else { println!("Task dumping timed out. Use a native debugger (like gdb) to debug the deadlock."); } println!("{:-<80}", ""); println!("Input CTRL+C twice within 1 second to exit."); } } println!("This program has a deadlock."); println!("Input CTRL+C to print a task dump."); println!("Input CTRL+C twice within 1 second to exit."); // oops! this barrier waits for one more task than will ever come. 
let barrier = Arc::new(Barrier::new(3)); let task_1 = tokio::spawn(a(barrier.clone())); let task_2 = tokio::spawn(a(barrier)); tokio::select!( _ = dump_or_quit() => {}, _ = task_1 => {}, _ = task_2 => {}, ); Ok(()) } #[cfg(not(all( tokio_unstable, target_os = "linux", any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64") )))] fn main() { println!("task dumps are not available") }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/named-pipe.rs
examples/named-pipe.rs
use std::io; #[cfg(windows)] async fn windows_main() -> io::Result<()> { use tokio::io::AsyncWriteExt; use tokio::io::{AsyncBufReadExt, BufReader}; use tokio::net::windows::named_pipe::{ClientOptions, ServerOptions}; const PIPE_NAME: &str = r"\\.\pipe\named-pipe-single-client"; let server = ServerOptions::new().create(PIPE_NAME)?; let server = tokio::spawn(async move { // Note: we wait for a client to connect. server.connect().await?; let mut server = BufReader::new(server); let mut buf = String::new(); server.read_line(&mut buf).await?; server.write_all(b"pong\n").await?; Ok::<_, io::Error>(buf) }); let client = tokio::spawn(async move { // There's no need to use a connect loop here, since we know that the // server is already up - `open` was called before spawning any of the // tasks. let client = ClientOptions::new().open(PIPE_NAME)?; let mut client = BufReader::new(client); let mut buf = String::new(); client.write_all(b"ping\n").await?; client.read_line(&mut buf).await?; Ok::<_, io::Error>(buf) }); let (server, client) = tokio::try_join!(server, client)?; assert_eq!(server?, "ping\n"); assert_eq!(client?, "pong\n"); Ok(()) } #[tokio::main] async fn main() -> io::Result<()> { #[cfg(windows)] { windows_main().await?; } #[cfg(not(windows))] { println!("Named pipes are only supported on Windows!"); } Ok(()) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/custom-executor.rs
examples/custom-executor.rs
// This example shows how to use the tokio runtime with any other executor // // The main components are a spawn fn that will wrap futures in a special future // that will always enter the tokio context on poll. This only spawns one extra thread // to manage and run the tokio drivers in the background. use tokio::net::TcpListener; use tokio::sync::oneshot; fn main() { let (tx, rx) = oneshot::channel(); my_custom_runtime::spawn(async move { let listener = TcpListener::bind("0.0.0.0:0").await.unwrap(); println!("addr: {:?}", listener.local_addr()); tx.send(()).unwrap(); }); futures::executor::block_on(rx).unwrap(); } mod my_custom_runtime { use once_cell::sync::Lazy; use std::future::Future; use tokio_util::context::TokioContext; pub fn spawn(f: impl Future<Output = ()> + Send + 'static) { EXECUTOR.spawn(f); } struct ThreadPool { inner: futures::executor::ThreadPool, rt: tokio::runtime::Runtime, } static EXECUTOR: Lazy<ThreadPool> = Lazy::new(|| { // Spawn tokio runtime on a single background thread // enabling IO and timers. let rt = tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .unwrap(); let inner = futures::executor::ThreadPool::builder().create().unwrap(); ThreadPool { inner, rt } }); impl ThreadPool { fn spawn(&self, f: impl Future<Output = ()> + Send + 'static) { let handle = self.rt.handle().clone(); self.inner.spawn_ok(TokioContext::new(f, handle)); } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/chat.rs
examples/chat.rs
//! A chat server that broadcasts a message to all connections. //! //! This example is explicitly more verbose than it has to be. This is to //! illustrate more concepts. //! //! A chat server for telnet clients. After a telnet client connects, the first //! line should contain the client's name. After that, all lines sent by a //! client are broadcasted to all other connected clients. //! //! Because the client is telnet, lines are delimited by "\r\n". //! //! You can test this out by running: //! //! cargo run --example chat //! //! And then in another terminal run: //! //! telnet localhost 6142 //! //! You can run the `telnet` command in any number of additional windows. //! //! You can run the second command in multiple windows and then chat between the //! two, seeing the messages from the other client as they're received. For all //! connected clients they'll all join the same room and see everyone else's //! messages. #![warn(rust_2018_idioms)] use tokio::net::{TcpListener, TcpStream}; use tokio::sync::{mpsc, Mutex}; use tokio_stream::StreamExt; use tokio_util::codec::{Framed, LinesCodec}; use futures::SinkExt; use std::collections::HashMap; use std::env; use std::error::Error; use std::io; use std::net::SocketAddr; use std::sync::Arc; const DEFAULT_ADDR: &str = "127.0.0.1:6142"; #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter}; // Configure a `tracing` subscriber that logs traces emitted by the chat // server. tracing_subscriber::fmt() // Filter what traces are displayed based on the RUST_LOG environment // variable. // // Traces emitted by the example code will always be displayed. You // can set `RUST_LOG=tokio=trace` to enable additional traces emitted by // Tokio itself. .with_env_filter(EnvFilter::from_default_env().add_directive("chat=info".parse()?)) // Log events when `tracing` spans are created, entered, exited, or // closed. 
When Tokio's internal tracing support is enabled (as // described above), this can be used to track the lifecycle of spawned // tasks on the Tokio runtime. .with_span_events(FmtSpan::FULL) // Set this subscriber as the default, to collect all traces emitted by // the program. .init(); // Create the shared state. This is how all the peers communicate. // // The server task will hold a handle to this. For every new client, the // `state` handle is cloned and passed into the task that processes the // client connection. let state = Arc::new(Mutex::new(Shared::new())); let addr = env::args() .nth(1) .unwrap_or_else(|| DEFAULT_ADDR.to_string()); // Bind a TCP listener to the socket address. // // Note that this is the Tokio TcpListener, which is fully async. let listener = TcpListener::bind(&addr).await?; tracing::info!("server running on {addr}"); loop { // Asynchronously wait for an inbound TcpStream. let (stream, addr) = listener.accept().await?; // Clone a handle to the `Shared` state for the new connection. let state = Arc::clone(&state); // Spawn our handler to be run asynchronously. tokio::spawn(async move { tracing::debug!("accepted connection from {addr}"); if let Err(e) = process(state, stream, addr).await { tracing::warn!("Connection from {addr} failed: {e:?}"); } }); } } /// Shorthand for the transmit half of the message channel. type Tx = mpsc::UnboundedSender<String>; /// Shorthand for the receive half of the message channel. type Rx = mpsc::UnboundedReceiver<String>; /// Data that is shared between all peers in the chat server. /// /// This is the set of `Tx` handles for all connected clients. Whenever a /// message is received from a client, it is broadcasted to all peers by /// iterating over the `peers` entries and sending a copy of the message on each /// `Tx`. struct Shared { peers: HashMap<SocketAddr, Tx>, } /// The state for each connected client. struct Peer { /// The TCP socket wrapped with the `Lines` codec, defined below. 
/// /// This handles sending and receiving data on the socket. When using /// `Lines`, we can work at the line level instead of having to manage the /// raw byte operations. lines: Framed<TcpStream, LinesCodec>, /// Receive half of the message channel. /// /// This is used to receive messages from peers. When a message is received /// off of this `Rx`, it will be written to the socket. rx: Rx, } impl Shared { /// Create a new, empty, instance of `Shared`. fn new() -> Self { Shared { peers: HashMap::new(), } } /// Send a `LineCodec` encoded message to every peer, except /// for the sender. /// /// This function also cleans up disconnected peers automatically. async fn broadcast(&mut self, sender: SocketAddr, message: &str) { let mut failed_peers = Vec::new(); let message = message.to_string(); // Clone once for all sends for (addr, tx) in self.peers.iter() { if *addr != sender && tx.send(message.clone()).is_err() { // Receiver has been dropped, mark for removal failed_peers.push(*addr); } } // Clean up disconnected peers for addr in failed_peers { self.peers.remove(&addr); tracing::debug!("Removed disconnected peer: {addr}"); } } } impl Peer { /// Create a new instance of `Peer`. async fn new( state: Arc<Mutex<Shared>>, lines: Framed<TcpStream, LinesCodec>, ) -> io::Result<Peer> { // Get the client socket address let addr = lines.get_ref().peer_addr()?; // Create a channel for this peer let (tx, rx) = mpsc::unbounded_channel(); // Add an entry for this `Peer` in the shared state map. state.lock().await.peers.insert(addr, tx); Ok(Peer { lines, rx }) } } /// Process an individual chat client async fn process( state: Arc<Mutex<Shared>>, stream: TcpStream, addr: SocketAddr, ) -> Result<(), Box<dyn Error>> { let mut lines = Framed::new(stream, LinesCodec::new()); // Send a prompt to the client to enter their username. lines.send("Please enter your username:").await?; // Read the first line from the `LineCodec` stream to get the username. 
let Some(Ok(username)) = lines.next().await else { // We didn't get a line so we return early here. tracing::error!("Failed to get username from {addr}. Client disconnected."); return Ok(()); }; // Register our peer with state which internally sets up some channels. let mut peer = Peer::new(state.clone(), lines).await?; // A client has connected, let's let everyone know. { let mut state = state.lock().await; let msg = format!("{username} has joined the chat"); tracing::info!("{msg}"); state.broadcast(addr, &msg).await; } // Process incoming messages until our stream is exhausted by a disconnect. loop { tokio::select! { // A message was received from a peer. Send it to the current user. Some(msg) = peer.rx.recv() => { if let Err(e) = peer.lines.send(&msg).await { tracing::error!("Failed to send message to {username}: {e:?}"); break; } } result = peer.lines.next() => match result { // A message was received from the current user, we should // broadcast this message to the other users. Some(Ok(msg)) => { let mut state = state.lock().await; let msg = format!("{username}: {msg}"); state.broadcast(addr, &msg).await; } // An error occurred. Some(Err(e)) => { tracing::error!( "an error occurred while processing messages for {username}; error = {e:?}" ); break; } // The stream has been exhausted. None => break, }, } } // If this section is reached it means that the client was disconnected! // Let's let everyone still connected know about it. { let mut state = state.lock().await; state.peers.remove(&addr); let msg = format!("{username} has left the chat"); tracing::info!("{msg}"); state.broadcast(addr, &msg).await; } Ok(()) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/udp-codec.rs
examples/udp-codec.rs
//! This example leverages `BytesCodec` to create a UDP client and server which //! speak a custom protocol. //! //! Here we're using the codec from `tokio-codec` to convert a UDP socket to a stream of //! client messages. These messages are then processed and returned back as a //! new message with a new destination. Overall, we then use this to construct a //! "ping pong" pair where two sockets are sending messages back and forth. #![warn(rust_2018_idioms)] use tokio::net::UdpSocket; use tokio::{io, time}; use tokio_stream::StreamExt; use tokio_util::codec::BytesCodec; use tokio_util::udp::UdpFramed; use bytes::Bytes; use futures::{FutureExt, SinkExt}; use std::env; use std::error::Error; use std::net::SocketAddr; use std::time::Duration; #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { let addr = env::args() .nth(1) .unwrap_or_else(|| "127.0.0.1:0".to_string()); // Bind both our sockets and then figure out what ports we got. let a = UdpSocket::bind(&addr).await?; let b = UdpSocket::bind(&addr).await?; let b_addr = b.local_addr()?; let mut a = UdpFramed::new(a, BytesCodec::new()); let mut b = UdpFramed::new(b, BytesCodec::new()); // Start off by sending a ping from a to b, afterwards we just print out // what they send us and continually send pings let a = ping(&mut a, b_addr); // The second client we have will receive the pings from `a` and then send // back pongs. let b = pong(&mut b); // Run both futures simultaneously of `a` and `b` sending messages back and forth. 
match tokio::try_join!(a, b) { Err(e) => println!("an error occurred; error = {e:?}"), _ => println!("done!"), } Ok(()) } async fn ping(socket: &mut UdpFramed<BytesCodec>, b_addr: SocketAddr) -> Result<(), io::Error> { socket.send((Bytes::from(&b"PING"[..]), b_addr)).await?; for _ in 0..4usize { let (bytes, addr) = socket.next().map(|e| e.unwrap()).await?; println!("[a] recv: {}", String::from_utf8_lossy(&bytes)); socket.send((Bytes::from(&b"PING"[..]), addr)).await?; } Ok(()) } async fn pong(socket: &mut UdpFramed<BytesCodec>) -> Result<(), io::Error> { let timeout = Duration::from_millis(200); while let Ok(Some(Ok((bytes, addr)))) = time::timeout(timeout, socket.next()).await { println!("[b] recv: {}", String::from_utf8_lossy(&bytes)); socket.send((Bytes::from(&b"PONG"[..]), addr)).await?; } Ok(()) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/print_each_packet.rs
examples/print_each_packet.rs
//! A "print-each-packet" server with Tokio //! //! This server will create a TCP listener, accept connections in a loop, and //! put down in the stdout everything that's read off of each TCP connection. //! //! Because the Tokio runtime uses a thread pool, each TCP connection is //! processed concurrently with all other TCP connections across multiple //! threads. //! //! To see this server in action, you can run this in one terminal: //! //! cargo run --example print\_each\_packet //! //! and in another terminal you can run: //! //! cargo run --example connect-tcp 127.0.0.1:8080 //! //! Each line you type in to the `connect-tcp` terminal should be written to terminal! //! //! Minimal js example: //! //! ```js //! var net = require("net"); //! //! var listenPort = 8080; //! //! var server = net.createServer(function (socket) { //! socket.on("data", function (bytes) { //! console.log("bytes", bytes); //! }); //! //! socket.on("end", function() { //! console.log("Socket received FIN packet and closed connection"); //! }); //! socket.on("error", function (error) { //! console.log("Socket closed with error", error); //! }); //! //! socket.on("close", function (with_error) { //! if (with_error) { //! console.log("Socket closed with result: Err(SomeError)"); //! } else { //! console.log("Socket closed with result: Ok(())"); //! } //! }); //! //! }); //! //! server.listen(listenPort); //! //! console.log("Listening on:", listenPort); //! ``` //! #![warn(rust_2018_idioms)] use tokio::net::TcpListener; use tokio_stream::StreamExt; use tokio_util::codec::{BytesCodec, Decoder}; use std::env; #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { // Allow passing an address to listen on as the first argument of this // program, but otherwise we'll just set up our TCP listener on // 127.0.0.1:8080 for connections. 
let addr = env::args() .nth(1) .unwrap_or_else(|| "127.0.0.1:8080".to_string()); // Next up we create a TCP listener which will listen for incoming // connections. This TCP listener is bound to the address we determined // above and must be associated with an event loop, so we pass in a handle // to our event loop. After the socket's created we inform that we're ready // to go and start accepting connections. let listener = TcpListener::bind(&addr).await?; println!("Listening on: {addr}"); loop { // Asynchronously wait for an inbound socket. let (socket, _) = listener.accept().await?; // And this is where much of the magic of this server happens. We // crucially want all clients to make progress concurrently, rather than // blocking one on completion of another. To achieve this we use the // `tokio::spawn` function to execute the work in the background. // // Essentially here we're executing a new task to run concurrently, // which will allow all of our clients to be processed concurrently. tokio::spawn(async move { // We're parsing each socket with the `BytesCodec` included in `tokio::codec`. let mut framed = BytesCodec::new().framed(socket); // We loop while there are messages coming from the Stream `framed`. // The stream will return None once the client disconnects. while let Some(message) = framed.next().await { match message { Ok(bytes) => println!("bytes: {bytes:?}"), Err(err) => println!("Socket closed with error: {err:?}"), } } println!("Socket received FIN packet and closed connection"); }); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/proxy.rs
examples/proxy.rs
//! A proxy that forwards data to another server and forwards that server's //! responses back to clients. //! //! Because the Tokio runtime uses a thread pool, each TCP connection is //! processed concurrently with all other TCP connections across multiple //! threads. //! //! You can showcase this by running this in one terminal: //! //! cargo run --example proxy //! //! This in another terminal //! //! cargo run --example echo-tcp //! //! And finally this in another terminal //! //! cargo run --example connect-tcp 127.0.0.1:8081 //! //! This final terminal will connect to our proxy, which will in turn connect to //! the echo server, and you'll be able to see data flowing between them. #![warn(rust_2018_idioms)] use tokio::io::copy_bidirectional; use tokio::net::{TcpListener, TcpStream}; use futures::FutureExt; use std::env; use std::error::Error; #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { let listen_addr = env::args() .nth(1) .unwrap_or_else(|| "127.0.0.1:8081".to_string()); let server_addr = env::args() .nth(2) .unwrap_or_else(|| "127.0.0.1:8080".to_string()); println!("Listening on: {listen_addr}"); println!("Proxying to: {server_addr}"); let listener = TcpListener::bind(listen_addr).await?; while let Ok((mut inbound, _)) = listener.accept().await { let mut outbound = TcpStream::connect(server_addr.clone()).await?; tokio::spawn(async move { copy_bidirectional(&mut inbound, &mut outbound) .map(|r| { if let Err(e) = r { println!("Failed to transfer; error={e}"); } }) .await }); } Ok(()) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/tinydb.rs
examples/tinydb.rs
//! A "tiny database" and accompanying protocol //! //! This example shows the usage of shared state amongst all connected clients, //! namely a database of key/value pairs. Each connected client can send a //! series of GET/SET commands to query the current value of a key or set the //! value of a key. //! //! This example has a simple protocol you can use to interact with the server. //! To run, first run this in one terminal window: //! //! cargo run --example tinydb //! //! and next in another windows run: //! //! cargo run --example connect-tcp 127.0.0.1:8080 //! //! In the `connect-tcp` window you can type in commands where when you hit enter //! you'll get a response from the server for that command. An example session //! is: //! //! //! $ cargo run --example connect-tcp 127.0.0.1:8080 //! GET foo //! foo = bar //! GET FOOBAR //! error: no key FOOBAR //! SET FOOBAR my awesome string //! set FOOBAR = `my awesome string`, previous: None //! SET foo tokio //! set foo = `tokio`, previous: Some("bar") //! GET foo //! foo = tokio //! //! Namely you can issue two forms of commands: //! //! * `GET $key` - this will fetch the value of `$key` from the database and //! return it. The server's database is initially populated with the key `foo` //! set to the value `bar` //! * `SET $key $value` - this will set the value of `$key` to `$value`, //! returning the previous value, if any. #![warn(rust_2018_idioms)] use tokio::net::TcpListener; use tokio_stream::StreamExt; use tokio_util::codec::{Framed, LinesCodec}; use futures::SinkExt; use std::collections::HashMap; use std::env; use std::error::Error; use std::sync::{Arc, Mutex}; /// The in-memory database shared amongst all clients. /// /// This database will be shared via `Arc`, so to mutate the internal map we're /// going to use a `Mutex` for interior mutability. 
struct Database { map: Mutex<HashMap<String, String>>, } /// Possible requests our clients can send us enum Request { Get { key: String }, Set { key: String, value: String }, } /// Responses to the `Request` commands above enum Response { Value { key: String, value: String, }, Set { key: String, value: String, previous: Option<String>, }, Error { msg: String, }, } #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { // Parse the address we're going to run this server on // and set up our TCP listener to accept connections. let addr = env::args() .nth(1) .unwrap_or_else(|| "127.0.0.1:8080".to_string()); let listener = TcpListener::bind(&addr).await?; println!("Listening on: {addr}"); // Create the shared state of this server that will be shared amongst all // clients. We populate the initial database and then create the `Database` // structure. Note the usage of `Arc` here which will be used to ensure that // each independently spawned client will have a reference to the in-memory // database. let mut initial_db = HashMap::new(); initial_db.insert("foo".to_string(), "bar".to_string()); let db = Arc::new(Database { map: Mutex::new(initial_db), }); loop { match listener.accept().await { Ok((socket, _)) => { // After getting a new connection first we see a clone of the database // being created, which is creating a new reference for this connected // client to use. let db = db.clone(); // Like with other small servers, we'll `spawn` this client to ensure it // runs concurrently with all other clients. The `move` keyword is used // here to move ownership of our db handle into the async closure. tokio::spawn(async move { // Since our protocol is line-based we use `tokio_codecs`'s `LineCodec` // to convert our stream of bytes, `socket`, into a `Stream` of lines // as well as convert our line based responses into a stream of bytes. 
let mut lines = Framed::new(socket, LinesCodec::new()); // Here for every line we get back from the `Framed` decoder, // we parse the request, and if it's valid we generate a response // based on the values in the database. while let Some(result) = lines.next().await { match result { Ok(line) => { let response = handle_request(&line, &db); let response = response.serialize(); if let Err(e) = lines.send(response.as_str()).await { println!("error on sending response; error = {e:?}"); } } Err(e) => { println!("error on decoding from socket; error = {e:?}"); } } } // The connection will be closed at this point as `lines.next()` has returned `None`. }); } Err(e) => println!("error accepting socket; error = {e:?}"), } } } fn handle_request(line: &str, db: &Arc<Database>) -> Response { let request = match Request::parse(line) { Ok(req) => req, Err(e) => return Response::Error { msg: e }, }; let mut db = db.map.lock().unwrap(); match request { Request::Get { key } => match db.get(&key) { Some(value) => Response::Value { key, value: value.clone(), }, None => Response::Error { msg: format!("no key {key}"), }, }, Request::Set { key, value } => { let previous = db.insert(key.clone(), value.clone()); Response::Set { key, value, previous, } } } } impl Request { fn parse(input: &str) -> Result<Request, String> { let mut parts = input.splitn(3, ' '); match parts.next() { Some("GET") => { let key = parts.next().ok_or("GET must be followed by a key")?; if parts.next().is_some() { return Err("GET's key must not be followed by anything".into()); } Ok(Request::Get { key: key.to_string(), }) } Some("SET") => { let key = match parts.next() { Some(key) => key, None => return Err("SET must be followed by a key".into()), }; let value = match parts.next() { Some(value) => value, None => return Err("SET needs a value".into()), }; Ok(Request::Set { key: key.to_string(), value: value.to_string(), }) } Some(cmd) => Err(format!("unknown command: {cmd}")), None => Err("empty input".into()), } } } 
impl Response { fn serialize(&self) -> String { match *self { Response::Value { ref key, ref value } => format!("{key} = {value}"), Response::Set { ref key, ref value, ref previous, } => format!("set {key} = `{value}`, previous: {previous:?}"), Response::Error { ref msg } => format!("error: {msg}"), } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/echo-udp.rs
examples/echo-udp.rs
//! An UDP echo server that just sends back everything that it receives. //! //! If you're on Unix you can test this out by in one terminal executing: //! //! cargo run --example echo-udp //! //! and in another terminal you can run: //! //! cargo run --example connect-udp 127.0.0.1:8080 //! //! Each line you type in to the `connect-udp` terminal should be echo'd back to you! #![warn(rust_2018_idioms)] use std::error::Error; use std::net::SocketAddr; use std::{env, io}; use tokio::net::UdpSocket; struct Server { socket: UdpSocket, buf: Vec<u8>, to_send: Option<(usize, SocketAddr)>, } impl Server { async fn run(self) -> Result<(), io::Error> { let Server { socket, mut buf, mut to_send, } = self; loop { // First we check to see if there's a message we need to echo back. // If so then we try to send it back to the original source, waiting // until it's writable and we're able to do so. if let Some((size, peer)) = to_send { let amt = socket.send_to(&buf[..size], &peer).await?; println!("Echoed {amt}/{size} bytes to {peer}"); } // If we're here then `to_send` is `None`, so we take a look for the // next message we're going to echo back. to_send = Some(socket.recv_from(&mut buf).await?); } } } #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { let addr = env::args() .nth(1) .unwrap_or_else(|| "127.0.0.1:8080".to_string()); let socket = UdpSocket::bind(&addr).await?; println!("Listening on: {}", socket.local_addr()?); let server = Server { socket, buf: vec![0; 1024], to_send: None, }; // This starts the server task. server.run().await?; Ok(()) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/examples/named-pipe-multi-client.rs
examples/named-pipe-multi-client.rs
use std::io; #[cfg(windows)] async fn windows_main() -> io::Result<()> { use std::time::Duration; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::windows::named_pipe::{ClientOptions, ServerOptions}; use tokio::time; use windows_sys::Win32::Foundation::ERROR_PIPE_BUSY; const PIPE_NAME: &str = r"\\.\pipe\named-pipe-multi-client"; const N: usize = 10; // The first server needs to be constructed early so that clients can // be correctly connected. Otherwise a waiting client will error. // // Here we also make use of `first_pipe_instance`, which will ensure // that there are no other servers up and running already. let mut server = ServerOptions::new() .first_pipe_instance(true) .create(PIPE_NAME)?; let server = tokio::spawn(async move { // Artificial workload. time::sleep(Duration::from_secs(1)).await; for _ in 0..N { // Wait for client to connect. server.connect().await?; let mut inner = server; // Construct the next server to be connected before sending the one // we already have of onto a task. This ensures that the server // isn't closed (after it's done in the task) before a new one is // available. Otherwise the client might error with // `io::ErrorKind::NotFound`. server = ServerOptions::new().create(PIPE_NAME)?; let _ = tokio::spawn(async move { let mut buf = vec![0u8; 4]; inner.read_exact(&mut buf).await?; inner.write_all(b"pong").await?; Ok::<_, io::Error>(()) }); } Ok::<_, io::Error>(()) }); let mut clients = Vec::new(); for _ in 0..N { clients.push(tokio::spawn(async move { // This showcases a generic connect loop. // // We immediately try to create a client, if it's not found or // the pipe is busy we use the specialized wait function on the // client builder. 
let mut client = loop { match ClientOptions::new().open(PIPE_NAME) { Ok(client) => break client, Err(e) if e.raw_os_error() == Some(ERROR_PIPE_BUSY as i32) => (), Err(e) => return Err(e), } time::sleep(Duration::from_millis(5)).await; }; let mut buf = [0u8; 4]; client.write_all(b"ping").await?; client.read_exact(&mut buf).await?; Ok::<_, io::Error>(buf) })); } for client in clients { let result = client.await?; assert_eq!(&result?[..], b"pong"); } server.await??; Ok(()) } #[tokio::main] async fn main() -> io::Result<()> { #[cfg(windows)] { windows_main().await?; } #[cfg(not(windows))] { println!("Named pipes are only supported on Windows!"); } Ok(()) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/stress-test/examples/simple_echo_tcp.rs
stress-test/examples/simple_echo_tcp.rs
//! Simple TCP echo server to check memory leaks using Valgrind. use std::{thread::sleep, time::Duration}; use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, net::{TcpListener, TcpSocket}, runtime::Builder, sync::oneshot, }; const TCP_ENDPOINT: &str = "127.0.0.1:8080"; const NUM_MSGS: usize = 100; const MSG_SIZE: usize = 1024; fn main() { let rt = Builder::new_multi_thread().enable_io().build().unwrap(); let rt2 = Builder::new_multi_thread().enable_io().build().unwrap(); rt.spawn(async { let listener = TcpListener::bind(TCP_ENDPOINT).await.unwrap(); let (mut socket, _) = listener.accept().await.unwrap(); let (mut rd, mut wr) = socket.split(); while tokio::io::copy(&mut rd, &mut wr).await.is_ok() {} }); // wait a bit so that the listener binds. sleep(Duration::from_millis(100)); // create a channel to let the main thread know that all the messages were sent and received. let (tx, mut rx) = oneshot::channel(); rt2.spawn(async { let addr = TCP_ENDPOINT.parse().unwrap(); let socket = TcpSocket::new_v4().unwrap(); let mut stream = socket.connect(addr).await.unwrap(); let mut buff = [0; MSG_SIZE]; for _ in 0..NUM_MSGS { let one_mega_random_bytes: Vec<u8> = (0..MSG_SIZE).map(|_| rand::random::<u8>()).collect(); stream .write_all(one_mega_random_bytes.as_slice()) .await .unwrap(); let _ = stream.read(&mut buff).await.unwrap(); } tx.send(()).unwrap(); }); loop { // check that we're done. match rx.try_recv() { Err(oneshot::error::TryRecvError::Empty) => (), Err(oneshot::error::TryRecvError::Closed) => panic!("channel got closed..."), Ok(()) => break, } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/loom.rs
tokio-util/src/loom.rs
//! Abstraction layer that selects between `loom` and `std::sync`
//! synchronization primitives, depending on whether the crate is being
//! built for loom-based concurrency tests.

pub(crate) mod sync {
    // Normal builds: real std primitives.
    #[cfg(not(all(test, loom)))]
    pub(crate) use std::sync::{Arc, Mutex, MutexGuard};

    // Loom model-checking builds: loom's instrumented equivalents.
    #[cfg(all(test, loom))]
    pub(crate) use loom::sync::{Arc, Mutex, MutexGuard};
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/lib.rs
tokio-util/src/lib.rs
#![allow(clippy::needless_doctest_main)] #![warn( missing_debug_implementations, missing_docs, rust_2018_idioms, unreachable_pub )] #![doc(test( no_crate_inject, attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) ))] #![cfg_attr(docsrs, feature(doc_cfg))] //! Utilities for working with Tokio. //! //! This crate is not versioned in lockstep with the core //! [`tokio`] crate. However, `tokio-util` _will_ respect Rust's //! semantic versioning policy, especially with regard to breaking changes. #[macro_use] mod cfg; mod loom; cfg_codec! { #[macro_use] mod tracing; pub mod codec; } cfg_net! { #[cfg(not(target_arch = "wasm32"))] pub mod udp; pub mod net; } cfg_compat! { pub mod compat; } cfg_io! { pub mod io; } cfg_rt! { pub mod context; } #[cfg(feature = "rt")] pub mod task; cfg_time! { pub mod time; } pub mod sync; pub mod either; pub use bytes; mod util; pub mod future;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/future.rs
tokio-util/src/future.rs
//! An extension trait for Futures that provides a variety of convenient adapters. mod with_cancellation_token; use with_cancellation_token::{WithCancellationTokenFuture, WithCancellationTokenFutureOwned}; use std::future::Future; use crate::sync::CancellationToken; /// A trait which contains a variety of convenient adapters and utilities for `Future`s. pub trait FutureExt: Future { cfg_time! { /// A wrapper around [`tokio::time::timeout`], with the advantage that it is easier to write /// fluent call chains. /// /// # Examples /// /// ```rust /// use tokio::{sync::oneshot, time::Duration}; /// use tokio_util::future::FutureExt; /// /// # async fn dox() { /// let (_tx, rx) = oneshot::channel::<()>(); /// /// let res = rx.timeout(Duration::from_millis(10)).await; /// assert!(res.is_err()); /// # } /// ``` #[track_caller] fn timeout(self, timeout: std::time::Duration) -> tokio::time::Timeout<Self> where Self: Sized, { tokio::time::timeout(timeout, self) } /// A wrapper around [`tokio::time::timeout_at`], with the advantage that it is easier to write /// fluent call chains. /// /// # Examples /// /// ```rust /// use tokio::{sync::oneshot, time::{Duration, Instant}}; /// use tokio_util::future::FutureExt; /// /// # async fn dox() { /// let (_tx, rx) = oneshot::channel::<()>(); /// let deadline = Instant::now() + Duration::from_millis(10); /// /// let res = rx.timeout_at(deadline).await; /// assert!(res.is_err()); /// # } /// ``` fn timeout_at(self, deadline: tokio::time::Instant) -> tokio::time::Timeout<Self> where Self: Sized, { tokio::time::timeout_at(deadline, self) } } /// Similar to [`CancellationToken::run_until_cancelled`], /// but with the advantage that it is easier to write fluent call chains. /// /// # Fairness /// /// Calling this on an already-cancelled token directly returns `None`. /// For all subsequent polls, in case of concurrent completion and /// cancellation, this is biased towards the `self` future completion. 
/// /// # Examples /// /// ```rust /// use tokio::sync::oneshot; /// use tokio_util::future::FutureExt; /// use tokio_util::sync::CancellationToken; /// /// # async fn dox() { /// let (_tx, rx) = oneshot::channel::<()>(); /// let token = CancellationToken::new(); /// let token_clone = token.clone(); /// tokio::spawn(async move { /// tokio::time::sleep(std::time::Duration::from_millis(10)).await; /// token.cancel(); /// }); /// assert!(rx.with_cancellation_token(&token_clone).await.is_none()) /// # } /// ``` fn with_cancellation_token( self, cancellation_token: &CancellationToken, ) -> WithCancellationTokenFuture<'_, Self> where Self: Sized, { WithCancellationTokenFuture::new(cancellation_token, self) } /// Similar to [`CancellationToken::run_until_cancelled_owned`], /// but with the advantage that it is easier to write fluent call chains. /// /// # Fairness /// /// Calling this on an already-cancelled token directly returns `None`. /// For all subsequent polls, in case of concurrent completion and /// cancellation, this is biased towards the `self` future completion. /// /// # Examples /// /// ```rust /// use tokio::sync::oneshot; /// use tokio_util::future::FutureExt; /// use tokio_util::sync::CancellationToken; /// /// # async fn dox() { /// let (_tx, rx) = oneshot::channel::<()>(); /// let token = CancellationToken::new(); /// let token_clone = token.clone(); /// tokio::spawn(async move { /// tokio::time::sleep(std::time::Duration::from_millis(10)).await; /// token.cancel(); /// }); /// assert!(rx.with_cancellation_token_owned(token_clone).await.is_none()) /// # } /// ``` fn with_cancellation_token_owned( self, cancellation_token: CancellationToken, ) -> WithCancellationTokenFutureOwned<Self> where Self: Sized, { WithCancellationTokenFutureOwned::new(cancellation_token, self) } } impl<T: Future + ?Sized> FutureExt for T {}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/either.rs
tokio-util/src/either.rs
//! Module defining an Either type. use std::{ future::Future, io::SeekFrom, pin::Pin, task::{Context, Poll}, }; use tokio::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, ReadBuf, Result}; /// Combines two different futures, streams, or sinks having the same associated types into a single type. /// /// This type implements common asynchronous traits such as [`Future`] and those in Tokio. /// /// [`Future`]: std::future::Future /// /// # Example /// /// The following code will not work: /// /// ```compile_fail /// # fn some_condition() -> bool { true } /// # async fn some_async_function() -> u32 { 10 } /// # async fn other_async_function() -> u32 { 20 } /// #[tokio::main] /// async fn main() { /// let result = if some_condition() { /// some_async_function() /// } else { /// other_async_function() // <- Will print: "`if` and `else` have incompatible types" /// }; /// /// println!("Result is {}", result.await); /// } /// ``` /// // This is because although the output types for both futures is the same, the exact future // types are different, but the compiler must be able to choose a single type for the // `result` variable. /// /// When the output type is the same, we can wrap each future in `Either` to avoid the /// issue: /// /// ``` /// use tokio_util::either::Either; /// # fn some_condition() -> bool { true } /// # async fn some_async_function() -> u32 { 10 } /// # async fn other_async_function() -> u32 { 20 } /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let result = if some_condition() { /// Either::Left(some_async_function()) /// } else { /// Either::Right(other_async_function()) /// }; /// /// let value = result.await; /// println!("Result is {}", value); /// # assert_eq!(value, 10); /// # } /// ``` #[allow(missing_docs)] // Doc-comments for variants in this particular case don't make much sense. 
#[derive(Debug, Clone)] pub enum Either<L, R> { Left(L), Right(R), } /// A small helper macro which reduces amount of boilerplate in the actual trait method implementation. /// It takes an invocation of method as an argument (e.g. `self.poll(cx)`), and redirects it to either /// enum variant held in `self`. macro_rules! delegate_call { ($self:ident.$method:ident($($args:ident),+)) => { unsafe { match $self.get_unchecked_mut() { Self::Left(l) => Pin::new_unchecked(l).$method($($args),+), Self::Right(r) => Pin::new_unchecked(r).$method($($args),+), } } } } impl<L, R, O> Future for Either<L, R> where L: Future<Output = O>, R: Future<Output = O>, { type Output = O; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { delegate_call!(self.poll(cx)) } } impl<L, R> AsyncRead for Either<L, R> where L: AsyncRead, R: AsyncRead, { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<Result<()>> { delegate_call!(self.poll_read(cx, buf)) } } impl<L, R> AsyncBufRead for Either<L, R> where L: AsyncBufRead, R: AsyncBufRead, { fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<&[u8]>> { delegate_call!(self.poll_fill_buf(cx)) } fn consume(self: Pin<&mut Self>, amt: usize) { delegate_call!(self.consume(amt)); } } impl<L, R> AsyncSeek for Either<L, R> where L: AsyncSeek, R: AsyncSeek, { fn start_seek(self: Pin<&mut Self>, position: SeekFrom) -> Result<()> { delegate_call!(self.start_seek(position)) } fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<u64>> { delegate_call!(self.poll_complete(cx)) } } impl<L, R> AsyncWrite for Either<L, R> where L: AsyncWrite, R: AsyncWrite, { fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<Result<usize>> { delegate_call!(self.poll_write(cx, buf)) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<tokio::io::Result<()>> { delegate_call!(self.poll_flush(cx)) } fn poll_shutdown(self: Pin<&mut Self>, 
cx: &mut Context<'_>) -> Poll<tokio::io::Result<()>> { delegate_call!(self.poll_shutdown(cx)) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[std::io::IoSlice<'_>], ) -> Poll<std::result::Result<usize, std::io::Error>> { delegate_call!(self.poll_write_vectored(cx, bufs)) } fn is_write_vectored(&self) -> bool { match self { Self::Left(l) => l.is_write_vectored(), Self::Right(r) => r.is_write_vectored(), } } } impl<L, R> futures_core::stream::Stream for Either<L, R> where L: futures_core::stream::Stream, R: futures_core::stream::Stream<Item = L::Item>, { type Item = L::Item; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { delegate_call!(self.poll_next(cx)) } } impl<L, R, Item, Error> futures_sink::Sink<Item> for Either<L, R> where L: futures_sink::Sink<Item, Error = Error>, R: futures_sink::Sink<Item, Error = Error>, { type Error = Error; fn poll_ready( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<std::result::Result<(), Self::Error>> { delegate_call!(self.poll_ready(cx)) } fn start_send(self: Pin<&mut Self>, item: Item) -> std::result::Result<(), Self::Error> { delegate_call!(self.start_send(item)) } fn poll_flush( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<std::result::Result<(), Self::Error>> { delegate_call!(self.poll_flush(cx)) } fn poll_close( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<std::result::Result<(), Self::Error>> { delegate_call!(self.poll_close(cx)) } } #[cfg(all(test, not(loom)))] mod tests { use super::*; use tokio::io::{repeat, AsyncReadExt, Repeat}; use tokio_stream::{once, Once, StreamExt}; #[tokio::test] async fn either_is_stream() { let mut either: Either<Once<u32>, Once<u32>> = Either::Left(once(1)); assert_eq!(Some(1u32), either.next().await); } #[tokio::test] async fn either_is_async_read() { let mut buffer = [0; 3]; let mut either: Either<Repeat, Repeat> = Either::Right(repeat(0b101)); either.read_exact(&mut buffer).await.unwrap(); 
assert_eq!(buffer, [0b101, 0b101, 0b101]); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/tracing.rs
tokio-util/src/tracing.rs
/// Crate-internal logging shim: forwards its arguments to `tracing::trace!`
/// when the `tracing` feature is enabled, and expands to nothing otherwise.
macro_rules! trace {
    ($($arg:tt)*) => {
        #[cfg(feature = "tracing")]
        tracing::trace!($($arg)*);
    };
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/context.rs
tokio-util/src/context.rs
//! Tokio context aware futures utilities. //! //! This module includes utilities around integrating tokio with other runtimes //! by allowing the context to be attached to futures. This allows spawning //! futures on other executors while still using tokio to drive them. This //! can be useful if you need to use a tokio based library in an executor/runtime //! that does not provide a tokio context. use pin_project_lite::pin_project; use std::{ future::Future, pin::Pin, task::{Context, Poll}, }; use tokio::runtime::{Handle, Runtime}; pin_project! { /// `TokioContext` allows running futures that must be inside Tokio's /// context on a non-Tokio runtime. /// /// It contains a [`Handle`] to the runtime. A handle to the runtime can be /// obtain by calling the [`Runtime::handle()`] method. /// /// Note that the `TokioContext` wrapper only works if the `Runtime` it is /// connected to has not yet been destroyed. You must keep the `Runtime` /// alive until the future has finished executing. /// /// **Warning:** If `TokioContext` is used together with a [current thread] /// runtime, that runtime must be inside a call to `block_on` for the /// wrapped future to work. For this reason, it is recommended to use a /// [multi thread] runtime, even if you configure it to only spawn one /// worker thread. /// /// # Examples /// /// This example creates two runtimes, but only [enables time] on one of /// them. It then uses the context of the runtime with the timer enabled to /// execute a [`sleep`] future on the runtime with timing disabled. /// ``` /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::time::{sleep, Duration}; /// use tokio_util::context::RuntimeExt; /// /// // This runtime has timers enabled. /// let rt = tokio::runtime::Builder::new_multi_thread() /// .enable_all() /// .build() /// .unwrap(); /// /// // This runtime has timers disabled. 
/// let rt2 = tokio::runtime::Builder::new_multi_thread() /// .build() /// .unwrap(); /// /// // Wrap the sleep future in the context of rt. /// let fut = rt.wrap(async { sleep(Duration::from_millis(2)).await }); /// /// // Execute the future on rt2. /// rt2.block_on(fut); /// # } /// ``` /// /// [`Handle`]: struct@tokio::runtime::Handle /// [`Runtime::handle()`]: fn@tokio::runtime::Runtime::handle /// [`RuntimeExt`]: trait@crate::context::RuntimeExt /// [`new_static`]: fn@Self::new_static /// [`sleep`]: fn@tokio::time::sleep /// [current thread]: fn@tokio::runtime::Builder::new_current_thread /// [enables time]: fn@tokio::runtime::Builder::enable_time /// [multi thread]: fn@tokio::runtime::Builder::new_multi_thread pub struct TokioContext<F> { #[pin] inner: F, handle: Handle, } } impl<F> TokioContext<F> { /// Associate the provided future with the context of the runtime behind /// the provided `Handle`. /// /// This constructor uses a `'static` lifetime to opt-out of checking that /// the runtime still exists. /// /// # Examples /// /// This is the same as the example above, but uses the `new` constructor /// rather than [`RuntimeExt::wrap`]. /// /// [`RuntimeExt::wrap`]: fn@RuntimeExt::wrap /// /// ``` /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::time::{sleep, Duration}; /// use tokio_util::context::TokioContext; /// /// // This runtime has timers enabled. /// let rt = tokio::runtime::Builder::new_multi_thread() /// .enable_all() /// .build() /// .unwrap(); /// /// // This runtime has timers disabled. /// let rt2 = tokio::runtime::Builder::new_multi_thread() /// .build() /// .unwrap(); /// /// let fut = TokioContext::new( /// async { sleep(Duration::from_millis(2)).await }, /// rt.handle().clone(), /// ); /// /// // Execute the future on rt2. /// rt2.block_on(fut); /// # } /// ``` pub fn new(future: F, handle: Handle) -> TokioContext<F> { TokioContext { inner: future, handle, } } /// Obtain a reference to the handle inside this `TokioContext`. 
pub fn handle(&self) -> &Handle { &self.handle } /// Remove the association between the Tokio runtime and the wrapped future. pub fn into_inner(self) -> F { self.inner } } impl<F: Future> Future for TokioContext<F> { type Output = F::Output; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let me = self.project(); let handle = me.handle; let fut = me.inner; let _enter = handle.enter(); fut.poll(cx) } } /// Extension trait that simplifies bundling a `Handle` with a `Future`. pub trait RuntimeExt { /// Create a [`TokioContext`] that wraps the provided future and runs it in /// this runtime's context. /// /// # Examples /// /// This example creates two runtimes, but only [enables time] on one of /// them. It then uses the context of the runtime with the timer enabled to /// execute a [`sleep`] future on the runtime with timing disabled. /// /// ``` /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::time::{sleep, Duration}; /// use tokio_util::context::RuntimeExt; /// /// // This runtime has timers enabled. /// let rt = tokio::runtime::Builder::new_multi_thread() /// .enable_all() /// .build() /// .unwrap(); /// /// // This runtime has timers disabled. /// let rt2 = tokio::runtime::Builder::new_multi_thread() /// .build() /// .unwrap(); /// /// // Wrap the sleep future in the context of rt. /// let fut = rt.wrap(async { sleep(Duration::from_millis(2)).await }); /// /// // Execute the future on rt2. /// rt2.block_on(fut); /// # } /// ``` /// /// [`TokioContext`]: struct@crate::context::TokioContext /// [`sleep`]: fn@tokio::time::sleep /// [enables time]: fn@tokio::runtime::Builder::enable_time fn wrap<F: Future>(&self, fut: F) -> TokioContext<F>; } impl RuntimeExt for Runtime { fn wrap<F: Future>(&self, fut: F) -> TokioContext<F> { TokioContext { inner: fut, handle: self.handle().clone(), } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/cfg.rs
tokio-util/src/cfg.rs
// Feature-gating helper macros. Each `cfg_*!` wraps the items it receives in
// the corresponding `#[cfg(feature = ...)]` gate and, for docs.rs builds,
// tags them with `doc(cfg(...))` so rendered docs show the required feature.

macro_rules! cfg_codec {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "codec")]
            #[cfg_attr(docsrs, doc(cfg(feature = "codec")))]
            $item
        )*
    }
}

macro_rules! cfg_compat {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "compat")]
            #[cfg_attr(docsrs, doc(cfg(feature = "compat")))]
            $item
        )*
    }
}

// `net` support also requires the codec machinery, hence the double gate.
macro_rules! cfg_net {
    ($($item:item)*) => {
        $(
            #[cfg(all(feature = "net", feature = "codec"))]
            #[cfg_attr(docsrs, doc(cfg(all(feature = "net", feature = "codec"))))]
            $item
        )*
    }
}

macro_rules! cfg_io {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "io")]
            #[cfg_attr(docsrs, doc(cfg(feature = "io")))]
            $item
        )*
    }
}

// `cfg_io_util!` only exists when the `io` feature itself is enabled.
cfg_io! {
    macro_rules! cfg_io_util {
        ($($item:item)*) => {
            $(
                #[cfg(feature = "io-util")]
                #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
                $item
            )*
        }
    }
}

macro_rules! cfg_rt {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "rt")]
            #[cfg_attr(docsrs, doc(cfg(feature = "rt")))]
            $item
        )*
    }
}

macro_rules! cfg_not_rt {
    ($($item:item)*) => {
        $(
            #[cfg(not(feature = "rt"))]
            $item
        )*
    }
}

macro_rules! cfg_time {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "time")]
            #[cfg_attr(docsrs, doc(cfg(feature = "time")))]
            $item
        )*
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/compat.rs
tokio-util/src/compat.rs
//! Compatibility between the `tokio::io` and `futures-io` versions of the //! `AsyncRead` and `AsyncWrite` traits. //! //! ## Bridging Tokio and Futures I/O with `compat()` //! //! The [`compat()`] function provides a compatibility layer that allows types implementing //! [`tokio::io::AsyncRead`] or [`tokio::io::AsyncWrite`] to be used as their //! [`futures::io::AsyncRead`] or [`futures::io::AsyncWrite`] counterparts — and vice versa. //! //! This is especially useful when working with libraries that expect I/O types from one ecosystem //! (usually `futures`) but you are using types from the other (usually `tokio`). //! //! ## Compatibility Overview //! //! | Inner Type Implements... | `Compat<T>` Implements... | //! |-----------------------------|-----------------------------| //! | [`tokio::io::AsyncRead`] | [`futures::io::AsyncRead`] | //! | [`futures::io::AsyncRead`] | [`tokio::io::AsyncRead`] | //! | [`tokio::io::AsyncWrite`] | [`futures::io::AsyncWrite`] | //! | [`futures::io::AsyncWrite`] | [`tokio::io::AsyncWrite`] | //! //! ## Feature Flag //! //! This functionality is available through the `compat` feature flag: //! //! ```toml //! tokio-util = { version = "...", features = ["compat"] } //! ``` //! //! ## Example 1: Tokio -> Futures (`AsyncRead`) //! //! This example demonstrates sending data over a [`tokio::net::TcpStream`] and using //! [`futures::io::AsyncReadExt::read`] from the `futures` crate to read it after adapting the //! stream via [`compat()`]. //! //! ```no_run //! # #[cfg(not(target_family = "wasm"))] //! # { //! use tokio::net::{TcpListener, TcpStream}; //! use tokio::io::AsyncWriteExt; //! use tokio_util::compat::TokioAsyncReadCompatExt; //! use futures::io::AsyncReadExt; //! //! #[tokio::main] //! async fn main() -> std::io::Result<()> { //! let listener = TcpListener::bind("127.0.0.1:8081").await?; //! //! tokio::spawn(async { //! let mut client = TcpStream::connect("127.0.0.1:8081").await.unwrap(); //! 
client.write_all(b"Hello World").await.unwrap(); //! }); //! //! let (stream, _) = listener.accept().await?; //! //! // Adapt `tokio::TcpStream` to be used with `futures::io::AsyncReadExt` //! let mut compat_stream = stream.compat(); //! let mut buffer = [0; 20]; //! let n = compat_stream.read(&mut buffer).await?; //! println!("Received: {}", String::from_utf8_lossy(&buffer[..n])); //! //! Ok(()) //! } //! # } //! ``` //! //! ## Example 2: Futures -> Tokio (`AsyncRead`) //! //! The reverse is also possible: you can take a [`futures::io::AsyncRead`] (e.g. a cursor) and //! adapt it to be used with [`tokio::io::AsyncReadExt::read_to_end`] //! //! ``` //! # #[cfg(not(target_family = "wasm"))] //! # { //! use futures::io::Cursor; //! use tokio_util::compat::FuturesAsyncReadCompatExt; //! use tokio::io::AsyncReadExt; //! //! fn main() { //! let future = async { //! let reader = Cursor::new(b"Hello from futures"); //! let mut compat_reader = reader.compat(); //! let mut buf = Vec::new(); //! compat_reader.read_to_end(&mut buf).await.unwrap(); //! assert_eq!(&buf, b"Hello from futures"); //! }; //! //! // Run the future inside a Tokio runtime //! tokio::runtime::Runtime::new().unwrap().block_on(future); //! } //! # } //! ``` //! //! ## Common Use Cases //! //! - Using `tokio` sockets with `async-tungstenite`, `async-compression`, or `futures-rs`-based //! libraries. //! - Bridging I/O interfaces between mixed-ecosystem libraries. //! - Avoiding rewrites or duplication of I/O code in async environments. //! //! ## See Also //! //! - [`Compat`] type //! - [`TokioAsyncReadCompatExt`] //! - [`FuturesAsyncReadCompatExt`] //! - [`tokio::io`] //! - [`futures::io`] //! //! [`futures::io`]: https://docs.rs/futures/latest/futures/io/ //! [`futures::io::AsyncRead`]: https://docs.rs/futures/latest/futures/io/trait.AsyncRead.html //! [`futures::io::AsyncWrite`]: https://docs.rs/futures/latest/futures/io/trait.AsyncWrite.html //! 
[`futures::io::AsyncReadExt::read`]: https://docs.rs/futures/latest/futures/io/trait.AsyncReadExt.html#method.read //! [`compat()`]: TokioAsyncReadCompatExt::compat use pin_project_lite::pin_project; use std::io; use std::pin::Pin; use std::task::{ready, Context, Poll}; pin_project! { /// A compatibility layer that allows conversion between the /// `tokio::io` and `futures-io` `AsyncRead` and `AsyncWrite` traits. #[derive(Copy, Clone, Debug)] pub struct Compat<T> { #[pin] inner: T, seek_pos: Option<io::SeekFrom>, } } /// Extension trait that allows converting a type implementing /// `futures_io::AsyncRead` to implement `tokio::io::AsyncRead`. pub trait FuturesAsyncReadCompatExt: futures_io::AsyncRead { /// Wraps `self` with a compatibility layer that implements /// `tokio_io::AsyncRead`. fn compat(self) -> Compat<Self> where Self: Sized, { Compat::new(self) } } impl<T: futures_io::AsyncRead> FuturesAsyncReadCompatExt for T {} /// Extension trait that allows converting a type implementing /// `futures_io::AsyncWrite` to implement `tokio::io::AsyncWrite`. pub trait FuturesAsyncWriteCompatExt: futures_io::AsyncWrite { /// Wraps `self` with a compatibility layer that implements /// `tokio::io::AsyncWrite`. fn compat_write(self) -> Compat<Self> where Self: Sized, { Compat::new(self) } } impl<T: futures_io::AsyncWrite> FuturesAsyncWriteCompatExt for T {} /// Extension trait that allows converting a type implementing /// `tokio::io::AsyncRead` to implement `futures_io::AsyncRead`. pub trait TokioAsyncReadCompatExt: tokio::io::AsyncRead { /// Wraps `self` with a compatibility layer that implements /// `futures_io::AsyncRead`. fn compat(self) -> Compat<Self> where Self: Sized, { Compat::new(self) } } impl<T: tokio::io::AsyncRead> TokioAsyncReadCompatExt for T {} /// Extension trait that allows converting a type implementing /// `tokio::io::AsyncWrite` to implement `futures_io::AsyncWrite`. 
pub trait TokioAsyncWriteCompatExt: tokio::io::AsyncWrite { /// Wraps `self` with a compatibility layer that implements /// `futures_io::AsyncWrite`. fn compat_write(self) -> Compat<Self> where Self: Sized, { Compat::new(self) } } impl<T: tokio::io::AsyncWrite> TokioAsyncWriteCompatExt for T {} // === impl Compat === impl<T> Compat<T> { fn new(inner: T) -> Self { Self { inner, seek_pos: None, } } /// Get a reference to the `Future`, `Stream`, `AsyncRead`, or `AsyncWrite` object /// contained within. pub fn get_ref(&self) -> &T { &self.inner } /// Get a mutable reference to the `Future`, `Stream`, `AsyncRead`, or `AsyncWrite` object /// contained within. pub fn get_mut(&mut self) -> &mut T { &mut self.inner } /// Returns the wrapped item. pub fn into_inner(self) -> T { self.inner } } impl<T> tokio::io::AsyncRead for Compat<T> where T: futures_io::AsyncRead, { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut tokio::io::ReadBuf<'_>, ) -> Poll<io::Result<()>> { // We can't trust the inner type to not peak at the bytes, // so we must defensively initialize the buffer. 
let slice = buf.initialize_unfilled(); let n = ready!(futures_io::AsyncRead::poll_read( self.project().inner, cx, slice ))?; buf.advance(n); Poll::Ready(Ok(())) } } impl<T> futures_io::AsyncRead for Compat<T> where T: tokio::io::AsyncRead, { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, slice: &mut [u8], ) -> Poll<io::Result<usize>> { let mut buf = tokio::io::ReadBuf::new(slice); ready!(tokio::io::AsyncRead::poll_read( self.project().inner, cx, &mut buf ))?; Poll::Ready(Ok(buf.filled().len())) } } impl<T> tokio::io::AsyncBufRead for Compat<T> where T: futures_io::AsyncBufRead, { fn poll_fill_buf<'a>( self: Pin<&'a mut Self>, cx: &mut Context<'_>, ) -> Poll<io::Result<&'a [u8]>> { futures_io::AsyncBufRead::poll_fill_buf(self.project().inner, cx) } fn consume(self: Pin<&mut Self>, amt: usize) { futures_io::AsyncBufRead::consume(self.project().inner, amt) } } impl<T> futures_io::AsyncBufRead for Compat<T> where T: tokio::io::AsyncBufRead, { fn poll_fill_buf<'a>( self: Pin<&'a mut Self>, cx: &mut Context<'_>, ) -> Poll<io::Result<&'a [u8]>> { tokio::io::AsyncBufRead::poll_fill_buf(self.project().inner, cx) } fn consume(self: Pin<&mut Self>, amt: usize) { tokio::io::AsyncBufRead::consume(self.project().inner, amt) } } impl<T> tokio::io::AsyncWrite for Compat<T> where T: futures_io::AsyncWrite, { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { futures_io::AsyncWrite::poll_write(self.project().inner, cx, buf) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { futures_io::AsyncWrite::poll_flush(self.project().inner, cx) } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { futures_io::AsyncWrite::poll_close(self.project().inner, cx) } } impl<T> futures_io::AsyncWrite for Compat<T> where T: tokio::io::AsyncWrite, { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { 
tokio::io::AsyncWrite::poll_write(self.project().inner, cx, buf) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { tokio::io::AsyncWrite::poll_flush(self.project().inner, cx) } fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { tokio::io::AsyncWrite::poll_shutdown(self.project().inner, cx) } } impl<T: tokio::io::AsyncSeek> futures_io::AsyncSeek for Compat<T> { fn poll_seek( mut self: Pin<&mut Self>, cx: &mut Context<'_>, pos: io::SeekFrom, ) -> Poll<io::Result<u64>> { if self.seek_pos != Some(pos) { // Ensure previous seeks have finished before starting a new one ready!(self.as_mut().project().inner.poll_complete(cx))?; self.as_mut().project().inner.start_seek(pos)?; *self.as_mut().project().seek_pos = Some(pos); } let res = ready!(self.as_mut().project().inner.poll_complete(cx)); *self.as_mut().project().seek_pos = None; Poll::Ready(res) } } impl<T: futures_io::AsyncSeek> tokio::io::AsyncSeek for Compat<T> { fn start_seek(mut self: Pin<&mut Self>, pos: io::SeekFrom) -> io::Result<()> { *self.as_mut().project().seek_pos = Some(pos); Ok(()) } fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> { let pos = match self.seek_pos { None => { // tokio 1.x AsyncSeek recommends calling poll_complete before start_seek. // We don't have to guarantee that the value returned by // poll_complete called without start_seek is correct, // so we'll return 0. 
return Poll::Ready(Ok(0)); } Some(pos) => pos, }; let res = ready!(self.as_mut().project().inner.poll_seek(cx, pos)); *self.as_mut().project().seek_pos = None; Poll::Ready(res) } } #[cfg(unix)] impl<T: std::os::unix::io::AsRawFd> std::os::unix::io::AsRawFd for Compat<T> { fn as_raw_fd(&self) -> std::os::unix::io::RawFd { self.inner.as_raw_fd() } } #[cfg(windows)] impl<T: std::os::windows::io::AsRawHandle> std::os::windows::io::AsRawHandle for Compat<T> { fn as_raw_handle(&self) -> std::os::windows::io::RawHandle { self.inner.as_raw_handle() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/codec/framed_write.rs
tokio-util/src/codec/framed_write.rs
use crate::codec::encoder::Encoder; use crate::codec::framed_impl::{FramedImpl, WriteFrame}; use futures_core::Stream; use tokio::io::AsyncWrite; use bytes::BytesMut; use futures_sink::Sink; use pin_project_lite::pin_project; use std::fmt; use std::io; use std::pin::Pin; use std::task::{Context, Poll}; use super::FramedParts; pin_project! { /// A [`Sink`] of frames encoded to an `AsyncWrite`. /// /// For examples of how to use `FramedWrite` with a codec, see the /// examples on the [`codec`] module. /// /// # Cancellation safety /// /// * [`futures_util::sink::SinkExt::send`]: if send is used as the event in a /// `tokio::select!` statement and some other branch completes first, then it is /// guaranteed that the message was not sent, but the message itself is lost. /// /// [`Sink`]: futures_sink::Sink /// [`codec`]: crate::codec /// [`futures_util::sink::SinkExt::send`]: futures_util::sink::SinkExt::send pub struct FramedWrite<T, E> { #[pin] inner: FramedImpl<T, E, WriteFrame>, } } impl<T, E> FramedWrite<T, E> { /// Creates a new `FramedWrite` with the given `encoder`. pub fn new(inner: T, encoder: E) -> FramedWrite<T, E> { FramedWrite { inner: FramedImpl { inner, codec: encoder, state: WriteFrame::default(), }, } } /// Creates a new `FramedWrite` with the given `encoder` and a buffer of `capacity` /// initial size. pub fn with_capacity(inner: T, encoder: E, capacity: usize) -> FramedWrite<T, E> { FramedWrite { inner: FramedImpl { inner, codec: encoder, state: WriteFrame { buffer: BytesMut::with_capacity(capacity), backpressure_boundary: capacity, }, }, } } /// Returns a reference to the underlying I/O stream wrapped by /// `FramedWrite`. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn get_ref(&self) -> &T { &self.inner.inner } /// Returns a mutable reference to the underlying I/O stream wrapped by /// `FramedWrite`. 
/// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn get_mut(&mut self) -> &mut T { &mut self.inner.inner } /// Returns a pinned mutable reference to the underlying I/O stream wrapped by /// `FramedWrite`. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { self.project().inner.project().inner } /// Consumes the `FramedWrite`, returning its underlying I/O stream. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn into_inner(self) -> T { self.inner.inner } /// Returns a reference to the underlying encoder. pub fn encoder(&self) -> &E { &self.inner.codec } /// Returns a mutable reference to the underlying encoder. pub fn encoder_mut(&mut self) -> &mut E { &mut self.inner.codec } /// Maps the encoder `E` to `C`, preserving the write buffer /// wrapped by `Framed`. pub fn map_encoder<C, F>(self, map: F) -> FramedWrite<T, C> where F: FnOnce(E) -> C, { // This could be potentially simplified once rust-lang/rust#86555 hits stable let FramedImpl { inner, state, codec, } = self.inner; FramedWrite { inner: FramedImpl { inner, state, codec: map(codec), }, } } /// Returns a mutable reference to the underlying encoder. pub fn encoder_pin_mut(self: Pin<&mut Self>) -> &mut E { self.project().inner.project().codec } /// Returns a reference to the write buffer. pub fn write_buffer(&self) -> &BytesMut { &self.inner.state.buffer } /// Returns a mutable reference to the write buffer. 
pub fn write_buffer_mut(&mut self) -> &mut BytesMut { &mut self.inner.state.buffer } /// Returns backpressure boundary pub fn backpressure_boundary(&self) -> usize { self.inner.state.backpressure_boundary } /// Updates backpressure boundary pub fn set_backpressure_boundary(&mut self, boundary: usize) { self.inner.state.backpressure_boundary = boundary; } /// Consumes the `FramedWrite`, returning its underlying I/O stream, the buffer /// with unprocessed data, and the codec. pub fn into_parts(self) -> FramedParts<T, E> { FramedParts { io: self.inner.inner, codec: self.inner.codec, read_buf: BytesMut::new(), write_buf: self.inner.state.buffer, _priv: (), } } } // This impl just defers to the underlying FramedImpl impl<T, I, E> Sink<I> for FramedWrite<T, E> where T: AsyncWrite, E: Encoder<I>, E::Error: From<io::Error>, { type Error = E::Error; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { self.project().inner.poll_ready(cx) } fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { self.project().inner.start_send(item) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { self.project().inner.poll_flush(cx) } fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { self.project().inner.poll_close(cx) } } // This impl just defers to the underlying T: Stream impl<T, D> Stream for FramedWrite<T, D> where T: Stream, { type Item = T::Item; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { self.project().inner.project().inner.poll_next(cx) } } impl<T, U> fmt::Debug for FramedWrite<T, U> where T: fmt::Debug, U: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("FramedWrite") .field("inner", &self.get_ref()) .field("encoder", &self.encoder()) .field("buffer", &self.inner.state.buffer) .finish() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/codec/encoder.rs
tokio-util/src/codec/encoder.rs
use bytes::BytesMut; use std::io; /// Trait of helper objects to write out messages as bytes, for use with /// [`FramedWrite`]. /// /// [`FramedWrite`]: crate::codec::FramedWrite pub trait Encoder<Item> { /// The type of encoding errors. /// /// [`FramedWrite`] requires `Encoder`s errors to implement `From<io::Error>` /// in the interest of letting it return `Error`s directly. /// /// [`FramedWrite`]: crate::codec::FramedWrite type Error: From<io::Error>; /// Encodes a frame into the buffer provided. /// /// This method will encode `item` into the byte buffer provided by `dst`. /// The `dst` provided is an internal buffer of the [`FramedWrite`] instance and /// will be written out when possible. /// /// [`FramedWrite`]: crate::codec::FramedWrite fn encode(&mut self, item: Item, dst: &mut BytesMut) -> Result<(), Self::Error>; }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/codec/length_delimited.rs
tokio-util/src/codec/length_delimited.rs
//! Frame a stream of bytes based on a length prefix //! //! Many protocols delimit their frames by prefacing frame data with a //! frame head that specifies the length of the frame. The //! `length_delimited` module provides utilities for handling the length //! based framing. This allows the consumer to work with entire frames //! without having to worry about buffering or other framing logic. //! //! # Getting started //! //! If implementing a protocol from scratch, using length delimited framing //! is an easy way to get started. [`LengthDelimitedCodec::new()`] will //! return a length delimited codec using default configuration values. //! This can then be used to construct a framer to adapt a full-duplex //! byte stream into a stream of frames. //! //! ``` //! use tokio::io::{AsyncRead, AsyncWrite}; //! use tokio_util::codec::{Framed, LengthDelimitedCodec}; //! //! fn bind_transport<T: AsyncRead + AsyncWrite>(io: T) //! -> Framed<T, LengthDelimitedCodec> //! { //! Framed::new(io, LengthDelimitedCodec::new()) //! } //! # pub fn main() {} //! ``` //! //! The returned transport implements `Sink + Stream` for `BytesMut`. It //! encodes the frame with a big-endian `u32` header denoting the frame //! payload length: //! //! ```text //! +----------+--------------------------------+ //! | len: u32 | frame payload | //! +----------+--------------------------------+ //! ``` //! //! Specifically, given the following: //! //! ``` //! use tokio::io::{AsyncRead, AsyncWrite}; //! use tokio_util::codec::{Framed, LengthDelimitedCodec}; //! //! use futures::SinkExt; //! use bytes::Bytes; //! //! async fn write_frame<T>(io: T) -> Result<(), Box<dyn std::error::Error>> //! where //! T: AsyncRead + AsyncWrite + Unpin, //! { //! let mut transport = Framed::new(io, LengthDelimitedCodec::new()); //! let frame = Bytes::from("hello world"); //! //! transport.send(frame).await?; //! Ok(()) //! } //! ``` //! //! The encoded frame will look like this: //! //! ```text //! 
+---- len: u32 ----+---- data ----+ //! | \x00\x00\x00\x0b | hello world | //! +------------------+--------------+ //! ``` //! //! # Decoding //! //! [`FramedRead`] adapts an [`AsyncRead`] into a `Stream` of [`BytesMut`], //! such that each yielded [`BytesMut`] value contains the contents of an //! entire frame. There are many configuration parameters enabling //! [`FramedRead`] to handle a wide range of protocols. Here are some //! examples that will cover the various options at a high level. //! //! ## Example 1 //! //! The following will parse a `u16` length field at offset 0, omitting the //! frame head in the yielded `BytesMut`. //! //! ``` //! # use tokio_stream::StreamExt; //! # use tokio_util::codec::LengthDelimitedCodec; //! # #[tokio::main(flavor = "current_thread")] //! # async fn main() { //! # let io: &[u8] = b"\x00\x0BHello world"; //! let mut reader = LengthDelimitedCodec::builder() //! .length_field_offset(0) // default value //! .length_field_type::<u16>() //! .length_adjustment(0) // default value //! .new_read(io); //! # let res = reader.next().await.unwrap().unwrap().to_vec(); //! # assert_eq!(res, b"Hello world"); //! # } //! ``` //! //! The following frame will be decoded as such: //! //! ```text //! INPUT DECODED //! +-- len ---+--- Payload ---+ +--- Payload ---+ //! | \x00\x0B | Hello world | --> | Hello world | //! +----------+---------------+ +---------------+ //! ``` //! //! The value of the length field is 11 (`\x0B`) which represents the length //! of the payload, `hello world`. By default, [`FramedRead`] assumes that //! the length field represents the number of bytes that **follows** the //! length field. Thus, the entire frame has a length of 13: 2 bytes for the //! frame head + 11 bytes for the payload. //! //! ## Example 2 //! //! The following will parse a `u16` length field at offset 0, including the //! frame head in the yielded `BytesMut`. //! //! ``` //! # use tokio_stream::StreamExt; //! 
# use tokio_util::codec::LengthDelimitedCodec; //! # #[tokio::main(flavor = "current_thread")] //! # async fn main() { //! # let io: &[u8] = b"\x00\x0BHello world"; //! let mut reader = LengthDelimitedCodec::builder() //! .length_field_offset(0) // default value //! .length_field_type::<u16>() //! .length_adjustment(2) // Add head size to length //! .num_skip(0) // Do NOT skip the head //! .new_read(io); //! # let res = reader.next().await.unwrap().unwrap().to_vec(); //! # assert_eq!(res, b"\x00\x0BHello world"); //! # } //! ``` //! //! The following frame will be decoded as such: //! //! ```text //! INPUT DECODED //! +-- len ---+--- Payload ---+ +-- len ---+--- Payload ---+ //! | \x00\x0B | Hello world | --> | \x00\x0B | Hello world | //! +----------+---------------+ +----------+---------------+ //! ``` //! //! This is similar to the first example, the only difference is that the //! frame head is **included** in the yielded `BytesMut` value. To achieve //! this, we need to add the header size to the length with `length_adjustment`, //! and set `num_skip` to `0` to prevent skipping the head. //! //! ## Example 3 //! //! The following will parse a `u16` length field at offset 0, omitting the //! frame head in the yielded `BytesMut`. In this case, the length field //! **includes** the frame head length. //! //! ``` //! # use tokio_stream::StreamExt; //! # use tokio_util::codec::LengthDelimitedCodec; //! # #[tokio::main(flavor = "current_thread")] //! # async fn main() { //! # let io: &[u8] = b"\x00\x0DHello world"; //! let mut reader = LengthDelimitedCodec::builder() //! .length_field_offset(0) // default value //! .length_field_type::<u16>() //! .length_adjustment(-2) // size of head //! .new_read(io); //! # let res = reader.next().await.unwrap().unwrap().to_vec(); //! # assert_eq!(res, b"Hello world"); //! # } //! ``` //! //! The following frame will be decoded as such: //! //! ```text //! INPUT DECODED //! +-- len ---+--- Payload ---+ +--- Payload ---+ //! 
| \x00\x0D | Hello world | --> | Hello world | //! +----------+---------------+ +---------------+ //! ``` //! //! In most cases, the length field represents the length of the payload //! only, as shown in the previous examples. However, in some protocols the //! length field represents the length of the whole frame, including the //! head. In such cases, we specify a negative `length_adjustment` to adjust //! the value provided in the frame head to represent the payload length. //! //! ## Example 4 //! //! The following will parse a 3 byte length field at offset 0 in a 5 byte //! frame head, including the frame head in the yielded `BytesMut`. //! //! ``` //! # use tokio_stream::StreamExt; //! # use tokio_util::codec::LengthDelimitedCodec; //! # #[tokio::main(flavor = "current_thread")] //! # async fn main() { //! # let io: &[u8] = b"\x00\x00\x0B\xCA\xFEHello world"; //! let mut reader = LengthDelimitedCodec::builder() //! .length_field_offset(0) // default value //! .length_field_length(3) //! .length_adjustment(3 + 2) // len field and remaining head //! .num_skip(0) //! .new_read(io); //! # let res = reader.next().await.unwrap().unwrap().to_vec(); //! # assert_eq!(res, b"\x00\x00\x0B\xCA\xFEHello world"); //! # } //! ``` //! //! The following frame will be decoded as such: //! //! ```text //! INPUT //! +---- len -----+- head -+--- Payload ---+ //! | \x00\x00\x0B | \xCAFE | Hello world | //! +--------------+--------+---------------+ //! //! DECODED //! +---- len -----+- head -+--- Payload ---+ //! | \x00\x00\x0B | \xCAFE | Hello world | //! +--------------+--------+---------------+ //! ``` //! //! A more advanced example that shows a case where there is extra frame //! head data between the length field and the payload. In such cases, it is //! usually desirable to include the frame head as part of the yielded //! `BytesMut`. This lets consumers of the length delimited framer to //! process the frame head as needed. //! //! 
The positive `length_adjustment` value lets `FramedRead` factor in the //! additional head into the frame length calculation. //! //! ## Example 5 //! //! The following will parse a `u16` length field at offset 1 of a 4 byte //! frame head. The first byte and the length field will be omitted from the //! yielded `BytesMut`, but the trailing 2 bytes of the frame head will be //! included. //! //! ``` //! # use tokio_stream::StreamExt; //! # use tokio_util::codec::LengthDelimitedCodec; //! # #[tokio::main(flavor = "current_thread")] //! # async fn main() { //! # let io: &[u8] = b"\xCA\x00\x0B\xFEHello world"; //! let mut reader = LengthDelimitedCodec::builder() //! .length_field_offset(1) // length of hdr1 //! .length_field_type::<u16>() //! .length_adjustment(1) // length of hdr2 //! .num_skip(3) // length of hdr1 + LEN //! .new_read(io); //! # let res = reader.next().await.unwrap().unwrap().to_vec(); //! # assert_eq!(res, b"\xFEHello world"); //! # } //! ``` //! //! The following frame will be decoded as such: //! //! ```text //! INPUT //! +- hdr1 -+-- len ---+- hdr2 -+--- Payload ---+ //! | \xCA | \x00\x0B | \xFE | Hello world | //! +--------+----------+--------+---------------+ //! //! DECODED //! +- hdr2 -+--- Payload ---+ //! | \xFE | Hello world | //! +--------+---------------+ //! ``` //! //! The length field is situated in the middle of the frame head. In this //! case, the first byte in the frame head could be a version or some other //! identifier that is not needed for processing. On the other hand, the //! second half of the head is needed. //! //! `length_field_offset` indicates how many bytes to skip before starting //! to read the length field. `length_adjustment` is the number of bytes to //! skip starting at the end of the length field. In this case, it is the //! second half of the head. //! //! ## Example 6 //! //! The following will parse a `u16` length field at offset 1 of a 4 byte //! frame head. 
The first byte and the length field will be omitted from the //! yielded `BytesMut`, but the trailing 2 bytes of the frame head will be //! included. In this case, the length field **includes** the frame head //! length. //! //! ``` //! # use tokio_stream::StreamExt; //! # use tokio_util::codec::LengthDelimitedCodec; //! # #[tokio::main(flavor = "current_thread")] //! # async fn main() { //! # let io: &[u8] = b"\xCA\x00\x0F\xFEHello world"; //! let mut reader = LengthDelimitedCodec::builder() //! .length_field_offset(1) // length of hdr1 //! .length_field_type::<u16>() //! .length_adjustment(-3) // length of hdr1 + LEN, negative //! .num_skip(3) //! .new_read(io); //! # let res = reader.next().await.unwrap().unwrap().to_vec(); //! # assert_eq!(res, b"\xFEHello world"); //! # } //! ``` //! //! The following frame will be decoded as such: //! //! ```text //! INPUT //! +- hdr1 -+-- len ---+- hdr2 -+--- Payload ---+ //! | \xCA | \x00\x0F | \xFE | Hello world | //! +--------+----------+--------+---------------+ //! //! DECODED //! +- hdr2 -+--- Payload ---+ //! | \xFE | Hello world | //! +--------+---------------+ //! ``` //! //! Similar to the example above, the difference is that the length field //! represents the length of the entire frame instead of just the payload. //! The length of `hdr1` and `len` must be counted in `length_adjustment`. //! Note that the length of `hdr2` does **not** need to be explicitly set //! anywhere because it already is factored into the total frame length that //! is read from the byte stream. //! //! ## Example 7 //! //! The following will parse a 3 byte length field at offset 0 in a 4 byte //! frame head, excluding the 4th byte from the yielded `BytesMut`. //! //! ``` //! # use tokio_stream::StreamExt; //! # use tokio_util::codec::LengthDelimitedCodec; //! # #[tokio::main(flavor = "current_thread")] //! # async fn main() { //! # let io: &[u8] = b"\x00\x00\x0B\xFFHello world"; //! let mut reader = LengthDelimitedCodec::builder() //! 
.length_field_offset(0) // default value //! .length_field_length(3) //! .length_adjustment(0) // default value //! .num_skip(4) // skip the first 4 bytes //! .new_read(io); //! # let res = reader.next().await.unwrap().unwrap().to_vec(); //! # assert_eq!(res, b"Hello world"); //! # } //! ``` //! //! The following frame will be decoded as such: //! //! ```text //! INPUT DECODED //! +------- len ------+--- Payload ---+ +--- Payload ---+ //! | \x00\x00\x0B\xFF | Hello world | => | Hello world | //! +------------------+---------------+ +---------------+ //! ``` //! //! A simple example where there are unused bytes between the length field //! and the payload. //! //! # Encoding //! //! [`FramedWrite`] adapts an [`AsyncWrite`] into a `Sink` of [`BytesMut`], //! such that each submitted [`BytesMut`] is prefaced by a length field. //! There are fewer configuration options than [`FramedRead`]. Given //! protocols that have more complex frame heads, an encoder should probably //! be written by hand using [`Encoder`]. //! //! Here is a simple example, given a `FramedWrite` with the following //! configuration: //! //! ``` //! # use tokio::io::AsyncWrite; //! # use tokio_util::codec::LengthDelimitedCodec; //! # fn write_frame<T: AsyncWrite>(io: T) { //! # let _ = //! LengthDelimitedCodec::builder() //! .length_field_type::<u16>() //! .new_write(io); //! # } //! # pub fn main() {} //! ``` //! //! A payload of `hello world` will be encoded as: //! //! ```text //! +- len: u16 -+---- data ----+ //! | \x00\x0b | hello world | //! +------------+--------------+ //! ``` //! //! [`LengthDelimitedCodec::new()`]: method@LengthDelimitedCodec::new //! [`FramedRead`]: struct@FramedRead //! [`FramedWrite`]: struct@FramedWrite //! [`AsyncRead`]: trait@tokio::io::AsyncRead //! [`AsyncWrite`]: trait@tokio::io::AsyncWrite //! [`Encoder`]: trait@Encoder //! 
[`BytesMut`]: bytes::BytesMut use crate::codec::{Decoder, Encoder, Framed, FramedRead, FramedWrite}; use tokio::io::{AsyncRead, AsyncWrite}; use bytes::{Buf, BufMut, Bytes, BytesMut}; use std::error::Error as StdError; use std::io::{self, Cursor}; use std::{cmp, fmt, mem}; /// Configure length delimited `LengthDelimitedCodec`s. /// /// `Builder` enables constructing configured length delimited codecs. Note /// that not all configuration settings apply to both encoding and decoding. See /// the documentation for specific methods for more detail. /// /// Note that the if the value of [`Builder::max_frame_length`] becomes larger than /// what can actually fit in [`Builder::length_field_length`], it will be clipped to /// the maximum value that can fit. #[derive(Debug, Clone, Copy)] pub struct Builder { // Maximum frame length max_frame_len: usize, // Number of bytes representing the field length length_field_len: usize, // Number of bytes in the header before the length field length_field_offset: usize, // Adjust the length specified in the header field by this amount length_adjustment: isize, // Total number of bytes to skip before reading the payload, if not set, // `length_field_len + length_field_offset` num_skip: Option<usize>, // Length field byte order (little or big endian) length_field_is_big_endian: bool, } /// An error when the number of bytes read is more than max frame length. pub struct LengthDelimitedCodecError { _priv: (), } /// A codec for frames delimited by a frame head specifying their lengths. /// /// This allows the consumer to work with entire frames without having to worry /// about buffering or other framing logic. /// /// See [module level] documentation for more detail. 
/// /// [module level]: index.html #[derive(Debug, Clone)] pub struct LengthDelimitedCodec { // Configuration values builder: Builder, // Read state state: DecodeState, } #[derive(Debug, Clone, Copy)] enum DecodeState { Head, Data(usize), } // ===== impl LengthDelimitedCodec ====== impl LengthDelimitedCodec { /// Creates a new `LengthDelimitedCodec` with the default configuration values. pub fn new() -> Self { Self { builder: Builder::new(), state: DecodeState::Head, } } /// Creates a new length delimited codec builder with default configuration /// values. pub fn builder() -> Builder { Builder::new() } /// Returns the current max frame setting /// /// This is the largest size this codec will accept from the wire. Larger /// frames will be rejected. pub fn max_frame_length(&self) -> usize { self.builder.max_frame_len } /// Updates the max frame setting. /// /// The change takes effect the next time a frame is decoded. In other /// words, if a frame is currently in process of being decoded with a frame /// size greater than `val` but less than the max frame length in effect /// before calling this function, then the frame will be allowed. 
pub fn set_max_frame_length(&mut self, val: usize) { self.builder.max_frame_length(val); } fn decode_head(&mut self, src: &mut BytesMut) -> io::Result<Option<usize>> { let head_len = self.builder.num_head_bytes(); let field_len = self.builder.length_field_len; if src.len() < head_len { // Not enough data return Ok(None); } let n = { let mut src = Cursor::new(&mut *src); // Skip the required bytes src.advance(self.builder.length_field_offset); // match endianness let n = if self.builder.length_field_is_big_endian { src.get_uint(field_len) } else { src.get_uint_le(field_len) }; if n > self.builder.max_frame_len as u64 { return Err(io::Error::new( io::ErrorKind::InvalidData, LengthDelimitedCodecError { _priv: () }, )); } // The check above ensures there is no overflow let n = n as usize; // Adjust `n` with bounds checking let n = if self.builder.length_adjustment < 0 { n.checked_sub(-self.builder.length_adjustment as usize) } else { n.checked_add(self.builder.length_adjustment as usize) }; // Error handling match n { Some(n) => n, None => { return Err(io::Error::new( io::ErrorKind::InvalidInput, "provided length would overflow after adjustment", )); } } }; src.advance(self.builder.get_num_skip()); // Ensure that the buffer has enough space to read the incoming // payload src.reserve(n.saturating_sub(src.len())); Ok(Some(n)) } fn decode_data(&self, n: usize, src: &mut BytesMut) -> Option<BytesMut> { // At this point, the buffer has already had the required capacity // reserved. All there is to do is read. if src.len() < n { return None; } Some(src.split_to(n)) } } impl Decoder for LengthDelimitedCodec { type Item = BytesMut; type Error = io::Error; fn decode(&mut self, src: &mut BytesMut) -> io::Result<Option<BytesMut>> { let n = match self.state { DecodeState::Head => match self.decode_head(src)? 
{ Some(n) => { self.state = DecodeState::Data(n); n } None => return Ok(None), }, DecodeState::Data(n) => n, }; match self.decode_data(n, src) { Some(data) => { // Update the decode state self.state = DecodeState::Head; // Make sure the buffer has enough space to read the next head src.reserve(self.builder.num_head_bytes().saturating_sub(src.len())); Ok(Some(data)) } None => Ok(None), } } } impl Encoder<Bytes> for LengthDelimitedCodec { type Error = io::Error; fn encode(&mut self, data: Bytes, dst: &mut BytesMut) -> Result<(), io::Error> { let n = data.len(); if n > self.builder.max_frame_len { return Err(io::Error::new( io::ErrorKind::InvalidInput, LengthDelimitedCodecError { _priv: () }, )); } // Adjust `n` with bounds checking let n = if self.builder.length_adjustment < 0 { n.checked_add(-self.builder.length_adjustment as usize) } else { n.checked_sub(self.builder.length_adjustment as usize) }; let n = n.ok_or_else(|| { io::Error::new( io::ErrorKind::InvalidInput, "provided length would overflow after adjustment", ) })?; // Reserve capacity in the destination buffer to fit the frame and // length field (plus adjustment). dst.reserve(self.builder.length_field_len + n); if self.builder.length_field_is_big_endian { dst.put_uint(n as u64, self.builder.length_field_len); } else { dst.put_uint_le(n as u64, self.builder.length_field_len); } // Write the frame to the buffer dst.extend_from_slice(&data[..]); Ok(()) } } impl Default for LengthDelimitedCodec { fn default() -> Self { Self::new() } } // ===== impl Builder ===== mod builder { /// Types that can be used with `Builder::length_field_type`. 
pub trait LengthFieldType {} impl LengthFieldType for u8 {} impl LengthFieldType for u16 {} impl LengthFieldType for u32 {} impl LengthFieldType for u64 {} #[cfg(any( target_pointer_width = "16", target_pointer_width = "32", target_pointer_width = "64", ))] impl LengthFieldType for usize {} } impl Builder { /// Creates a new length delimited codec builder with default configuration /// values. /// /// # Examples /// /// ``` /// # use tokio::io::AsyncRead; /// use tokio_util::codec::LengthDelimitedCodec; /// /// # fn bind_read<T: AsyncRead>(io: T) { /// LengthDelimitedCodec::builder() /// .length_field_offset(0) /// .length_field_type::<u16>() /// .length_adjustment(0) /// .num_skip(0) /// .new_read(io); /// # } /// # pub fn main() {} /// ``` pub fn new() -> Builder { Builder { // Default max frame length of 8MB max_frame_len: 8 * 1_024 * 1_024, // Default byte length of 4 length_field_len: 4, // Default to the header field being at the start of the header. length_field_offset: 0, length_adjustment: 0, // Total number of bytes to skip before reading the payload, if not set, // `length_field_len + length_field_offset` num_skip: None, // Default to reading the length field in network (big) endian. length_field_is_big_endian: true, } } /// Read the length field as a big endian integer /// /// This is the default setting. /// /// This configuration option applies to both encoding and decoding. /// /// # Examples /// /// ``` /// # use tokio::io::AsyncRead; /// use tokio_util::codec::LengthDelimitedCodec; /// /// # fn bind_read<T: AsyncRead>(io: T) { /// LengthDelimitedCodec::builder() /// .big_endian() /// .new_read(io); /// # } /// # pub fn main() {} /// ``` pub fn big_endian(&mut self) -> &mut Self { self.length_field_is_big_endian = true; self } /// Read the length field as a little endian integer /// /// The default setting is big endian. /// /// This configuration option applies to both encoding and decoding. 
/// /// # Examples /// /// ``` /// # use tokio::io::AsyncRead; /// use tokio_util::codec::LengthDelimitedCodec; /// /// # fn bind_read<T: AsyncRead>(io: T) { /// LengthDelimitedCodec::builder() /// .little_endian() /// .new_read(io); /// # } /// # pub fn main() {} /// ``` pub fn little_endian(&mut self) -> &mut Self { self.length_field_is_big_endian = false; self } /// Read the length field as a native endian integer /// /// The default setting is big endian. /// /// This configuration option applies to both encoding and decoding. /// /// # Examples /// /// ``` /// # use tokio::io::AsyncRead; /// use tokio_util::codec::LengthDelimitedCodec; /// /// # fn bind_read<T: AsyncRead>(io: T) { /// LengthDelimitedCodec::builder() /// .native_endian() /// .new_read(io); /// # } /// # pub fn main() {} /// ``` pub fn native_endian(&mut self) -> &mut Self { if cfg!(target_endian = "big") { self.big_endian() } else { self.little_endian() } } /// Sets the max frame length in bytes /// /// This configuration option applies to both encoding and decoding. The /// default value is 8MB. /// /// When decoding, the length field read from the byte stream is checked /// against this setting **before** any adjustments are applied. When /// encoding, the length of the submitted payload is checked against this /// setting. /// /// When frames exceed the max length, an `io::Error` with the custom value /// of the `LengthDelimitedCodecError` type will be returned. /// /// # Examples /// /// ``` /// # use tokio::io::AsyncRead; /// use tokio_util::codec::LengthDelimitedCodec; /// /// # fn bind_read<T: AsyncRead>(io: T) { /// LengthDelimitedCodec::builder() /// .max_frame_length(8 * 1024 * 1024) /// .new_read(io); /// # } /// # pub fn main() {} /// ``` pub fn max_frame_length(&mut self, val: usize) -> &mut Self { self.max_frame_len = val; self } /// Sets the unsigned integer type used to represent the length field. /// /// The default type is [`u32`]. 
The max type is [`u64`] (or [`usize`] on /// 64-bit targets). /// /// # Examples /// /// ``` /// # use tokio::io::AsyncRead; /// use tokio_util::codec::LengthDelimitedCodec; /// /// # fn bind_read<T: AsyncRead>(io: T) { /// LengthDelimitedCodec::builder() /// .length_field_type::<u32>() /// .new_read(io); /// # } /// # pub fn main() {} /// ``` /// /// Unlike [`Builder::length_field_length`], this does not fail at runtime /// and instead produces a compile error: /// /// ```compile_fail /// # use tokio::io::AsyncRead; /// # use tokio_util::codec::LengthDelimitedCodec; /// # fn bind_read<T: AsyncRead>(io: T) { /// LengthDelimitedCodec::builder() /// .length_field_type::<u128>() /// .new_read(io); /// # } /// # pub fn main() {} /// ``` pub fn length_field_type<T: builder::LengthFieldType>(&mut self) -> &mut Self { self.length_field_length(mem::size_of::<T>()) } /// Sets the number of bytes used to represent the length field /// /// The default value is `4`. The max value is `8`. /// /// This configuration option applies to both encoding and decoding. /// /// # Examples /// /// ``` /// # use tokio::io::AsyncRead; /// use tokio_util::codec::LengthDelimitedCodec; /// /// # fn bind_read<T: AsyncRead>(io: T) { /// LengthDelimitedCodec::builder() /// .length_field_length(4) /// .new_read(io); /// # } /// # pub fn main() {} /// ``` pub fn length_field_length(&mut self, val: usize) -> &mut Self { assert!(val > 0 && val <= 8, "invalid length field length"); self.length_field_len = val; self } /// Sets the number of bytes in the header before the length field /// /// This configuration option only applies to decoding. 
/// /// # Examples /// /// ``` /// # use tokio::io::AsyncRead; /// use tokio_util::codec::LengthDelimitedCodec; /// /// # fn bind_read<T: AsyncRead>(io: T) { /// LengthDelimitedCodec::builder() /// .length_field_offset(1) /// .new_read(io); /// # } /// # pub fn main() {} /// ``` pub fn length_field_offset(&mut self, val: usize) -> &mut Self { self.length_field_offset = val; self } /// Delta between the payload length specified in the header and the real /// payload length /// /// # Examples /// /// ``` /// # use tokio::io::AsyncRead; /// use tokio_util::codec::LengthDelimitedCodec; /// /// # fn bind_read<T: AsyncRead>(io: T) { /// LengthDelimitedCodec::builder() /// .length_adjustment(-2) /// .new_read(io); /// # } /// # pub fn main() {} /// ``` pub fn length_adjustment(&mut self, val: isize) -> &mut Self { self.length_adjustment = val; self } /// Sets the number of bytes to skip before reading the payload /// /// Default value is `length_field_len + length_field_offset` /// /// This configuration option only applies to decoding /// /// # Examples /// /// ``` /// # use tokio::io::AsyncRead; /// use tokio_util::codec::LengthDelimitedCodec; /// /// # fn bind_read<T: AsyncRead>(io: T) { /// LengthDelimitedCodec::builder() /// .num_skip(4) /// .new_read(io); /// # } /// # pub fn main() {} /// ``` pub fn num_skip(&mut self, val: usize) -> &mut Self { self.num_skip = Some(val); self } /// Create a configured length delimited `LengthDelimitedCodec` /// /// # Examples /// /// ``` /// use tokio_util::codec::LengthDelimitedCodec; /// # pub fn main() { /// LengthDelimitedCodec::builder() /// .length_field_offset(0) /// .length_field_type::<u16>() /// .length_adjustment(0) /// .num_skip(0) /// .new_codec(); /// # } /// ``` pub fn new_codec(&self) -> LengthDelimitedCodec { let mut builder = *self; builder.adjust_max_frame_len(); LengthDelimitedCodec { builder, state: DecodeState::Head, } } /// Create a configured length delimited `FramedRead` /// /// # Examples /// /// ``` /// 
# use tokio::io::AsyncRead; /// use tokio_util::codec::LengthDelimitedCodec; /// /// # fn bind_read<T: AsyncRead>(io: T) { /// LengthDelimitedCodec::builder() /// .length_field_offset(0) /// .length_field_type::<u16>() /// .length_adjustment(0) /// .num_skip(0) /// .new_read(io); /// # } /// # pub fn main() {} /// ``` pub fn new_read<T>(&self, upstream: T) -> FramedRead<T, LengthDelimitedCodec> where T: AsyncRead, { FramedRead::new(upstream, self.new_codec()) } /// Create a configured length delimited `FramedWrite` /// /// # Examples /// /// ``` /// # use tokio::io::AsyncWrite; /// # use tokio_util::codec::LengthDelimitedCodec; /// # fn write_frame<T: AsyncWrite>(io: T) { /// LengthDelimitedCodec::builder() /// .length_field_type::<u16>() /// .new_write(io); /// # } /// # pub fn main() {} /// ``` pub fn new_write<T>(&self, inner: T) -> FramedWrite<T, LengthDelimitedCodec> where T: AsyncWrite, { FramedWrite::new(inner, self.new_codec()) } /// Create a configured length delimited `Framed` /// /// # Examples /// /// ``` /// # use tokio::io::{AsyncRead, AsyncWrite}; /// # use tokio_util::codec::LengthDelimitedCodec; /// # fn write_frame<T: AsyncRead + AsyncWrite>(io: T) { /// # let _ = /// LengthDelimitedCodec::builder() /// .length_field_type::<u16>() /// .new_framed(io); /// # }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
true
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/codec/framed.rs
tokio-util/src/codec/framed.rs
use crate::codec::decoder::Decoder; use crate::codec::encoder::Encoder; use crate::codec::framed_impl::{FramedImpl, RWFrames, ReadFrame, WriteFrame}; use futures_core::Stream; use tokio::io::{AsyncRead, AsyncWrite}; use bytes::BytesMut; use futures_sink::Sink; use pin_project_lite::pin_project; use std::fmt; use std::io; use std::pin::Pin; use std::task::{Context, Poll}; pin_project! { /// A unified [`Stream`] and [`Sink`] interface to an underlying I/O object, using /// the `Encoder` and `Decoder` traits to encode and decode frames. /// /// You can create a `Framed` instance by using the [`Decoder::framed`] adapter, or /// by using the `new` function seen below. /// /// # Cancellation safety /// /// * [`futures_util::sink::SinkExt::send`]: if send is used as the event in a /// `tokio::select!` statement and some other branch completes first, then it is /// guaranteed that the message was not sent, but the message itself is lost. /// * [`tokio_stream::StreamExt::next`]: This method is cancel safe. The returned /// future only holds onto a reference to the underlying stream, so dropping it will /// never lose a value. /// /// [`Stream`]: futures_core::Stream /// [`Sink`]: futures_sink::Sink /// [`AsyncRead`]: tokio::io::AsyncRead /// [`Decoder::framed`]: crate::codec::Decoder::framed() /// [`futures_util::sink::SinkExt::send`]: futures_util::sink::SinkExt::send /// [`tokio_stream::StreamExt::next`]: https://docs.rs/tokio-stream/latest/tokio_stream/trait.StreamExt.html#method.next pub struct Framed<T, U> { #[pin] inner: FramedImpl<T, U, RWFrames> } } impl<T, U> Framed<T, U> { /// Provides a [`Stream`] and [`Sink`] interface for reading and writing to this /// I/O object, using [`Decoder`] and [`Encoder`] to read and write the raw data. /// /// Raw I/O objects work with byte sequences, but higher-level code usually /// wants to batch these into meaningful chunks, called "frames". 
This /// method layers framing on top of an I/O object, by using the codec /// traits to handle encoding and decoding of messages frames. Note that /// the incoming and outgoing frame types may be distinct. /// /// This function returns a *single* object that is both [`Stream`] and /// [`Sink`]; grouping this into a single object is often useful for layering /// things like gzip or TLS, which require both read and write access to the /// underlying object. /// /// If you want to work more directly with the streams and sink, consider /// calling [`split`] on the `Framed` returned by this method, which will /// break them into separate objects, allowing them to interact more easily. /// /// Note that, for some byte sources, the stream can be resumed after an EOF /// by reading from it, even after it has returned `None`. Repeated attempts /// to do so, without new data available, continue to return `None` without /// creating more (closing) frames. /// /// [`Stream`]: futures_core::Stream /// [`Sink`]: futures_sink::Sink /// [`Decode`]: crate::codec::Decoder /// [`Encoder`]: crate::codec::Encoder /// [`split`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.split pub fn new(inner: T, codec: U) -> Framed<T, U> { Framed { inner: FramedImpl { inner, codec, state: Default::default(), }, } } /// Provides a [`Stream`] and [`Sink`] interface for reading and writing to this /// I/O object, using [`Decoder`] and [`Encoder`] to read and write the raw data, /// with a specific read buffer initial capacity. /// /// Raw I/O objects work with byte sequences, but higher-level code usually /// wants to batch these into meaningful chunks, called "frames". This /// method layers framing on top of an I/O object, by using the codec /// traits to handle encoding and decoding of messages frames. Note that /// the incoming and outgoing frame types may be distinct. 
/// /// This function returns a *single* object that is both [`Stream`] and /// [`Sink`]; grouping this into a single object is often useful for layering /// things like gzip or TLS, which require both read and write access to the /// underlying object. /// /// If you want to work more directly with the streams and sink, consider /// calling [`split`] on the `Framed` returned by this method, which will /// break them into separate objects, allowing them to interact more easily. /// /// [`Stream`]: futures_core::Stream /// [`Sink`]: futures_sink::Sink /// [`Decode`]: crate::codec::Decoder /// [`Encoder`]: crate::codec::Encoder /// [`split`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.split pub fn with_capacity(inner: T, codec: U, capacity: usize) -> Framed<T, U> { Framed { inner: FramedImpl { inner, codec, state: RWFrames { read: ReadFrame { eof: false, is_readable: false, buffer: BytesMut::with_capacity(capacity), has_errored: false, }, write: WriteFrame { buffer: BytesMut::with_capacity(capacity), backpressure_boundary: capacity, }, }, }, } } /// Provides a [`Stream`] and [`Sink`] interface for reading and writing to this /// I/O object, using [`Decoder`] and [`Encoder`] to read and write the raw data. /// /// Raw I/O objects work with byte sequences, but higher-level code usually /// wants to batch these into meaningful chunks, called "frames". This /// method layers framing on top of an I/O object, by using the `Codec` /// traits to handle encoding and decoding of messages frames. Note that /// the incoming and outgoing frame types may be distinct. /// /// This function returns a *single* object that is both [`Stream`] and /// [`Sink`]; grouping this into a single object is often useful for layering /// things like gzip or TLS, which require both read and write access to the /// underlying object. /// /// This objects takes a stream and a `readbuffer` and a `writebuffer`. 
These field /// can be obtained from an existing `Framed` with the [`into_parts`] method. /// /// If you want to work more directly with the streams and sink, consider /// calling [`split`] on the `Framed` returned by this method, which will /// break them into separate objects, allowing them to interact more easily. /// /// [`Stream`]: futures_core::Stream /// [`Sink`]: futures_sink::Sink /// [`Decoder`]: crate::codec::Decoder /// [`Encoder`]: crate::codec::Encoder /// [`into_parts`]: crate::codec::Framed::into_parts() /// [`split`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.split pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> { Framed { inner: FramedImpl { inner: parts.io, codec: parts.codec, state: RWFrames { read: parts.read_buf.into(), write: parts.write_buf.into(), }, }, } } /// Returns a reference to the underlying I/O stream wrapped by /// `Framed`. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn get_ref(&self) -> &T { &self.inner.inner } /// Returns a mutable reference to the underlying I/O stream wrapped by /// `Framed`. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn get_mut(&mut self) -> &mut T { &mut self.inner.inner } /// Returns a pinned mutable reference to the underlying I/O stream wrapped by /// `Framed`. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { self.project().inner.project().inner } /// Returns a reference to the underlying codec wrapped by /// `Framed`. 
/// /// Note that care should be taken to not tamper with the underlying codec /// as it may corrupt the stream of frames otherwise being worked with. pub fn codec(&self) -> &U { &self.inner.codec } /// Returns a mutable reference to the underlying codec wrapped by /// `Framed`. /// /// Note that care should be taken to not tamper with the underlying codec /// as it may corrupt the stream of frames otherwise being worked with. pub fn codec_mut(&mut self) -> &mut U { &mut self.inner.codec } /// Maps the codec `U` to `C`, preserving the read and write buffers /// wrapped by `Framed`. /// /// Note that care should be taken to not tamper with the underlying codec /// as it may corrupt the stream of frames otherwise being worked with. pub fn map_codec<C, F>(self, map: F) -> Framed<T, C> where F: FnOnce(U) -> C, { // This could be potentially simplified once rust-lang/rust#86555 hits stable let parts = self.into_parts(); Framed::from_parts(FramedParts { io: parts.io, codec: map(parts.codec), read_buf: parts.read_buf, write_buf: parts.write_buf, _priv: (), }) } /// Returns a mutable reference to the underlying codec wrapped by /// `Framed`. /// /// Note that care should be taken to not tamper with the underlying codec /// as it may corrupt the stream of frames otherwise being worked with. pub fn codec_pin_mut(self: Pin<&mut Self>) -> &mut U { self.project().inner.project().codec } /// Returns a reference to the read buffer. pub fn read_buffer(&self) -> &BytesMut { &self.inner.state.read.buffer } /// Returns a mutable reference to the read buffer. pub fn read_buffer_mut(&mut self) -> &mut BytesMut { &mut self.inner.state.read.buffer } /// Returns a reference to the write buffer. pub fn write_buffer(&self) -> &BytesMut { &self.inner.state.write.buffer } /// Returns a mutable reference to the write buffer. 
pub fn write_buffer_mut(&mut self) -> &mut BytesMut { &mut self.inner.state.write.buffer } /// Returns backpressure boundary pub fn backpressure_boundary(&self) -> usize { self.inner.state.write.backpressure_boundary } /// Updates backpressure boundary pub fn set_backpressure_boundary(&mut self, boundary: usize) { self.inner.state.write.backpressure_boundary = boundary; } /// Consumes the `Framed`, returning its underlying I/O stream. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn into_inner(self) -> T { self.inner.inner } /// Consumes the `Framed`, returning its underlying I/O stream, the buffer /// with unprocessed data, and the codec. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn into_parts(self) -> FramedParts<T, U> { FramedParts { io: self.inner.inner, codec: self.inner.codec, read_buf: self.inner.state.read.buffer, write_buf: self.inner.state.write.buffer, _priv: (), } } } // This impl just defers to the underlying FramedImpl impl<T, U> Stream for Framed<T, U> where T: AsyncRead, U: Decoder, { type Item = Result<U::Item, U::Error>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { self.project().inner.poll_next(cx) } } // This impl just defers to the underlying FramedImpl impl<T, I, U> Sink<I> for Framed<T, U> where T: AsyncWrite, U: Encoder<I>, U::Error: From<io::Error>, { type Error = U::Error; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { self.project().inner.poll_ready(cx) } fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { self.project().inner.start_send(item) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { 
self.project().inner.poll_flush(cx) } fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { self.project().inner.poll_close(cx) } } impl<T, U> fmt::Debug for Framed<T, U> where T: fmt::Debug, U: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Framed") .field("io", self.get_ref()) .field("codec", self.codec()) .finish() } } /// `FramedParts` contains an export of the data of a Framed transport. /// It can be used to construct a new [`Framed`] with a different codec. /// It contains all current buffers and the inner transport. /// /// [`Framed`]: crate::codec::Framed #[derive(Debug)] #[allow(clippy::manual_non_exhaustive)] pub struct FramedParts<T, U> { /// The inner transport used to read bytes to and write bytes to pub io: T, /// The codec pub codec: U, /// The buffer with read but unprocessed data. pub read_buf: BytesMut, /// A buffer with unprocessed data which are not written yet. pub write_buf: BytesMut, /// This private field allows us to add additional fields in the future in a /// backwards compatible way. pub(crate) _priv: (), } impl<T, U> FramedParts<T, U> { /// Create a new, default, `FramedParts` pub fn new<I>(io: T, codec: U) -> FramedParts<T, U> where U: Encoder<I>, { FramedParts { io, codec, read_buf: BytesMut::new(), write_buf: BytesMut::new(), _priv: (), } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/codec/bytes_codec.rs
tokio-util/src/codec/bytes_codec.rs
use crate::codec::decoder::Decoder; use crate::codec::encoder::Encoder; use bytes::{BufMut, Bytes, BytesMut}; use std::io; /// A simple [`Decoder`] and [`Encoder`] implementation that just ships bytes around. /// /// [`Decoder`]: crate::codec::Decoder /// [`Encoder`]: crate::codec::Encoder /// /// # Example /// /// Turn an [`AsyncRead`] into a stream of `Result<`[`BytesMut`]`, `[`Error`]`>`. /// /// [`AsyncRead`]: tokio::io::AsyncRead /// [`BytesMut`]: bytes::BytesMut /// [`Error`]: std::io::Error /// /// ``` /// # mod hidden { /// # #[allow(unused_imports)] /// use tokio::fs::File; /// # } /// use tokio::io::AsyncRead; /// use tokio_util::codec::{FramedRead, BytesCodec}; /// /// # enum File {} /// # impl File { /// # async fn open(_name: &str) -> Result<impl AsyncRead, std::io::Error> { /// # use std::io::Cursor; /// # Ok(Cursor::new(vec![0, 1, 2, 3, 4, 5])) /// # } /// # } /// # /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> Result<(), std::io::Error> { /// let my_async_read = File::open("filename.txt").await?; /// let my_stream_of_bytes = FramedRead::new(my_async_read, BytesCodec::new()); /// # Ok(()) /// # } /// ``` /// #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Default)] pub struct BytesCodec(()); impl BytesCodec { /// Creates a new `BytesCodec` for shipping around raw bytes. 
pub fn new() -> BytesCodec { BytesCodec(()) } } impl Decoder for BytesCodec { type Item = BytesMut; type Error = io::Error; fn decode(&mut self, buf: &mut BytesMut) -> Result<Option<BytesMut>, io::Error> { if !buf.is_empty() { let len = buf.len(); Ok(Some(buf.split_to(len))) } else { Ok(None) } } } impl Encoder<Bytes> for BytesCodec { type Error = io::Error; fn encode(&mut self, data: Bytes, buf: &mut BytesMut) -> Result<(), io::Error> { buf.reserve(data.len()); buf.put(data); Ok(()) } } impl Encoder<BytesMut> for BytesCodec { type Error = io::Error; fn encode(&mut self, data: BytesMut, buf: &mut BytesMut) -> Result<(), io::Error> { buf.reserve(data.len()); buf.put(data); Ok(()) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/codec/decoder.rs
tokio-util/src/codec/decoder.rs
use crate::codec::Framed; use tokio::io::{AsyncRead, AsyncWrite}; use bytes::BytesMut; use std::io; /// Decoding of frames via buffers. /// /// This trait is used when constructing an instance of [`Framed`] or /// [`FramedRead`]. An implementation of `Decoder` takes a byte stream that has /// already been buffered in `src` and decodes the data into a stream of /// `Self::Item` frames. /// /// Implementations are able to track state on `self`, which enables /// implementing stateful streaming parsers. In many cases, though, this type /// will simply be a unit struct (e.g. `struct HttpDecoder`). /// /// For some underlying data-sources, namely files and FIFOs, /// it's possible to temporarily read 0 bytes by reaching EOF. /// /// In these cases `decode_eof` will be called until it signals /// fulfillment of all closing frames by returning `Ok(None)`. /// After that, repeated attempts to read from the [`Framed`] or [`FramedRead`] /// will not invoke `decode` or `decode_eof` again, until data can be read /// during a retry. /// /// It is up to the Decoder to keep track of a restart after an EOF, /// and to decide how to handle such an event by, for example, /// allowing frames to cross EOF boundaries, re-emitting opening frames, or /// resetting the entire internal state. /// /// [`Framed`]: crate::codec::Framed /// [`FramedRead`]: crate::codec::FramedRead pub trait Decoder { /// The type of decoded frames. type Item; /// The type of unrecoverable frame decoding errors. /// /// If an individual message is ill-formed but can be ignored without /// interfering with the processing of future messages, it may be more /// useful to report the failure as an `Item`. /// /// `From<io::Error>` is required in the interest of making `Error` suitable /// for returning directly from a [`FramedRead`], and to enable the default /// implementation of `decode_eof` to yield an `io::Error` when the decoder /// fails to consume all available data. 
/// /// Note that implementors of this trait can simply indicate `type Error = /// io::Error` to use I/O errors as this type. /// /// [`FramedRead`]: crate::codec::FramedRead type Error: From<io::Error>; /// Attempts to decode a frame from the provided buffer of bytes. /// /// This method is called by [`FramedRead`] whenever bytes are ready to be /// parsed. The provided buffer of bytes is what's been read so far, and /// this instance of `Decode` can determine whether an entire frame is in /// the buffer and is ready to be returned. /// /// If an entire frame is available, then this instance will remove those /// bytes from the buffer provided and return them as a decoded /// frame. Note that removing bytes from the provided buffer doesn't always /// necessarily copy the bytes, so this should be an efficient operation in /// most circumstances. /// /// If the bytes look valid, but a frame isn't fully available yet, then /// `Ok(None)` is returned. This indicates to the [`Framed`] instance that /// it needs to read some more bytes before calling this method again. /// /// Note that the bytes provided may be empty. If a previous call to /// `decode` consumed all the bytes in the buffer then `decode` will be /// called again until it returns `Ok(None)`, indicating that more bytes need to /// be read. /// /// Finally, if the bytes in the buffer are malformed then an error is /// returned indicating why. This informs [`Framed`] that the stream is now /// corrupt and should be terminated. /// /// [`Framed`]: crate::codec::Framed /// [`FramedRead`]: crate::codec::FramedRead /// /// # Buffer management /// /// Before returning from the function, implementations should ensure that /// the buffer has appropriate capacity in anticipation of future calls to /// `decode`. Failing to do so leads to inefficiency. 
/// /// For example, if frames have a fixed length, or if the length of the /// current frame is known from a header, a possible buffer management /// strategy is: /// /// ```no_run /// # use std::io; /// # /// # use bytes::BytesMut; /// # use tokio_util::codec::Decoder; /// # /// # struct MyCodec; /// # /// impl Decoder for MyCodec { /// // ... /// # type Item = BytesMut; /// # type Error = io::Error; /// /// fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> { /// // ... /// /// // Reserve enough to complete decoding of the current frame. /// let current_frame_len: usize = 1000; // Example. /// // And to start decoding the next frame. /// let next_frame_header_len: usize = 10; // Example. /// src.reserve(current_frame_len + next_frame_header_len); /// /// return Ok(None); /// } /// } /// ``` /// /// An optimal buffer management strategy minimizes reallocations and /// over-allocations. fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error>; /// A default method available to be called when there are no more bytes /// available to be read from the underlying I/O. /// /// This method defaults to calling `decode` and returns an error if /// `Ok(None)` is returned while there is unconsumed data in `buf`. /// Typically this doesn't need to be implemented unless the framing /// protocol differs near the end of the stream, or if you need to construct /// frames _across_ eof boundaries on sources that can be resumed. /// /// Note that the `buf` argument may be empty. If a previous call to /// `decode_eof` consumed all the bytes in the buffer, `decode_eof` will be /// called again until it returns `None`, indicating that there are no more /// frames to yield. This behavior enables returning finalization frames /// that may not be based on inbound data. 
/// /// Once `None` has been returned, `decode_eof` won't be called again until /// an attempt to resume the stream has been made, where the underlying stream /// actually returned more data. fn decode_eof(&mut self, buf: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> { match self.decode(buf)? { Some(frame) => Ok(Some(frame)), None => { if buf.is_empty() { Ok(None) } else { Err(io::Error::new(io::ErrorKind::Other, "bytes remaining on stream").into()) } } } } /// Provides a [`Stream`] and [`Sink`] interface for reading and writing to this /// `Io` object, using `Decode` and `Encode` to read and write the raw data. /// /// Raw I/O objects work with byte sequences, but higher-level code usually /// wants to batch these into meaningful chunks, called "frames". This /// method layers framing on top of an I/O object, by using the `Codec` /// traits to handle encoding and decoding of messages frames. Note that /// the incoming and outgoing frame types may be distinct. /// /// This function returns a *single* object that is both `Stream` and /// `Sink`; grouping this into a single object is often useful for layering /// things like gzip or TLS, which require both read and write access to the /// underlying object. /// /// If you want to work more directly with the streams and sink, consider /// calling `split` on the [`Framed`] returned by this method, which will /// break them into separate objects, allowing them to interact more easily. /// /// [`Stream`]: futures_core::Stream /// [`Sink`]: futures_sink::Sink /// [`Framed`]: crate::codec::Framed fn framed<T: AsyncRead + AsyncWrite + Sized>(self, io: T) -> Framed<T, Self> where Self: Sized, { Framed::new(io, self) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/codec/framed_read.rs
tokio-util/src/codec/framed_read.rs
use crate::codec::framed_impl::{FramedImpl, ReadFrame}; use crate::codec::Decoder; use futures_core::Stream; use tokio::io::AsyncRead; use bytes::BytesMut; use futures_sink::Sink; use pin_project_lite::pin_project; use std::fmt; use std::pin::Pin; use std::task::{Context, Poll}; use super::FramedParts; pin_project! { /// A [`Stream`] of messages decoded from an [`AsyncRead`]. /// /// For examples of how to use `FramedRead` with a codec, see the /// examples on the [`codec`] module. /// /// # Cancellation safety /// * [`tokio_stream::StreamExt::next`]: This method is cancel safe. The returned /// future only holds onto a reference to the underlying stream, so dropping it will /// never lose a value. /// /// [`Stream`]: futures_core::Stream /// [`AsyncRead`]: tokio::io::AsyncRead /// [`codec`]: crate::codec /// [`tokio_stream::StreamExt::next`]: https://docs.rs/tokio-stream/latest/tokio_stream/trait.StreamExt.html#method.next pub struct FramedRead<T, D> { #[pin] inner: FramedImpl<T, D, ReadFrame>, } } // ===== impl FramedRead ===== impl<T, D> FramedRead<T, D> { /// Creates a new `FramedRead` with the given `decoder`. pub fn new(inner: T, decoder: D) -> FramedRead<T, D> { FramedRead { inner: FramedImpl { inner, codec: decoder, state: Default::default(), }, } } /// Creates a new `FramedRead` with the given `decoder` and a buffer of `capacity` /// initial size. pub fn with_capacity(inner: T, decoder: D, capacity: usize) -> FramedRead<T, D> { FramedRead { inner: FramedImpl { inner, codec: decoder, state: ReadFrame { eof: false, is_readable: false, buffer: BytesMut::with_capacity(capacity), has_errored: false, }, }, } } /// Returns a reference to the underlying I/O stream wrapped by /// `FramedRead`. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. 
pub fn get_ref(&self) -> &T { &self.inner.inner } /// Returns a mutable reference to the underlying I/O stream wrapped by /// `FramedRead`. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn get_mut(&mut self) -> &mut T { &mut self.inner.inner } /// Returns a pinned mutable reference to the underlying I/O stream wrapped by /// `FramedRead`. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { self.project().inner.project().inner } /// Consumes the `FramedRead`, returning its underlying I/O stream. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn into_inner(self) -> T { self.inner.inner } /// Returns a reference to the underlying decoder. pub fn decoder(&self) -> &D { &self.inner.codec } /// Returns a mutable reference to the underlying decoder. pub fn decoder_mut(&mut self) -> &mut D { &mut self.inner.codec } /// Maps the decoder `D` to `C`, preserving the read buffer /// wrapped by `Framed`. pub fn map_decoder<C, F>(self, map: F) -> FramedRead<T, C> where F: FnOnce(D) -> C, { // This could be potentially simplified once rust-lang/rust#86555 hits stable let FramedImpl { inner, state, codec, } = self.inner; FramedRead { inner: FramedImpl { inner, state, codec: map(codec), }, } } /// Returns a mutable reference to the underlying decoder. pub fn decoder_pin_mut(self: Pin<&mut Self>) -> &mut D { self.project().inner.project().codec } /// Returns a reference to the read buffer. pub fn read_buffer(&self) -> &BytesMut { &self.inner.state.buffer } /// Returns a mutable reference to the read buffer. 
pub fn read_buffer_mut(&mut self) -> &mut BytesMut { &mut self.inner.state.buffer } /// Consumes the `FramedRead`, returning its underlying I/O stream, the buffer /// with unprocessed data, and the codec. pub fn into_parts(self) -> FramedParts<T, D> { FramedParts { io: self.inner.inner, codec: self.inner.codec, read_buf: self.inner.state.buffer, write_buf: BytesMut::new(), _priv: (), } } } // This impl just defers to the underlying FramedImpl impl<T, D> Stream for FramedRead<T, D> where T: AsyncRead, D: Decoder, { type Item = Result<D::Item, D::Error>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { self.project().inner.poll_next(cx) } } // This impl just defers to the underlying T: Sink impl<T, I, D> Sink<I> for FramedRead<T, D> where T: Sink<I>, { type Error = T::Error; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { self.project().inner.project().inner.poll_ready(cx) } fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { self.project().inner.project().inner.start_send(item) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { self.project().inner.project().inner.poll_flush(cx) } fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { self.project().inner.project().inner.poll_close(cx) } } impl<T, D> fmt::Debug for FramedRead<T, D> where T: fmt::Debug, D: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("FramedRead") .field("inner", &self.get_ref()) .field("decoder", &self.decoder()) .field("eof", &self.inner.state.eof) .field("is_readable", &self.inner.state.is_readable) .field("buffer", &self.read_buffer()) .finish() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/codec/mod.rs
tokio-util/src/codec/mod.rs
//! Adaptors from `AsyncRead`/`AsyncWrite` to Stream/Sink //! //! Raw I/O objects work with byte sequences, but higher-level code usually //! wants to batch these into meaningful chunks, called "frames". //! //! This module contains adapters to go from streams of bytes, [`AsyncRead`] and //! [`AsyncWrite`], to framed streams implementing [`Sink`] and [`Stream`]. //! Framed streams are also known as transports. //! //! # Example encoding using `LinesCodec` //! //! The following example demonstrates how to use a codec such as [`LinesCodec`] to //! write framed data. [`FramedWrite`] can be used to achieve this. Data sent to //! [`FramedWrite`] are first framed according to a specific codec, and then sent to //! an implementor of [`AsyncWrite`]. //! //! ``` //! use futures::sink::SinkExt; //! use tokio_util::codec::LinesCodec; //! use tokio_util::codec::FramedWrite; //! //! # #[tokio::main(flavor = "current_thread")] //! # async fn main() { //! let buffer = Vec::new(); //! let messages = vec!["Hello", "World"]; //! let encoder = LinesCodec::new(); //! //! // FramedWrite is a sink which means you can send values into it //! // asynchronously. //! let mut writer = FramedWrite::new(buffer, encoder); //! //! // To be able to send values into a FramedWrite, you need to bring the //! // `SinkExt` trait into scope. //! writer.send(messages[0]).await.unwrap(); //! writer.send(messages[1]).await.unwrap(); //! //! let buffer = writer.get_ref(); //! //! assert_eq!(buffer.as_slice(), "Hello\nWorld\n".as_bytes()); //! # } //!``` //! //! # Example decoding using `LinesCodec` //! The following example demonstrates how to use a codec such as [`LinesCodec`] to //! read a stream of framed data. [`FramedRead`] can be used to achieve this. [`FramedRead`] //! will keep reading from an [`AsyncRead`] implementor until a whole frame, according to a codec, //! can be parsed. //! //!``` //! use tokio_stream::StreamExt; //! use tokio_util::codec::LinesCodec; //! 
use tokio_util::codec::FramedRead; //! //! # #[tokio::main(flavor = "current_thread")] //! # async fn main() { //! let message = "Hello\nWorld".as_bytes(); //! let decoder = LinesCodec::new(); //! //! // FramedRead can be used to read a stream of values that are framed according to //! // a codec. FramedRead will read from its input (here `buffer`) until a whole frame //! // can be parsed. //! let mut reader = FramedRead::new(message, decoder); //! //! // To read values from a FramedRead, you need to bring the //! // `StreamExt` trait into scope. //! let frame1 = reader.next().await.unwrap().unwrap(); //! let frame2 = reader.next().await.unwrap().unwrap(); //! //! assert!(reader.next().await.is_none()); //! assert_eq!(frame1, "Hello"); //! assert_eq!(frame2, "World"); //! # } //! ``` //! //! # The Decoder trait //! //! A [`Decoder`] is used together with [`FramedRead`] or [`Framed`] to turn an //! [`AsyncRead`] into a [`Stream`]. The job of the decoder trait is to specify //! how sequences of bytes are turned into a sequence of frames, and to //! determine where the boundaries between frames are. The job of the //! `FramedRead` is to repeatedly switch between reading more data from the IO //! resource, and asking the decoder whether we have received enough data to //! decode another frame of data. //! //! The main method on the `Decoder` trait is the [`decode`] method. This method //! takes as argument the data that has been read so far, and when it is called, //! it will be in one of the following situations: //! //! 1. The buffer contains less than a full frame. //! 2. The buffer contains exactly a full frame. //! 3. The buffer contains more than a full frame. //! //! In the first situation, the decoder should return `Ok(None)`. //! //! In the second situation, the decoder should clear the provided buffer and //! return `Ok(Some(the_decoded_frame))`. //! //! In the third situation, the decoder should use a method such as [`split_to`] //! 
or [`advance`] to modify the buffer such that the frame is removed from the //! buffer, but any data in the buffer after that frame should still remain in //! the buffer. The decoder should also return `Ok(Some(the_decoded_frame))` in //! this case. //! //! Finally the decoder may return an error if the data is invalid in some way. //! The decoder should _not_ return an error just because it has yet to receive //! a full frame. //! //! It is guaranteed that, from one call to `decode` to another, the provided //! buffer will contain the exact same data as before, except that if more data //! has arrived through the IO resource, that data will have been appended to //! the buffer. This means that reading frames from a `FramedRead` is //! essentially equivalent to the following loop: //! //! ```no_run //! use tokio::io::AsyncReadExt; //! # // This uses async_stream to create an example that compiles. //! # fn foo() -> impl futures_core::Stream<Item = std::io::Result<bytes::BytesMut>> { async_stream::try_stream! { //! # use tokio_util::codec::Decoder; //! # let mut decoder = tokio_util::codec::BytesCodec::new(); //! # let io_resource = &mut &[0u8, 1, 2, 3][..]; //! //! let mut buf = bytes::BytesMut::new(); //! loop { //! // The read_buf call will append to buf rather than overwrite existing data. //! let len = io_resource.read_buf(&mut buf).await?; //! //! if len == 0 { //! while let Some(frame) = decoder.decode_eof(&mut buf)? { //! yield frame; //! } //! break; //! } //! //! while let Some(frame) = decoder.decode(&mut buf)? { //! yield frame; //! } //! } //! # }} //! ``` //! The example above uses `yield` whenever the `Stream` produces an item. //! //! ## Example decoder //! //! As an example, consider a protocol that can be used to send strings where //! each frame is a four byte integer that contains the length of the frame, //! followed by that many bytes of string data. The decoder fails with an error //! if the string data is not valid utf-8 or too long. //! //! 
Such a decoder can be written like this: //! ``` //! use tokio_util::codec::Decoder; //! use bytes::{BytesMut, Buf}; //! //! struct MyStringDecoder {} //! //! const MAX: usize = 8 * 1024 * 1024; //! //! impl Decoder for MyStringDecoder { //! type Item = String; //! type Error = std::io::Error; //! //! fn decode( //! &mut self, //! src: &mut BytesMut //! ) -> Result<Option<Self::Item>, Self::Error> { //! if src.len() < 4 { //! // Not enough data to read length marker. //! return Ok(None); //! } //! //! // Read length marker. //! let mut length_bytes = [0u8; 4]; //! length_bytes.copy_from_slice(&src[..4]); //! let length = u32::from_le_bytes(length_bytes) as usize; //! //! // Check that the length is not too large to avoid a denial of //! // service attack where the server runs out of memory. //! if length > MAX { //! return Err(std::io::Error::new( //! std::io::ErrorKind::InvalidData, //! format!("Frame of length {} is too large.", length) //! )); //! } //! //! if src.len() < 4 + length { //! // The full string has not yet arrived. //! // //! // We reserve more space in the buffer. This is not strictly //! // necessary, but is a good idea performance-wise. //! src.reserve(4 + length - src.len()); //! //! // We inform the Framed that we need more bytes to form the next //! // frame. //! return Ok(None); //! } //! //! // Use advance to modify src such that it no longer contains //! // this frame. //! let data = src[4..4 + length].to_vec(); //! src.advance(4 + length); //! //! // Convert the data to a string, or fail if it is not valid utf-8. //! match String::from_utf8(data) { //! Ok(string) => Ok(Some(string)), //! Err(utf8_error) => { //! Err(std::io::Error::new( //! std::io::ErrorKind::InvalidData, //! utf8_error.utf8_error(), //! )) //! }, //! } //! } //! } //! ``` //! //! # The Encoder trait //! //! An [`Encoder`] is used together with [`FramedWrite`] or [`Framed`] to turn //! an [`AsyncWrite`] into a [`Sink`]. The job of the encoder trait is to //! 
specify how frames are turned into a sequences of bytes. The job of the //! `FramedWrite` is to take the resulting sequence of bytes and write it to the //! IO resource. //! //! The main method on the `Encoder` trait is the [`encode`] method. This method //! takes an item that is being written, and a buffer to write the item to. The //! buffer may already contain data, and in this case, the encoder should append //! the new frame to the buffer rather than overwrite the existing data. //! //! It is guaranteed that, from one call to `encode` to another, the provided //! buffer will contain the exact same data as before, except that some of the //! data may have been removed from the front of the buffer. Writing to a //! `FramedWrite` is essentially equivalent to the following loop: //! //! ```no_run //! use tokio::io::AsyncWriteExt; //! use bytes::Buf; // for advance //! # use tokio_util::codec::Encoder; //! # async fn next_frame() -> bytes::Bytes { bytes::Bytes::new() } //! # async fn no_more_frames() { } //! # #[tokio::main] async fn main() -> std::io::Result<()> { //! # let mut io_resource = tokio::io::sink(); //! # let mut encoder = tokio_util::codec::BytesCodec::new(); //! //! const MAX: usize = 8192; //! //! let mut buf = bytes::BytesMut::new(); //! loop { //! tokio::select! { //! num_written = io_resource.write(&buf), if !buf.is_empty() => { //! buf.advance(num_written?); //! }, //! frame = next_frame(), if buf.len() < MAX => { //! encoder.encode(frame, &mut buf)?; //! }, //! _ = no_more_frames() => { //! io_resource.write_all(&buf).await?; //! io_resource.shutdown().await?; //! return Ok(()); //! }, //! } //! } //! # } //! ``` //! Here the `next_frame` method corresponds to any frames you write to the //! `FramedWrite`. The `no_more_frames` method corresponds to closing the //! `FramedWrite` with [`SinkExt::close`]. //! //! ## Example encoder //! //! As an example, consider a protocol that can be used to send strings where //! 
each frame is a four byte integer that contains the length of the frame, //! followed by that many bytes of string data. The encoder will fail if the //! string is too long. //! //! Such an encoder can be written like this: //! ``` //! use tokio_util::codec::Encoder; //! use bytes::BytesMut; //! //! struct MyStringEncoder {} //! //! const MAX: usize = 8 * 1024 * 1024; //! //! impl Encoder<String> for MyStringEncoder { //! type Error = std::io::Error; //! //! fn encode(&mut self, item: String, dst: &mut BytesMut) -> Result<(), Self::Error> { //! // Don't send a string if it is longer than the other end will //! // accept. //! if item.len() > MAX { //! return Err(std::io::Error::new( //! std::io::ErrorKind::InvalidData, //! format!("Frame of length {} is too large.", item.len()) //! )); //! } //! //! // Convert the length into a byte array. //! // The cast to u32 cannot overflow due to the length check above. //! let len_slice = u32::to_le_bytes(item.len() as u32); //! //! // Reserve space in the buffer. //! dst.reserve(4 + item.len()); //! //! // Write the length and string to the buffer. //! dst.extend_from_slice(&len_slice); //! dst.extend_from_slice(item.as_bytes()); //! Ok(()) //! } //! } //! ``` //! //! [`AsyncRead`]: tokio::io::AsyncRead //! [`AsyncWrite`]: tokio::io::AsyncWrite //! [`Stream`]: futures_core::Stream //! [`Sink`]: futures_sink::Sink //! [`SinkExt`]: https://docs.rs/futures/0.3/futures/sink/trait.SinkExt.html //! [`SinkExt::close`]: https://docs.rs/futures/0.3/futures/sink/trait.SinkExt.html#method.close //! [`FramedRead`]: struct@crate::codec::FramedRead //! [`FramedWrite`]: struct@crate::codec::FramedWrite //! [`Framed`]: struct@crate::codec::Framed //! [`Decoder`]: trait@crate::codec::Decoder //! [`decode`]: fn@crate::codec::Decoder::decode //! [`encode`]: fn@crate::codec::Encoder::encode //! [`split_to`]: fn@bytes::BytesMut::split_to //! 
[`advance`]: fn@bytes::Buf::advance mod bytes_codec; pub use self::bytes_codec::BytesCodec; mod decoder; pub use self::decoder::Decoder; mod encoder; pub use self::encoder::Encoder; mod framed_impl; #[allow(unused_imports)] pub(crate) use self::framed_impl::{FramedImpl, RWFrames, ReadFrame, WriteFrame}; mod framed; pub use self::framed::{Framed, FramedParts}; mod framed_read; pub use self::framed_read::FramedRead; mod framed_write; pub use self::framed_write::FramedWrite; pub mod length_delimited; pub use self::length_delimited::{LengthDelimitedCodec, LengthDelimitedCodecError}; mod lines_codec; pub use self::lines_codec::{LinesCodec, LinesCodecError}; mod any_delimiter_codec; pub use self::any_delimiter_codec::{AnyDelimiterCodec, AnyDelimiterCodecError};
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/codec/lines_codec.rs
tokio-util/src/codec/lines_codec.rs
use crate::codec::decoder::Decoder; use crate::codec::encoder::Encoder; use bytes::{Buf, BufMut, BytesMut}; use std::{cmp, fmt, io, str}; /// A simple [`Decoder`] and [`Encoder`] implementation that splits up data into lines. /// /// This uses the `\n` character as the line ending on all platforms. /// /// [`Decoder`]: crate::codec::Decoder /// [`Encoder`]: crate::codec::Encoder #[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] pub struct LinesCodec { // Stored index of the next index to examine for a `\n` character. // This is used to optimize searching. // For example, if `decode` was called with `abc`, it would hold `3`, // because that is the next index to examine. // The next time `decode` is called with `abcde\n`, the method will // only look at `de\n` before returning. next_index: usize, /// The maximum length for a given line. If `usize::MAX`, lines will be /// read until a `\n` character is reached. max_length: usize, /// Are we currently discarding the remainder of a line which was over /// the length limit? is_discarding: bool, } impl LinesCodec { /// Returns a `LinesCodec` for splitting up data into lines. /// /// # Note /// /// The returned `LinesCodec` will not have an upper bound on the length /// of a buffered line. See the documentation for [`new_with_max_length`] /// for information on why this could be a potential security risk. /// /// [`new_with_max_length`]: crate::codec::LinesCodec::new_with_max_length() pub fn new() -> LinesCodec { LinesCodec { next_index: 0, max_length: usize::MAX, is_discarding: false, } } /// Returns a `LinesCodec` with a maximum line length limit. /// /// If this is set, calls to `LinesCodec::decode` will return a /// [`LinesCodecError`] when a line exceeds the length limit. Subsequent calls /// will discard up to `limit` bytes from that line until a newline /// character is reached, returning `None` until the line over the limit /// has been fully discarded. 
After that point, calls to `decode` will /// function as normal. /// /// # Note /// /// Setting a length limit is highly recommended for any `LinesCodec` which /// will be exposed to untrusted input. Otherwise, the size of the buffer /// that holds the line currently being read is unbounded. An attacker could /// exploit this unbounded buffer by sending an unbounded amount of input /// without any `\n` characters, causing unbounded memory consumption. /// /// [`LinesCodecError`]: crate::codec::LinesCodecError pub fn new_with_max_length(max_length: usize) -> Self { LinesCodec { max_length, ..LinesCodec::new() } } /// Returns the maximum line length when decoding. /// /// ``` /// use std::usize; /// use tokio_util::codec::LinesCodec; /// /// let codec = LinesCodec::new(); /// assert_eq!(codec.max_length(), usize::MAX); /// ``` /// ``` /// use tokio_util::codec::LinesCodec; /// /// let codec = LinesCodec::new_with_max_length(256); /// assert_eq!(codec.max_length(), 256); /// ``` pub fn max_length(&self) -> usize { self.max_length } } fn utf8(buf: &[u8]) -> Result<&str, io::Error> { str::from_utf8(buf) .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "Unable to decode input as UTF8")) } fn without_carriage_return(s: &[u8]) -> &[u8] { if let Some(&b'\r') = s.last() { &s[..s.len() - 1] } else { s } } impl Decoder for LinesCodec { type Item = String; type Error = LinesCodecError; fn decode(&mut self, buf: &mut BytesMut) -> Result<Option<String>, LinesCodecError> { loop { // Determine how far into the buffer we'll search for a newline. If // there's no max_length set, we'll read to the end of the buffer. let read_to = cmp::min(self.max_length.saturating_add(1), buf.len()); let newline_offset = buf[self.next_index..read_to] .iter() .position(|b| *b == b'\n'); match (self.is_discarding, newline_offset) { (true, Some(offset)) => { // If we found a newline, discard up to that offset and // then stop discarding. 
On the next iteration, we'll try // to read a line normally. buf.advance(offset + self.next_index + 1); self.is_discarding = false; self.next_index = 0; } (true, None) => { // Otherwise, we didn't find a newline, so we'll discard // everything we read. On the next iteration, we'll continue // discarding up to max_len bytes unless we find a newline. buf.advance(read_to); self.next_index = 0; if buf.is_empty() { return Ok(None); } } (false, Some(offset)) => { // Found a line! let newline_index = offset + self.next_index; self.next_index = 0; let line = buf.split_to(newline_index + 1); let line = &line[..line.len() - 1]; let line = without_carriage_return(line); let line = utf8(line)?; return Ok(Some(line.to_string())); } (false, None) if buf.len() > self.max_length => { // Reached the maximum length without finding a // newline, return an error and start discarding on the // next call. self.is_discarding = true; return Err(LinesCodecError::MaxLineLengthExceeded); } (false, None) => { // We didn't find a line or reach the length limit, so the next // call will resume searching at the current offset. self.next_index = read_to; return Ok(None); } } } } fn decode_eof(&mut self, buf: &mut BytesMut) -> Result<Option<String>, LinesCodecError> { Ok(match self.decode(buf)? { Some(frame) => Some(frame), None => { self.next_index = 0; // No terminating newline - return remaining data, if any if buf.is_empty() || buf == &b"\r"[..] 
{ None } else { let line = buf.split_to(buf.len()); let line = without_carriage_return(&line); let line = utf8(line)?; Some(line.to_string()) } } }) } } impl<T> Encoder<T> for LinesCodec where T: AsRef<str>, { type Error = LinesCodecError; fn encode(&mut self, line: T, buf: &mut BytesMut) -> Result<(), LinesCodecError> { let line = line.as_ref(); buf.reserve(line.len() + 1); buf.put(line.as_bytes()); buf.put_u8(b'\n'); Ok(()) } } impl Default for LinesCodec { fn default() -> Self { Self::new() } } /// An error occurred while encoding or decoding a line. #[derive(Debug)] pub enum LinesCodecError { /// The maximum line length was exceeded. MaxLineLengthExceeded, /// An IO error occurred. Io(io::Error), } impl fmt::Display for LinesCodecError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { LinesCodecError::MaxLineLengthExceeded => write!(f, "max line length exceeded"), LinesCodecError::Io(e) => write!(f, "{e}"), } } } impl From<io::Error> for LinesCodecError { fn from(e: io::Error) -> LinesCodecError { LinesCodecError::Io(e) } } impl std::error::Error for LinesCodecError {}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/codec/any_delimiter_codec.rs
tokio-util/src/codec/any_delimiter_codec.rs
use crate::codec::decoder::Decoder; use crate::codec::encoder::Encoder; use bytes::{Buf, BufMut, Bytes, BytesMut}; use std::{cmp, fmt, io, str}; const DEFAULT_SEEK_DELIMITERS: &[u8] = b",;\n\r"; const DEFAULT_SEQUENCE_WRITER: &[u8] = b","; /// A simple [`Decoder`] and [`Encoder`] implementation that splits up data into chunks based on any character in the given delimiter string. /// /// [`Decoder`]: crate::codec::Decoder /// [`Encoder`]: crate::codec::Encoder /// /// # Example /// Decode string of bytes containing various different delimiters. /// /// [`BytesMut`]: bytes::BytesMut /// [`Error`]: std::io::Error /// /// ``` /// use tokio_util::codec::{AnyDelimiterCodec, Decoder}; /// use bytes::{BufMut, BytesMut}; /// /// # /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> Result<(), std::io::Error> { /// let mut codec = AnyDelimiterCodec::new(b",;\r\n".to_vec(),b";".to_vec()); /// let buf = &mut BytesMut::new(); /// buf.reserve(200); /// buf.put_slice(b"chunk 1,chunk 2;chunk 3\n\r"); /// assert_eq!("chunk 1", codec.decode(buf).unwrap().unwrap()); /// assert_eq!("chunk 2", codec.decode(buf).unwrap().unwrap()); /// assert_eq!("chunk 3", codec.decode(buf).unwrap().unwrap()); /// assert_eq!("", codec.decode(buf).unwrap().unwrap()); /// assert_eq!(None, codec.decode(buf).unwrap()); /// # Ok(()) /// # } /// ``` /// #[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] pub struct AnyDelimiterCodec { // Stored index of the next index to examine for the delimiter character. // This is used to optimize searching. // For example, if `decode` was called with `abc` and the delimiter is '{}', it would hold `3`, // because that is the next index to examine. // The next time `decode` is called with `abcde}`, the method will // only look at `de}` before returning. next_index: usize, /// The maximum length for a given chunk. If `usize::MAX`, chunks will be /// read until a delimiter character is reached. 
max_length: usize, /// Are we currently discarding the remainder of a chunk which was over /// the length limit? is_discarding: bool, /// The bytes that are using for search during decode seek_delimiters: Vec<u8>, /// The bytes that are using for encoding sequence_writer: Vec<u8>, } impl AnyDelimiterCodec { /// Returns a `AnyDelimiterCodec` for splitting up data into chunks. /// /// # Note /// /// The returned `AnyDelimiterCodec` will not have an upper bound on the length /// of a buffered chunk. See the documentation for [`new_with_max_length`] /// for information on why this could be a potential security risk. /// /// [`new_with_max_length`]: crate::codec::AnyDelimiterCodec::new_with_max_length() pub fn new(seek_delimiters: Vec<u8>, sequence_writer: Vec<u8>) -> AnyDelimiterCodec { AnyDelimiterCodec { next_index: 0, max_length: usize::MAX, is_discarding: false, seek_delimiters, sequence_writer, } } /// Returns a `AnyDelimiterCodec` with a maximum chunk length limit. /// /// If this is set, calls to `AnyDelimiterCodec::decode` will return a /// [`AnyDelimiterCodecError`] when a chunk exceeds the length limit. Subsequent calls /// will discard up to `limit` bytes from that chunk until a delimiter /// character is reached, returning `None` until the delimiter over the limit /// has been fully discarded. After that point, calls to `decode` will /// function as normal. /// /// # Note /// /// Setting a length limit is highly recommended for any `AnyDelimiterCodec` which /// will be exposed to untrusted input. Otherwise, the size of the buffer /// that holds the chunk currently being read is unbounded. An attacker could /// exploit this unbounded buffer by sending an unbounded amount of input /// without any delimiter characters, causing unbounded memory consumption. 
/// /// [`AnyDelimiterCodecError`]: crate::codec::AnyDelimiterCodecError pub fn new_with_max_length( seek_delimiters: Vec<u8>, sequence_writer: Vec<u8>, max_length: usize, ) -> Self { AnyDelimiterCodec { max_length, ..AnyDelimiterCodec::new(seek_delimiters, sequence_writer) } } /// Returns the maximum chunk length when decoding. /// /// ``` /// use std::usize; /// use tokio_util::codec::AnyDelimiterCodec; /// /// let codec = AnyDelimiterCodec::new(b",;\n".to_vec(), b";".to_vec()); /// assert_eq!(codec.max_length(), usize::MAX); /// ``` /// ``` /// use tokio_util::codec::AnyDelimiterCodec; /// /// let codec = AnyDelimiterCodec::new_with_max_length(b",;\n".to_vec(), b";".to_vec(), 256); /// assert_eq!(codec.max_length(), 256); /// ``` pub fn max_length(&self) -> usize { self.max_length } } impl Decoder for AnyDelimiterCodec { type Item = Bytes; type Error = AnyDelimiterCodecError; fn decode(&mut self, buf: &mut BytesMut) -> Result<Option<Bytes>, AnyDelimiterCodecError> { loop { // Determine how far into the buffer we'll search for a delimiter. If // there's no max_length set, we'll read to the end of the buffer. let read_to = cmp::min(self.max_length.saturating_add(1), buf.len()); let new_chunk_offset = buf[self.next_index..read_to] .iter() .position(|b| self.seek_delimiters.contains(b)); match (self.is_discarding, new_chunk_offset) { (true, Some(offset)) => { // If we found a new chunk, discard up to that offset and // then stop discarding. On the next iteration, we'll try // to read a chunk normally. buf.advance(offset + self.next_index + 1); self.is_discarding = false; self.next_index = 0; } (true, None) => { // Otherwise, we didn't find a new chunk, so we'll discard // everything we read. On the next iteration, we'll continue // discarding up to max_len bytes unless we find a new chunk. buf.advance(read_to); self.next_index = 0; if buf.is_empty() { return Ok(None); } } (false, Some(offset)) => { // Found a chunk! 
let new_chunk_index = offset + self.next_index; self.next_index = 0; let mut chunk = buf.split_to(new_chunk_index + 1); chunk.truncate(chunk.len() - 1); let chunk = chunk.freeze(); return Ok(Some(chunk)); } (false, None) if buf.len() > self.max_length => { // Reached the maximum length without finding a // new chunk, return an error and start discarding on the // next call. self.is_discarding = true; return Err(AnyDelimiterCodecError::MaxChunkLengthExceeded); } (false, None) => { // We didn't find a chunk or reach the length limit, so the next // call will resume searching at the current offset. self.next_index = read_to; return Ok(None); } } } } fn decode_eof(&mut self, buf: &mut BytesMut) -> Result<Option<Bytes>, AnyDelimiterCodecError> { Ok(match self.decode(buf)? { Some(frame) => Some(frame), None => { // return remaining data, if any if buf.is_empty() { None } else { let chunk = buf.split_to(buf.len()); self.next_index = 0; Some(chunk.freeze()) } } }) } } impl<T> Encoder<T> for AnyDelimiterCodec where T: AsRef<str>, { type Error = AnyDelimiterCodecError; fn encode(&mut self, chunk: T, buf: &mut BytesMut) -> Result<(), AnyDelimiterCodecError> { let chunk = chunk.as_ref(); buf.reserve(chunk.len() + self.sequence_writer.len()); buf.put(chunk.as_bytes()); buf.put(self.sequence_writer.as_ref()); Ok(()) } } impl Default for AnyDelimiterCodec { fn default() -> Self { Self::new( DEFAULT_SEEK_DELIMITERS.to_vec(), DEFAULT_SEQUENCE_WRITER.to_vec(), ) } } /// An error occurred while encoding or decoding a chunk. #[derive(Debug)] pub enum AnyDelimiterCodecError { /// The maximum chunk length was exceeded. MaxChunkLengthExceeded, /// An IO error occurred. 
Io(io::Error), } impl fmt::Display for AnyDelimiterCodecError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { AnyDelimiterCodecError::MaxChunkLengthExceeded => { write!(f, "max chunk length exceeded") } AnyDelimiterCodecError::Io(e) => write!(f, "{e}"), } } } impl From<io::Error> for AnyDelimiterCodecError { fn from(e: io::Error) -> AnyDelimiterCodecError { AnyDelimiterCodecError::Io(e) } } impl std::error::Error for AnyDelimiterCodecError {}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/codec/framed_impl.rs
tokio-util/src/codec/framed_impl.rs
use crate::codec::decoder::Decoder; use crate::codec::encoder::Encoder; use futures_core::Stream; use tokio::io::{AsyncRead, AsyncWrite}; use bytes::BytesMut; use futures_sink::Sink; use pin_project_lite::pin_project; use std::borrow::{Borrow, BorrowMut}; use std::io; use std::pin::Pin; use std::task::{ready, Context, Poll}; pin_project! { #[derive(Debug)] pub(crate) struct FramedImpl<T, U, State> { #[pin] pub(crate) inner: T, pub(crate) state: State, pub(crate) codec: U, } } const INITIAL_CAPACITY: usize = 8 * 1024; #[derive(Debug)] pub(crate) struct ReadFrame { pub(crate) eof: bool, pub(crate) is_readable: bool, pub(crate) buffer: BytesMut, pub(crate) has_errored: bool, } pub(crate) struct WriteFrame { pub(crate) buffer: BytesMut, pub(crate) backpressure_boundary: usize, } #[derive(Default)] pub(crate) struct RWFrames { pub(crate) read: ReadFrame, pub(crate) write: WriteFrame, } impl Default for ReadFrame { fn default() -> Self { Self { eof: false, is_readable: false, buffer: BytesMut::with_capacity(INITIAL_CAPACITY), has_errored: false, } } } impl Default for WriteFrame { fn default() -> Self { Self { buffer: BytesMut::with_capacity(INITIAL_CAPACITY), backpressure_boundary: INITIAL_CAPACITY, } } } impl From<BytesMut> for ReadFrame { fn from(mut buffer: BytesMut) -> Self { let size = buffer.capacity(); if size < INITIAL_CAPACITY { buffer.reserve(INITIAL_CAPACITY - size); } Self { buffer, is_readable: size > 0, eof: false, has_errored: false, } } } impl From<BytesMut> for WriteFrame { fn from(mut buffer: BytesMut) -> Self { let size = buffer.capacity(); if size < INITIAL_CAPACITY { buffer.reserve(INITIAL_CAPACITY - size); } Self { buffer, backpressure_boundary: INITIAL_CAPACITY, } } } impl Borrow<ReadFrame> for RWFrames { fn borrow(&self) -> &ReadFrame { &self.read } } impl BorrowMut<ReadFrame> for RWFrames { fn borrow_mut(&mut self) -> &mut ReadFrame { &mut self.read } } impl Borrow<WriteFrame> for RWFrames { fn borrow(&self) -> &WriteFrame { &self.write } } impl 
BorrowMut<WriteFrame> for RWFrames { fn borrow_mut(&mut self) -> &mut WriteFrame { &mut self.write } } impl<T, U, R> Stream for FramedImpl<T, U, R> where T: AsyncRead, U: Decoder, R: BorrowMut<ReadFrame>, { type Item = Result<U::Item, U::Error>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { use crate::util::poll_read_buf; let mut pinned = self.project(); let state: &mut ReadFrame = pinned.state.borrow_mut(); // The following loops implements a state machine with each state corresponding // to a combination of the `is_readable` and `eof` flags. States persist across // loop entries and most state transitions occur with a return. // // The initial state is `reading`. // // | state | eof | is_readable | has_errored | // |---------|-------|-------------|-------------| // | reading | false | false | false | // | framing | false | true | false | // | pausing | true | true | false | // | paused | true | false | false | // | errored | <any> | <any> | true | // `decode_eof` returns Err // ┌────────────────────────────────────────────────────────┐ // `decode_eof` returns │ │ // `Ok(Some)` │ │ // ┌─────┐ │ `decode_eof` returns After returning │ // Read 0 bytes ├─────▼──┴┐ `Ok(None)` ┌────────┐ ◄───┐ `None` ┌───▼─────┐ // ┌────────────────►│ Pausing ├───────────────────────►│ Paused ├─┐ └───────────┤ Errored │ // │ └─────────┘ └─┬──▲───┘ │ └───▲───▲─┘ // Pending read │ │ │ │ │ │ // ┌──────┐ │ `decode` returns `Some` │ └─────┘ │ │ // │ │ │ ┌──────┐ │ Pending │ │ // │ ┌────▼──┴─┐ Read n>0 bytes ┌┴──────▼─┐ read n>0 bytes │ read │ │ // └─┤ Reading ├───────────────►│ Framing │◄────────────────────────┘ │ │ // └──┬─▲────┘ └─────┬──┬┘ │ │ // │ │ │ │ `decode` returns Err │ │ // │ └───decode` returns `None`──┘ └───────────────────────────────────────────────────────┘ │ // │ read returns Err │ // └────────────────────────────────────────────────────────────────────────────────────────────┘ loop { // Return `None` if we have encountered an error 
from the underlying decoder // See: https://github.com/tokio-rs/tokio/issues/3976 if state.has_errored { // preparing has_errored -> paused trace!("Returning None and setting paused"); state.is_readable = false; state.has_errored = false; return Poll::Ready(None); } // Repeatedly call `decode` or `decode_eof` while the buffer is "readable", // i.e. it _might_ contain data consumable as a frame or closing frame. // Both signal that there is no such data by returning `None`. // // If `decode` couldn't read a frame and the upstream source has returned eof, // `decode_eof` will attempt to decode the remaining bytes as closing frames. // // If the underlying AsyncRead is resumable, we may continue after an EOF, // but must finish emitting all of it's associated `decode_eof` frames. // Furthermore, we don't want to emit any `decode_eof` frames on retried // reads after an EOF unless we've actually read more data. if state.is_readable { // pausing or framing if state.eof { // pausing let frame = pinned.codec.decode_eof(&mut state.buffer).map_err(|err| { trace!("Got an error, going to errored state"); state.has_errored = true; err })?; if frame.is_none() { state.is_readable = false; // prepare pausing -> paused } // implicit pausing -> pausing or pausing -> paused return Poll::Ready(frame.map(Ok)); } // framing trace!("attempting to decode a frame"); if let Some(frame) = pinned.codec.decode(&mut state.buffer).map_err(|op| { trace!("Got an error, going to errored state"); state.has_errored = true; op })? { trace!("frame decoded from buffer"); // implicit framing -> framing return Poll::Ready(Some(Ok(frame))); } // framing -> reading state.is_readable = false; } // reading or paused // If we can't build a frame yet, try to read more data and try again. // Make sure we've got room for at least one byte to read to ensure // that we don't get a spurious 0 that looks like EOF. 
state.buffer.reserve(1); #[allow(clippy::blocks_in_conditions)] let bytect = match poll_read_buf(pinned.inner.as_mut(), cx, &mut state.buffer).map_err( |err| { trace!("Got an error, going to errored state"); state.has_errored = true; err }, )? { Poll::Ready(ct) => ct, // implicit reading -> reading or implicit paused -> paused Poll::Pending => return Poll::Pending, }; if bytect == 0 { if state.eof { // We're already at an EOF, and since we've reached this path // we're also not readable. This implies that we've already finished // our `decode_eof` handling, so we can simply return `None`. // implicit paused -> paused return Poll::Ready(None); } // prepare reading -> paused state.eof = true; } else { // prepare paused -> framing or noop reading -> framing state.eof = false; } // paused -> framing or reading -> framing or reading -> pausing state.is_readable = true; } } } impl<T, I, U, W> Sink<I> for FramedImpl<T, U, W> where T: AsyncWrite, U: Encoder<I>, U::Error: From<io::Error>, W: BorrowMut<WriteFrame>, { type Error = U::Error; fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { if self.state.borrow().buffer.len() >= self.state.borrow().backpressure_boundary { self.as_mut().poll_flush(cx) } else { Poll::Ready(Ok(())) } } fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { let pinned = self.project(); pinned .codec .encode(item, &mut pinned.state.borrow_mut().buffer)?; Ok(()) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { use crate::util::poll_write_buf; trace!("flushing framed transport"); let mut pinned = self.project(); while !pinned.state.borrow_mut().buffer.is_empty() { let WriteFrame { buffer, .. 
} = pinned.state.borrow_mut(); trace!(remaining = buffer.len(), "writing;"); let n = ready!(poll_write_buf(pinned.inner.as_mut(), cx, buffer))?; if n == 0 { return Poll::Ready(Err(io::Error::new( io::ErrorKind::WriteZero, "failed to \ write frame to transport", ) .into())); } } // Try flushing the underlying IO ready!(pinned.inner.poll_flush(cx))?; trace!("framed transport flushed"); Poll::Ready(Ok(())) } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { ready!(self.as_mut().poll_flush(cx))?; ready!(self.project().inner.poll_shutdown(cx))?; Poll::Ready(Ok(())) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/util/mod.rs
tokio-util/src/util/mod.rs
mod maybe_dangling; #[cfg(any(feature = "io", feature = "codec"))] mod poll_buf; pub(crate) use maybe_dangling::MaybeDangling; #[cfg(any(feature = "io", feature = "codec"))] #[cfg_attr(not(feature = "io"), allow(unreachable_pub))] pub use poll_buf::{poll_read_buf, poll_write_buf}; cfg_rt! { #[cfg_attr(not(feature = "io"), allow(unused))] pub(crate) use tokio::task::coop::poll_proceed; } cfg_not_rt! { #[cfg_attr(not(feature = "io"), allow(unused))] use std::task::{Context, Poll}; #[cfg_attr(not(feature = "io"), allow(unused))] pub(crate) struct RestoreOnPending; #[cfg_attr(not(feature = "io"), allow(unused))] impl RestoreOnPending { pub(crate) fn made_progress(&self) {} } #[cfg_attr(not(feature = "io"), allow(unused))] pub(crate) fn poll_proceed(_cx: &mut Context<'_>) -> Poll<RestoreOnPending> { Poll::Ready(RestoreOnPending) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/util/poll_buf.rs
tokio-util/src/util/poll_buf.rs
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use bytes::{Buf, BufMut}; use std::io::{self, IoSlice}; use std::pin::Pin; use std::task::{ready, Context, Poll}; /// Try to read data from an `AsyncRead` into an implementer of the [`BufMut`] trait. /// /// [`BufMut`]: bytes::Buf /// /// # Example /// /// ``` /// use bytes::{Bytes, BytesMut}; /// use tokio_stream as stream; /// use tokio::io::Result; /// use tokio_util::io::{StreamReader, poll_read_buf}; /// use std::future::poll_fn; /// use std::pin::Pin; /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> std::io::Result<()> { /// /// // Create a reader from an iterator. This particular reader will always be /// // ready. /// let mut read = StreamReader::new(stream::iter(vec![Result::Ok(Bytes::from_static(&[0, 1, 2, 3]))])); /// /// let mut buf = BytesMut::new(); /// let mut reads = 0; /// /// loop { /// reads += 1; /// let n = poll_fn(|cx| poll_read_buf(Pin::new(&mut read), cx, &mut buf)).await?; /// /// if n == 0 { /// break; /// } /// } /// /// // one or more reads might be necessary. /// assert!(reads >= 1); /// assert_eq!(&buf[..], &[0, 1, 2, 3]); /// # Ok(()) /// # } /// ``` #[cfg_attr(not(feature = "io"), allow(unreachable_pub))] pub fn poll_read_buf<T: AsyncRead + ?Sized, B: BufMut>( io: Pin<&mut T>, cx: &mut Context<'_>, buf: &mut B, ) -> Poll<io::Result<usize>> { if !buf.has_remaining_mut() { return Poll::Ready(Ok(0)); } let n = { let dst = buf.chunk_mut(); // Safety: `chunk_mut()` returns a `&mut UninitSlice`, and `UninitSlice` is a // transparent wrapper around `[MaybeUninit<u8>]`. let dst = unsafe { dst.as_uninit_slice_mut() }; let mut buf = ReadBuf::uninit(dst); let ptr = buf.filled().as_ptr(); ready!(io.poll_read(cx, &mut buf)?); // Ensure the pointer does not change from under us assert_eq!(ptr, buf.filled().as_ptr()); buf.filled().len() }; // Safety: This is guaranteed to be the number of initialized (and read) // bytes due to the invariants provided by `ReadBuf::filled`. 
unsafe { buf.advance_mut(n); } Poll::Ready(Ok(n)) } /// Try to write data from an implementer of the [`Buf`] trait to an /// [`AsyncWrite`], advancing the buffer's internal cursor. /// /// This function will use [vectored writes] when the [`AsyncWrite`] supports /// vectored writes. /// /// # Examples /// /// [`File`] implements [`AsyncWrite`] and [`Cursor<&[u8]>`] implements /// [`Buf`]: /// /// ```no_run /// use tokio_util::io::poll_write_buf; /// use tokio::io; /// use tokio::fs::File; /// /// use bytes::Buf; /// use std::future::poll_fn; /// use std::io::Cursor; /// use std::pin::Pin; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut file = File::create("foo.txt").await?; /// let mut buf = Cursor::new(b"data to write"); /// /// // Loop until the entire contents of the buffer are written to /// // the file. /// while buf.has_remaining() { /// poll_fn(|cx| poll_write_buf(Pin::new(&mut file), cx, &mut buf)).await?; /// } /// /// Ok(()) /// } /// ``` /// /// [`Buf`]: bytes::Buf /// [`AsyncWrite`]: tokio::io::AsyncWrite /// [`File`]: tokio::fs::File /// [vectored writes]: tokio::io::AsyncWrite::poll_write_vectored #[cfg_attr(not(feature = "io"), allow(unreachable_pub))] pub fn poll_write_buf<T: AsyncWrite + ?Sized, B: Buf>( io: Pin<&mut T>, cx: &mut Context<'_>, buf: &mut B, ) -> Poll<io::Result<usize>> { const MAX_BUFS: usize = 64; if !buf.has_remaining() { return Poll::Ready(Ok(0)); } let n = if io.is_write_vectored() { let mut slices = [IoSlice::new(&[]); MAX_BUFS]; let cnt = buf.chunks_vectored(&mut slices); ready!(io.poll_write_vectored(cx, &slices[..cnt]))? } else { ready!(io.poll_write(cx, buf.chunk()))? }; buf.advance(n); Poll::Ready(Ok(n)) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/util/maybe_dangling.rs
tokio-util/src/util/maybe_dangling.rs
use core::future::Future; use core::mem::MaybeUninit; use core::pin::Pin; use core::task::{Context, Poll}; /// A wrapper type that tells the compiler that the contents might not be valid. /// /// This is necessary mainly when `T` contains a reference. In that case, the /// compiler will sometimes assume that the reference is always valid; in some /// cases it will assume this even after the destructor of `T` runs. For /// example, when a reference is used as a function argument, then the compiler /// will assume that the reference is valid until the function returns, even if /// the reference is destroyed during the function. When the reference is used /// as part of a self-referential struct, that assumption can be false. Wrapping /// the reference in this type prevents the compiler from making that /// assumption. /// /// # Invariants /// /// The `MaybeUninit` will always contain a valid value until the destructor runs. // // Reference // See <https://users.rust-lang.org/t/unsafe-code-review-semi-owning-weak-rwlock-t-guard/95706> // // TODO: replace this with an official solution once RFC #3336 or similar is available. // <https://github.com/rust-lang/rfcs/pull/3336> #[repr(transparent)] pub(crate) struct MaybeDangling<T>(MaybeUninit<T>); impl<T> Drop for MaybeDangling<T> { fn drop(&mut self) { // Safety: `0` is always initialized. unsafe { core::ptr::drop_in_place(self.0.as_mut_ptr()) }; } } impl<T> MaybeDangling<T> { pub(crate) fn new(inner: T) -> Self { Self(MaybeUninit::new(inner)) } } impl<F: Future> Future for MaybeDangling<F> { type Output = F::Output; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { // Safety: `0` is always initialized. 
let fut = unsafe { self.map_unchecked_mut(|this| this.0.assume_init_mut()) }; fut.poll(cx) } } #[test] fn maybedangling_runs_drop() { struct SetOnDrop<'a>(&'a mut bool); impl Drop for SetOnDrop<'_> { fn drop(&mut self) { *self.0 = true; } } let mut success = false; drop(MaybeDangling::new(SetOnDrop(&mut success))); assert!(success); }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/udp/mod.rs
tokio-util/src/udp/mod.rs
#![cfg(not(loom))] //! UDP framing mod frame; pub use frame::UdpFramed;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/udp/frame.rs
tokio-util/src/udp/frame.rs
use crate::codec::{Decoder, Encoder}; use futures_core::Stream; use tokio::{io::ReadBuf, net::UdpSocket}; use bytes::{BufMut, BytesMut}; use futures_sink::Sink; use std::io; use std::pin::Pin; use std::task::{ready, Context, Poll}; use std::{ borrow::Borrow, net::{Ipv4Addr, SocketAddr, SocketAddrV4}, }; /// A unified [`Stream`] and [`Sink`] interface to an underlying `UdpSocket`, using /// the `Encoder` and `Decoder` traits to encode and decode frames. /// /// Raw UDP sockets work with datagrams, but higher-level code usually wants to /// batch these into meaningful chunks, called "frames". This method layers /// framing on top of this socket by using the `Encoder` and `Decoder` traits to /// handle encoding and decoding of messages frames. Note that the incoming and /// outgoing frame types may be distinct. /// /// This function returns a *single* object that is both [`Stream`] and [`Sink`]; /// grouping this into a single object is often useful for layering things which /// require both read and write access to the underlying object. /// /// If you want to work more directly with the streams and sink, consider /// calling [`split`] on the `UdpFramed` returned by this method, which will break /// them into separate objects, allowing them to interact more easily. 
/// /// [`Stream`]: futures_core::Stream /// [`Sink`]: futures_sink::Sink /// [`split`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.split #[must_use = "sinks do nothing unless polled"] #[derive(Debug)] pub struct UdpFramed<C, T = UdpSocket> { socket: T, codec: C, rd: BytesMut, wr: BytesMut, out_addr: SocketAddr, flushed: bool, is_readable: bool, current_addr: Option<SocketAddr>, } const INITIAL_RD_CAPACITY: usize = 64 * 1024; const INITIAL_WR_CAPACITY: usize = 8 * 1024; impl<C, T> Unpin for UdpFramed<C, T> {} impl<C, T> Stream for UdpFramed<C, T> where T: Borrow<UdpSocket>, C: Decoder, { type Item = Result<(C::Item, SocketAddr), C::Error>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { let pin = self.get_mut(); pin.rd.reserve(INITIAL_RD_CAPACITY); loop { // Are there still bytes left in the read buffer to decode? if pin.is_readable { if let Some(frame) = pin.codec.decode_eof(&mut pin.rd)? { let current_addr = pin .current_addr .expect("will always be set before this line is called"); return Poll::Ready(Some(Ok((frame, current_addr)))); } // if this line has been reached then decode has returned `None`. pin.is_readable = false; pin.rd.clear(); } // We're out of data. Try and fetch more data to decode let addr = { // Safety: `chunk_mut()` returns a `&mut UninitSlice`, and `UninitSlice` is a // transparent wrapper around `[MaybeUninit<u8>]`. let buf = unsafe { pin.rd.chunk_mut().as_uninit_slice_mut() }; let mut read = ReadBuf::uninit(buf); let ptr = read.filled().as_ptr(); let res = ready!(pin.socket.borrow().poll_recv_from(cx, &mut read)); assert_eq!(ptr, read.filled().as_ptr()); let addr = res?; let filled = read.filled().len(); // Safety: This is guaranteed to be the number of initialized (and read) bytes due // to the invariants provided by `ReadBuf::filled`. 
unsafe { pin.rd.advance_mut(filled) }; addr }; pin.current_addr = Some(addr); pin.is_readable = true; } } } impl<I, C, T> Sink<(I, SocketAddr)> for UdpFramed<C, T> where T: Borrow<UdpSocket>, C: Encoder<I>, { type Error = C::Error; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { if !self.flushed { match self.poll_flush(cx)? { Poll::Ready(()) => {} Poll::Pending => return Poll::Pending, } } Poll::Ready(Ok(())) } fn start_send(self: Pin<&mut Self>, item: (I, SocketAddr)) -> Result<(), Self::Error> { let (frame, out_addr) = item; let pin = self.get_mut(); pin.codec.encode(frame, &mut pin.wr)?; pin.out_addr = out_addr; pin.flushed = false; Ok(()) } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { if self.flushed { return Poll::Ready(Ok(())); } let Self { ref socket, ref mut out_addr, ref mut wr, .. } = *self; let n = ready!(socket.borrow().poll_send_to(cx, wr, *out_addr))?; let wrote_all = n == self.wr.len(); self.wr.clear(); self.flushed = true; let res = if wrote_all { Ok(()) } else { Err(io::Error::new( io::ErrorKind::Other, "failed to write entire datagram to socket", ) .into()) }; Poll::Ready(res) } fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { ready!(self.poll_flush(cx))?; Poll::Ready(Ok(())) } } impl<C, T> UdpFramed<C, T> where T: Borrow<UdpSocket>, { /// Create a new `UdpFramed` backed by the given socket and codec. /// /// See struct level documentation for more details. pub fn new(socket: T, codec: C) -> UdpFramed<C, T> { Self { socket, codec, out_addr: SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), 0)), rd: BytesMut::with_capacity(INITIAL_RD_CAPACITY), wr: BytesMut::with_capacity(INITIAL_WR_CAPACITY), flushed: true, is_readable: false, current_addr: None, } } /// Returns a reference to the underlying I/O stream wrapped by `Framed`. 
/// /// # Note /// /// Care should be taken to not tamper with the underlying stream of data /// coming in as it may corrupt the stream of frames otherwise being worked /// with. pub fn get_ref(&self) -> &T { &self.socket } /// Returns a mutable reference to the underlying I/O stream wrapped by `Framed`. /// /// # Note /// /// Care should be taken to not tamper with the underlying stream of data /// coming in as it may corrupt the stream of frames otherwise being worked /// with. pub fn get_mut(&mut self) -> &mut T { &mut self.socket } /// Returns a reference to the underlying codec wrapped by /// `Framed`. /// /// Note that care should be taken to not tamper with the underlying codec /// as it may corrupt the stream of frames otherwise being worked with. pub fn codec(&self) -> &C { &self.codec } /// Returns a mutable reference to the underlying codec wrapped by /// `UdpFramed`. /// /// Note that care should be taken to not tamper with the underlying codec /// as it may corrupt the stream of frames otherwise being worked with. pub fn codec_mut(&mut self) -> &mut C { &mut self.codec } /// Returns a reference to the read buffer. pub fn read_buffer(&self) -> &BytesMut { &self.rd } /// Returns a mutable reference to the read buffer. pub fn read_buffer_mut(&mut self) -> &mut BytesMut { &mut self.rd } /// Consumes the `Framed`, returning its underlying I/O stream. pub fn into_inner(self) -> T { self.socket } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/time/mod.rs
tokio-util/src/time/mod.rs
//! Additional utilities for tracking time. //! //! This module provides additional utilities for executing code after a set period //! of time. Currently there is only one: //! //! * `DelayQueue`: A queue where items are returned once the requested delay //! has expired. //! //! This type must be used from within the context of the `Runtime`. use std::time::Duration; mod wheel; pub mod delay_queue; // re-export `FutureExt` to avoid breaking change #[doc(inline)] pub use crate::future::FutureExt; #[doc(inline)] pub use delay_queue::DelayQueue; // ===== Internal utils ===== enum Round { Up, Down, } /// Convert a `Duration` to milliseconds, rounding up and saturating at /// `u64::MAX`. /// /// The saturating is fine because `u64::MAX` milliseconds are still many /// million years. #[inline] fn ms(duration: Duration, round: Round) -> u64 { const NANOS_PER_MILLI: u32 = 1_000_000; const MILLIS_PER_SEC: u64 = 1_000; // Round up. let millis = match round { Round::Up => (duration.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI, Round::Down => duration.subsec_millis(), }; duration .as_secs() .saturating_mul(MILLIS_PER_SEC) .saturating_add(u64::from(millis)) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/time/delay_queue.rs
tokio-util/src/time/delay_queue.rs
//! A queue of delayed elements. //! //! See [`DelayQueue`] for more details. //! //! [`DelayQueue`]: struct@DelayQueue use crate::time::wheel::{self, Wheel}; use tokio::time::{sleep_until, Duration, Instant, Sleep}; use core::ops::{Index, IndexMut}; use slab::Slab; use std::cmp; use std::collections::HashMap; use std::convert::From; use std::fmt; use std::fmt::Debug; use std::future::Future; use std::marker::PhantomData; use std::pin::Pin; use std::task::{self, ready, Poll, Waker}; /// A queue of delayed elements. /// /// Once an element is inserted into the `DelayQueue`, it is yielded once the /// specified deadline has been reached. /// /// # Usage /// /// Elements are inserted into `DelayQueue` using the [`insert`] or /// [`insert_at`] methods. A deadline is provided with the item and a [`Key`] is /// returned. The key is used to remove the entry or to change the deadline at /// which it should be yielded back. /// /// Once delays have been configured, the `DelayQueue` is used via its /// [`Stream`] implementation. [`poll_expired`] is called. If an entry has reached its /// deadline, it is returned. If not, `Poll::Pending` is returned indicating that the /// current task will be notified once the deadline has been reached. /// /// # `Stream` implementation /// /// Items are retrieved from the queue via [`DelayQueue::poll_expired`]. If no delays have /// expired, no items are returned. In this case, [`Poll::Pending`] is returned and the /// current task is registered to be notified once the next item's delay has /// expired. /// /// If no items are in the queue, i.e. `is_empty()` returns `true`, then `poll` /// returns `Poll::Ready(None)`. This indicates that the stream has reached an end. /// However, if a new item is inserted *after*, `poll` will once again start /// returning items or `Poll::Pending`. /// /// Items are returned ordered by their expirations. Items that are configured /// to expire first will be returned first. 
There are no ordering guarantees /// for items configured to expire at the same instant. Also note that delays are /// rounded to the closest millisecond. /// /// # Implementation /// /// The [`DelayQueue`] is backed by a separate instance of a timer wheel similar to that used internally /// by Tokio's standalone timer utilities such as [`sleep`]. Because of this, it offers the same /// performance and scalability benefits. /// /// State associated with each entry is stored in a [`slab`]. This amortizes the cost of allocation, /// and allows reuse of the memory allocated for expired entries. /// /// Capacity can be checked using [`capacity`] and allocated preemptively by using /// the [`reserve`] method. /// /// # Cancellation safety /// /// [`DelayQueue`]'s implementation of [`StreamExt::next`] is cancellation safe. /// /// # Usage /// /// Using [`DelayQueue`] to manage cache entries. /// /// ```rust,no_run /// use tokio_util::time::{DelayQueue, delay_queue}; /// /// use std::collections::HashMap; /// use std::task::{ready, Context, Poll}; /// use std::time::Duration; /// # type CacheKey = String; /// # type Value = String; /// /// struct Cache { /// entries: HashMap<CacheKey, (Value, delay_queue::Key)>, /// expirations: DelayQueue<CacheKey>, /// } /// /// const TTL_SECS: u64 = 30; /// /// impl Cache { /// fn insert(&mut self, key: CacheKey, value: Value) { /// let delay = self.expirations /// .insert(key.clone(), Duration::from_secs(TTL_SECS)); /// /// self.entries.insert(key, (value, delay)); /// } /// /// fn get(&self, key: &CacheKey) -> Option<&Value> { /// self.entries.get(key) /// .map(|&(ref v, _)| v) /// } /// /// fn remove(&mut self, key: &CacheKey) { /// if let Some((_, cache_key)) = self.entries.remove(key) { /// self.expirations.remove(&cache_key); /// } /// } /// /// fn poll_purge(&mut self, cx: &mut Context<'_>) -> Poll<()> { /// while let Some(entry) = ready!(self.expirations.poll_expired(cx)) { /// self.entries.remove(entry.get_ref()); /// } /// 
/// Poll::Ready(()) /// } /// } /// ``` /// /// [`insert`]: method@Self::insert /// [`insert_at`]: method@Self::insert_at /// [`Key`]: struct@Key /// [`Stream`]: https://docs.rs/futures/0.3.31/futures/stream/trait.Stream.html /// [`StreamExt::next`]: https://docs.rs/tokio-stream/0.1.17/tokio_stream/trait.StreamExt.html#method.next /// [`poll_expired`]: method@Self::poll_expired /// [`Stream::poll_expired`]: method@Self::poll_expired /// [`DelayQueue`]: struct@DelayQueue /// [`sleep`]: fn@tokio::time::sleep /// [`slab`]: slab /// [`capacity`]: method@Self::capacity /// [`reserve`]: method@Self::reserve #[derive(Debug)] pub struct DelayQueue<T> { /// Stores data associated with entries slab: SlabStorage<T>, /// Lookup structure tracking all delays in the queue wheel: Wheel<Stack<T>>, /// Delays that were inserted when already expired. These cannot be stored /// in the wheel expired: Stack<T>, /// Delay expiring when the *first* item in the queue expires delay: Option<Pin<Box<Sleep>>>, /// Wheel polling state wheel_now: u64, /// Instant at which the timer starts start: Instant, /// Waker that is invoked when we potentially need to reset the timer. /// Because we lazily create the timer when the first entry is created, we /// need to awaken any poller that polled us before that point. waker: Option<Waker>, } #[derive(Default)] struct SlabStorage<T> { inner: Slab<Data<T>>, // A `compact` call requires a re-mapping of the `Key`s that were changed // during the `compact` call of the `slab`. Since the keys that were given out // cannot be changed retroactively we need to keep track of these re-mappings. // The keys of `key_map` correspond to the old keys that were given out and // the values to the `Key`s that were re-mapped by the `compact` call. key_map: HashMap<Key, KeyInternal>, // Index used to create new keys to hand out. next_key_index: usize, // Whether `compact` has been called, necessary in order to decide whether // to include keys in `key_map`. 
compact_called: bool, } impl<T> SlabStorage<T> { pub(crate) fn with_capacity(capacity: usize) -> SlabStorage<T> { SlabStorage { inner: Slab::with_capacity(capacity), key_map: HashMap::new(), next_key_index: 0, compact_called: false, } } // Inserts data into the inner slab and re-maps keys if necessary pub(crate) fn insert(&mut self, val: Data<T>) -> Key { let mut key = KeyInternal::new(self.inner.insert(val)); let key_contained = self.key_map.contains_key(&key.into()); if key_contained { // It's possible that a `compact` call creates capacity in `self.inner` in // such a way that a `self.inner.insert` call creates a `key` which was // previously given out during an `insert` call prior to the `compact` call. // If `key` is contained in `self.key_map`, we have encountered this exact situation, // We need to create a new key `key_to_give_out` and include the relation // `key_to_give_out` -> `key` in `self.key_map`. let key_to_give_out = self.create_new_key(); assert!(!self.key_map.contains_key(&key_to_give_out.into())); self.key_map.insert(key_to_give_out.into(), key); key = key_to_give_out; } else if self.compact_called { // Include an identity mapping in `self.key_map` in order to allow us to // panic if a key that was handed out is removed more than once. self.key_map.insert(key.into(), key); } key.into() } // Re-map the key in case compact was previously called. // Note: Since we include identity mappings in key_map after compact was called, // we have information about all keys that were handed out. In the case in which // compact was called and we try to remove a Key that was previously removed // we can detect invalid keys if no key is found in `key_map`. This is necessary // in order to prevent situations in which a previously removed key // corresponds to a re-mapped key internally and which would then be incorrectly // removed from the slab. // // Example to illuminate this problem: // // Let's assume our `key_map` is {1 -> 2, 2 -> 1} and we call remove(1). 
If we // were to remove 1 again, we would not find it inside `key_map` anymore. // If we were to imply from this that no re-mapping was necessary, we would // incorrectly remove 1 from `self.slab.inner`, which corresponds to the // handed-out key 2. pub(crate) fn remove(&mut self, key: &Key) -> Data<T> { let remapped_key = if self.compact_called { match self.key_map.remove(key) { Some(key_internal) => key_internal, None => panic!("invalid key"), } } else { (*key).into() }; self.inner.remove(remapped_key.index) } pub(crate) fn shrink_to_fit(&mut self) { self.inner.shrink_to_fit(); self.key_map.shrink_to_fit(); } pub(crate) fn compact(&mut self) { if !self.compact_called { for (key, _) in self.inner.iter() { self.key_map.insert(Key::new(key), KeyInternal::new(key)); } } let mut remapping = HashMap::new(); self.inner.compact(|_, from, to| { remapping.insert(from, to); true }); // At this point `key_map` contains a mapping for every element. for internal_key in self.key_map.values_mut() { if let Some(new_internal_key) = remapping.get(&internal_key.index) { *internal_key = KeyInternal::new(*new_internal_key); } } if self.key_map.capacity() > 2 * self.key_map.len() { self.key_map.shrink_to_fit(); } self.compact_called = true; } // Tries to re-map a `Key` that was given out to the user to its // corresponding internal key. 
fn remap_key(&self, key: &Key) -> Option<KeyInternal> { let key_map = &self.key_map; if self.compact_called { key_map.get(key).copied() } else { Some((*key).into()) } } fn create_new_key(&mut self) -> KeyInternal { while self.key_map.contains_key(&Key::new(self.next_key_index)) { self.next_key_index = self.next_key_index.wrapping_add(1); } KeyInternal::new(self.next_key_index) } pub(crate) fn len(&self) -> usize { self.inner.len() } pub(crate) fn capacity(&self) -> usize { self.inner.capacity() } pub(crate) fn clear(&mut self) { self.inner.clear(); self.key_map.clear(); self.compact_called = false; } pub(crate) fn reserve(&mut self, additional: usize) { self.inner.reserve(additional); if self.compact_called { self.key_map.reserve(additional); } } pub(crate) fn is_empty(&self) -> bool { self.inner.is_empty() } pub(crate) fn contains(&self, key: &Key) -> bool { let remapped_key = self.remap_key(key); match remapped_key { Some(internal_key) => self.inner.contains(internal_key.index), None => false, } } } impl<T> fmt::Debug for SlabStorage<T> where T: fmt::Debug, { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { if fmt.alternate() { fmt.debug_map().entries(self.inner.iter()).finish() } else { fmt.debug_struct("Slab") .field("len", &self.len()) .field("cap", &self.capacity()) .finish() } } } impl<T> Index<Key> for SlabStorage<T> { type Output = Data<T>; fn index(&self, key: Key) -> &Self::Output { let remapped_key = self.remap_key(&key); match remapped_key { Some(internal_key) => &self.inner[internal_key.index], None => panic!("Invalid index {}", key.index), } } } impl<T> IndexMut<Key> for SlabStorage<T> { fn index_mut(&mut self, key: Key) -> &mut Data<T> { let remapped_key = self.remap_key(&key); match remapped_key { Some(internal_key) => &mut self.inner[internal_key.index], None => panic!("Invalid index {}", key.index), } } } /// An entry in `DelayQueue` that has expired and been removed. /// /// Values are returned by [`DelayQueue::poll_expired`]. 
/// /// [`DelayQueue::poll_expired`]: method@DelayQueue::poll_expired #[derive(Debug)] pub struct Expired<T> { /// The data stored in the queue data: T, /// The expiration time deadline: Instant, /// The key associated with the entry key: Key, } /// Token to a value stored in a `DelayQueue`. /// /// Instances of `Key` are returned by [`DelayQueue::insert`]. See [`DelayQueue`] /// documentation for more details. /// /// [`DelayQueue`]: struct@DelayQueue /// [`DelayQueue::insert`]: method@DelayQueue::insert #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Key { index: usize, } // Whereas `Key` is given out to users that use `DelayQueue`, internally we use // `KeyInternal` as the key type in order to make the logic of mapping between keys // as a result of `compact` calls clearer. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] struct KeyInternal { index: usize, } #[derive(Debug)] struct Stack<T> { /// Head of the stack head: Option<Key>, _p: PhantomData<fn() -> T>, } #[derive(Debug)] struct Data<T> { /// The data being stored in the queue and will be returned at the requested /// instant. inner: T, /// The instant at which the item is returned. when: u64, /// Set to true when stored in the `expired` queue expired: bool, /// Next entry in the stack next: Option<Key>, /// Previous entry in the stack prev: Option<Key>, } /// Maximum number of entries the queue can handle const MAX_ENTRIES: usize = (1 << 30) - 1; impl<T> DelayQueue<T> { /// Creates a new, empty, `DelayQueue`. /// /// The queue will not allocate storage until items are inserted into it. /// /// # Examples /// /// ```rust /// # use tokio_util::time::DelayQueue; /// let delay_queue: DelayQueue<u32> = DelayQueue::new(); /// ``` pub fn new() -> DelayQueue<T> { DelayQueue::with_capacity(0) } /// Creates a new, empty, `DelayQueue` with the specified capacity. /// /// The queue will be able to hold at least `capacity` elements without /// reallocating. 
If `capacity` is 0, the queue will not allocate for /// storage. /// /// # Examples /// /// ```rust /// # use tokio_util::time::DelayQueue; /// # use std::time::Duration; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut delay_queue = DelayQueue::with_capacity(10); /// /// // These insertions are done without further allocation /// for i in 0..10 { /// delay_queue.insert(i, Duration::from_secs(i)); /// } /// /// // This will make the queue allocate additional storage /// delay_queue.insert(11, Duration::from_secs(11)); /// # } /// ``` pub fn with_capacity(capacity: usize) -> DelayQueue<T> { DelayQueue { wheel: Wheel::new(), slab: SlabStorage::with_capacity(capacity), expired: Stack::default(), delay: None, wheel_now: 0, start: Instant::now(), waker: None, } } /// Inserts `value` into the queue set to expire at a specific instant in /// time. /// /// This function is identical to `insert`, but takes an `Instant` instead /// of a `Duration`. /// /// `value` is stored in the queue until `when` is reached. At which point, /// `value` will be returned from [`poll_expired`]. If `when` has already been /// reached, then `value` is immediately made available to poll. /// /// The return value represents the insertion and is used as an argument to /// [`remove`] and [`reset`]. Note that [`Key`] is a token and is reused once /// `value` is removed from the queue either by calling [`poll_expired`] after /// `when` is reached or by calling [`remove`]. At this point, the caller /// must take care to not use the returned [`Key`] again as it may reference /// a different item in the queue. /// /// See [type] level documentation for more details. /// /// # Panics /// /// This function panics if `when` is too far in the future. 
/// /// # Examples /// /// Basic usage /// /// ```rust /// use tokio::time::{Duration, Instant}; /// use tokio_util::time::DelayQueue; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut delay_queue = DelayQueue::new(); /// let key = delay_queue.insert_at( /// "foo", Instant::now() + Duration::from_secs(5)); /// /// // Remove the entry /// let item = delay_queue.remove(&key); /// assert_eq!(*item.get_ref(), "foo"); /// # } /// ``` /// /// [`poll_expired`]: method@Self::poll_expired /// [`remove`]: method@Self::remove /// [`reset`]: method@Self::reset /// [`Key`]: struct@Key /// [type]: # #[track_caller] pub fn insert_at(&mut self, value: T, when: Instant) -> Key { assert!(self.slab.len() < MAX_ENTRIES, "max entries exceeded"); // Normalize the deadline. Values cannot be set to expire in the past. let when = self.normalize_deadline(when); // Insert the value in the store let key = self.slab.insert(Data { inner: value, when, expired: false, next: None, prev: None, }); self.insert_idx(when, key); // Set a new delay if the current's deadline is later than the one of the new item let should_set_delay = if let Some(ref delay) = self.delay { let current_exp = self.normalize_deadline(delay.deadline()); current_exp > when } else { true }; if should_set_delay { if let Some(waker) = self.waker.take() { waker.wake(); } let delay_time = self.start + Duration::from_millis(when); if let Some(ref mut delay) = &mut self.delay { delay.as_mut().reset(delay_time); } else { self.delay = Some(Box::pin(sleep_until(delay_time))); } } key } /// Attempts to pull out the next value of the delay queue, registering the /// current task for wakeup if the value is not yet available, and returning /// `None` if the queue is exhausted. 
pub fn poll_expired(&mut self, cx: &mut task::Context<'_>) -> Poll<Option<Expired<T>>> { if !self .waker .as_ref() .map(|w| w.will_wake(cx.waker())) .unwrap_or(false) { self.waker = Some(cx.waker().clone()); } let item = ready!(self.poll_idx(cx)); Poll::Ready(item.map(|key| { let data = self.slab.remove(&key); debug_assert!(data.next.is_none()); debug_assert!(data.prev.is_none()); Expired { key, data: data.inner, deadline: self.start + Duration::from_millis(data.when), } })) } /// Inserts `value` into the queue set to expire after the requested duration /// elapses. /// /// This function is identical to `insert_at`, but takes a `Duration` /// instead of an `Instant`. /// /// `value` is stored in the queue until `timeout` duration has /// elapsed after `insert` was called. At that point, `value` will /// be returned from [`poll_expired`]. If `timeout` is a `Duration` of /// zero, then `value` is immediately made available to poll. /// /// The return value represents the insertion and is used as an /// argument to [`remove`] and [`reset`]. Note that [`Key`] is a /// token and is reused once `value` is removed from the queue /// either by calling [`poll_expired`] after `timeout` has elapsed /// or by calling [`remove`]. At this point, the caller must not /// use the returned [`Key`] again as it may reference a different /// item in the queue. /// /// See [type] level documentation for more details. /// /// # Panics /// /// This function panics if `timeout` is greater than the maximum /// duration supported by the timer in the current `Runtime`. 
/// /// # Examples /// /// Basic usage /// /// ```rust /// use tokio_util::time::DelayQueue; /// use std::time::Duration; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut delay_queue = DelayQueue::new(); /// let key = delay_queue.insert("foo", Duration::from_secs(5)); /// /// // Remove the entry /// let item = delay_queue.remove(&key); /// assert_eq!(*item.get_ref(), "foo"); /// # } /// ``` /// /// [`poll_expired`]: method@Self::poll_expired /// [`remove`]: method@Self::remove /// [`reset`]: method@Self::reset /// [`Key`]: struct@Key /// [type]: # #[track_caller] pub fn insert(&mut self, value: T, timeout: Duration) -> Key { self.insert_at(value, Instant::now() + timeout) } #[track_caller] fn insert_idx(&mut self, when: u64, key: Key) { use self::wheel::{InsertError, Stack}; // Register the deadline with the timer wheel match self.wheel.insert(when, key, &mut self.slab) { Ok(_) => {} Err((_, InsertError::Elapsed)) => { self.slab[key].expired = true; // The delay is already expired, store it in the expired queue self.expired.push(key, &mut self.slab); } Err((_, err)) => panic!("invalid deadline; err={err:?}"), } } /// Returns the deadline of the item associated with `key`. /// /// Since the queue operates at millisecond granularity, the returned /// deadline may not exactly match the value that was given when initially /// inserting the item into the queue. /// /// # Panics /// /// This function panics if `key` is not contained by the queue. 
/// /// # Examples /// /// Basic usage /// /// ```rust /// use tokio_util::time::DelayQueue; /// use std::time::Duration; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut delay_queue = DelayQueue::new(); /// /// let key1 = delay_queue.insert("foo", Duration::from_secs(5)); /// let key2 = delay_queue.insert("bar", Duration::from_secs(10)); /// /// assert!(delay_queue.deadline(&key1) < delay_queue.deadline(&key2)); /// # } /// ``` #[track_caller] pub fn deadline(&self, key: &Key) -> Instant { self.start + Duration::from_millis(self.slab[*key].when) } /// Removes the key from the expired queue or the timer wheel /// depending on its expiration status. /// /// # Panics /// /// Panics if the key is not contained in the expired queue or the wheel. #[track_caller] fn remove_key(&mut self, key: &Key) { use crate::time::wheel::Stack; // Special case the `expired` queue if self.slab[*key].expired { self.expired.remove(key, &mut self.slab); } else { self.wheel.remove(key, &mut self.slab); } } /// Removes the item associated with `key` from the queue. /// /// There must be an item associated with `key`. The function returns the /// removed item as well as the `Instant` at which it will the delay will /// have expired. /// /// # Panics /// /// The function panics if `key` is not contained by the queue. 
/// /// # Examples /// /// Basic usage /// /// ```rust /// use tokio_util::time::DelayQueue; /// use std::time::Duration; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut delay_queue = DelayQueue::new(); /// let key = delay_queue.insert("foo", Duration::from_secs(5)); /// /// // Remove the entry /// let item = delay_queue.remove(&key); /// assert_eq!(*item.get_ref(), "foo"); /// # } /// ``` #[track_caller] pub fn remove(&mut self, key: &Key) -> Expired<T> { let prev_deadline = self.next_deadline(); self.remove_key(key); let data = self.slab.remove(key); let next_deadline = self.next_deadline(); if prev_deadline != next_deadline { match (next_deadline, &mut self.delay) { (None, _) => self.delay = None, (Some(deadline), Some(delay)) => delay.as_mut().reset(deadline), (Some(deadline), None) => self.delay = Some(Box::pin(sleep_until(deadline))), } } if self.slab.is_empty() { if let Some(waker) = self.waker.take() { waker.wake(); } } Expired { key: Key::new(key.index), data: data.inner, deadline: self.start + Duration::from_millis(data.when), } } /// Attempts to remove the item associated with `key` from the queue. /// /// Removes the item associated with `key`, and returns it along with the /// `Instant` at which it would have expired, if it exists. /// /// Returns `None` if `key` is not in the queue. /// /// # Examples /// /// Basic usage /// /// ```rust /// use tokio_util::time::DelayQueue; /// use std::time::Duration; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut delay_queue = DelayQueue::new(); /// let key = delay_queue.insert("foo", Duration::from_secs(5)); /// /// // The item is in the queue, `try_remove` returns `Some(Expired("foo"))`. /// let item = delay_queue.try_remove(&key); /// assert_eq!(item.unwrap().into_inner(), "foo"); /// /// // The item is not in the queue anymore, `try_remove` returns `None`. 
/// let item = delay_queue.try_remove(&key); /// assert!(item.is_none()); /// # } /// ``` pub fn try_remove(&mut self, key: &Key) -> Option<Expired<T>> { if self.slab.contains(key) { Some(self.remove(key)) } else { None } } /// Sets the delay of the item associated with `key` to expire at `when`. /// /// This function is identical to `reset` but takes an `Instant` instead of /// a `Duration`. /// /// The item remains in the queue but the delay is set to expire at `when`. /// If `when` is in the past, then the item is immediately made available to /// the caller. /// /// # Panics /// /// This function panics if `when` is too far in the future or if `key` is /// not contained by the queue. /// /// # Examples /// /// Basic usage /// /// ```rust /// use tokio::time::{Duration, Instant}; /// use tokio_util::time::DelayQueue; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut delay_queue = DelayQueue::new(); /// let key = delay_queue.insert("foo", Duration::from_secs(5)); /// /// // "foo" is scheduled to be returned in 5 seconds /// /// delay_queue.reset_at(&key, Instant::now() + Duration::from_secs(10)); /// /// // "foo" is now scheduled to be returned in 10 seconds /// # } /// ``` #[track_caller] pub fn reset_at(&mut self, key: &Key, when: Instant) { self.remove_key(key); // Normalize the deadline. Values cannot be set to expire in the past. let when = self.normalize_deadline(when); self.slab[*key].when = when; self.slab[*key].expired = false; self.insert_idx(when, *key); let next_deadline = self.next_deadline(); if let (Some(ref mut delay), Some(deadline)) = (&mut self.delay, next_deadline) { // This should awaken us if necessary (ie, if already expired) delay.as_mut().reset(deadline); } } /// Shrink the capacity of the slab, which `DelayQueue` uses internally for storage allocation. 
/// This function is not guaranteed to, and in most cases, won't decrease the capacity of the slab /// to the number of elements still contained in it, because elements cannot be moved to a different /// index. To decrease the capacity to the size of the slab use [`compact`]. /// /// This function can take O(n) time even when the capacity cannot be reduced or the allocation is /// shrunk in place. Repeated calls run in O(1) though. /// /// [`compact`]: method@Self::compact pub fn shrink_to_fit(&mut self) { self.slab.shrink_to_fit(); } /// Shrink the capacity of the slab, which `DelayQueue` uses internally for storage allocation, /// to the number of elements that are contained in it. /// /// This methods runs in O(n). /// /// # Examples /// /// Basic usage /// /// ```rust /// use tokio_util::time::DelayQueue; /// use std::time::Duration; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut delay_queue = DelayQueue::with_capacity(10); /// /// let key1 = delay_queue.insert(5, Duration::from_secs(5)); /// let key2 = delay_queue.insert(10, Duration::from_secs(10)); /// let key3 = delay_queue.insert(15, Duration::from_secs(15)); /// /// delay_queue.remove(&key2); /// /// delay_queue.compact(); /// assert_eq!(delay_queue.capacity(), 2); /// # } /// ``` pub fn compact(&mut self) { self.slab.compact(); } /// Gets the [`Key`] that [`poll_expired`] will pull out of the queue next, without /// pulling it out or waiting for the deadline to expire. /// /// Entries that have already expired may be returned in any order, but it is /// guaranteed that this method returns them in the same order as when items /// are popped from the `DelayQueue`. 
/// /// # Examples /// /// Basic usage /// /// ```rust /// use tokio_util::time::DelayQueue; /// use std::time::Duration; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut delay_queue = DelayQueue::new(); /// /// let key1 = delay_queue.insert("foo", Duration::from_secs(10)); /// let key2 = delay_queue.insert("bar", Duration::from_secs(5)); /// let key3 = delay_queue.insert("baz", Duration::from_secs(15)); /// /// assert_eq!(delay_queue.peek().unwrap(), key2); /// # } /// ``` /// /// [`Key`]: struct@Key /// [`poll_expired`]: method@Self::poll_expired pub fn peek(&self) -> Option<Key> { use self::wheel::Stack; self.expired.peek().or_else(|| self.wheel.peek()) } /// Returns the next time to poll as determined by the wheel. /// /// Note that this does not include deadlines in the `expired` queue. fn next_deadline(&self) -> Option<Instant> { self.wheel .poll_at() .map(|poll_at| self.start + Duration::from_millis(poll_at)) } /// Sets the delay of the item associated with `key` to expire after /// `timeout`. /// /// This function is identical to `reset_at` but takes a `Duration` instead /// of an `Instant`. /// /// The item remains in the queue but the delay is set to expire after /// `timeout`. If `timeout` is zero, then the item is immediately made /// available to the caller. /// /// # Panics /// /// This function panics if `timeout` is greater than the maximum supported /// duration or if `key` is not contained by the queue. /// /// # Examples /// /// Basic usage /// /// ```rust /// use tokio_util::time::DelayQueue;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
true
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/time/wheel/level.rs
tokio-util/src/time/wheel/level.rs
use crate::time::wheel::Stack; use std::fmt; /// Wheel for a single level in the timer. This wheel contains 64 slots. pub(crate) struct Level<T> { level: usize, /// Bit field tracking which slots currently contain entries. /// /// Using a bit field to track slots that contain entries allows avoiding a /// scan to find entries. This field is updated when entries are added or /// removed from a slot. /// /// The least-significant bit represents slot zero. occupied: u64, /// Slots slot: [T; LEVEL_MULT], } /// Indicates when a slot must be processed next. #[derive(Debug)] pub(crate) struct Expiration { /// The level containing the slot. pub(crate) level: usize, /// The slot index. pub(crate) slot: usize, /// The instant at which the slot needs to be processed. pub(crate) deadline: u64, } /// Level multiplier. /// /// Being a power of 2 is very important. const LEVEL_MULT: usize = 64; impl<T: Stack> Level<T> { pub(crate) fn new(level: usize) -> Level<T> { Level { level, occupied: 0, slot: std::array::from_fn(|_| T::default()), } } /// Finds the slot that needs to be processed next and returns the slot and /// `Instant` at which this slot must be processed. pub(crate) fn next_expiration(&self, now: u64) -> Option<Expiration> { // Use the `occupied` bit field to get the index of the next slot that // needs to be processed. let slot = self.next_occupied_slot(now)?; // From the slot index, calculate the `Instant` at which it needs to be // processed. This value *must* be in the future with respect to `now`. let level_range = level_range(self.level); let slot_range = slot_range(self.level); // TODO: This can probably be simplified w/ power of 2 math let level_start = now - (now % level_range); let mut deadline = level_start + slot as u64 * slot_range; if deadline < now { // A timer is in a slot "prior" to the current time. 
This can occur // because we do not have an infinite hierarchy of timer levels, and // eventually a timer scheduled for a very distant time might end up // being placed in a slot that is beyond the end of all of the // arrays. // // To deal with this, we first limit timers to being scheduled no // more than MAX_DURATION ticks in the future; that is, they're at // most one rotation of the top level away. Then, we force timers // that logically would go into the top+1 level, to instead go into // the top level's slots. // // What this means is that the top level's slots act as a // pseudo-ring buffer, and we rotate around them indefinitely. If we // compute a deadline before now, and it's the top level, it // therefore means we're actually looking at a slot in the future. debug_assert_eq!(self.level, super::NUM_LEVELS - 1); deadline += level_range; } debug_assert!( deadline >= now, "deadline={:016X}; now={:016X}; level={}; slot={}; occupied={:b}", deadline, now, self.level, slot, self.occupied ); Some(Expiration { level: self.level, slot, deadline, }) } fn next_occupied_slot(&self, now: u64) -> Option<usize> { if self.occupied == 0 { return None; } // Get the slot for now using Maths let now_slot = (now / slot_range(self.level)) as usize; let occupied = self.occupied.rotate_right(now_slot as u32); let zeros = occupied.trailing_zeros() as usize; let slot = (zeros + now_slot) % 64; Some(slot) } pub(crate) fn add_entry(&mut self, when: u64, item: T::Owned, store: &mut T::Store) { let slot = slot_for(when, self.level); self.slot[slot].push(item, store); self.occupied |= occupied_bit(slot); } pub(crate) fn remove_entry(&mut self, when: u64, item: &T::Borrowed, store: &mut T::Store) { let slot = slot_for(when, self.level); self.slot[slot].remove(item, store); if self.slot[slot].is_empty() { // The bit is currently set debug_assert!(self.occupied & occupied_bit(slot) != 0); // Unset the bit self.occupied ^= occupied_bit(slot); } } pub(crate) fn pop_entry_slot(&mut self, 
slot: usize, store: &mut T::Store) -> Option<T::Owned> { let ret = self.slot[slot].pop(store); if ret.is_some() && self.slot[slot].is_empty() { // The bit is currently set debug_assert!(self.occupied & occupied_bit(slot) != 0); self.occupied ^= occupied_bit(slot); } ret } pub(crate) fn peek_entry_slot(&self, slot: usize) -> Option<T::Owned> { self.slot[slot].peek() } } impl<T> fmt::Debug for Level<T> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("Level") .field("occupied", &self.occupied) .finish() } } fn occupied_bit(slot: usize) -> u64 { 1 << slot } fn slot_range(level: usize) -> u64 { LEVEL_MULT.pow(level as u32) as u64 } fn level_range(level: usize) -> u64 { LEVEL_MULT as u64 * slot_range(level) } /// Convert a duration (milliseconds) and a level to a slot position fn slot_for(duration: u64, level: usize) -> usize { ((duration >> (level * 6)) % LEVEL_MULT as u64) as usize } #[cfg(all(test, not(loom)))] mod test { use super::*; #[test] fn test_slot_for() { for pos in 0..64 { assert_eq!(pos as usize, slot_for(pos, 0)); } for level in 1..5 { for pos in level..64 { let a = pos * 64_usize.pow(level as u32); assert_eq!(pos, slot_for(a as u64, level)); } } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/time/wheel/mod.rs
tokio-util/src/time/wheel/mod.rs
mod level;
pub(crate) use self::level::Expiration;
use self::level::Level;

mod stack;
pub(crate) use self::stack::Stack;

use std::borrow::Borrow;
use std::fmt::Debug;

/// Timing wheel implementation.
///
/// This type provides the hashed timing wheel implementation that backs
/// [`DelayQueue`].
///
/// The structure is generic over `T: Stack`. This allows handling timeout data
/// being stored on the heap or in a slab. In order to support the latter case,
/// the slab must be passed into each function allowing the implementation to
/// lookup timer entries.
///
/// See `Driver` documentation for some implementation notes.
///
/// [`DelayQueue`]: crate::time::DelayQueue
#[derive(Debug)]
pub(crate) struct Wheel<T> {
    /// The number of milliseconds elapsed since the wheel started.
    elapsed: u64,

    /// Timer wheel.
    ///
    /// Levels:
    ///
    /// * 1 ms slots / 64 ms range
    /// * 64 ms slots / ~ 4 sec range
    /// * ~ 4 sec slots / ~ 4 min range
    /// * ~ 4 min slots / ~ 4 hr range
    /// * ~ 4 hr slots / ~ 12 day range
    /// * ~ 12 day slots / ~ 2 yr range
    levels: Box<[Level<T>]>,
}

/// Number of levels. Each level has 64 slots. By using 6 levels with 64 slots
/// each, the timer is able to track time up to 2 years into the future with a
/// precision of 1 millisecond.
const NUM_LEVELS: usize = 6;

/// The maximum duration of a delay, in milliseconds (2^36 - 1: 64^6 one-ms
/// slots).
const MAX_DURATION: u64 = (1 << (6 * NUM_LEVELS)) - 1;

/// Reasons why an entry could not be inserted into the wheel.
#[derive(Debug)]
pub(crate) enum InsertError {
    /// The deadline has already passed; the caller should fire the timeout
    /// immediately instead of storing it.
    Elapsed,
    /// The deadline is more than `MAX_DURATION` past the wheel's current time.
    Invalid,
}

impl<T> Wheel<T>
where
    T: Stack,
{
    /// Create a new timing wheel with all `NUM_LEVELS` levels empty.
    pub(crate) fn new() -> Wheel<T> {
        let levels = (0..NUM_LEVELS).map(Level::new).collect();

        Wheel { elapsed: 0, levels }
    }

    /// Return the number of milliseconds that have elapsed since the timing
    /// wheel's creation.
    pub(crate) fn elapsed(&self) -> u64 {
        self.elapsed
    }

    /// Insert an entry into the timing wheel.
    ///
    /// # Arguments
    ///
    /// * `when`: is the instant at which the entry should be fired. It is
    ///   represented as the number of milliseconds since the creation
    ///   of the timing wheel.
    ///
    /// * `item`: The item to insert into the wheel.
    ///
    /// * `store`: The slab or `()` when using heap storage.
    ///
    /// # Return
    ///
    /// Returns `Ok` when the item is successfully inserted, `Err` otherwise.
    ///
    /// `Err(Elapsed)` indicates that `when` represents an instant that has
    /// already passed. In this case, the caller should fire the timeout
    /// immediately.
    ///
    /// `Err(Invalid)` indicates an invalid `when` argument as been supplied.
    pub(crate) fn insert(
        &mut self,
        when: u64,
        item: T::Owned,
        store: &mut T::Store,
    ) -> Result<(), (T::Owned, InsertError)> {
        // Reject deadlines that are in the past or beyond the wheel's range;
        // the item is handed back to the caller in both error cases.
        if when <= self.elapsed {
            return Err((item, InsertError::Elapsed));
        } else if when - self.elapsed > MAX_DURATION {
            return Err((item, InsertError::Invalid));
        }

        // Get the level at which the entry should be stored
        let level = self.level_for(when);

        self.levels[level].add_entry(when, item, store);

        // Sanity-check: the chosen level must not report an expiration that
        // precedes the wheel's current time.
        debug_assert!({
            self.levels[level]
                .next_expiration(self.elapsed)
                .map(|e| e.deadline >= self.elapsed)
                .unwrap_or(true)
        });

        Ok(())
    }

    /// Remove `item` from the timing wheel.
    ///
    /// Panics if the item's deadline (per `T::when`) is earlier than the
    /// wheel's elapsed time, since such an entry could not still be stored.
    #[track_caller]
    pub(crate) fn remove(&mut self, item: &T::Borrowed, store: &mut T::Store) {
        let when = T::when(item, store);

        assert!(
            self.elapsed <= when,
            "elapsed={}; when={}",
            self.elapsed,
            when
        );

        let level = self.level_for(when);

        self.levels[level].remove_entry(when, item, store);
    }

    /// Instant at which to poll
    pub(crate) fn poll_at(&self) -> Option<u64> {
        self.next_expiration().map(|expiration| expiration.deadline)
    }

    /// Next key that will expire
    pub(crate) fn peek(&self) -> Option<T::Owned> {
        self.next_expiration()
            .and_then(|expiration| self.peek_entry(&expiration))
    }

    /// Advances the timer up to the instant represented by `now`.
    ///
    /// Returns the next expired item, if any; otherwise advances `elapsed`
    /// to `now` and returns `None`.
    pub(crate) fn poll(&mut self, now: u64, store: &mut T::Store) -> Option<T::Owned> {
        loop {
            // Only consider expirations whose deadline is at or before `now`.
            let expiration = self.next_expiration().and_then(|expiration| {
                if expiration.deadline > now {
                    None
                } else {
                    Some(expiration)
                }
            });

            match expiration {
                Some(ref expiration) => {
                    if let Some(item) = self.poll_expiration(expiration, store) {
                        return Some(item);
                    }

                    // The slot was drained (entries cascaded down a level);
                    // advance time and look for the next expiration.
                    self.set_elapsed(expiration.deadline);
                }
                None => {
                    // in this case the poll did not indicate an expiration
                    // _and_ we were not able to find a next expiration in
                    // the current list of timers.  advance to the poll's
                    // current time and do nothing else.
                    self.set_elapsed(now);
                    return None;
                }
            }
        }
    }

    /// Returns the instant at which the next timeout expires.
    fn next_expiration(&self) -> Option<Expiration> {
        // Check all levels
        for level in 0..NUM_LEVELS {
            if let Some(expiration) = self.levels[level].next_expiration(self.elapsed) {
                // There cannot be any expirations at a higher level that happen
                // before this one.
                debug_assert!(self.no_expirations_before(level + 1, expiration.deadline));

                return Some(expiration);
            }
        }

        None
    }

    /// Used for debug assertions
    fn no_expirations_before(&self, start_level: usize, before: u64) -> bool {
        let mut res = true;

        for l2 in start_level..NUM_LEVELS {
            if let Some(e2) = self.levels[l2].next_expiration(self.elapsed) {
                if e2.deadline < before {
                    res = false;
                }
            }
        }

        res
    }

    /// iteratively find entries that are between the wheel's current
    /// time and the expiration time.  for each in that population either
    /// return it for notification (in the case of level 0, the finest
    /// granularity) or tier it down to the next level (in all other cases).
    pub(crate) fn poll_expiration(
        &mut self,
        expiration: &Expiration,
        store: &mut T::Store,
    ) -> Option<T::Owned> {
        while let Some(item) = self.pop_entry(expiration, store) {
            if expiration.level == 0 {
                // At level 0 the slot maps 1:1 to a deadline, so the entry
                // is ready to be fired.
                debug_assert_eq!(T::when(item.borrow(), store), expiration.deadline);

                return Some(item);
            } else {
                // Cascade the entry down one level; it will be re-slotted
                // at the finer granularity.
                let when = T::when(item.borrow(), store);

                let next_level = expiration.level - 1;

                self.levels[next_level].add_entry(when, item, store);
            }
        }

        None
    }

    /// Advance the wheel's notion of elapsed time; time may never move
    /// backwards.
    fn set_elapsed(&mut self, when: u64) {
        assert!(
            self.elapsed <= when,
            "elapsed={:?}; when={:?}",
            self.elapsed,
            when
        );

        if when > self.elapsed {
            self.elapsed = when;
        }
    }

    /// Remove the next entry stored in the slot identified by `expiration`.
    fn pop_entry(&mut self, expiration: &Expiration, store: &mut T::Store) -> Option<T::Owned> {
        self.levels[expiration.level].pop_entry_slot(expiration.slot, store)
    }

    /// Look at (without removing) the next entry in the slot identified by
    /// `expiration`.
    fn peek_entry(&self, expiration: &Expiration) -> Option<T::Owned> {
        self.levels[expiration.level].peek_entry_slot(expiration.slot)
    }

    /// Level in which an entry with deadline `when` should currently live,
    /// relative to the wheel's elapsed time.
    fn level_for(&self, when: u64) -> usize {
        level_for(self.elapsed, when)
    }
}

/// Computes the wheel level for a deadline `when` given the current time
/// `elapsed`: the level is determined by the highest differing 6-bit group
/// between the two instants.
fn level_for(elapsed: u64, when: u64) -> usize {
    const SLOT_MASK: u64 = (1 << 6) - 1;

    // Mask in the trailing bits ignored by the level calculation in order to cap
    // the possible leading zeros.
    // NOTE: `^` binds tighter than `|` in Rust, so this is
    // `(elapsed ^ when) | SLOT_MASK`, which is the intent.
    let mut masked = elapsed ^ when | SLOT_MASK;

    if masked >= MAX_DURATION {
        // Fudge the timer into the top level
        masked = MAX_DURATION - 1;
    }

    let leading_zeros = masked.leading_zeros() as usize;
    let significant = 63 - leading_zeros;

    // Each level spans 6 bits (64 slots).
    significant / 6
}

#[cfg(all(test, not(loom)))]
mod test {
    use super::*;

    #[test]
    fn test_level_for() {
        for pos in 0..64 {
            assert_eq!(0, level_for(0, pos), "level_for({pos}) -- binary = {pos:b}");
        }

        for level in 1..5 {
            for pos in level..64 {
                let a = pos * 64_usize.pow(level as u32);
                assert_eq!(
                    level,
                    level_for(0, a as u64),
                    "level_for({a}) -- binary = {a:b}"
                );

                if pos > level {
                    let a = a - 1;
                    assert_eq!(
                        level,
                        level_for(0, a as u64),
                        "level_for({a}) -- binary = {a:b}"
                    );
                }

                if pos < 64 {
                    let a = a + 1;
                    assert_eq!(
                        level,
                        level_for(0, a as u64),
                        "level_for({a}) -- binary = {a:b}"
                    );
                }
            }
        }
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/time/wheel/stack.rs
tokio-util/src/time/wheel/stack.rs
use std::borrow::Borrow;
use std::cmp::Eq;
use std::hash::Hash;

/// Abstracts the stack operations needed to track timeouts.
///
/// Implementations may store entries directly on the heap or indirectly in a
/// separate `Store` (e.g. a slab); every operation therefore receives the
/// store alongside the item.
pub(crate) trait Stack: Default {
    /// Type of the item stored in the stack
    type Owned: Borrow<Self::Borrowed>;

    /// Borrowed item
    type Borrowed: Eq + Hash;

    /// Item storage, this allows a slab to be used instead of just the heap
    type Store;

    /// Returns `true` if the stack is empty
    fn is_empty(&self) -> bool;

    /// Push an item onto the stack
    fn push(&mut self, item: Self::Owned, store: &mut Self::Store);

    /// Pop an item from the stack
    fn pop(&mut self, store: &mut Self::Store) -> Option<Self::Owned>;

    /// Peek into the stack.
    fn peek(&self) -> Option<Self::Owned>;

    /// Remove `item` from the stack.
    fn remove(&mut self, item: &Self::Borrowed, store: &mut Self::Store);

    /// Returns the deadline associated with `item`, in milliseconds since the
    /// wheel's creation.
    fn when(item: &Self::Borrowed, store: &Self::Store) -> u64;
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/net/mod.rs
tokio-util/src/net/mod.rs
#![cfg(not(loom))] //! TCP/UDP/Unix helpers for tokio. use crate::either::Either; use std::future::Future; use std::io::Result; use std::pin::Pin; use std::task::{Context, Poll}; #[cfg(unix)] pub mod unix; /// A trait for a listener: `TcpListener` and `UnixListener`. pub trait Listener { /// The stream's type of this listener. type Io: tokio::io::AsyncRead + tokio::io::AsyncWrite; /// The socket address type of this listener. type Addr; /// Polls to accept a new incoming connection to this listener. fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll<Result<(Self::Io, Self::Addr)>>; /// Accepts a new incoming connection from this listener. fn accept(&mut self) -> ListenerAcceptFut<'_, Self> where Self: Sized, { ListenerAcceptFut { listener: self } } /// Returns the local address that this listener is bound to. fn local_addr(&self) -> Result<Self::Addr>; } impl Listener for tokio::net::TcpListener { type Io = tokio::net::TcpStream; type Addr = std::net::SocketAddr; fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll<Result<(Self::Io, Self::Addr)>> { Self::poll_accept(self, cx) } fn local_addr(&self) -> Result<Self::Addr> { self.local_addr() } } /// Future for accepting a new connection from a listener. #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct ListenerAcceptFut<'a, L> { listener: &'a mut L, } impl<'a, L> Future for ListenerAcceptFut<'a, L> where L: Listener, { type Output = Result<(L::Io, L::Addr)>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { self.listener.poll_accept(cx) } } impl<L, R> Either<L, R> where L: Listener, R: Listener, { /// Accepts a new incoming connection from this listener. 
pub async fn accept(&mut self) -> Result<Either<(L::Io, L::Addr), (R::Io, R::Addr)>> { match self { Either::Left(listener) => { let (stream, addr) = listener.accept().await?; Ok(Either::Left((stream, addr))) } Either::Right(listener) => { let (stream, addr) = listener.accept().await?; Ok(Either::Right((stream, addr))) } } } /// Returns the local address that this listener is bound to. pub fn local_addr(&self) -> Result<Either<L::Addr, R::Addr>> { match self { Either::Left(listener) => { let addr = listener.local_addr()?; Ok(Either::Left(addr)) } Either::Right(listener) => { let addr = listener.local_addr()?; Ok(Either::Right(addr)) } } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/net/unix/mod.rs
tokio-util/src/net/unix/mod.rs
//! Unix domain socket helpers. use super::Listener; use std::io::Result; use std::task::{Context, Poll}; impl Listener for tokio::net::UnixListener { type Io = tokio::net::UnixStream; type Addr = tokio::net::unix::SocketAddr; fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll<Result<(Self::Io, Self::Addr)>> { Self::poll_accept(self, cx) } fn local_addr(&self) -> Result<Self::Addr> { self.local_addr() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/task/join_queue.rs
tokio-util/src/task/join_queue.rs
use super::AbortOnDropHandle;
use std::{
    collections::VecDeque,
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};
use tokio::{
    runtime::Handle,
    task::{AbortHandle, Id, JoinError, JoinHandle},
};

/// A FIFO queue of tasks spawned on a Tokio runtime.
///
/// A [`JoinQueue`] can be used to await the completion of the tasks in FIFO
/// order. That is, if tasks are spawned in the order A, B, C, then
/// awaiting the next completed task will always return A first, then B,
/// then C, regardless of the order in which the tasks actually complete.
///
/// All of the tasks must have the same return type `T`.
///
/// When the [`JoinQueue`] is dropped, all tasks in the [`JoinQueue`] are
/// immediately aborted.
pub struct JoinQueue<T>(VecDeque<AbortOnDropHandle<T>>);

impl<T> JoinQueue<T> {
    /// Create a new empty [`JoinQueue`].
    pub const fn new() -> Self {
        Self(VecDeque::new())
    }

    /// Creates an empty [`JoinQueue`] with space for at least `capacity` tasks.
    pub fn with_capacity(capacity: usize) -> Self {
        Self(VecDeque::with_capacity(capacity))
    }

    /// Returns the number of tasks currently in the [`JoinQueue`].
    ///
    /// This includes both tasks that are currently running and tasks that have
    /// completed but not yet been removed from the queue because outputting of
    /// them waits for FIFO order.
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// Returns whether the [`JoinQueue`] is empty.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Spawn the provided task on the [`JoinQueue`], returning an [`AbortHandle`]
    /// that can be used to remotely cancel the task.
    ///
    /// The provided future will start running in the background immediately
    /// when this method is called, even if you don't await anything on this
    /// [`JoinQueue`].
    ///
    /// # Panics
    ///
    /// This method panics if called outside of a Tokio runtime.
    ///
    /// [`AbortHandle`]: tokio::task::AbortHandle
    #[track_caller]
    pub fn spawn<F>(&mut self, task: F) -> AbortHandle
    where
        F: Future<Output = T> + Send + 'static,
        T: Send + 'static,
    {
        self.push_back(tokio::spawn(task))
    }

    /// Spawn the provided task on the provided runtime and store it in this
    /// [`JoinQueue`] returning an [`AbortHandle`] that can be used to remotely
    /// cancel the task.
    ///
    /// The provided future will start running in the background immediately
    /// when this method is called, even if you don't await anything on this
    /// [`JoinQueue`].
    ///
    /// [`AbortHandle`]: tokio::task::AbortHandle
    #[track_caller]
    pub fn spawn_on<F>(&mut self, task: F, handle: &Handle) -> AbortHandle
    where
        F: Future<Output = T> + Send + 'static,
        T: Send + 'static,
    {
        self.push_back(handle.spawn(task))
    }

    /// Spawn the provided task on the current [`LocalSet`] or [`LocalRuntime`]
    /// and store it in this [`JoinQueue`], returning an [`AbortHandle`] that
    /// can be used to remotely cancel the task.
    ///
    /// The provided future will start running in the background immediately
    /// when this method is called, even if you don't await anything on this
    /// [`JoinQueue`].
    ///
    /// # Panics
    ///
    /// This method panics if it is called outside of a `LocalSet` or `LocalRuntime`.
    ///
    /// [`LocalSet`]: tokio::task::LocalSet
    /// [`LocalRuntime`]: tokio::runtime::LocalRuntime
    /// [`AbortHandle`]: tokio::task::AbortHandle
    #[track_caller]
    pub fn spawn_local<F>(&mut self, task: F) -> AbortHandle
    where
        F: Future<Output = T> + 'static,
        T: 'static,
    {
        self.push_back(tokio::task::spawn_local(task))
    }

    /// Spawn the blocking code on the blocking threadpool and store
    /// it in this [`JoinQueue`], returning an [`AbortHandle`] that can be
    /// used to remotely cancel the task.
    ///
    /// # Panics
    ///
    /// This method panics if called outside of a Tokio runtime.
    ///
    /// [`AbortHandle`]: tokio::task::AbortHandle
    #[track_caller]
    pub fn spawn_blocking<F>(&mut self, f: F) -> AbortHandle
    where
        F: FnOnce() -> T + Send + 'static,
        T: Send + 'static,
    {
        self.push_back(tokio::task::spawn_blocking(f))
    }

    /// Spawn the blocking code on the blocking threadpool of the
    /// provided runtime and store it in this [`JoinQueue`], returning an
    /// [`AbortHandle`] that can be used to remotely cancel the task.
    ///
    /// [`AbortHandle`]: tokio::task::AbortHandle
    #[track_caller]
    pub fn spawn_blocking_on<F>(&mut self, f: F, handle: &Handle) -> AbortHandle
    where
        F: FnOnce() -> T + Send + 'static,
        T: Send + 'static,
    {
        self.push_back(handle.spawn_blocking(f))
    }

    /// Append a join handle to the back of the queue, wrapping it so the task
    /// is aborted if it is still in the queue when the queue is dropped.
    fn push_back(&mut self, jh: JoinHandle<T>) -> AbortHandle {
        let jh = AbortOnDropHandle::new(jh);
        let abort_handle = jh.abort_handle();
        self.0.push_back(jh);
        abort_handle
    }

    /// Waits until the next task in FIFO order completes and returns its output.
    ///
    /// Returns `None` if the queue is empty.
    ///
    /// # Cancel Safety
    ///
    /// This method is cancel safe. If `join_next` is used as the event in a `tokio::select!`
    /// statement and some other branch completes first, it is guaranteed that no tasks were
    /// removed from this [`JoinQueue`].
    pub async fn join_next(&mut self) -> Option<Result<T, JoinError>> {
        std::future::poll_fn(|cx| self.poll_join_next(cx)).await
    }

    /// Waits until the next task in FIFO order completes and returns its output,
    /// along with the [task ID] of the completed task.
    ///
    /// Returns `None` if the queue is empty.
    ///
    /// When this method returns an error, then the id of the task that failed can be accessed
    /// using the [`JoinError::id`] method.
    ///
    /// # Cancel Safety
    ///
    /// This method is cancel safe. If `join_next_with_id` is used as the event in a `tokio::select!`
    /// statement and some other branch completes first, it is guaranteed that no tasks were
    /// removed from this [`JoinQueue`].
    ///
    /// [task ID]: tokio::task::Id
    /// [`JoinError::id`]: fn@tokio::task::JoinError::id
    pub async fn join_next_with_id(&mut self) -> Option<Result<(Id, T), JoinError>> {
        std::future::poll_fn(|cx| self.poll_join_next_with_id(cx)).await
    }

    /// Tries to poll an `AbortOnDropHandle` without blocking or yielding.
    ///
    /// Note that on success the handle will panic on subsequent polls
    /// since it becomes consumed.
    fn try_poll_handle(jh: &mut AbortOnDropHandle<T>) -> Option<Result<T, JoinError>> {
        // A no-op waker suffices: we only want the current readiness, we do
        // not register for a wakeup.
        let waker = futures_util::task::noop_waker();
        let mut cx = Context::from_waker(&waker);

        // Since this function is not async and cannot be forced to yield, we should
        // disable budgeting when we want to check for the `JoinHandle` readiness.
        let jh = std::pin::pin!(tokio::task::coop::unconstrained(jh));
        if let Poll::Ready(res) = jh.poll(&mut cx) {
            Some(res)
        } else {
            None
        }
    }

    /// Tries to join the next task in FIFO order if it has completed.
    ///
    /// Returns `None` if the queue is empty or if the next task is not yet ready.
    pub fn try_join_next(&mut self) -> Option<Result<T, JoinError>> {
        let jh = self.0.front_mut()?;
        let res = Self::try_poll_handle(jh)?;
        // Use `detach` to avoid calling `abort` on a task that has already completed.
        // Dropping `AbortOnDropHandle` would abort the task, but since it is finished,
        // we only need to drop the `JoinHandle` for cleanup.
        drop(self.0.pop_front().unwrap().detach());
        Some(res)
    }

    /// Tries to join the next task in FIFO order if it has completed and return its output,
    /// along with its [task ID].
    ///
    /// Returns `None` if the queue is empty or if the next task is not yet ready.
    ///
    /// When this method returns an error, then the id of the task that failed can be accessed
    /// using the [`JoinError::id`] method.
    ///
    /// [task ID]: tokio::task::Id
    /// [`JoinError::id`]: fn@tokio::task::JoinError::id
    pub fn try_join_next_with_id(&mut self) -> Option<Result<(Id, T), JoinError>> {
        let jh = self.0.front_mut()?;
        let res = Self::try_poll_handle(jh)?;
        // Use `detach` to avoid calling `abort` on a task that has already completed.
        // Dropping `AbortOnDropHandle` would abort the task, but since it is finished,
        // we only need to drop the `JoinHandle` for cleanup.
        let jh = self.0.pop_front().unwrap().detach();
        let id = jh.id();
        drop(jh);
        Some(res.map(|output| (id, output)))
    }

    /// Aborts all tasks and waits for them to finish shutting down.
    ///
    /// Calling this method is equivalent to calling [`abort_all`] and then calling [`join_next`] in
    /// a loop until it returns `None`.
    ///
    /// This method ignores any panics in the tasks shutting down. When this call returns, the
    /// [`JoinQueue`] will be empty.
    ///
    /// [`abort_all`]: fn@Self::abort_all
    /// [`join_next`]: fn@Self::join_next
    pub async fn shutdown(&mut self) {
        self.abort_all();
        while self.join_next().await.is_some() {}
    }

    /// Awaits the completion of all tasks in this [`JoinQueue`], returning a vector of their results.
    ///
    /// The results will be stored in the order they were spawned, not the order they completed.
    /// This is a convenience method that is equivalent to calling [`join_next`] in
    /// a loop. If any tasks on the [`JoinQueue`] fail with a [`JoinError`], then this call
    /// to `join_all` will panic and all remaining tasks on the [`JoinQueue`] are
    /// cancelled. To handle errors in any other way, manually call [`join_next`]
    /// in a loop.
    ///
    /// # Cancel Safety
    ///
    /// This method is not cancel safe as it calls `join_next` in a loop. If you need
    /// cancel safety, manually call `join_next` in a loop with `Vec` accumulator.
    ///
    /// [`join_next`]: fn@Self::join_next
    /// [`JoinError::id`]: fn@tokio::task::JoinError::id
    pub async fn join_all(mut self) -> Vec<T> {
        let mut output = Vec::with_capacity(self.len());

        while let Some(res) = self.join_next().await {
            match res {
                Ok(t) => output.push(t),
                // Propagate a task panic to the caller with the original payload.
                Err(err) if err.is_panic() => std::panic::resume_unwind(err.into_panic()),
                Err(err) => panic!("{err}"),
            }
        }
        output
    }

    /// Aborts all tasks on this [`JoinQueue`].
    ///
    /// This does not remove the tasks from the [`JoinQueue`]. To wait for the tasks to complete
    /// cancellation, you should call `join_next` in a loop until the [`JoinQueue`] is empty.
    pub fn abort_all(&mut self) {
        self.0.iter().for_each(|jh| jh.abort());
    }

    /// Removes all tasks from this [`JoinQueue`] without aborting them.
    ///
    /// The tasks removed by this call will continue to run in the background even if the [`JoinQueue`]
    /// is dropped.
    pub fn detach_all(&mut self) {
        self.0.drain(..).for_each(|jh| drop(jh.detach()));
    }

    /// Polls for the next task in [`JoinQueue`] to complete.
    ///
    /// If this returns `Poll::Ready(Some(_))`, then the task that completed is removed from the queue.
    ///
    /// When the method returns `Poll::Pending`, the `Waker` in the provided `Context` is scheduled
    /// to receive a wakeup when a task in the [`JoinQueue`] completes. Note that on multiple calls to
    /// `poll_join_next`, only the `Waker` from the `Context` passed to the most recent call is
    /// scheduled to receive a wakeup.
    ///
    /// # Returns
    ///
    /// This function returns:
    ///
    ///  * `Poll::Pending` if the [`JoinQueue`] is not empty but there is no task whose output is
    ///    available right now.
    ///  * `Poll::Ready(Some(Ok(value)))` if the next task in this [`JoinQueue`] has completed.
    ///    The `value` is the return value that task.
    ///  * `Poll::Ready(Some(Err(err)))` if the next task in this [`JoinQueue`] has panicked or been
    ///    aborted. The `err` is the `JoinError` from the panicked/aborted task.
    ///  * `Poll::Ready(None)` if the [`JoinQueue`] is empty.
    pub fn poll_join_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<T, JoinError>>> {
        let jh = match self.0.front_mut() {
            None => return Poll::Ready(None),
            Some(jh) => jh,
        };
        if let Poll::Ready(res) = Pin::new(jh).poll(cx) {
            // Use `detach` to avoid calling `abort` on a task that has already completed.
            // Dropping `AbortOnDropHandle` would abort the task, but since it is finished,
            // we only need to drop the `JoinHandle` for cleanup.
            drop(self.0.pop_front().unwrap().detach());
            Poll::Ready(Some(res))
        } else {
            Poll::Pending
        }
    }

    /// Polls for the next task in [`JoinQueue`] to complete.
    ///
    /// If this returns `Poll::Ready(Some(_))`, then the task that completed is removed from the queue.
    ///
    /// When the method returns `Poll::Pending`, the `Waker` in the provided `Context` is scheduled
    /// to receive a wakeup when a task in the [`JoinQueue`] completes. Note that on multiple calls to
    /// `poll_join_next`, only the `Waker` from the `Context` passed to the most recent call is
    /// scheduled to receive a wakeup.
    ///
    /// # Returns
    ///
    /// This function returns:
    ///
    ///  * `Poll::Pending` if the [`JoinQueue`] is not empty but there is no task whose output is
    ///    available right now.
    ///  * `Poll::Ready(Some(Ok((id, value))))` if the next task in this [`JoinQueue`] has completed.
    ///    The `value` is the return value that task, and `id` is its [task ID].
    ///  * `Poll::Ready(Some(Err(err)))` if the next task in this [`JoinQueue`] has panicked or been
    ///    aborted. The `err` is the `JoinError` from the panicked/aborted task.
    ///  * `Poll::Ready(None)` if the [`JoinQueue`] is empty.
    ///
    /// [task ID]: tokio::task::Id
    pub fn poll_join_next_with_id(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<(Id, T), JoinError>>> {
        let jh = match self.0.front_mut() {
            None => return Poll::Ready(None),
            Some(jh) => jh,
        };
        if let Poll::Ready(res) = Pin::new(jh).poll(cx) {
            // Use `detach` to avoid calling `abort` on a task that has already completed.
            // Dropping `AbortOnDropHandle` would abort the task, but since it is finished,
            // we only need to drop the `JoinHandle` for cleanup.
            let jh = self.0.pop_front().unwrap().detach();
            let id = jh.id();
            drop(jh);
            // If the task succeeded, add the task ID to the output. Otherwise, the
            // `JoinError` will already have the task's ID.
            Poll::Ready(Some(res.map(|output| (id, output))))
        } else {
            Poll::Pending
        }
    }
}

impl<T> std::fmt::Debug for JoinQueue<T> {
    // Render the queue as a list of the pending tasks' IDs (the output type
    // `T` need not be `Debug`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_list()
            .entries(self.0.iter().map(|jh| JoinHandle::id(jh.as_ref())))
            .finish()
    }
}

impl<T> Default for JoinQueue<T> {
    fn default() -> Self {
        Self::new()
    }
}

/// Collect an iterator of futures into a [`JoinQueue`].
///
/// This is equivalent to calling [`JoinQueue::spawn`] on each element of the iterator.
impl<T, F> std::iter::FromIterator<F> for JoinQueue<T>
where
    F: Future<Output = T> + Send + 'static,
    T: Send + 'static,
{
    fn from_iter<I: IntoIterator<Item = F>>(iter: I) -> Self {
        let mut set = Self::new();
        iter.into_iter().for_each(|task| {
            set.spawn(task);
        });
        set
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// A simple type that does not implement [`std::fmt::Debug`].
    struct NotDebug;

    fn is_debug<T: std::fmt::Debug>() {}

    #[test]
    fn assert_debug() {
        is_debug::<JoinQueue<NotDebug>>();
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/task/join_map.rs
tokio-util/src/task/join_map.rs
use hashbrown::hash_table::Entry; use hashbrown::{HashMap, HashTable}; use std::borrow::Borrow; use std::collections::hash_map::RandomState; use std::fmt; use std::future::Future; use std::hash::{BuildHasher, Hash}; use std::marker::PhantomData; use tokio::runtime::Handle; use tokio::task::{AbortHandle, Id, JoinError, JoinSet, LocalSet}; /// A collection of tasks spawned on a Tokio runtime, associated with hash map /// keys. /// /// This type is very similar to the [`JoinSet`] type in `tokio::task`, with the /// addition of a set of keys associated with each task. These keys allow /// [cancelling a task][abort] or [multiple tasks][abort_matching] in the /// `JoinMap` based on their keys, or [test whether a task corresponding to a /// given key exists][contains] in the `JoinMap`. /// /// In addition, when tasks in the `JoinMap` complete, they will return the /// associated key along with the value returned by the task, if any. /// /// A `JoinMap` can be used to await the completion of some or all of the tasks /// in the map. The map is not ordered, and the tasks will be returned in the /// order they complete. /// /// All of the tasks must have the same return type `V`. /// /// When the `JoinMap` is dropped, all tasks in the `JoinMap` are immediately aborted. /// /// # Examples /// /// Spawn multiple tasks and wait for them: /// /// ``` /// use tokio_util::task::JoinMap; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut map = JoinMap::new(); /// /// for i in 0..10 { /// // Spawn a task on the `JoinMap` with `i` as its key. /// map.spawn(i, async move { /* ... */ }); /// } /// /// let mut seen = [false; 10]; /// /// // When a task completes, `join_next` returns the task's key along /// // with its output. 
/// while let Some((key, res)) = map.join_next().await { /// seen[key] = true; /// assert!(res.is_ok(), "task {} completed successfully!", key); /// } /// /// for i in 0..10 { /// assert!(seen[i]); /// } /// # } /// ``` /// /// Cancel tasks based on their keys: /// /// ``` /// use tokio_util::task::JoinMap; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut map = JoinMap::new(); /// /// map.spawn("hello world", std::future::ready(1)); /// map.spawn("goodbye world", std::future::pending()); /// /// // Look up the "goodbye world" task in the map and abort it. /// let aborted = map.abort("goodbye world"); /// /// // `JoinMap::abort` returns `true` if a task existed for the /// // provided key. /// assert!(aborted); /// /// while let Some((key, res)) = map.join_next().await { /// if key == "goodbye world" { /// // The aborted task should complete with a cancelled `JoinError`. /// assert!(res.unwrap_err().is_cancelled()); /// } else { /// // Other tasks should complete normally. /// assert_eq!(res.unwrap(), 1); /// } /// } /// # } /// ``` /// /// [`JoinSet`]: tokio::task::JoinSet /// [abort]: fn@Self::abort /// [abort_matching]: fn@Self::abort_matching /// [contains]: fn@Self::contains_key pub struct JoinMap<K, V, S = RandomState> { /// A map of the [`AbortHandle`]s of the tasks spawned on this `JoinMap`, /// indexed by their keys. tasks_by_key: HashTable<(K, AbortHandle)>, /// A map from task IDs to the hash of the key associated with that task. /// /// This map is used to perform reverse lookups of tasks in the /// `tasks_by_key` map based on their task IDs. When a task terminates, the /// ID is provided to us by the `JoinSet`, so we can look up the hash value /// of that task's key, and then remove it from the `tasks_by_key` map using /// the raw hash code, resolving collisions by comparing task IDs. hashes_by_task: HashMap<Id, u64, S>, /// The [`JoinSet`] that awaits the completion of tasks spawned on this /// `JoinMap`. 
tasks: JoinSet<V>, } impl<K, V> JoinMap<K, V> { /// Creates a new empty `JoinMap`. /// /// The `JoinMap` is initially created with a capacity of 0, so it will not /// allocate until a task is first spawned on it. /// /// # Examples /// /// ``` /// use tokio_util::task::JoinMap; /// let map: JoinMap<&str, i32> = JoinMap::new(); /// ``` #[inline] #[must_use] pub fn new() -> Self { Self::with_hasher(RandomState::new()) } /// Creates an empty `JoinMap` with the specified capacity. /// /// The `JoinMap` will be able to hold at least `capacity` tasks without /// reallocating. /// /// # Examples /// /// ``` /// use tokio_util::task::JoinMap; /// let map: JoinMap<&str, i32> = JoinMap::with_capacity(10); /// ``` #[inline] #[must_use] pub fn with_capacity(capacity: usize) -> Self { JoinMap::with_capacity_and_hasher(capacity, Default::default()) } } impl<K, V, S> JoinMap<K, V, S> { /// Creates an empty `JoinMap` which will use the given hash builder to hash /// keys. /// /// The created map has the default initial capacity. /// /// Warning: `hash_builder` is normally randomly generated, and /// is designed to allow `JoinMap` to be resistant to attacks that /// cause many collisions and very poor performance. Setting it /// manually using this function can expose a DoS attack vector. /// /// The `hash_builder` passed should implement the [`BuildHasher`] trait for /// the `JoinMap` to be useful, see its documentation for details. #[inline] #[must_use] pub fn with_hasher(hash_builder: S) -> Self { Self::with_capacity_and_hasher(0, hash_builder) } /// Creates an empty `JoinMap` with the specified capacity, using `hash_builder` /// to hash the keys. /// /// The `JoinMap` will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the `JoinMap` will not allocate. /// /// Warning: `hash_builder` is normally randomly generated, and /// is designed to allow HashMaps to be resistant to attacks that /// cause many collisions and very poor performance. 
Setting it /// manually using this function can expose a DoS attack vector. /// /// The `hash_builder` passed should implement the [`BuildHasher`] trait for /// the `JoinMap`to be useful, see its documentation for details. /// /// # Examples /// /// ``` /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// use tokio_util::task::JoinMap; /// use std::collections::hash_map::RandomState; /// /// let s = RandomState::new(); /// let mut map = JoinMap::with_capacity_and_hasher(10, s); /// map.spawn(1, async move { "hello world!" }); /// # } /// ``` #[inline] #[must_use] pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self { Self { tasks_by_key: HashTable::with_capacity(capacity), hashes_by_task: HashMap::with_capacity_and_hasher(capacity, hash_builder), tasks: JoinSet::new(), } } /// Returns the number of tasks currently in the `JoinMap`. pub fn len(&self) -> usize { let len = self.tasks_by_key.len(); debug_assert_eq!(len, self.hashes_by_task.len()); len } /// Returns whether the `JoinMap` is empty. pub fn is_empty(&self) -> bool { let empty = self.tasks_by_key.is_empty(); debug_assert_eq!(empty, self.hashes_by_task.is_empty()); empty } /// Returns the number of tasks the map can hold without reallocating. /// /// This number is a lower bound; the `JoinMap` might be able to hold /// more, but is guaranteed to be able to hold at least this many. /// /// # Examples /// /// ``` /// use tokio_util::task::JoinMap; /// /// let map: JoinMap<i32, i32> = JoinMap::with_capacity(100); /// assert!(map.capacity() >= 100); /// ``` #[inline] pub fn capacity(&self) -> usize { let capacity = self.tasks_by_key.capacity(); debug_assert_eq!(capacity, self.hashes_by_task.capacity()); capacity } } impl<K, V, S> JoinMap<K, V, S> where K: Hash + Eq, V: 'static, S: BuildHasher, { /// Spawn the provided task and store it in this `JoinMap` with the provided /// key. 
/// /// If a task previously existed in the `JoinMap` for this key, that task /// will be cancelled and replaced with the new one. The previous task will /// be removed from the `JoinMap`; a subsequent call to [`join_next`] will /// *not* return a cancelled [`JoinError`] for that task. /// /// # Panics /// /// This method panics if called outside of a Tokio runtime. /// /// [`join_next`]: Self::join_next #[track_caller] pub fn spawn<F>(&mut self, key: K, task: F) where F: Future<Output = V>, F: Send + 'static, V: Send, { let task = self.tasks.spawn(task); self.insert(key, task) } /// Spawn the provided task on the provided runtime and store it in this /// `JoinMap` with the provided key. /// /// If a task previously existed in the `JoinMap` for this key, that task /// will be cancelled and replaced with the new one. The previous task will /// be removed from the `JoinMap`; a subsequent call to [`join_next`] will /// *not* return a cancelled [`JoinError`] for that task. /// /// [`join_next`]: Self::join_next #[track_caller] pub fn spawn_on<F>(&mut self, key: K, task: F, handle: &Handle) where F: Future<Output = V>, F: Send + 'static, V: Send, { let task = self.tasks.spawn_on(task, handle); self.insert(key, task); } /// Spawn the blocking code on the blocking threadpool and store it in this `JoinMap` with the provided /// key. /// /// If a task previously existed in the `JoinMap` for this key, that task /// will be cancelled and replaced with the new one. The previous task will /// be removed from the `JoinMap`; a subsequent call to [`join_next`] will /// *not* return a cancelled [`JoinError`] for that task. /// /// Note that blocking tasks cannot be cancelled after execution starts. /// Replaced blocking tasks will still run to completion if the task has begun /// to execute when it is replaced. A blocking task which is replaced before /// it has been scheduled on a blocking worker thread will be cancelled. 
/// /// # Panics /// /// This method panics if called outside of a Tokio runtime. /// /// [`join_next`]: Self::join_next #[track_caller] pub fn spawn_blocking<F>(&mut self, key: K, f: F) where F: FnOnce() -> V, F: Send + 'static, V: Send, { let task = self.tasks.spawn_blocking(f); self.insert(key, task) } /// Spawn the blocking code on the blocking threadpool of the provided runtime and store it in this /// `JoinMap` with the provided key. /// /// If a task previously existed in the `JoinMap` for this key, that task /// will be cancelled and replaced with the new one. The previous task will /// be removed from the `JoinMap`; a subsequent call to [`join_next`] will /// *not* return a cancelled [`JoinError`] for that task. /// /// Note that blocking tasks cannot be cancelled after execution starts. /// Replaced blocking tasks will still run to completion if the task has begun /// to execute when it is replaced. A blocking task which is replaced before /// it has been scheduled on a blocking worker thread will be cancelled. /// /// [`join_next`]: Self::join_next #[track_caller] pub fn spawn_blocking_on<F>(&mut self, key: K, f: F, handle: &Handle) where F: FnOnce() -> V, F: Send + 'static, V: Send, { let task = self.tasks.spawn_blocking_on(f, handle); self.insert(key, task); } /// Spawn the provided task on the current [`LocalSet`] or [`LocalRuntime`] /// and store it in this `JoinMap` with the provided key. /// /// If a task previously existed in the `JoinMap` for this key, that task /// will be cancelled and replaced with the new one. The previous task will /// be removed from the `JoinMap`; a subsequent call to [`join_next`] will /// *not* return a cancelled [`JoinError`] for that task. /// /// # Panics /// /// This method panics if it is called outside of a `LocalSet` or `LocalRuntime`. 
/// /// [`LocalSet`]: tokio::task::LocalSet /// [`LocalRuntime`]: tokio::runtime::LocalRuntime /// [`join_next`]: Self::join_next #[track_caller] pub fn spawn_local<F>(&mut self, key: K, task: F) where F: Future<Output = V>, F: 'static, { let task = self.tasks.spawn_local(task); self.insert(key, task); } /// Spawn the provided task on the provided [`LocalSet`] and store it in /// this `JoinMap` with the provided key. /// /// If a task previously existed in the `JoinMap` for this key, that task /// will be cancelled and replaced with the new one. The previous task will /// be removed from the `JoinMap`; a subsequent call to [`join_next`] will /// *not* return a cancelled [`JoinError`] for that task. /// /// [`LocalSet`]: tokio::task::LocalSet /// [`join_next`]: Self::join_next #[track_caller] pub fn spawn_local_on<F>(&mut self, key: K, task: F, local_set: &LocalSet) where F: Future<Output = V>, F: 'static, { let task = self.tasks.spawn_local_on(task, local_set); self.insert(key, task) } fn insert(&mut self, mut key: K, mut abort: AbortHandle) { let hash_builder = self.hashes_by_task.hasher(); let hash = hash_builder.hash_one(&key); let id = abort.id(); // Insert the new key into the map of tasks by keys. let entry = self.tasks_by_key .entry(hash, |(k, _)| *k == key, |(k, _)| hash_builder.hash_one(k)); match entry { Entry::Occupied(occ) => { // There was a previous task spawned with the same key! Cancel // that task, and remove its ID from the map of hashes by task IDs. (key, abort) = std::mem::replace(occ.into_mut(), (key, abort)); // Remove the old task ID. let _prev_hash = self.hashes_by_task.remove(&abort.id()); debug_assert_eq!(Some(hash), _prev_hash); // Associate the key's hash with the new task's ID, for looking up tasks by ID. let _prev = self.hashes_by_task.insert(id, hash); debug_assert!(_prev.is_none(), "no prior task should have had the same ID"); // Note: it's important to drop `key` and abort the task here. 
// This defends against any panics during drop handling for causing inconsistent state. abort.abort(); drop(key); } Entry::Vacant(vac) => { vac.insert((key, abort)); // Associate the key's hash with this task's ID, for looking up tasks by ID. let _prev = self.hashes_by_task.insert(id, hash); debug_assert!(_prev.is_none(), "no prior task should have had the same ID"); } }; } /// Waits until one of the tasks in the map completes and returns its /// output, along with the key corresponding to that task. /// /// Returns `None` if the map is empty. /// /// # Cancel Safety /// /// This method is cancel safe. If `join_next` is used as the event in a [`tokio::select!`] /// statement and some other branch completes first, it is guaranteed that no tasks were /// removed from this `JoinMap`. /// /// # Returns /// /// This function returns: /// /// * `Some((key, Ok(value)))` if one of the tasks in this `JoinMap` has /// completed. The `value` is the return value of that ask, and `key` is /// the key associated with the task. /// * `Some((key, Err(err))` if one of the tasks in this `JoinMap` has /// panicked or been aborted. `key` is the key associated with the task /// that panicked or was aborted. /// * `None` if the `JoinMap` is empty. /// /// [`tokio::select!`]: tokio::select pub async fn join_next(&mut self) -> Option<(K, Result<V, JoinError>)> { loop { let (res, id) = match self.tasks.join_next_with_id().await { Some(Ok((id, output))) => (Ok(output), id), Some(Err(e)) => { let id = e.id(); (Err(e), id) } None => return None, }; if let Some(key) = self.remove_by_id(id) { break Some((key, res)); } } } /// Aborts all tasks and waits for them to finish shutting down. /// /// Calling this method is equivalent to calling [`abort_all`] and then calling [`join_next`] in /// a loop until it returns `None`. /// /// This method ignores any panics in the tasks shutting down. When this call returns, the /// `JoinMap` will be empty. 
/// /// [`abort_all`]: fn@Self::abort_all /// [`join_next`]: fn@Self::join_next pub async fn shutdown(&mut self) { self.abort_all(); while self.join_next().await.is_some() {} } /// Abort the task corresponding to the provided `key`. /// /// If this `JoinMap` contains a task corresponding to `key`, this method /// will abort that task and return `true`. Otherwise, if no task exists for /// `key`, this method returns `false`. /// /// # Examples /// /// Aborting a task by key: /// /// ``` /// use tokio_util::task::JoinMap; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut map = JoinMap::new(); /// /// map.spawn("hello world", std::future::ready(1)); /// map.spawn("goodbye world", std::future::pending()); /// /// // Look up the "goodbye world" task in the map and abort it. /// map.abort("goodbye world"); /// /// while let Some((key, res)) = map.join_next().await { /// if key == "goodbye world" { /// // The aborted task should complete with a cancelled `JoinError`. /// assert!(res.unwrap_err().is_cancelled()); /// } else { /// // Other tasks should complete normally. /// assert_eq!(res.unwrap(), 1); /// } /// } /// # } /// ``` /// /// `abort` returns `true` if a task was aborted: /// ``` /// use tokio_util::task::JoinMap; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut map = JoinMap::new(); /// /// map.spawn("hello world", async move { /* ... */ }); /// map.spawn("goodbye world", async move { /* ... */}); /// /// // A task for the key "goodbye world" should exist in the map: /// assert!(map.abort("goodbye world")); /// /// // Aborting a key that does not exist will return `false`: /// assert!(!map.abort("goodbye universe")); /// # } /// ``` pub fn abort<Q>(&mut self, key: &Q) -> bool where Q: ?Sized + Hash + Eq, K: Borrow<Q>, { match self.get_by_key(key) { Some((_, handle)) => { handle.abort(); true } None => false, } } /// Aborts all tasks with keys matching `predicate`. 
/// /// `predicate` is a function called with a reference to each key in the /// map. If it returns `true` for a given key, the corresponding task will /// be cancelled. /// /// # Examples /// ``` /// use tokio_util::task::JoinMap; /// /// # // use the current thread rt so that spawned tasks don't /// # // complete in the background before they can be aborted. /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let mut map = JoinMap::new(); /// /// map.spawn("hello world", async move { /// // ... /// # tokio::task::yield_now().await; // don't complete immediately, get aborted! /// }); /// map.spawn("goodbye world", async move { /// // ... /// # tokio::task::yield_now().await; // don't complete immediately, get aborted! /// }); /// map.spawn("hello san francisco", async move { /// // ... /// # tokio::task::yield_now().await; // don't complete immediately, get aborted! /// }); /// map.spawn("goodbye universe", async move { /// // ... /// # tokio::task::yield_now().await; // don't complete immediately, get aborted! /// }); /// /// // Abort all tasks whose keys begin with "goodbye" /// map.abort_matching(|key| key.starts_with("goodbye")); /// /// let mut seen = 0; /// while let Some((key, res)) = map.join_next().await { /// seen += 1; /// if key.starts_with("goodbye") { /// // The aborted task should complete with a cancelled `JoinError`. /// assert!(res.unwrap_err().is_cancelled()); /// } else { /// // Other tasks should complete normally. /// assert!(key.starts_with("hello")); /// assert!(res.is_ok()); /// } /// } /// /// // All spawned tasks should have completed. /// assert_eq!(seen, 4); /// # } /// ``` pub fn abort_matching(&mut self, mut predicate: impl FnMut(&K) -> bool) { // Note: this method iterates over the tasks and keys *without* removing // any entries, so that the keys from aborted tasks can still be // returned when calling `join_next` in the future. 
for (key, task) in &self.tasks_by_key { if predicate(key) { task.abort(); } } } /// Returns an iterator visiting all keys in this `JoinMap` in arbitrary order. /// /// If a task has completed, but its output hasn't yet been consumed by a /// call to [`join_next`], this method will still return its key. /// /// [`join_next`]: fn@Self::join_next pub fn keys(&self) -> JoinMapKeys<'_, K, V> { JoinMapKeys { iter: self.tasks_by_key.iter(), _value: PhantomData, } } /// Returns `true` if this `JoinMap` contains a task for the provided key. /// /// If the task has completed, but its output hasn't yet been consumed by a /// call to [`join_next`], this method will still return `true`. /// /// [`join_next`]: fn@Self::join_next pub fn contains_key<Q>(&self, key: &Q) -> bool where Q: ?Sized + Hash + Eq, K: Borrow<Q>, { self.get_by_key(key).is_some() } /// Returns `true` if this `JoinMap` contains a task with the provided /// [task ID]. /// /// If the task has completed, but its output hasn't yet been consumed by a /// call to [`join_next`], this method will still return `true`. /// /// [`join_next`]: fn@Self::join_next /// [task ID]: tokio::task::Id pub fn contains_task(&self, task: &Id) -> bool { self.hashes_by_task.contains_key(task) } /// Reserves capacity for at least `additional` more tasks to be spawned /// on this `JoinMap` without reallocating for the map of task keys. The /// collection may reserve more space to avoid frequent reallocations. /// /// Note that spawning a task will still cause an allocation for the task /// itself. /// /// # Panics /// /// Panics if the new allocation size overflows [`usize`]. 
/// /// # Examples /// /// ``` /// use tokio_util::task::JoinMap; /// /// let mut map: JoinMap<&str, i32> = JoinMap::new(); /// map.reserve(10); /// ``` #[inline] pub fn reserve(&mut self, additional: usize) { self.tasks_by_key.reserve(additional, |(k, _)| { self.hashes_by_task.hasher().hash_one(k) }); self.hashes_by_task.reserve(additional); } /// Shrinks the capacity of the `JoinMap` as much as possible. It will drop /// down as much as possible while maintaining the internal rules /// and possibly leaving some space in accordance with the resize policy. /// /// # Examples /// /// ``` /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// use tokio_util::task::JoinMap; /// /// let mut map: JoinMap<i32, i32> = JoinMap::with_capacity(100); /// map.spawn(1, async move { 2 }); /// map.spawn(3, async move { 4 }); /// assert!(map.capacity() >= 100); /// map.shrink_to_fit(); /// assert!(map.capacity() >= 2); /// # } /// ``` #[inline] pub fn shrink_to_fit(&mut self) { self.hashes_by_task.shrink_to_fit(); self.tasks_by_key .shrink_to_fit(|(k, _)| self.hashes_by_task.hasher().hash_one(k)); } /// Shrinks the capacity of the map with a lower limit. It will drop /// down no lower than the supplied limit while maintaining the internal rules /// and possibly leaving some space in accordance with the resize policy. /// /// If the current capacity is less than the lower limit, this is a no-op. 
/// /// # Examples /// /// ``` /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// use tokio_util::task::JoinMap; /// /// let mut map: JoinMap<i32, i32> = JoinMap::with_capacity(100); /// map.spawn(1, async move { 2 }); /// map.spawn(3, async move { 4 }); /// assert!(map.capacity() >= 100); /// map.shrink_to(10); /// assert!(map.capacity() >= 10); /// map.shrink_to(0); /// assert!(map.capacity() >= 2); /// # } /// ``` #[inline] pub fn shrink_to(&mut self, min_capacity: usize) { self.hashes_by_task.shrink_to(min_capacity); self.tasks_by_key.shrink_to(min_capacity, |(k, _)| { self.hashes_by_task.hasher().hash_one(k) }) } /// Look up a task in the map by its key, returning the key and abort handle. fn get_by_key<'map, Q>(&'map self, key: &Q) -> Option<&'map (K, AbortHandle)> where Q: ?Sized + Hash + Eq, K: Borrow<Q>, { let hash = self.hashes_by_task.hasher().hash_one(key); self.tasks_by_key.find(hash, |(k, _)| k.borrow() == key) } /// Remove a task from the map by ID, returning the key for that task. fn remove_by_id(&mut self, id: Id) -> Option<K> { // Get the hash for the given ID. let hash = self.hashes_by_task.remove(&id)?; // Remove the entry for that hash. let entry = self .tasks_by_key .find_entry(hash, |(_, abort)| abort.id() == id); let (key, _) = match entry { Ok(entry) => entry.remove().0, _ => return None, }; Some(key) } } impl<K, V, S> JoinMap<K, V, S> where V: 'static, { /// Aborts all tasks on this `JoinMap`. /// /// This does not remove the tasks from the `JoinMap`. To wait for the tasks to complete /// cancellation, you should call `join_next` in a loop until the `JoinMap` is empty. pub fn abort_all(&mut self) { self.tasks.abort_all() } /// Removes all tasks from this `JoinMap` without aborting them. /// /// The tasks removed by this call will continue to run in the background even if the `JoinMap` /// is dropped. They may still be aborted by key. 
pub fn detach_all(&mut self) { self.tasks.detach_all(); self.tasks_by_key.clear(); self.hashes_by_task.clear(); } } // Hand-written `fmt::Debug` implementation in order to avoid requiring `V: // Debug`, since no value is ever actually stored in the map. impl<K: fmt::Debug, V, S> fmt::Debug for JoinMap<K, V, S> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // format the task keys and abort handles a little nicer by just // printing the key and task ID pairs, without format the `Key` struct // itself or the `AbortHandle`, which would just format the task's ID // again. struct KeySet<'a, K: fmt::Debug>(&'a HashTable<(K, AbortHandle)>); impl<K: fmt::Debug> fmt::Debug for KeySet<'_, K> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map() .entries(self.0.iter().map(|(key, abort)| (key, abort.id()))) .finish() } } f.debug_struct("JoinMap") // The `tasks_by_key` map is the only one that contains information // that's really worth formatting for the user, since it contains // the tasks' keys and IDs. The other fields are basically // implementation details. .field("tasks", &KeySet(&self.tasks_by_key)) .finish() } } impl<K, V> Default for JoinMap<K, V> { fn default() -> Self { Self::new() } } /// An iterator over the keys of a [`JoinMap`]. #[derive(Debug, Clone)] pub struct JoinMapKeys<'a, K, V> { iter: hashbrown::hash_table::Iter<'a, (K, AbortHandle)>, /// To make it easier to change `JoinMap` in the future, keep V as a generic /// parameter. _value: PhantomData<&'a V>, } impl<'a, K, V> Iterator for JoinMapKeys<'a, K, V> { type Item = &'a K; fn next(&mut self) -> Option<&'a K> { self.iter.next().map(|(key, _)| key) } fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } } impl<'a, K, V> ExactSizeIterator for JoinMapKeys<'a, K, V> { fn len(&self) -> usize { self.iter.len() } } impl<'a, K, V> std::iter::FusedIterator for JoinMapKeys<'a, K, V> {}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/task/spawn_pinned.rs
tokio-util/src/task/spawn_pinned.rs
use futures_util::future::{AbortHandle, Abortable}; use std::fmt; use std::fmt::{Debug, Formatter}; use std::future::Future; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use tokio::runtime::Builder; use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; use tokio::sync::oneshot; use tokio::task::{spawn_local, JoinHandle, LocalSet}; /// A cloneable handle to a local pool, used for spawning `!Send` tasks. /// /// Internally the local pool uses a [`tokio::task::LocalSet`] for each worker thread /// in the pool. Consequently you can also use [`tokio::task::spawn_local`] (which will /// execute on the same thread) inside the Future you supply to the various spawn methods /// of `LocalPoolHandle`. /// /// [`tokio::task::LocalSet`]: tokio::task::LocalSet /// [`tokio::task::spawn_local`]: tokio::task::spawn_local /// /// # Examples /// /// ``` /// # #[cfg(not(target_family = "wasm"))] /// # { /// use std::rc::Rc; /// use tokio::task; /// use tokio_util::task::LocalPoolHandle; /// /// #[tokio::main(flavor = "current_thread")] /// async fn main() { /// let pool = LocalPoolHandle::new(5); /// /// let output = pool.spawn_pinned(|| { /// // `data` is !Send + !Sync /// let data = Rc::new("local data"); /// let data_clone = data.clone(); /// /// async move { /// task::spawn_local(async move { /// println!("{}", data_clone); /// }); /// /// data.to_string() /// } /// }).await.unwrap(); /// println!("output: {}", output); /// } /// # } /// ``` /// #[derive(Clone)] pub struct LocalPoolHandle { pool: Arc<LocalPool>, } impl LocalPoolHandle { /// Create a new pool of threads to handle `!Send` tasks. Spawn tasks onto this /// pool via [`LocalPoolHandle::spawn_pinned`]. /// /// # Panics /// /// Panics if the pool size is less than one. 
#[track_caller] pub fn new(pool_size: usize) -> LocalPoolHandle { assert!(pool_size > 0); let workers = (0..pool_size) .map(|_| LocalWorkerHandle::new_worker()) .collect(); let pool = Arc::new(LocalPool { workers }); LocalPoolHandle { pool } } /// Returns the number of threads of the Pool. #[inline] pub fn num_threads(&self) -> usize { self.pool.workers.len() } /// Returns the number of tasks scheduled on each worker. The indices of the /// worker threads correspond to the indices of the returned `Vec`. pub fn get_task_loads_for_each_worker(&self) -> Vec<usize> { self.pool .workers .iter() .map(|worker| worker.task_count.load(Ordering::SeqCst)) .collect::<Vec<_>>() } /// Spawn a task onto a worker thread and pin it there so it can't be moved /// off of the thread. Note that the future is not [`Send`], but the /// [`FnOnce`] which creates it is. /// /// # Examples /// ``` /// # #[cfg(not(target_family = "wasm"))] /// # { /// use std::rc::Rc; /// use tokio_util::task::LocalPoolHandle; /// /// #[tokio::main] /// async fn main() { /// // Create the local pool /// let pool = LocalPoolHandle::new(1); /// /// // Spawn a !Send future onto the pool and await it /// let output = pool /// .spawn_pinned(|| { /// // Rc is !Send + !Sync /// let local_data = Rc::new("test"); /// /// // This future holds an Rc, so it is !Send /// async move { local_data.to_string() } /// }) /// .await /// .unwrap(); /// /// assert_eq!(output, "test"); /// } /// # } /// ``` pub fn spawn_pinned<F, Fut>(&self, create_task: F) -> JoinHandle<Fut::Output> where F: FnOnce() -> Fut, F: Send + 'static, Fut: Future + 'static, Fut::Output: Send + 'static, { self.pool .spawn_pinned(create_task, WorkerChoice::LeastBurdened) } /// Differs from `spawn_pinned` only in that you can choose a specific worker thread /// of the pool, whereas `spawn_pinned` chooses the worker with the smallest /// number of tasks scheduled. /// /// A worker thread is chosen by index. 
Indices are 0 based and the largest index /// is given by `num_threads() - 1` /// /// # Panics /// /// This method panics if the index is out of bounds. /// /// # Examples /// /// This method can be used to spawn a task on all worker threads of the pool: /// /// ``` /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio_util::task::LocalPoolHandle; /// /// #[tokio::main] /// async fn main() { /// const NUM_WORKERS: usize = 3; /// let pool = LocalPoolHandle::new(NUM_WORKERS); /// let handles = (0..pool.num_threads()) /// .map(|worker_idx| { /// pool.spawn_pinned_by_idx( /// || { /// async { /// "test" /// } /// }, /// worker_idx, /// ) /// }) /// .collect::<Vec<_>>(); /// /// for handle in handles { /// handle.await.unwrap(); /// } /// } /// # } /// ``` /// #[track_caller] pub fn spawn_pinned_by_idx<F, Fut>(&self, create_task: F, idx: usize) -> JoinHandle<Fut::Output> where F: FnOnce() -> Fut, F: Send + 'static, Fut: Future + 'static, Fut::Output: Send + 'static, { self.pool .spawn_pinned(create_task, WorkerChoice::ByIdx(idx)) } } impl Debug for LocalPoolHandle { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.write_str("LocalPoolHandle") } } enum WorkerChoice { LeastBurdened, ByIdx(usize), } struct LocalPool { workers: Box<[LocalWorkerHandle]>, } impl LocalPool { /// Spawn a `?Send` future onto a worker #[track_caller] fn spawn_pinned<F, Fut>( &self, create_task: F, worker_choice: WorkerChoice, ) -> JoinHandle<Fut::Output> where F: FnOnce() -> Fut, F: Send + 'static, Fut: Future + 'static, Fut::Output: Send + 'static, { let (sender, receiver) = oneshot::channel(); let (worker, job_guard) = match worker_choice { WorkerChoice::LeastBurdened => self.find_and_incr_least_burdened_worker(), WorkerChoice::ByIdx(idx) => self.find_worker_by_idx(idx), }; let worker_spawner = worker.spawner.clone(); // Spawn a future onto the worker's runtime so we can immediately return // a join handle. 
worker.runtime_handle.spawn(async move { // Move the job guard into the task let _job_guard = job_guard; // Propagate aborts via Abortable/AbortHandle let (abort_handle, abort_registration) = AbortHandle::new_pair(); let _abort_guard = AbortGuard(abort_handle); // Inside the future we can't run spawn_local yet because we're not // in the context of a LocalSet. We need to send create_task to the // LocalSet task for spawning. let spawn_task = Box::new(move || { // Once we're in the LocalSet context we can call spawn_local let join_handle = spawn_local( async move { Abortable::new(create_task(), abort_registration).await }, ); // Send the join handle back to the spawner. If sending fails, // we assume the parent task was canceled, so cancel this task // as well. if let Err(join_handle) = sender.send(join_handle) { join_handle.abort() } }); // Send the callback to the LocalSet task if let Err(e) = worker_spawner.send(spawn_task) { // Propagate the error as a panic in the join handle. panic!("Failed to send job to worker: {e}"); } // Wait for the task's join handle let join_handle = match receiver.await { Ok(handle) => handle, Err(e) => { // We sent the task successfully, but failed to get its // join handle... We assume something happened to the worker // and the task was not spawned. Propagate the error as a // panic in the join handle. panic!("Worker failed to send join handle: {e}"); } }; // Wait for the task to complete let join_result = join_handle.await; match join_result { Ok(Ok(output)) => output, Ok(Err(_)) => { // Pinned task was aborted. But that only happens if this // task is aborted. So this is an impossible branch. unreachable!( "Reaching this branch means this task was previously \ aborted but it continued running anyways" ) } Err(e) => { if e.is_panic() { std::panic::resume_unwind(e.into_panic()); } else if e.is_cancelled() { // No one else should have the join handle, so this is // unexpected. Forward this error as a panic in the join // handle. 
panic!("spawn_pinned task was canceled: {e}"); } else { // Something unknown happened (not a panic or // cancellation). Forward this error as a panic in the // join handle. panic!("spawn_pinned task failed: {e}"); } } } }) } /// Find the worker with the least number of tasks, increment its task /// count, and return its handle. Make sure to actually spawn a task on /// the worker so the task count is kept consistent with load. /// /// A job count guard is also returned to ensure the task count gets /// decremented when the job is done. fn find_and_incr_least_burdened_worker(&self) -> (&LocalWorkerHandle, JobCountGuard) { loop { let (worker, task_count) = self .workers .iter() .map(|worker| (worker, worker.task_count.load(Ordering::SeqCst))) .min_by_key(|&(_, count)| count) .expect("There must be more than one worker"); // Make sure the task count hasn't changed since when we choose this // worker. Otherwise, restart the search. if worker .task_count .compare_exchange( task_count, task_count + 1, Ordering::SeqCst, Ordering::Relaxed, ) .is_ok() { return (worker, JobCountGuard(Arc::clone(&worker.task_count))); } } } #[track_caller] fn find_worker_by_idx(&self, idx: usize) -> (&LocalWorkerHandle, JobCountGuard) { let worker = &self.workers[idx]; worker.task_count.fetch_add(1, Ordering::SeqCst); (worker, JobCountGuard(Arc::clone(&worker.task_count))) } } /// Automatically decrements a worker's job count when a job finishes (when /// this gets dropped). struct JobCountGuard(Arc<AtomicUsize>); impl Drop for JobCountGuard { fn drop(&mut self) { // Decrement the job count let previous_value = self.0.fetch_sub(1, Ordering::SeqCst); debug_assert!(previous_value >= 1); } } /// Calls abort on the handle when dropped. 
struct AbortGuard(AbortHandle); impl Drop for AbortGuard { fn drop(&mut self) { self.0.abort(); } } type PinnedFutureSpawner = Box<dyn FnOnce() + Send + 'static>; struct LocalWorkerHandle { runtime_handle: tokio::runtime::Handle, spawner: UnboundedSender<PinnedFutureSpawner>, task_count: Arc<AtomicUsize>, } impl LocalWorkerHandle { /// Create a new worker for executing pinned tasks fn new_worker() -> LocalWorkerHandle { let (sender, receiver) = unbounded_channel(); let runtime = Builder::new_current_thread() .enable_all() .build() .expect("Failed to start a pinned worker thread runtime"); let runtime_handle = runtime.handle().clone(); let task_count = Arc::new(AtomicUsize::new(0)); let task_count_clone = Arc::clone(&task_count); std::thread::spawn(|| Self::run(runtime, receiver, task_count_clone)); LocalWorkerHandle { runtime_handle, spawner: sender, task_count, } } fn run( runtime: tokio::runtime::Runtime, mut task_receiver: UnboundedReceiver<PinnedFutureSpawner>, task_count: Arc<AtomicUsize>, ) { let local_set = LocalSet::new(); local_set.block_on(&runtime, async { while let Some(spawn_task) = task_receiver.recv().await { // Calls spawn_local(future) (spawn_task)(); } }); // If there are any tasks on the runtime associated with a LocalSet task // that has already completed, but whose output has not yet been // reported, let that task complete. // // Since the task_count is decremented when the runtime task exits, // reading that counter lets us know if any such tasks completed during // the call to `block_on`. // // Tasks on the LocalSet can't complete during this loop since they're // stored on the LocalSet and we aren't accessing it. let mut previous_task_count = task_count.load(Ordering::SeqCst); loop { // This call will also run tasks spawned on the runtime. 
runtime.block_on(tokio::task::yield_now()); let new_task_count = task_count.load(Ordering::SeqCst); if new_task_count == previous_task_count { break; } else { previous_task_count = new_task_count; } } // It's now no longer possible for a task on the runtime to be // associated with a LocalSet task that has completed. Drop both the // LocalSet and runtime to let tasks on the runtime be cancelled if and // only if they are still on the LocalSet. // // Drop the LocalSet task first so that anyone awaiting the runtime // JoinHandle will see the cancelled error after the LocalSet task // destructor has completed. drop(local_set); drop(runtime); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/task/task_tracker.rs
tokio-util/src/task/task_tracker.rs
//! Types related to the [`TaskTracker`] collection. //! //! See the documentation of [`TaskTracker`] for more information. use pin_project_lite::pin_project; use std::fmt; use std::future::Future; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::task::{Context, Poll}; use tokio::sync::{futures::Notified, Notify}; #[cfg(feature = "rt")] use tokio::{ runtime::Handle, task::{JoinHandle, LocalSet}, }; /// A task tracker used for waiting until tasks exit. /// /// This is usually used together with [`CancellationToken`] to implement [graceful shutdown]. The /// `CancellationToken` is used to signal to tasks that they should shut down, and the /// `TaskTracker` is used to wait for them to finish shutting down. /// /// The `TaskTracker` will also keep track of a `closed` boolean. This is used to handle the case /// where the `TaskTracker` is empty, but we don't want to shut down yet. This means that the /// [`wait`] method will wait until *both* of the following happen at the same time: /// /// * The `TaskTracker` must be closed using the [`close`] method. /// * The `TaskTracker` must be empty, that is, all tasks that it is tracking must have exited. /// /// When a call to [`wait`] returns, it is guaranteed that all tracked tasks have exited and that /// the destructor of the future has finished running. However, there might be a short amount of /// time where [`JoinHandle::is_finished`] returns false. /// /// # Comparison to `JoinSet` /// /// The main Tokio crate has a similar collection known as [`JoinSet`]. The `JoinSet` type has a /// lot more features than `TaskTracker`, so `TaskTracker` should only be used when one of its /// unique features is required: /// /// 1. When tasks exit, a `TaskTracker` will allow the task to immediately free its memory. /// 2. By not closing the `TaskTracker`, [`wait`] will be prevented from returning even if /// the `TaskTracker` is empty. /// 3. 
A `TaskTracker` does not require mutable access to insert tasks. /// 4. A `TaskTracker` can be cloned to share it with many tasks. /// /// The first point is the most important one. A [`JoinSet`] keeps track of the return value of /// every inserted task. This means that if the caller keeps inserting tasks and never calls /// [`join_next`], then their return values will keep building up and consuming memory, _even if_ /// most of the tasks have already exited. This can cause the process to run out of memory. With a /// `TaskTracker`, this does not happen. Once tasks exit, they are immediately removed from the /// `TaskTracker`. /// /// Note that unlike [`JoinSet`], dropping a `TaskTracker` does not abort the tasks. /// /// # Examples /// /// For more examples, please see the topic page on [graceful shutdown]. /// /// ## Spawn tasks and wait for them to exit /// /// This is a simple example. For this case, [`JoinSet`] should probably be used instead. /// /// ``` /// use tokio_util::task::TaskTracker; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let tracker = TaskTracker::new(); /// /// for i in 0..10 { /// tracker.spawn(async move { /// println!("Task {} is running!", i); /// }); /// } /// // Once we spawned everything, we close the tracker. /// tracker.close(); /// /// // Wait for everything to finish. /// tracker.wait().await; /// /// println!("This is printed after all of the tasks."); /// # } /// ``` /// /// ## Wait for tasks to exit /// /// This example shows the intended use-case of `TaskTracker`. It is used together with /// [`CancellationToken`] to implement graceful shutdown. 
/// ``` /// use tokio_util::sync::CancellationToken; /// use tokio_util::task::TaskTracker; /// use tokio_util::time::FutureExt; /// /// use tokio::time::{self, Duration}; /// /// async fn background_task(num: u64) { /// for i in 0..10 { /// time::sleep(Duration::from_millis(100*num)).await; /// println!("Background task {} in iteration {}.", num, i); /// } /// } /// /// #[tokio::main] /// # async fn _hidden() {} /// # #[tokio::main(flavor = "current_thread", start_paused = true)] /// async fn main() { /// let tracker = TaskTracker::new(); /// let token = CancellationToken::new(); /// /// for i in 0..10 { /// let token = token.clone(); /// tracker.spawn(async move { /// // Use a `with_cancellation_token_owned` to kill the background task /// // if the token is cancelled. /// match background_task(i) /// .with_cancellation_token_owned(token) /// .await /// { /// Some(()) => println!("Task {} exiting normally.", i), /// None => { /// // Do some cleanup before we really exit. /// time::sleep(Duration::from_millis(50)).await; /// println!("Task {} finished cleanup.", i); /// } /// } /// }); /// } /// /// // Spawn a background task that will send the shutdown signal. /// { /// let tracker = tracker.clone(); /// tokio::spawn(async move { /// // Normally you would use something like ctrl-c instead of /// // sleeping. /// time::sleep(Duration::from_secs(2)).await; /// tracker.close(); /// token.cancel(); /// }); /// } /// /// // Wait for all tasks to exit. 
/// tracker.wait().await; /// /// println!("All tasks have exited now."); /// } /// ``` /// /// [`CancellationToken`]: crate::sync::CancellationToken /// [`JoinHandle::is_finished`]: tokio::task::JoinHandle::is_finished /// [`JoinSet`]: tokio::task::JoinSet /// [`close`]: Self::close /// [`join_next`]: tokio::task::JoinSet::join_next /// [`wait`]: Self::wait /// [graceful shutdown]: https://tokio.rs/tokio/topics/shutdown pub struct TaskTracker { inner: Arc<TaskTrackerInner>, } /// Represents a task tracked by a [`TaskTracker`]. #[must_use] #[derive(Debug)] pub struct TaskTrackerToken { task_tracker: TaskTracker, } struct TaskTrackerInner { /// Keeps track of the state. /// /// The lowest bit is whether the task tracker is closed. /// /// The rest of the bits count the number of tracked tasks. state: AtomicUsize, /// Used to notify when the last task exits. on_last_exit: Notify, } pin_project! { /// A future that is tracked as a task by a [`TaskTracker`]. /// /// The associated [`TaskTracker`] cannot complete until this future is dropped. /// /// This future is returned by [`TaskTracker::track_future`]. #[must_use = "futures do nothing unless polled"] pub struct TrackedFuture<F> { #[pin] future: F, token: TaskTrackerToken, } } pin_project! { /// A future that completes when the [`TaskTracker`] is empty and closed. /// /// This future is returned by [`TaskTracker::wait`]. #[must_use = "futures do nothing unless polled"] pub struct TaskTrackerWaitFuture<'a> { #[pin] future: Notified<'a>, inner: Option<&'a TaskTrackerInner>, } } impl TaskTrackerInner { #[inline] fn new() -> Self { Self { state: AtomicUsize::new(0), on_last_exit: Notify::new(), } } #[inline] fn is_closed_and_empty(&self) -> bool { // If empty and closed bit set, then we are done. // // The acquire load will synchronize with the release store of any previous call to // `set_closed` and `drop_task`. 
self.state.load(Ordering::Acquire) == 1 } #[inline] fn set_closed(&self) -> bool { // The AcqRel ordering makes the closed bit behave like a `Mutex<bool>` for synchronization // purposes. We do this because it makes the return value of `TaskTracker::{close,reopen}` // more meaningful for the user. Without these orderings, this assert could fail: // ``` // // thread 1 // some_other_atomic.store(true, Relaxed); // tracker.close(); // // // thread 2 // if tracker.reopen() { // assert!(some_other_atomic.load(Relaxed)); // } // ``` // However, with the AcqRel ordering, we establish a happens-before relationship from the // call to `close` and the later call to `reopen` that returned true. let state = self.state.fetch_or(1, Ordering::AcqRel); // If there are no tasks, and if it was not already closed: if state == 0 { self.notify_now(); } (state & 1) == 0 } #[inline] fn set_open(&self) -> bool { // See `set_closed` regarding the AcqRel ordering. let state = self.state.fetch_and(!1, Ordering::AcqRel); (state & 1) == 1 } #[inline] fn add_task(&self) { self.state.fetch_add(2, Ordering::Relaxed); } #[inline] fn drop_task(&self) { let state = self.state.fetch_sub(2, Ordering::Release); // If this was the last task and we are closed: if state == 3 { self.notify_now(); } } #[cold] fn notify_now(&self) { // Insert an acquire fence. This matters for `drop_task` but doesn't matter for // `set_closed` since it already uses AcqRel. // // This synchronizes with the release store of any other call to `drop_task`, and with the // release store in the call to `set_closed`. That ensures that everything that happened // before those other calls to `drop_task` or `set_closed` will be visible after this load, // and those things will also be visible to anything woken by the call to `notify_waiters`. self.state.load(Ordering::Acquire); self.on_last_exit.notify_waiters(); } } impl TaskTracker { /// Creates a new `TaskTracker`. /// /// The `TaskTracker` will start out as open. 
#[must_use] pub fn new() -> Self { Self { inner: Arc::new(TaskTrackerInner::new()), } } /// Waits until this `TaskTracker` is both closed and empty. /// /// If the `TaskTracker` is already closed and empty when this method is called, then it /// returns immediately. /// /// The `wait` future is resistant against [ABA problems][aba]. That is, if the `TaskTracker` /// becomes both closed and empty for a short amount of time, then it is guarantee that all /// `wait` futures that were created before the short time interval will trigger, even if they /// are not polled during that short time interval. /// /// # Cancel safety /// /// This method is cancel safe. /// /// However, the resistance against [ABA problems][aba] is lost when using `wait` as the /// condition in a `tokio::select!` loop. /// /// [aba]: https://en.wikipedia.org/wiki/ABA_problem #[inline] pub fn wait(&self) -> TaskTrackerWaitFuture<'_> { TaskTrackerWaitFuture { future: self.inner.on_last_exit.notified(), inner: if self.inner.is_closed_and_empty() { None } else { Some(&self.inner) }, } } /// Close this `TaskTracker`. /// /// This allows [`wait`] futures to complete. It does not prevent you from spawning new tasks. /// /// Returns `true` if this closed the `TaskTracker`, or `false` if it was already closed. /// /// [`wait`]: Self::wait #[inline] pub fn close(&self) -> bool { self.inner.set_closed() } /// Reopen this `TaskTracker`. /// /// This prevents [`wait`] futures from completing even if the `TaskTracker` is empty. /// /// Returns `true` if this reopened the `TaskTracker`, or `false` if it was already open. /// /// [`wait`]: Self::wait #[inline] pub fn reopen(&self) -> bool { self.inner.set_open() } /// Returns `true` if this `TaskTracker` is [closed](Self::close). #[inline] #[must_use] pub fn is_closed(&self) -> bool { (self.inner.state.load(Ordering::Acquire) & 1) != 0 } /// Returns the number of tasks tracked by this `TaskTracker`. 
#[inline] #[must_use] pub fn len(&self) -> usize { self.inner.state.load(Ordering::Acquire) >> 1 } /// Returns `true` if there are no tasks in this `TaskTracker`. #[inline] #[must_use] pub fn is_empty(&self) -> bool { self.inner.state.load(Ordering::Acquire) <= 1 } /// Spawn the provided future on the current Tokio runtime, and track it in this `TaskTracker`. /// /// This is equivalent to `tokio::spawn(tracker.track_future(task))`. #[inline] #[track_caller] #[cfg(feature = "rt")] #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] pub fn spawn<F>(&self, task: F) -> JoinHandle<F::Output> where F: Future + Send + 'static, F::Output: Send + 'static, { tokio::task::spawn(self.track_future(task)) } /// Spawn the provided future on the provided Tokio runtime, and track it in this `TaskTracker`. /// /// This is equivalent to `handle.spawn(tracker.track_future(task))`. #[inline] #[track_caller] #[cfg(feature = "rt")] #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] pub fn spawn_on<F>(&self, task: F, handle: &Handle) -> JoinHandle<F::Output> where F: Future + Send + 'static, F::Output: Send + 'static, { handle.spawn(self.track_future(task)) } /// Spawn the provided future on the current [`LocalSet`] or [`LocalRuntime`] /// and track it in this `TaskTracker`. /// /// This is equivalent to `tokio::task::spawn_local(tracker.track_future(task))`. /// /// # Panics /// /// This method panics if it is called outside of a `LocalSet` or `LocalRuntime`. /// /// [`LocalSet`]: tokio::task::LocalSet /// [`LocalRuntime`]: tokio::runtime::LocalRuntime #[inline] #[track_caller] #[cfg(feature = "rt")] #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] pub fn spawn_local<F>(&self, task: F) -> JoinHandle<F::Output> where F: Future + 'static, F::Output: 'static, { tokio::task::spawn_local(self.track_future(task)) } /// Spawn the provided future on the provided [`LocalSet`], and track it in this `TaskTracker`. /// /// This is equivalent to `local_set.spawn_local(tracker.track_future(task))`. 
/// /// [`LocalSet`]: tokio::task::LocalSet #[inline] #[track_caller] #[cfg(feature = "rt")] #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] pub fn spawn_local_on<F>(&self, task: F, local_set: &LocalSet) -> JoinHandle<F::Output> where F: Future + 'static, F::Output: 'static, { local_set.spawn_local(self.track_future(task)) } /// Spawn the provided blocking task on the current Tokio runtime, and track it in this `TaskTracker`. /// /// This is equivalent to `tokio::task::spawn_blocking(tracker.track_future(task))`. #[inline] #[track_caller] #[cfg(feature = "rt")] #[cfg(not(target_family = "wasm"))] #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] pub fn spawn_blocking<F, T>(&self, task: F) -> JoinHandle<T> where F: FnOnce() -> T, F: Send + 'static, T: Send + 'static, { let token = self.token(); tokio::task::spawn_blocking(move || { let res = task(); drop(token); res }) } /// Spawn the provided blocking task on the provided Tokio runtime, and track it in this `TaskTracker`. /// /// This is equivalent to `handle.spawn_blocking(tracker.track_future(task))`. #[inline] #[track_caller] #[cfg(feature = "rt")] #[cfg(not(target_family = "wasm"))] #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] pub fn spawn_blocking_on<F, T>(&self, task: F, handle: &Handle) -> JoinHandle<T> where F: FnOnce() -> T, F: Send + 'static, T: Send + 'static, { let token = self.token(); handle.spawn_blocking(move || { let res = task(); drop(token); res }) } /// Track the provided future. /// /// The returned [`TrackedFuture`] will count as a task tracked by this collection, and will /// prevent calls to [`wait`] from returning until the task is dropped. /// /// The task is removed from the collection when it is dropped, not when [`poll`] returns /// [`Poll::Ready`]. /// /// # Examples /// /// Track a future spawned with [`tokio::spawn`]. 
/// /// ``` /// # async fn my_async_fn() {} /// use tokio_util::task::TaskTracker; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let tracker = TaskTracker::new(); /// /// tokio::spawn(tracker.track_future(my_async_fn())); /// # } /// ``` /// /// Track a future spawned on a [`JoinSet`]. /// ``` /// # async fn my_async_fn() {} /// use tokio::task::JoinSet; /// use tokio_util::task::TaskTracker; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() { /// let tracker = TaskTracker::new(); /// let mut join_set = JoinSet::new(); /// /// join_set.spawn(tracker.track_future(my_async_fn())); /// # } /// ``` /// /// [`JoinSet`]: tokio::task::JoinSet /// [`Poll::Pending`]: std::task::Poll::Pending /// [`poll`]: std::future::Future::poll /// [`wait`]: Self::wait #[inline] pub fn track_future<F: Future>(&self, future: F) -> TrackedFuture<F> { TrackedFuture { future, token: self.token(), } } /// Creates a [`TaskTrackerToken`] representing a task tracked by this `TaskTracker`. /// /// This token is a lower-level utility than the spawn methods. Each token is considered to /// correspond to a task. As long as the token exists, the `TaskTracker` cannot complete. /// Furthermore, the count returned by the [`len`] method will include the tokens in the count. /// /// Dropping the token indicates to the `TaskTracker` that the task has exited. /// /// [`len`]: TaskTracker::len #[inline] pub fn token(&self) -> TaskTrackerToken { self.inner.add_task(); TaskTrackerToken { task_tracker: self.clone(), } } /// Returns `true` if both task trackers correspond to the same set of tasks. 
/// /// # Examples /// /// ``` /// use tokio_util::task::TaskTracker; /// /// let tracker_1 = TaskTracker::new(); /// let tracker_2 = TaskTracker::new(); /// let tracker_1_clone = tracker_1.clone(); /// /// assert!(TaskTracker::ptr_eq(&tracker_1, &tracker_1_clone)); /// assert!(!TaskTracker::ptr_eq(&tracker_1, &tracker_2)); /// ``` #[inline] #[must_use] pub fn ptr_eq(left: &TaskTracker, right: &TaskTracker) -> bool { Arc::ptr_eq(&left.inner, &right.inner) } } impl Default for TaskTracker { /// Creates a new `TaskTracker`. /// /// The `TaskTracker` will start out as open. #[inline] fn default() -> TaskTracker { TaskTracker::new() } } impl Clone for TaskTracker { /// Returns a new `TaskTracker` that tracks the same set of tasks. /// /// Since the new `TaskTracker` shares the same set of tasks, changes to one set are visible in /// all other clones. /// /// # Examples /// /// ``` /// use tokio_util::task::TaskTracker; /// /// #[tokio::main] /// # async fn _hidden() {} /// # #[tokio::main(flavor = "current_thread")] /// async fn main() { /// let tracker = TaskTracker::new(); /// let cloned = tracker.clone(); /// /// // Spawns on `tracker` are visible in `cloned`. /// tracker.spawn(std::future::pending::<()>()); /// assert_eq!(cloned.len(), 1); /// /// // Spawns on `cloned` are visible in `tracker`. /// cloned.spawn(std::future::pending::<()>()); /// assert_eq!(tracker.len(), 2); /// /// // Calling `close` is visible to `cloned`. /// tracker.close(); /// assert!(cloned.is_closed()); /// /// // Calling `reopen` is visible to `tracker`. 
/// cloned.reopen(); /// assert!(!tracker.is_closed()); /// } /// ``` #[inline] fn clone(&self) -> TaskTracker { Self { inner: self.inner.clone(), } } } fn debug_inner(inner: &TaskTrackerInner, f: &mut fmt::Formatter<'_>) -> fmt::Result { let state = inner.state.load(Ordering::Acquire); let is_closed = (state & 1) != 0; let len = state >> 1; f.debug_struct("TaskTracker") .field("len", &len) .field("is_closed", &is_closed) .field("inner", &(inner as *const TaskTrackerInner)) .finish() } impl fmt::Debug for TaskTracker { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { debug_inner(&self.inner, f) } } impl TaskTrackerToken { /// Returns the [`TaskTracker`] that this token is associated with. #[inline] #[must_use] pub fn task_tracker(&self) -> &TaskTracker { &self.task_tracker } } impl Clone for TaskTrackerToken { /// Returns a new `TaskTrackerToken` associated with the same [`TaskTracker`]. /// /// This is equivalent to `token.task_tracker().token()`. #[inline] fn clone(&self) -> TaskTrackerToken { self.task_tracker.token() } } impl Drop for TaskTrackerToken { /// Dropping the token indicates to the [`TaskTracker`] that the task has exited. 
#[inline] fn drop(&mut self) { self.task_tracker.inner.drop_task(); } } impl<F: Future> Future for TrackedFuture<F> { type Output = F::Output; #[inline] fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<F::Output> { self.project().future.poll(cx) } } impl<F: fmt::Debug> fmt::Debug for TrackedFuture<F> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TrackedFuture") .field("future", &self.future) .field("task_tracker", self.token.task_tracker()) .finish() } } impl<'a> Future for TaskTrackerWaitFuture<'a> { type Output = (); #[inline] fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { let me = self.project(); let inner = match me.inner.as_ref() { None => return Poll::Ready(()), Some(inner) => inner, }; let ready = inner.is_closed_and_empty() || me.future.poll(cx).is_ready(); if ready { *me.inner = None; Poll::Ready(()) } else { Poll::Pending } } } impl<'a> fmt::Debug for TaskTrackerWaitFuture<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { struct Helper<'a>(&'a TaskTrackerInner); impl fmt::Debug for Helper<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { debug_inner(self.0, f) } } f.debug_struct("TaskTrackerWaitFuture") .field("future", &self.future) .field("task_tracker", &self.inner.map(Helper)) .finish() } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/task/abort_on_drop.rs
tokio-util/src/task/abort_on_drop.rs
//! An [`AbortOnDropHandle`] is like a [`JoinHandle`], except that it //! will abort the task as soon as it is dropped. use tokio::task::{AbortHandle, JoinError, JoinHandle}; use std::{ future::Future, mem::ManuallyDrop, pin::Pin, task::{Context, Poll}, }; /// A wrapper around a [`tokio::task::JoinHandle`], /// which [aborts] the task when it is dropped. /// /// [aborts]: tokio::task::JoinHandle::abort #[must_use = "Dropping the handle aborts the task immediately"] pub struct AbortOnDropHandle<T>(JoinHandle<T>); impl<T> Drop for AbortOnDropHandle<T> { fn drop(&mut self) { self.0.abort() } } impl<T> AbortOnDropHandle<T> { /// Create an [`AbortOnDropHandle`] from a [`JoinHandle`]. pub fn new(handle: JoinHandle<T>) -> Self { Self(handle) } /// Abort the task associated with this handle, /// equivalent to [`JoinHandle::abort`]. pub fn abort(&self) { self.0.abort() } /// Checks if the task associated with this handle is finished, /// equivalent to [`JoinHandle::is_finished`]. pub fn is_finished(&self) -> bool { self.0.is_finished() } /// Returns a new [`AbortHandle`] that can be used to remotely abort this task, /// equivalent to [`JoinHandle::abort_handle`]. pub fn abort_handle(&self) -> AbortHandle { self.0.abort_handle() } /// Cancels aborting on drop and returns the original [`JoinHandle`]. 
pub fn detach(self) -> JoinHandle<T> { // Avoid invoking `AbortOnDropHandle`'s `Drop` impl let this = ManuallyDrop::new(self); // SAFETY: `&this.0` is a reference, so it is certainly initialized, and // it won't be double-dropped because it's in a `ManuallyDrop` unsafe { std::ptr::read(&this.0) } } } impl<T> std::fmt::Debug for AbortOnDropHandle<T> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("AbortOnDropHandle") .field("id", &self.0.id()) .finish() } } impl<T> Future for AbortOnDropHandle<T> { type Output = Result<T, JoinError>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { Pin::new(&mut self.0).poll(cx) } } impl<T> AsRef<JoinHandle<T>> for AbortOnDropHandle<T> { fn as_ref(&self) -> &JoinHandle<T> { &self.0 } } #[cfg(test)] mod tests { use super::*; /// A simple type that does not implement [`std::fmt::Debug`]. struct NotDebug; fn is_debug<T: std::fmt::Debug>() {} #[test] fn assert_debug() { is_debug::<AbortOnDropHandle<NotDebug>>(); } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/task/mod.rs
tokio-util/src/task/mod.rs
//! Extra utilities for spawning tasks //! //! This module is only available when the `rt` feature is enabled. Note that enabling the //! `join-map` feature will automatically also enable the `rt` feature. cfg_rt! { mod spawn_pinned; pub use spawn_pinned::LocalPoolHandle; pub mod task_tracker; #[doc(inline)] pub use task_tracker::TaskTracker; mod abort_on_drop; pub use abort_on_drop::AbortOnDropHandle; mod join_queue; pub use join_queue::JoinQueue; } #[cfg(feature = "join-map")] mod join_map; #[cfg(feature = "join-map")] #[cfg_attr(docsrs, doc(cfg(feature = "join-map")))] pub use join_map::{JoinMap, JoinMapKeys};
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/io/simplex.rs
tokio-util/src/io/simplex.rs
//! Unidirectional byte-oriented channel. use crate::util::poll_proceed; use bytes::Buf; use bytes::BytesMut; use futures_core::ready; use std::io::Error as IoError; use std::io::ErrorKind as IoErrorKind; use std::io::IoSlice; use std::pin::Pin; use std::sync::{Arc, Mutex}; use std::task::{Context, Poll, Waker}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; type IoResult<T> = Result<T, IoError>; const CLOSED_ERROR_MSG: &str = "simplex has been closed"; #[derive(Debug)] struct Inner { /// `poll_write` will return [`Poll::Pending`] if the backpressure boundary is reached backpressure_boundary: usize, /// either [`Sender`] or [`Receiver`] is closed is_closed: bool, /// Waker used to wake the [`Receiver`] receiver_waker: Option<Waker>, /// Waker used to wake the [`Sender`] sender_waker: Option<Waker>, /// Buffer used to read and write data buf: BytesMut, } impl Inner { fn with_capacity(capacity: usize) -> Self { Self { backpressure_boundary: capacity, is_closed: false, receiver_waker: None, sender_waker: None, buf: BytesMut::with_capacity(capacity), } } fn register_receiver_waker(&mut self, waker: &Waker) -> Option<Waker> { match self.receiver_waker.as_mut() { Some(old) if old.will_wake(waker) => None, _ => self.receiver_waker.replace(waker.clone()), } } fn register_sender_waker(&mut self, waker: &Waker) -> Option<Waker> { match self.sender_waker.as_mut() { Some(old) if old.will_wake(waker) => None, _ => self.sender_waker.replace(waker.clone()), } } fn take_receiver_waker(&mut self) -> Option<Waker> { self.receiver_waker.take() } fn take_sender_waker(&mut self) -> Option<Waker> { self.sender_waker.take() } fn is_closed(&self) -> bool { self.is_closed } fn close_receiver(&mut self) -> Option<Waker> { self.is_closed = true; self.take_sender_waker() } fn close_sender(&mut self) -> Option<Waker> { self.is_closed = true; self.take_receiver_waker() } } /// Receiver of the simplex channel. /// /// # Cancellation safety /// /// The `Receiver` is cancel safe. 
If it is used as the event in a /// [`tokio::select!`](macro@tokio::select) statement and some other branch /// completes first, it is guaranteed that no bytes were received on this /// channel. /// /// You can still read the remaining data from the buffer /// even if the write half has been dropped. /// See [`Sender::poll_shutdown`] and [`Sender::drop`] for more details. #[derive(Debug)] pub struct Receiver { inner: Arc<Mutex<Inner>>, } impl Drop for Receiver { /// This also wakes up the [`Sender`]. fn drop(&mut self) { let maybe_waker = { let mut inner = self.inner.lock().unwrap(); inner.close_receiver() }; if let Some(waker) = maybe_waker { waker.wake(); } } } impl AsyncRead for Receiver { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<IoResult<()>> { let coop = ready!(poll_proceed(cx)); let mut inner = self.inner.lock().unwrap(); let to_read = buf.remaining().min(inner.buf.remaining()); if to_read == 0 { if inner.is_closed() || buf.remaining() == 0 { return Poll::Ready(Ok(())); } let old_waker = inner.register_receiver_waker(cx.waker()); let maybe_waker = inner.take_sender_waker(); // unlock before waking up and dropping old waker drop(inner); drop(old_waker); if let Some(waker) = maybe_waker { waker.wake(); } return Poll::Pending; } // this is to avoid starving other tasks coop.made_progress(); buf.put_slice(&inner.buf[..to_read]); inner.buf.advance(to_read); let waker = inner.take_sender_waker(); drop(inner); // unlock before waking up if let Some(waker) = waker { waker.wake(); } Poll::Ready(Ok(())) } } /// Sender of the simplex channel. /// /// # Cancellation safety /// /// The `Sender` is cancel safe. If it is used as the event in a /// [`tokio::select!`](macro@tokio::select) statement and some other branch /// completes first, it is guaranteed that no bytes were sent on this /// channel. /// /// # Shutdown /// /// See [`Sender::poll_shutdown`]. 
#[derive(Debug)] pub struct Sender { inner: Arc<Mutex<Inner>>, } impl Drop for Sender { /// This also wakes up the [`Receiver`]. fn drop(&mut self) { let maybe_waker = { let mut inner = self.inner.lock().unwrap(); inner.close_sender() }; if let Some(waker) = maybe_waker { waker.wake(); } } } impl AsyncWrite for Sender { /// # Errors /// /// This method will return [`IoErrorKind::BrokenPipe`] /// if the channel has been closed. fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<IoResult<usize>> { let coop = ready!(poll_proceed(cx)); let mut inner = self.inner.lock().unwrap(); if inner.is_closed() { return Poll::Ready(Err(IoError::new(IoErrorKind::BrokenPipe, CLOSED_ERROR_MSG))); } let free = inner .backpressure_boundary .checked_sub(inner.buf.len()) .expect("backpressure boundary overflow"); let to_write = buf.len().min(free); if to_write == 0 { if buf.is_empty() { return Poll::Ready(Ok(0)); } let old_waker = inner.register_sender_waker(cx.waker()); let waker = inner.take_receiver_waker(); // unlock before waking up and dropping old waker drop(inner); drop(old_waker); if let Some(waker) = waker { waker.wake(); } return Poll::Pending; } // this is to avoid starving other tasks coop.made_progress(); inner.buf.extend_from_slice(&buf[..to_write]); let waker = inner.take_receiver_waker(); drop(inner); // unlock before waking up if let Some(waker) = waker { waker.wake(); } Poll::Ready(Ok(to_write)) } /// # Errors /// /// This method will return [`IoErrorKind::BrokenPipe`] /// if the channel has been closed. fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<IoResult<()>> { let inner = self.inner.lock().unwrap(); if inner.is_closed() { Poll::Ready(Err(IoError::new(IoErrorKind::BrokenPipe, CLOSED_ERROR_MSG))) } else { Poll::Ready(Ok(())) } } /// After returns [`Poll::Ready`], all the following call to /// [`Sender::poll_write`] and [`Sender::poll_flush`] /// will return error. 
/// /// The [`Receiver`] can still be used to read remaining data /// until all bytes have been consumed. fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<IoResult<()>> { let maybe_waker = { let mut inner = self.inner.lock().unwrap(); inner.close_sender() }; if let Some(waker) = maybe_waker { waker.wake(); } Poll::Ready(Ok(())) } fn is_write_vectored(&self) -> bool { true } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll<Result<usize, IoError>> { let coop = ready!(poll_proceed(cx)); let mut inner = self.inner.lock().unwrap(); if inner.is_closed() { return Poll::Ready(Err(IoError::new(IoErrorKind::BrokenPipe, CLOSED_ERROR_MSG))); } let free = inner .backpressure_boundary .checked_sub(inner.buf.len()) .expect("backpressure boundary overflow"); if free == 0 { let old_waker = inner.register_sender_waker(cx.waker()); let maybe_waker = inner.take_receiver_waker(); // unlock before waking up and dropping old waker drop(inner); drop(old_waker); if let Some(waker) = maybe_waker { waker.wake(); } return Poll::Pending; } // this is to avoid starving other tasks coop.made_progress(); let mut rem = free; for buf in bufs { if rem == 0 { break; } let to_write = buf.len().min(rem); if to_write == 0 { assert_ne!(rem, 0); assert_eq!(buf.len(), 0); continue; } inner.buf.extend_from_slice(&buf[..to_write]); rem -= to_write; } let waker = inner.take_receiver_waker(); drop(inner); // unlock before waking up if let Some(waker) = waker { waker.wake(); } Poll::Ready(Ok(free - rem)) } } /// Create a simplex channel. /// /// The `capacity` parameter specifies the maximum number of bytes that can be /// stored in the channel without making the [`Sender::poll_write`] /// return [`Poll::Pending`]. /// /// # Panics /// /// This function will panic if `capacity` is zero. 
pub fn new(capacity: usize) -> (Sender, Receiver) { assert_ne!(capacity, 0, "capacity must be greater than zero"); let inner = Arc::new(Mutex::new(Inner::with_capacity(capacity))); let tx = Sender { inner: Arc::clone(&inner), }; let rx = Receiver { inner }; (tx, rx) }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/io/sync_bridge.rs
tokio-util/src/io/sync_bridge.rs
use std::io::{BufRead, Read, Seek, Write};
use tokio::io::{
    AsyncBufRead, AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, AsyncWrite,
    AsyncWriteExt,
};

/// Use a [`tokio::io::AsyncRead`] synchronously as a [`std::io::Read`] or
/// a [`tokio::io::AsyncWrite`] synchronously as a [`std::io::Write`].
///
/// # Alternatives
///
/// In many cases, there are better alternatives to using `SyncIoBridge`, especially
/// if you want to avoid blocking the async runtime. Consider the following scenarios:
///
/// When hashing data, using `SyncIoBridge` can lead to suboptimal performance and
/// might not fully leverage the async capabilities of the system.
///
/// ### Why It Matters:
///
/// `SyncIoBridge` allows you to use asynchronous I/O operations in a synchronous
/// context by blocking the current thread. However, this can be inefficient because:
/// - **Inefficient Resource Usage**: `SyncIoBridge` takes up an entire OS thread,
///   which is inefficient compared to asynchronous code that can multiplex many
///   tasks on a single thread.
/// - **Thread Pool Saturation**: Excessive use of `SyncIoBridge` can exhaust the
///   async runtime's thread pool, reducing the number of threads available for
///   other tasks and impacting overall performance.
/// - **Missed Concurrency Benefits**: By using synchronous operations with
///   `SyncIoBridge`, you lose the ability to interleave tasks efficiently,
///   which is a key advantage of asynchronous programming.
///
/// ## Example 1: Hashing Data
///
/// The use of `SyncIoBridge` is unnecessary when hashing data. Instead, you can
/// process the data asynchronously by reading it into memory, which avoids blocking
/// the async runtime.
///
/// There are two strategies for avoiding `SyncIoBridge` when hashing data. When
/// the data fits into memory, the easiest is to read the data into a `Vec<u8>`
/// and hash it:
///
/// Explanation: This example demonstrates how to asynchronously read data from a
/// reader into memory and hash it using a synchronous hashing function. The
/// `SyncIoBridge` is avoided, ensuring that the async runtime is not blocked.
///
/// ```rust
/// use tokio::io::AsyncReadExt;
/// use tokio::io::AsyncRead;
/// use std::io::Cursor;
/// # mod blake3 { pub fn hash(_: &[u8]) {} }
///
/// async fn hash_contents(mut reader: impl AsyncRead + Unpin) -> Result<(), std::io::Error> {
///     // Read all data from the reader into a Vec<u8>.
///     let mut data = Vec::new();
///     reader.read_to_end(&mut data).await?;
///
///     // Hash the data using the blake3 hashing function.
///     let hash = blake3::hash(&data);
///
///     Ok(hash)
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() -> Result<(), std::io::Error> {
/// // Example: In-memory data.
/// let data = b"Hello, world!"; // A byte slice.
/// let reader = Cursor::new(data); // Create an in-memory AsyncRead.
/// hash_contents(reader).await
/// # }
/// ```
///
/// When the data doesn't fit into memory, the hashing library will usually
/// provide a `hasher` that you can repeatedly call `update` on to hash the data
/// one chunk at the time.
///
/// Explanation: This example demonstrates how to asynchronously stream data in
/// chunks for hashing. Each chunk is read asynchronously, and the hash is updated
/// incrementally. This avoids blocking and improves performance over using
/// `SyncIoBridge`.
///
/// ```rust
/// use tokio::io::AsyncReadExt;
/// use tokio::io::AsyncRead;
/// use std::io::Cursor;
/// # struct Hasher;
/// # impl Hasher { pub fn update(&mut self, _: &[u8]) {} pub fn finalize(&self) {} }
///
/// /// Asynchronously streams data from an async reader, processes it in chunks,
/// /// and hashes the data incrementally.
/// async fn hash_stream(mut reader: impl AsyncRead + Unpin, mut hasher: Hasher) -> Result<(), std::io::Error> {
///     // Create a buffer to read data into, sized for performance.
///     let mut data = vec![0; 16 * 1024];
///     loop {
///         // Read data from the reader into the buffer.
///         let len = reader.read(&mut data).await?;
///         if len == 0 { break; } // Exit loop if no more data.
///
///         // Update the hash with the data read.
///         hasher.update(&data[..len]);
///     }
///
///     // Finalize the hash after all data has been processed.
///     let hash = hasher.finalize();
///
///     Ok(hash)
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() -> Result<(), std::io::Error> {
/// // Example: In-memory data.
/// let data = b"Hello, world!"; // A byte slice.
/// let reader = Cursor::new(data); // Create an in-memory AsyncRead.
/// let hasher = Hasher;
/// hash_stream(reader, hasher).await
/// # }
/// ```
///
///
/// ## Example 2: Compressing Data
///
/// When compressing data, the use of `SyncIoBridge` is unnecessary as it introduces
/// blocking and inefficient code. Instead, you can utilize an async compression library
/// such as the [`async-compression`](https://docs.rs/async-compression/latest/async_compression/)
/// crate, which is built to handle asynchronous data streams efficiently.
///
/// Explanation: This example shows how to asynchronously compress data using an
/// async compression library. By reading and writing asynchronously, it avoids
/// blocking and is more efficient than using `SyncIoBridge` with a non-async
/// compression library.
///
/// ```ignore
/// use async_compression::tokio::write::GzipEncoder;
/// use std::io::Cursor;
/// use tokio::io::AsyncRead;
///
/// /// Asynchronously compresses data from an async reader using Gzip and an async encoder.
/// async fn compress_data(mut reader: impl AsyncRead + Unpin) -> Result<(), std::io::Error> {
///     let writer = tokio::io::sink();
///
///     // Create a Gzip encoder that wraps the writer.
///     let mut encoder = GzipEncoder::new(writer);
///
///     // Copy data from the reader to the encoder, compressing it.
///     tokio::io::copy(&mut reader, &mut encoder).await?;
///
///     Ok(())
/// }
///
/// #[tokio::main]
/// async fn main() -> Result<(), std::io::Error> {
///     // Example: In-memory data.
///     let data = b"Hello, world!"; // A byte slice.
///     let reader = Cursor::new(data); // Create an in-memory AsyncRead.
///     compress_data(reader).await?;
///
///     Ok(())
/// }
/// ```
///
///
/// ## Example 3: Parsing Data Formats
///
///
/// `SyncIoBridge` is not ideal when parsing data formats such as `JSON`, as it
/// blocks async operations. A more efficient approach is to read data asynchronously
/// into memory and then `deserialize` it, avoiding unnecessary synchronization overhead.
///
/// Explanation: This example shows how to asynchronously read data into memory
/// and then parse it as `JSON`. By avoiding `SyncIoBridge`, the asynchronous runtime
/// remains unblocked, leading to better performance when working with asynchronous
/// I/O streams.
///
/// ```rust,no_run
/// use tokio::io::AsyncRead;
/// use tokio::io::AsyncReadExt;
/// use std::io::Cursor;
/// # mod serde {
/// #     pub trait DeserializeOwned: 'static {}
/// #     impl<T: 'static> DeserializeOwned for T {}
/// # }
/// # mod serde_json {
/// #     use super::serde::DeserializeOwned;
/// #     pub fn from_slice<T: DeserializeOwned>(_: &[u8]) -> Result<T, std::io::Error> {
/// #         unimplemented!()
/// #     }
/// # }
/// # #[derive(Debug)] struct MyStruct;
///
/// async fn parse_json(mut reader: impl AsyncRead + Unpin) -> Result<MyStruct, std::io::Error> {
///     // Read all data from the reader into a Vec<u8>.
///     let mut data = Vec::new();
///     reader.read_to_end(&mut data).await?;
///
///     // Deserialize the data from the Vec<u8> into a MyStruct instance.
///     let value: MyStruct = serde_json::from_slice(&data)?;
///
///     Ok(value)
/// }
///
/// #[tokio::main]
/// async fn main() -> Result<(), std::io::Error> {
///     // Example: In-memory data.
///     let data = b"Hello, world!"; // A byte slice.
///     let reader = Cursor::new(data); // Create an in-memory AsyncRead.
///     parse_json(reader).await?;
///     Ok(())
/// }
/// ```
///
/// ## Correct Usage of `SyncIoBridge` inside `spawn_blocking`
///
/// `SyncIoBridge` is mainly useful when you need to interface with synchronous
/// libraries from an asynchronous context.
///
/// Explanation: This example shows how to use `SyncIoBridge` inside a `spawn_blocking`
/// task to safely perform synchronous I/O without blocking the async runtime. The
/// `spawn_blocking` ensures that the synchronous code is offloaded to a dedicated
/// thread pool, preventing it from interfering with the async tasks.
///
/// ```rust
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::task::spawn_blocking;
/// use tokio_util::io::SyncIoBridge;
/// use tokio::io::AsyncRead;
/// use std::marker::Unpin;
/// use std::io::Cursor;
///
/// /// Wraps an async reader with `SyncIoBridge` and performs synchronous I/O operations in a blocking task.
/// async fn process_sync_io(reader: impl AsyncRead + Unpin + Send + 'static) -> Result<Vec<u8>, std::io::Error> {
///     // Wrap the async reader with `SyncIoBridge` to allow synchronous reading.
///     let mut sync_reader = SyncIoBridge::new(reader);
///
///     // Spawn a blocking task to perform synchronous I/O operations.
///     let result = spawn_blocking(move || {
///         // Create an in-memory buffer to hold the copied data.
///         let mut buffer = Vec::new();
///         // Copy data from the sync_reader to the buffer.
///         std::io::copy(&mut sync_reader, &mut buffer)?;
///         // Return the buffer containing the copied data.
///         Ok::<_, std::io::Error>(buffer)
///     })
///     .await??;
///
///     // Return the result from the blocking task.
///     Ok(result)
/// }
///
/// #[tokio::main]
/// async fn main() -> Result<(), std::io::Error> {
///     // Example: In-memory data.
///     let data = b"Hello, world!"; // A byte slice.
///     let reader = Cursor::new(data); // Create an in-memory AsyncRead.
///     let result = process_sync_io(reader).await?;
///
///     // You can use `result` here as needed.
///
///     Ok(())
/// }
/// # }
/// ```
///
#[derive(Debug)]
pub struct SyncIoBridge<T> {
    // The wrapped asynchronous source/sink.
    src: T,
    // Runtime handle used to `block_on` the async operations; captured at
    // construction so the bridge can be moved to a non-runtime thread.
    rt: tokio::runtime::Handle,
}

// Synchronous `BufRead` implemented by blocking on the async counterparts.
// Every method below runs entirely on the calling (blocking) thread.
impl<T: AsyncBufRead + Unpin> BufRead for SyncIoBridge<T> {
    fn fill_buf(&mut self) -> std::io::Result<&[u8]> {
        let src = &mut self.src;
        self.rt.block_on(AsyncBufReadExt::fill_buf(src))
    }

    fn consume(&mut self, amt: usize) {
        let src = &mut self.src;
        // `consume` is synchronous on the async trait too; no `block_on` needed.
        AsyncBufReadExt::consume(src, amt)
    }

    fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> std::io::Result<usize> {
        let src = &mut self.src;
        self.rt
            .block_on(AsyncBufReadExt::read_until(src, byte, buf))
    }

    fn read_line(&mut self, buf: &mut String) -> std::io::Result<usize> {
        let src = &mut self.src;
        self.rt.block_on(AsyncBufReadExt::read_line(src, buf))
    }
}

// Synchronous `Read` implemented by blocking on the async counterparts.
impl<T: AsyncRead + Unpin> Read for SyncIoBridge<T> {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        let src = &mut self.src;
        self.rt.block_on(AsyncReadExt::read(src, buf))
    }

    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> std::io::Result<usize> {
        let src = &mut self.src;
        self.rt.block_on(src.read_to_end(buf))
    }

    fn read_to_string(&mut self, buf: &mut String) -> std::io::Result<usize> {
        let src = &mut self.src;
        self.rt.block_on(src.read_to_string(buf))
    }

    fn read_exact(&mut self, buf: &mut [u8]) -> std::io::Result<()> {
        let src = &mut self.src;
        // The AsyncRead trait returns the count, synchronous doesn't.
        let _n = self.rt.block_on(src.read_exact(buf))?;
        Ok(())
    }
}

// Synchronous `Write` implemented by blocking on the async counterparts.
impl<T: AsyncWrite + Unpin> Write for SyncIoBridge<T> {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let src = &mut self.src;
        self.rt.block_on(src.write(buf))
    }

    fn flush(&mut self) -> std::io::Result<()> {
        let src = &mut self.src;
        self.rt.block_on(src.flush())
    }

    fn write_all(&mut self, buf: &[u8]) -> std::io::Result<()> {
        let src = &mut self.src;
        self.rt.block_on(src.write_all(buf))
    }

    fn write_vectored(&mut self, bufs: &[std::io::IoSlice<'_>]) -> std::io::Result<usize> {
        let src = &mut self.src;
        self.rt.block_on(src.write_vectored(bufs))
    }
}

// Synchronous `Seek` implemented by blocking on the async counterpart.
impl<T: AsyncSeek + Unpin> Seek for SyncIoBridge<T> {
    fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
        let src = &mut self.src;
        self.rt.block_on(AsyncSeekExt::seek(src, pos))
    }
}

// Because https://doc.rust-lang.org/std/io/trait.Write.html#method.is_write_vectored is at the time
// of this writing still unstable, we expose this as part of a standalone method.
impl<T: AsyncWrite> SyncIoBridge<T> {
    /// Determines if the underlying [`tokio::io::AsyncWrite`] target supports efficient vectored writes.
    ///
    /// See [`tokio::io::AsyncWrite::is_write_vectored`].
    pub fn is_write_vectored(&self) -> bool {
        self.src.is_write_vectored()
    }
}

impl<T: AsyncWrite + Unpin> SyncIoBridge<T> {
    /// Shutdown this writer. This method provides a way to call the [`AsyncWriteExt::shutdown`]
    /// function of the inner [`tokio::io::AsyncWrite`] instance.
    ///
    /// # Errors
    ///
    /// This method returns the same errors as [`AsyncWriteExt::shutdown`].
    ///
    /// [`AsyncWriteExt::shutdown`]: tokio::io::AsyncWriteExt::shutdown
    pub fn shutdown(&mut self) -> std::io::Result<()> {
        let src = &mut self.src;
        self.rt.block_on(src.shutdown())
    }
}

impl<T: Unpin> SyncIoBridge<T> {
    /// Use a [`tokio::io::AsyncRead`] synchronously as a [`std::io::Read`] or
    /// a [`tokio::io::AsyncWrite`] as a [`std::io::Write`].
    ///
    /// When this struct is created, it captures a handle to the current thread's runtime with [`tokio::runtime::Handle::current`].
    /// It is hence OK to move this struct into a separate thread outside the runtime, as created
    /// by e.g. [`tokio::task::spawn_blocking`].
    ///
    /// Stated even more strongly: to make use of this bridge, you *must* move
    /// it into a separate thread outside the runtime. The synchronous I/O will use the
    /// underlying handle to block on the backing asynchronous source, via
    /// [`tokio::runtime::Handle::block_on`]. As noted in the documentation for that
    /// function, an attempt to `block_on` from an asynchronous execution context
    /// will panic.
    ///
    /// # Wrapping `!Unpin` types
    ///
    /// Use e.g. `SyncIoBridge::new(Box::pin(src))`.
    ///
    /// # Panics
    ///
    /// This will panic if called outside the context of a Tokio runtime.
    #[track_caller]
    pub fn new(src: T) -> Self {
        Self::new_with_handle(src, tokio::runtime::Handle::current())
    }

    /// Use a [`tokio::io::AsyncRead`] synchronously as a [`std::io::Read`] or
    /// a [`tokio::io::AsyncWrite`] as a [`std::io::Write`].
    ///
    /// This is the same as [`SyncIoBridge::new`], but allows passing an arbitrary handle and hence may
    /// be initially invoked outside of an asynchronous context.
    pub fn new_with_handle(src: T, rt: tokio::runtime::Handle) -> Self {
        Self { src, rt }
    }

    /// Consume this bridge, returning the underlying stream.
    pub fn into_inner(self) -> T {
        self.src
    }
}

impl<T> AsMut<T> for SyncIoBridge<T> {
    fn as_mut(&mut self) -> &mut T {
        &mut self.src
    }
}

impl<T> AsRef<T> for SyncIoBridge<T> {
    fn as_ref(&self) -> &T {
        &self.src
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/io/sink_writer.rs
tokio-util/src/io/sink_writer.rs
use futures_sink::Sink;

use futures_core::stream::Stream;
use pin_project_lite::pin_project;
use std::io;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
use tokio::io::{AsyncRead, AsyncWrite};

pin_project! {
    /// Convert a [`Sink`] of byte chunks into an [`AsyncWrite`].
    ///
    /// Whenever you write to this [`SinkWriter`], the supplied bytes are
    /// forwarded to the inner [`Sink`]. When `shutdown` is called on this
    /// [`SinkWriter`], the inner sink is closed.
    ///
    /// This adapter takes a `Sink<&[u8]>` and provides an [`AsyncWrite`] impl
    /// for it. Because of the lifetime, this trait is relatively rarely
    /// implemented. The main ways to get a `Sink<&[u8]>` that you can use with
    /// this type are:
    ///
    ///  * With the codec module by implementing the [`Encoder`]`<&[u8]>` trait.
    ///  * By wrapping a `Sink<Bytes>` in a [`CopyToBytes`].
    ///  * Manually implementing `Sink<&[u8]>` directly.
    ///
    /// The opposite conversion of implementing `Sink<_>` for an [`AsyncWrite`]
    /// is done using the [`codec`] module.
    ///
    /// # Example
    ///
    /// ```
    /// use bytes::Bytes;
    /// use futures_util::SinkExt;
    /// use std::io::{Error, ErrorKind};
    /// use tokio::io::AsyncWriteExt;
    /// use tokio_util::io::{SinkWriter, CopyToBytes};
    /// use tokio_util::sync::PollSender;
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() -> Result<(), Error> {
    /// // We use an mpsc channel as an example of a `Sink<Bytes>`.
    /// let (tx, mut rx) = tokio::sync::mpsc::channel::<Bytes>(1);
    /// let sink = PollSender::new(tx).sink_map_err(|_| Error::from(ErrorKind::BrokenPipe));
    ///
    /// // Wrap it in `CopyToBytes` to get a `Sink<&[u8]>`.
    /// let mut writer = SinkWriter::new(CopyToBytes::new(sink));
    ///
    /// // Write data to our interface...
    /// let data: [u8; 4] = [1, 2, 3, 4];
    /// let _ = writer.write(&data).await?;
    ///
    /// // ... and receive it.
    /// assert_eq!(data.as_slice(), &*rx.recv().await.unwrap());
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// [`AsyncWrite`]: tokio::io::AsyncWrite
    /// [`CopyToBytes`]: crate::io::CopyToBytes
    /// [`Encoder`]: crate::codec::Encoder
    /// [`Sink`]: futures_sink::Sink
    /// [`codec`]: crate::codec
    #[derive(Debug)]
    pub struct SinkWriter<S> {
        #[pin]
        inner: S,
    }
}

impl<S> SinkWriter<S> {
    /// Creates a new [`SinkWriter`].
    pub fn new(sink: S) -> Self {
        Self { inner: sink }
    }

    /// Gets a reference to the underlying sink.
    pub fn get_ref(&self) -> &S {
        &self.inner
    }

    /// Gets a mutable reference to the underlying sink.
    pub fn get_mut(&mut self) -> &mut S {
        &mut self.inner
    }

    /// Consumes this [`SinkWriter`], returning the underlying sink.
    pub fn into_inner(self) -> S {
        self.inner
    }
}

impl<S, E> AsyncWrite for SinkWriter<S>
where
    for<'a> S: Sink<&'a [u8], Error = E>,
    E: Into<io::Error>,
{
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        let mut this = self.project();

        // Per the `Sink` contract, `start_send` may only be called after
        // `poll_ready` returned `Ready(Ok(()))`; wait for readiness first.
        ready!(this.inner.as_mut().poll_ready(cx).map_err(Into::into))?;
        match this.inner.as_mut().start_send(buf) {
            // A successful send always consumes the whole buffer.
            Ok(()) => Poll::Ready(Ok(buf.len())),
            Err(e) => Poll::Ready(Err(e.into())),
        }
    }
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        self.project().inner.poll_flush(cx).map_err(Into::into)
    }

    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        // Shutting down the writer closes the inner sink.
        self.project().inner.poll_close(cx).map_err(Into::into)
    }
}

// Pass-through `Stream` impl so wrapping a duplex object keeps its read side.
impl<S: Stream> Stream for SinkWriter<S> {
    type Item = S::Item;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        self.project().inner.poll_next(cx)
    }
}

// Pass-through `AsyncRead` impl, for the same reason as above.
impl<S: AsyncRead> AsyncRead for SinkWriter<S> {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut tokio::io::ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        self.project().inner.poll_read(cx, buf)
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/io/stream_reader.rs
tokio-util/src/io/stream_reader.rs
use bytes::Buf;
use futures_core::stream::Stream;
use futures_sink::Sink;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncBufRead, AsyncRead, ReadBuf};

/// Convert a [`Stream`] of byte chunks into an [`AsyncRead`].
///
/// This type performs the inverse operation of [`ReaderStream`].
///
/// This type also implements the [`AsyncBufRead`] trait, so you can use it
/// to read a `Stream` of byte chunks line-by-line. See the examples below.
///
/// # Example
///
/// ```
/// use bytes::Bytes;
/// use tokio::io::{AsyncReadExt, Result};
/// use tokio_util::io::StreamReader;
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() -> std::io::Result<()> {
///
/// // Create a stream from an iterator.
/// let stream = tokio_stream::iter(vec![
///     Result::Ok(Bytes::from_static(&[0, 1, 2, 3])),
///     Result::Ok(Bytes::from_static(&[4, 5, 6, 7])),
///     Result::Ok(Bytes::from_static(&[8, 9, 10, 11])),
/// ]);
///
/// // Convert it to an AsyncRead.
/// let mut read = StreamReader::new(stream);
///
/// // Read five bytes from the stream.
/// let mut buf = [0; 5];
/// read.read_exact(&mut buf).await?;
/// assert_eq!(buf, [0, 1, 2, 3, 4]);
///
/// // Read the rest of the current chunk.
/// assert_eq!(read.read(&mut buf).await?, 3);
/// assert_eq!(&buf[..3], [5, 6, 7]);
///
/// // Read the next chunk.
/// assert_eq!(read.read(&mut buf).await?, 4);
/// assert_eq!(&buf[..4], [8, 9, 10, 11]);
///
/// // We have now reached the end.
/// assert_eq!(read.read(&mut buf).await?, 0);
///
/// # Ok(())
/// # }
/// ```
///
/// If the stream produces errors which are not [`std::io::Error`],
/// the errors can be converted using [`StreamExt`] to map each
/// element.
///
/// ```
/// use bytes::Bytes;
/// use tokio::io::AsyncReadExt;
/// use tokio_util::io::StreamReader;
/// use tokio_stream::StreamExt;
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() -> std::io::Result<()> {
///
/// // Create a stream from an iterator, including an error.
/// let stream = tokio_stream::iter(vec![
///     Result::Ok(Bytes::from_static(&[0, 1, 2, 3])),
///     Result::Ok(Bytes::from_static(&[4, 5, 6, 7])),
///     Result::Err("Something bad happened!")
/// ]);
///
/// // Use StreamExt to map the stream and error to a std::io::Error
/// let stream = stream.map(|result| result.map_err(|err| {
///     std::io::Error::new(std::io::ErrorKind::Other, err)
/// }));
///
/// // Convert it to an AsyncRead.
/// let mut read = StreamReader::new(stream);
///
/// // Read five bytes from the stream.
/// let mut buf = [0; 5];
/// read.read_exact(&mut buf).await?;
/// assert_eq!(buf, [0, 1, 2, 3, 4]);
///
/// // Read the rest of the current chunk.
/// assert_eq!(read.read(&mut buf).await?, 3);
/// assert_eq!(&buf[..3], [5, 6, 7]);
///
/// // Reading the next chunk will produce an error
/// let error = read.read(&mut buf).await.unwrap_err();
/// assert_eq!(error.kind(), std::io::ErrorKind::Other);
/// assert_eq!(error.into_inner().unwrap().to_string(), "Something bad happened!");
///
/// // We have now reached the end.
/// assert_eq!(read.read(&mut buf).await?, 0);
///
/// # Ok(())
/// # }
/// ```
///
/// Using the [`AsyncBufRead`] impl, you can read a `Stream` of byte chunks
/// line-by-line. Note that you will usually also need to convert the error
/// type when doing this. See the second example for an explanation of how
/// to do this.
///
/// ```
/// use tokio::io::{Result, AsyncBufReadExt};
/// use tokio_util::io::StreamReader;
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() -> std::io::Result<()> {
///
/// // Create a stream of byte chunks.
/// let stream = tokio_stream::iter(vec![
///     Result::Ok(b"The first line.\n".as_slice()),
///     Result::Ok(b"The second line.".as_slice()),
///     Result::Ok(b"\nThe third".as_slice()),
///     Result::Ok(b" line.\nThe fourth line.\nThe fifth line.\n".as_slice()),
/// ]);
///
/// // Convert it to an AsyncRead.
/// let mut read = StreamReader::new(stream);
///
/// // Loop through the lines from the `StreamReader`.
/// let mut line = String::new();
/// let mut lines = Vec::new();
/// loop {
///     line.clear();
///     let len = read.read_line(&mut line).await?;
///     if len == 0 { break; }
///     lines.push(line.clone());
/// }
///
/// // Verify that we got the lines we expected.
/// assert_eq!(
///     lines,
///     vec![
///         "The first line.\n",
///         "The second line.\n",
///         "The third line.\n",
///         "The fourth line.\n",
///         "The fifth line.\n",
///     ]
/// );
/// # Ok(())
/// # }
/// ```
///
/// [`AsyncRead`]: tokio::io::AsyncRead
/// [`AsyncBufRead`]: tokio::io::AsyncBufRead
/// [`Stream`]: futures_core::Stream
/// [`ReaderStream`]: crate::io::ReaderStream
/// [`StreamExt`]: https://docs.rs/tokio-stream/latest/tokio_stream/trait.StreamExt.html
#[derive(Debug)]
pub struct StreamReader<S, B> {
    // This field is pinned.
    inner: S,
    // This field is not pinned.
    // Holds the current partially-consumed chunk, if any.
    chunk: Option<B>,
}

impl<S, B, E> StreamReader<S, B>
where
    S: Stream<Item = Result<B, E>>,
    B: Buf,
    E: Into<std::io::Error>,
{
    /// Convert a stream of byte chunks into an [`AsyncRead`].
    ///
    /// The item should be a [`Result`] with the ok variant being something that
    /// implements the [`Buf`] trait (e.g. `Cursor<Vec<u8>>` or `Bytes`). The error
    /// should be convertible into an [io error].
    ///
    /// [`Result`]: std::result::Result
    /// [`Buf`]: bytes::Buf
    /// [io error]: std::io::Error
    pub fn new(stream: S) -> Self {
        Self {
            inner: stream,
            chunk: None,
        }
    }

    /// Do we have a chunk and is it non-empty?
    fn has_chunk(&self) -> bool {
        if let Some(ref chunk) = self.chunk {
            chunk.remaining() > 0
        } else {
            false
        }
    }

    /// Consumes this `StreamReader`, returning a Tuple consisting
    /// of the underlying stream and an Option of the internal buffer,
    /// which is Some in case the buffer contains elements.
    pub fn into_inner_with_chunk(self) -> (S, Option<B>) {
        if self.has_chunk() {
            (self.inner, self.chunk)
        } else {
            // An empty chunk is indistinguishable from no chunk for callers.
            (self.inner, None)
        }
    }
}

impl<S, B> StreamReader<S, B> {
    /// Gets a reference to the underlying stream.
    ///
    /// It is inadvisable to directly read from the underlying stream.
    pub fn get_ref(&self) -> &S {
        &self.inner
    }

    /// Gets a mutable reference to the underlying stream.
    ///
    /// It is inadvisable to directly read from the underlying stream.
    pub fn get_mut(&mut self) -> &mut S {
        &mut self.inner
    }

    /// Gets a pinned mutable reference to the underlying stream.
    ///
    /// It is inadvisable to directly read from the underlying stream.
    pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut S> {
        self.project().inner
    }

    /// Consumes this `BufWriter`, returning the underlying stream.
    ///
    /// Note that any leftover data in the internal buffer is lost.
    /// If you additionally want access to the internal buffer use
    /// [`into_inner_with_chunk`].
    ///
    /// [`into_inner_with_chunk`]: crate::io::StreamReader::into_inner_with_chunk
    pub fn into_inner(self) -> S {
        self.inner
    }
}

impl<S, B, E> AsyncRead for StreamReader<S, B>
where
    S: Stream<Item = Result<B, E>>,
    B: Buf,
    E: Into<std::io::Error>,
{
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        if buf.remaining() == 0 {
            return Poll::Ready(Ok(()));
        }

        // Delegate to the `AsyncBufRead` impl, then copy out of the
        // internal chunk into the caller's buffer.
        let inner_buf = match self.as_mut().poll_fill_buf(cx) {
            Poll::Ready(Ok(buf)) => buf,
            Poll::Ready(Err(err)) => return Poll::Ready(Err(err)),
            Poll::Pending => return Poll::Pending,
        };
        let len = std::cmp::min(inner_buf.len(), buf.remaining());
        buf.put_slice(&inner_buf[..len]);

        self.consume(len);
        Poll::Ready(Ok(()))
    }
}

impl<S, B, E> AsyncBufRead for StreamReader<S, B>
where
    S: Stream<Item = Result<B, E>>,
    B: Buf,
    E: Into<std::io::Error>,
{
    fn poll_fill_buf(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        loop {
            if self.as_mut().has_chunk() {
                // This unwrap is very sad, but it can't be avoided.
                let buf = self.project().chunk.as_ref().unwrap().chunk();
                return Poll::Ready(Ok(buf));
            } else {
                match self.as_mut().project().inner.poll_next(cx) {
                    Poll::Ready(Some(Ok(chunk))) => {
                        // Go around the loop in case the chunk is empty.
                        *self.as_mut().project().chunk = Some(chunk);
                    }
                    Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err.into())),
                    // End of stream maps to EOF (an empty buffer).
                    Poll::Ready(None) => return Poll::Ready(Ok(&[])),
                    Poll::Pending => return Poll::Pending,
                }
            }
        }
    }

    fn consume(self: Pin<&mut Self>, amt: usize) {
        if amt > 0 {
            // `amt > 0` implies `poll_fill_buf` returned a non-empty chunk,
            // so the chunk must be present here.
            self.project()
                .chunk
                .as_mut()
                .expect("No chunk present")
                .advance(amt);
        }
    }
}

// The code below is a manual expansion of the code that pin-project-lite would
// generate. This is done because pin-project-lite fails by hitting the recursion
// limit on this struct. (Every line of documentation is handled recursively by
// the macro.)

impl<S: Unpin, B> Unpin for StreamReader<S, B> {}

struct StreamReaderProject<'a, S, B> {
    inner: Pin<&'a mut S>,
    chunk: &'a mut Option<B>,
}

impl<S, B> StreamReader<S, B> {
    #[inline]
    fn project(self: Pin<&mut Self>) -> StreamReaderProject<'_, S, B> {
        // SAFETY: We define that only `inner` should be pinned when `Self` is
        // and have an appropriate `impl Unpin` for this.
        let me = unsafe { Pin::into_inner_unchecked(self) };
        StreamReaderProject {
            inner: unsafe { Pin::new_unchecked(&mut me.inner) },
            chunk: &mut me.chunk,
        }
    }
}

// Pass-through `Sink` impl so wrapping a duplex object keeps its write side.
impl<S: Sink<T, Error = E>, B, E, T> Sink<T> for StreamReader<S, B> {
    type Error = E;
    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().inner.poll_ready(cx)
    }

    fn start_send(self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> {
        self.project().inner.start_send(item)
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().inner.poll_flush(cx)
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().inner.poll_close(cx)
    }
}
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/io/read_arc.rs
tokio-util/src/io/read_arc.rs
use std::io;
use std::mem::MaybeUninit;
use std::sync::Arc;
use tokio::io::{AsyncRead, AsyncReadExt};

/// Read data from an `AsyncRead` into an `Arc`.
///
/// This uses `Arc::new_uninit_slice` and reads into the resulting uninitialized `Arc`.
///
/// # Example
///
/// ```
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() -> std::io::Result<()> {
/// use tokio_util::io::read_exact_arc;
///
/// let read = tokio::io::repeat(42);
///
/// let arc = read_exact_arc(read, 4).await?;
///
/// assert_eq!(&arc[..], &[42; 4]);
/// # Ok(())
/// # }
/// ```
pub async fn read_exact_arc<R: AsyncRead>(read: R, len: usize) -> io::Result<Arc<[u8]>> {
    tokio::pin!(read);
    // TODO(MSRV 1.82): When bumping MSRV, switch to `Arc::new_uninit_slice(len)`. The following is
    // equivalent, and generates the same assembly, but works without requiring MSRV 1.82.
    let arc: Arc<[MaybeUninit<u8>]> = (0..len).map(|_| MaybeUninit::uninit()).collect();
    // TODO(MSRV future): Use `Arc::get_mut_unchecked` once it's stabilized.
    // SAFETY: We're the only owner of the `Arc`, and we keep the `Arc` valid throughout this loop
    // as we write through this reference.
    let mut buf = unsafe { &mut *(Arc::as_ptr(&arc) as *mut [MaybeUninit<u8>]) };
    // `read_buf` advances the `buf` slice past the bytes it filled, so the
    // loop exits once every byte of the allocation has been written.
    while !buf.is_empty() {
        if read.read_buf(&mut buf).await? == 0 {
            // The reader ended before producing `len` bytes.
            return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "early eof"));
        }
    }
    // TODO(MSRV 1.82): When bumping MSRV, switch to `arc.assume_init()`. The following is
    // equivalent, and generates the same assembly, but works without requiring MSRV 1.82.
    // SAFETY: This changes `[MaybeUninit<u8>]` to `[u8]`, and we've initialized all the bytes in
    // the loop above.
    Ok(unsafe { Arc::from_raw(Arc::into_raw(arc) as *const [u8]) })
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/io/reader_stream.rs
tokio-util/src/io/reader_stream.rs
use bytes::{Bytes, BytesMut};
use futures_core::stream::Stream;
use pin_project_lite::pin_project;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::AsyncRead;

// Default size of each read, in bytes, when none is specified.
const DEFAULT_CAPACITY: usize = 4096;

pin_project! {
    /// Convert an [`AsyncRead`] into a [`Stream`] of byte chunks.
    ///
    /// This stream is fused. It performs the inverse operation of
    /// [`StreamReader`].
    ///
    /// # Example
    ///
    /// ```
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() -> std::io::Result<()> {
    /// use tokio_stream::StreamExt;
    /// use tokio_util::io::ReaderStream;
    ///
    /// // Create a stream of data.
    /// let data = b"hello, world!";
    /// let mut stream = ReaderStream::new(&data[..]);
    ///
    /// // Read all of the chunks into a vector.
    /// let mut stream_contents = Vec::new();
    /// while let Some(chunk) = stream.next().await {
    ///    stream_contents.extend_from_slice(&chunk?);
    /// }
    ///
    /// // Once the chunks are concatenated, we should have the
    /// // original data.
    /// assert_eq!(stream_contents, data);
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// [`AsyncRead`]: tokio::io::AsyncRead
    /// [`StreamReader`]: crate::io::StreamReader
    /// [`Stream`]: futures_core::Stream
    #[derive(Debug)]
    pub struct ReaderStream<R> {
        // Reader itself.
        //
        // This value is `None` if the stream has terminated.
        #[pin]
        reader: Option<R>,
        // Working buffer, used to optimize allocations.
        buf: BytesMut,
        capacity: usize,
    }
}

impl<R: AsyncRead> ReaderStream<R> {
    /// Convert an [`AsyncRead`] into a [`Stream`] with item type
    /// `Result<Bytes, std::io::Error>`.
    ///
    /// Currently, the default capacity is 4096 bytes (4 KiB).
    /// This capacity is not part of the semver contract
    /// and may be tweaked in future releases without
    /// requiring a major version bump.
    ///
    /// [`AsyncRead`]: tokio::io::AsyncRead
    /// [`Stream`]: futures_core::Stream
    pub fn new(reader: R) -> Self {
        ReaderStream {
            reader: Some(reader),
            buf: BytesMut::new(),
            capacity: DEFAULT_CAPACITY,
        }
    }

    /// Convert an [`AsyncRead`] into a [`Stream`] with item type
    /// `Result<Bytes, std::io::Error>`,
    /// with a specific read buffer initial capacity.
    ///
    /// [`AsyncRead`]: tokio::io::AsyncRead
    /// [`Stream`]: futures_core::Stream
    pub fn with_capacity(reader: R, capacity: usize) -> Self {
        ReaderStream {
            reader: Some(reader),
            buf: BytesMut::with_capacity(capacity),
            capacity,
        }
    }
}

impl<R: AsyncRead> Stream for ReaderStream<R> {
    type Item = std::io::Result<Bytes>;
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        use crate::util::poll_read_buf;

        let mut this = self.as_mut().project();

        // `reader` is `None` once the stream has terminated; the stream is
        // fused, so keep returning `None` after that.
        let reader = match this.reader.as_pin_mut() {
            Some(r) => r,
            None => return Poll::Ready(None),
        };

        // `split()` below hands the previous chunk's allocation to the caller,
        // so reserve a fresh buffer once the old capacity is exhausted.
        if this.buf.capacity() == 0 {
            this.buf.reserve(*this.capacity);
        }

        match poll_read_buf(reader, cx, &mut this.buf) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Err(err)) => {
                // Drop the reader: an error terminates the stream.
                self.project().reader.set(None);
                Poll::Ready(Some(Err(err)))
            }
            Poll::Ready(Ok(0)) => {
                // EOF: drop the reader and end the stream.
                self.project().reader.set(None);
                Poll::Ready(None)
            }
            Poll::Ready(Ok(_)) => {
                // Split off the filled portion as an immutable `Bytes` chunk.
                let chunk = this.buf.split();
                Poll::Ready(Some(Ok(chunk.freeze())))
            }
        }
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/io/mod.rs
tokio-util/src/io/mod.rs
//! Helpers for IO related tasks. //! //! The stream types are often used in combination with hyper or reqwest, as they //! allow converting between a hyper [`Body`] and [`AsyncRead`]. //! //! The [`SyncIoBridge`] type converts from the world of async I/O //! to synchronous I/O; this may often come up when using synchronous APIs //! inside [`tokio::task::spawn_blocking`]. //! //! [`Body`]: https://docs.rs/hyper/0.13/hyper/struct.Body.html //! [`AsyncRead`]: tokio::io::AsyncRead mod copy_to_bytes; mod inspect; mod read_buf; mod reader_stream; pub mod simplex; mod sink_writer; mod stream_reader; cfg_io_util! { mod read_arc; pub use self::read_arc::read_exact_arc; mod sync_bridge; pub use self::sync_bridge::SyncIoBridge; } pub use self::copy_to_bytes::CopyToBytes; pub use self::inspect::{InspectReader, InspectWriter}; pub use self::read_buf::read_buf; pub use self::reader_stream::ReaderStream; pub use self::sink_writer::SinkWriter; pub use self::stream_reader::StreamReader; pub use crate::util::{poll_read_buf, poll_write_buf};
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/io/copy_to_bytes.rs
tokio-util/src/io/copy_to_bytes.rs
use bytes::Bytes; use futures_core::stream::Stream; use futures_sink::Sink; use pin_project_lite::pin_project; use std::pin::Pin; use std::task::{Context, Poll}; pin_project! { /// A helper that wraps a [`Sink`]`<`[`Bytes`]`>` and converts it into a /// [`Sink`]`<&'a [u8]>` by copying each byte slice into an owned [`Bytes`]. /// /// See the documentation for [`SinkWriter`] for an example. /// /// [`Bytes`]: bytes::Bytes /// [`SinkWriter`]: crate::io::SinkWriter /// [`Sink`]: futures_sink::Sink #[derive(Debug)] pub struct CopyToBytes<S> { #[pin] inner: S, } } impl<S> CopyToBytes<S> { /// Creates a new [`CopyToBytes`]. pub fn new(inner: S) -> Self { Self { inner } } /// Gets a reference to the underlying sink. pub fn get_ref(&self) -> &S { &self.inner } /// Gets a mutable reference to the underlying sink. pub fn get_mut(&mut self) -> &mut S { &mut self.inner } /// Consumes this [`CopyToBytes`], returning the underlying sink. pub fn into_inner(self) -> S { self.inner } } impl<'a, S> Sink<&'a [u8]> for CopyToBytes<S> where S: Sink<Bytes>, { type Error = S::Error; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { self.project().inner.poll_ready(cx) } fn start_send(self: Pin<&mut Self>, item: &'a [u8]) -> Result<(), Self::Error> { self.project() .inner .start_send(Bytes::copy_from_slice(item)) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { self.project().inner.poll_flush(cx) } fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { self.project().inner.poll_close(cx) } } impl<S: Stream> Stream for CopyToBytes<S> { type Item = S::Item; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { self.project().inner.poll_next(cx) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/io/read_buf.rs
tokio-util/src/io/read_buf.rs
use bytes::BufMut; use std::future::Future; use std::io; use std::pin::Pin; use std::task::{Context, Poll}; use tokio::io::AsyncRead; /// Read data from an `AsyncRead` into an implementer of the [`BufMut`] trait. /// /// [`BufMut`]: bytes::BufMut /// /// # Example /// /// ``` /// use bytes::{Bytes, BytesMut}; /// use tokio_stream as stream; /// use tokio::io::Result; /// use tokio_util::io::{StreamReader, read_buf}; /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> std::io::Result<()> { /// /// // Create a reader from an iterator. This particular reader will always be /// // ready. /// let mut read = StreamReader::new(stream::iter(vec![Result::Ok(Bytes::from_static(&[0, 1, 2, 3]))])); /// /// let mut buf = BytesMut::new(); /// let mut reads = 0; /// /// loop { /// reads += 1; /// let n = read_buf(&mut read, &mut buf).await?; /// /// if n == 0 { /// break; /// } /// } /// /// // one or more reads might be necessary. /// assert!(reads >= 1); /// assert_eq!(&buf[..], &[0, 1, 2, 3]); /// # Ok(()) /// # } /// ``` pub async fn read_buf<R, B>(read: &mut R, buf: &mut B) -> io::Result<usize> where R: AsyncRead + Unpin, B: BufMut, { return ReadBufFn(read, buf).await; struct ReadBufFn<'a, R, B>(&'a mut R, &'a mut B); impl<'a, R, B> Future for ReadBufFn<'a, R, B> where R: AsyncRead + Unpin, B: BufMut, { type Output = io::Result<usize>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let this = &mut *self; crate::util::poll_read_buf(Pin::new(this.0), cx, this.1) } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/io/inspect.rs
tokio-util/src/io/inspect.rs
use pin_project_lite::pin_project; use std::io::{IoSlice, Result}; use std::pin::Pin; use std::task::{ready, Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; pin_project! { /// An adapter that lets you inspect the data that's being read. /// /// This is useful for things like hashing data as it's read in. pub struct InspectReader<R, F> { #[pin] reader: R, f: F, } } impl<R, F> InspectReader<R, F> { /// Create a new `InspectReader`, wrapping `reader` and calling `f` for the /// new data supplied by each read call. /// /// The closure will only be called with an empty slice if the inner reader /// returns without reading data into the buffer. This happens at EOF, or if /// `poll_read` is called with a zero-size buffer. pub fn new(reader: R, f: F) -> InspectReader<R, F> where R: AsyncRead, F: FnMut(&[u8]), { InspectReader { reader, f } } /// Consumes the `InspectReader`, returning the wrapped reader pub fn into_inner(self) -> R { self.reader } } impl<R: AsyncRead, F: FnMut(&[u8])> AsyncRead for InspectReader<R, F> { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<Result<()>> { let me = self.project(); let filled_length = buf.filled().len(); ready!(me.reader.poll_read(cx, buf))?; (me.f)(&buf.filled()[filled_length..]); Poll::Ready(Ok(())) } } impl<R: AsyncWrite, F> AsyncWrite for InspectReader<R, F> { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<std::result::Result<usize, std::io::Error>> { self.project().reader.poll_write(cx, buf) } fn poll_flush( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<std::result::Result<(), std::io::Error>> { self.project().reader.poll_flush(cx) } fn poll_shutdown( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<std::result::Result<(), std::io::Error>> { self.project().reader.poll_shutdown(cx) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll<Result<usize>> { 
self.project().reader.poll_write_vectored(cx, bufs) } fn is_write_vectored(&self) -> bool { self.reader.is_write_vectored() } } pin_project! { /// An adapter that lets you inspect the data that's being written. /// /// This is useful for things like hashing data as it's written out. pub struct InspectWriter<W, F> { #[pin] writer: W, f: F, } } impl<W, F> InspectWriter<W, F> { /// Create a new `InspectWriter`, wrapping `write` and calling `f` for the /// data successfully written by each write call. /// /// The closure `f` will never be called with an empty slice. A vectored /// write can result in multiple calls to `f` - at most one call to `f` per /// buffer supplied to `poll_write_vectored`. pub fn new(writer: W, f: F) -> InspectWriter<W, F> where W: AsyncWrite, F: FnMut(&[u8]), { InspectWriter { writer, f } } /// Consumes the `InspectWriter`, returning the wrapped writer pub fn into_inner(self) -> W { self.writer } } impl<W: AsyncWrite, F: FnMut(&[u8])> AsyncWrite for InspectWriter<W, F> { fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<Result<usize>> { let me = self.project(); let res = me.writer.poll_write(cx, buf); if let Poll::Ready(Ok(count)) = res { if count != 0 { (me.f)(&buf[..count]); } } res } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> { let me = self.project(); me.writer.poll_flush(cx) } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> { let me = self.project(); me.writer.poll_shutdown(cx) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll<Result<usize>> { let me = self.project(); let res = me.writer.poll_write_vectored(cx, bufs); if let Poll::Ready(Ok(mut count)) = res { for buf in bufs { if count == 0 { break; } let size = count.min(buf.len()); if size != 0 { (me.f)(&buf[..size]); count -= size; } } } res } fn is_write_vectored(&self) -> bool { self.writer.is_write_vectored() } } impl<W: AsyncRead, F> 
AsyncRead for InspectWriter<W, F> { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<std::io::Result<()>> { self.project().writer.poll_read(cx, buf) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/future/with_cancellation_token.rs
tokio-util/src/future/with_cancellation_token.rs
use std::{ future::Future, pin::Pin, task::{Context, Poll}, }; use pin_project_lite::pin_project; use crate::sync::{CancellationToken, RunUntilCancelledFuture, RunUntilCancelledFutureOwned}; pin_project! { /// A [`Future`] that is resolved once the corresponding [`CancellationToken`] /// is cancelled or a given [`Future`] gets resolved. /// /// This future is immediately resolved if the corresponding [`CancellationToken`] /// is already cancelled, otherwise, in case of concurrent completion and /// cancellation, this is biased towards the future completion. #[must_use = "futures do nothing unless polled"] pub struct WithCancellationTokenFuture<'a, F: Future> { #[pin] run_until_cancelled: Option<RunUntilCancelledFuture<'a, F>> } } impl<'a, F: Future> WithCancellationTokenFuture<'a, F> { pub(crate) fn new(cancellation_token: &'a CancellationToken, future: F) -> Self { Self { run_until_cancelled: (!cancellation_token.is_cancelled()) .then(|| RunUntilCancelledFuture::new(cancellation_token, future)), } } } impl<'a, F: Future> Future for WithCancellationTokenFuture<'a, F> { type Output = Option<F::Output>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let this = self.project(); match this.run_until_cancelled.as_pin_mut() { Some(fut) => fut.poll(cx), None => Poll::Ready(None), } } } pin_project! { /// A [`Future`] that is resolved once the corresponding [`CancellationToken`] /// is cancelled or a given [`Future`] gets resolved. /// /// This future is immediately resolved if the corresponding [`CancellationToken`] /// is already cancelled, otherwise, in case of concurrent completion and /// cancellation, this is biased towards the future completion. 
#[must_use = "futures do nothing unless polled"] pub struct WithCancellationTokenFutureOwned<F: Future> { #[pin] run_until_cancelled: Option<RunUntilCancelledFutureOwned<F>> } } impl<F: Future> WithCancellationTokenFutureOwned<F> { pub(crate) fn new(cancellation_token: CancellationToken, future: F) -> Self { Self { run_until_cancelled: (!cancellation_token.is_cancelled()) .then(|| RunUntilCancelledFutureOwned::new(cancellation_token, future)), } } } impl<F: Future> Future for WithCancellationTokenFutureOwned<F> { type Output = Option<F::Output>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let this = self.project(); match this.run_until_cancelled.as_pin_mut() { Some(fut) => fut.poll(cx), None => Poll::Ready(None), } } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/sync/mpsc.rs
tokio-util/src/sync/mpsc.rs
use futures_sink::Sink; use std::pin::Pin; use std::task::{Context, Poll}; use std::{fmt, mem}; use tokio::sync::mpsc::OwnedPermit; use tokio::sync::mpsc::Sender; use super::ReusableBoxFuture; /// Error returned by the `PollSender` when the channel is closed. #[derive(Debug)] pub struct PollSendError<T>(Option<T>); impl<T> PollSendError<T> { /// Consumes the stored value, if any. /// /// If this error was encountered when calling `start_send`/`send_item`, this will be the item /// that the caller attempted to send. Otherwise, it will be `None`. pub fn into_inner(self) -> Option<T> { self.0 } } impl<T> fmt::Display for PollSendError<T> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "channel closed") } } impl<T: fmt::Debug> std::error::Error for PollSendError<T> {} #[derive(Debug)] enum State<T> { Idle(Sender<T>), Acquiring, ReadyToSend(OwnedPermit<T>), Closed, } /// A wrapper around [`mpsc::Sender`] that can be polled. /// /// [`mpsc::Sender`]: tokio::sync::mpsc::Sender #[derive(Debug)] pub struct PollSender<T> { sender: Option<Sender<T>>, state: State<T>, acquire: PollSenderFuture<T>, } // Creates a future for acquiring a permit from the underlying channel. This is used to ensure // there's capacity for a send to complete. // // By reusing the same async fn for both `Some` and `None`, we make sure every future passed to // ReusableBoxFuture has the same underlying type, and hence the same size and alignment. 
async fn make_acquire_future<T>( data: Option<Sender<T>>, ) -> Result<OwnedPermit<T>, PollSendError<T>> { match data { Some(sender) => sender .reserve_owned() .await .map_err(|_| PollSendError(None)), None => unreachable!("this future should not be pollable in this state"), } } type InnerFuture<'a, T> = ReusableBoxFuture<'a, Result<OwnedPermit<T>, PollSendError<T>>>; #[derive(Debug)] // TODO: This should be replace with a type_alias_impl_trait to eliminate `'static` and all the transmutes struct PollSenderFuture<T>(InnerFuture<'static, T>); impl<T> PollSenderFuture<T> { /// Create with an empty inner future with no `Send` bound. fn empty() -> Self { // We don't use `make_acquire_future` here because our relaxed bounds on `T` are not // compatible with the transitive bounds required by `Sender<T>`. Self(ReusableBoxFuture::new(async { unreachable!() })) } } impl<T: Send> PollSenderFuture<T> { /// Create with an empty inner future. fn new() -> Self { let v = InnerFuture::new(make_acquire_future(None)); // This is safe because `make_acquire_future(None)` is actually `'static` Self(unsafe { mem::transmute::<InnerFuture<'_, T>, InnerFuture<'static, T>>(v) }) } /// Poll the inner future. fn poll(&mut self, cx: &mut Context<'_>) -> Poll<Result<OwnedPermit<T>, PollSendError<T>>> { self.0.poll(cx) } /// Replace the inner future. fn set(&mut self, sender: Option<Sender<T>>) { let inner: *mut InnerFuture<'static, T> = &mut self.0; let inner: *mut InnerFuture<'_, T> = inner.cast(); // SAFETY: The `make_acquire_future(sender)` future must not exist after the type `T` // becomes invalid, and this casts away the type-level lifetime check for that. However, the // inner future is never moved out of this `PollSenderFuture<T>`, so the future will not // live longer than the `PollSenderFuture<T>` lives. A `PollSenderFuture<T>` is guaranteed // to not exist after the type `T` becomes invalid, because it is annotated with a `T`, so // this is ok. 
let inner = unsafe { &mut *inner }; inner.set(make_acquire_future(sender)); } } impl<T: Send> PollSender<T> { /// Creates a new `PollSender`. pub fn new(sender: Sender<T>) -> Self { Self { sender: Some(sender.clone()), state: State::Idle(sender), acquire: PollSenderFuture::new(), } } fn take_state(&mut self) -> State<T> { mem::replace(&mut self.state, State::Closed) } /// Attempts to prepare the sender to receive a value. /// /// This method must be called and return `Poll::Ready(Ok(()))` prior to each call to /// `send_item`. /// /// This method returns `Poll::Ready` once the underlying channel is ready to receive a value, /// by reserving a slot in the channel for the item to be sent. If this method returns /// `Poll::Pending`, the current task is registered to be notified (via /// `cx.waker().wake_by_ref()`) when `poll_reserve` should be called again. /// /// # Errors /// /// If the channel is closed, an error will be returned. This is a permanent state. pub fn poll_reserve(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), PollSendError<T>>> { loop { let (result, next_state) = match self.take_state() { State::Idle(sender) => { // Start trying to acquire a permit to reserve a slot for our send, and // immediately loop back around to poll it the first time. self.acquire.set(Some(sender)); (None, State::Acquiring) } State::Acquiring => match self.acquire.poll(cx) { // Channel has capacity. Poll::Ready(Ok(permit)) => { (Some(Poll::Ready(Ok(()))), State::ReadyToSend(permit)) } // Channel is closed. Poll::Ready(Err(e)) => (Some(Poll::Ready(Err(e))), State::Closed), // Channel doesn't have capacity yet, so we need to wait. Poll::Pending => (Some(Poll::Pending), State::Acquiring), }, // We're closed, either by choice or because the underlying sender was closed. s @ State::Closed => (Some(Poll::Ready(Err(PollSendError(None)))), s), // We're already ready to send an item. 
s @ State::ReadyToSend(_) => (Some(Poll::Ready(Ok(()))), s), }; self.state = next_state; if let Some(result) = result { return result; } } } /// Sends an item to the channel. /// /// Before calling `send_item`, `poll_reserve` must be called with a successful return /// value of `Poll::Ready(Ok(()))`. /// /// # Errors /// /// If the channel is closed, an error will be returned. This is a permanent state. /// /// # Panics /// /// If `poll_reserve` was not successfully called prior to calling `send_item`, then this method /// will panic. #[track_caller] pub fn send_item(&mut self, value: T) -> Result<(), PollSendError<T>> { let (result, next_state) = match self.take_state() { State::Idle(_) | State::Acquiring => { panic!("`send_item` called without first calling `poll_reserve`") } // We have a permit to send our item, so go ahead, which gets us our sender back. State::ReadyToSend(permit) => (Ok(()), State::Idle(permit.send(value))), // We're closed, either by choice or because the underlying sender was closed. State::Closed => (Err(PollSendError(Some(value))), State::Closed), }; // Handle deferred closing if `close` was called between `poll_reserve` and `send_item`. self.state = if self.sender.is_some() { next_state } else { State::Closed }; result } /// Checks whether this sender is closed. /// /// The underlying channel that this sender was wrapping may still be open. pub fn is_closed(&self) -> bool { matches!(self.state, State::Closed) || self.sender.is_none() } /// Gets a reference to the `Sender` of the underlying channel. /// /// If `PollSender` has been closed, `None` is returned. The underlying channel that this sender /// was wrapping may still be open. pub fn get_ref(&self) -> Option<&Sender<T>> { self.sender.as_ref() } /// Closes this sender. /// /// No more messages will be able to be sent from this sender, but the underlying channel will /// remain open until all senders have dropped, or until the [`Receiver`] closes the channel. 
/// /// If a slot was previously reserved by calling `poll_reserve`, then a final call can be made /// to `send_item` in order to consume the reserved slot. After that, no further sends will be /// possible. If you do not intend to send another item, you can release the reserved slot back /// to the underlying sender by calling [`abort_send`]. /// /// [`abort_send`]: crate::sync::PollSender::abort_send /// [`Receiver`]: tokio::sync::mpsc::Receiver pub fn close(&mut self) { // Mark ourselves officially closed by dropping our main sender. self.sender = None; // If we're already idle, closed, or we haven't yet reserved a slot, we can quickly // transition to the closed state. Otherwise, leave the existing permit in place for the // caller if they want to complete the send. match self.state { State::Idle(_) => self.state = State::Closed, State::Acquiring => { self.acquire.set(None); self.state = State::Closed; } _ => {} } } /// Aborts the current in-progress send, if any. /// /// Returns `true` if a send was aborted. If the sender was closed prior to calling /// `abort_send`, then the sender will remain in the closed state, otherwise the sender will be /// ready to attempt another send. pub fn abort_send(&mut self) -> bool { // We may have been closed in the meantime, after a call to `poll_reserve` already // succeeded. We'll check if `self.sender` is `None` to see if we should transition to the // closed state when we actually abort a send, rather than resetting ourselves back to idle. let (result, next_state) = match self.take_state() { // We're currently trying to reserve a slot to send into. State::Acquiring => { // Replacing the future drops the in-flight one. self.acquire.set(None); // If we haven't closed yet, we have to clone our stored sender since we have no way // to get it back from the acquire future we just dropped. let state = match self.sender.clone() { Some(sender) => State::Idle(sender), None => State::Closed, }; (true, state) } // We got the permit. 
If we haven't closed yet, get the sender back. State::ReadyToSend(permit) => { let state = if self.sender.is_some() { State::Idle(permit.release()) } else { State::Closed }; (true, state) } s => (false, s), }; self.state = next_state; result } } impl<T> Clone for PollSender<T> { /// Clones this `PollSender`. /// /// The resulting `PollSender` will have an initial state identical to calling `PollSender::new`. fn clone(&self) -> PollSender<T> { let (sender, state) = match self.sender.clone() { Some(sender) => (Some(sender.clone()), State::Idle(sender)), None => (None, State::Closed), }; Self { sender, state, acquire: PollSenderFuture::empty(), } } } impl<T: Send> Sink<T> for PollSender<T> { type Error = PollSendError<T>; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Pin::into_inner(self).poll_reserve(cx) } fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) } fn start_send(self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { Pin::into_inner(self).send_item(item) } fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Pin::into_inner(self).close(); Poll::Ready(Ok(())) } }
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/sync/mod.rs
tokio-util/src/sync/mod.rs
//! Synchronization primitives mod cancellation_token; pub use cancellation_token::{ guard::DropGuard, guard_ref::DropGuardRef, CancellationToken, WaitForCancellationFuture, WaitForCancellationFutureOwned, }; pub(crate) use cancellation_token::{RunUntilCancelledFuture, RunUntilCancelledFutureOwned}; mod mpsc; pub use mpsc::{PollSendError, PollSender}; mod poll_semaphore; pub use poll_semaphore::PollSemaphore; mod reusable_box; pub use reusable_box::ReusableBoxFuture; #[cfg(test)] mod tests;
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/sync/cancellation_token.rs
tokio-util/src/sync/cancellation_token.rs
//! An asynchronously awaitable [`CancellationToken`].
//! The token allows signaling a cancellation request to one or more tasks.
pub(crate) mod guard;
pub(crate) mod guard_ref;
mod tree_node;

use crate::loom::sync::Arc;
use crate::util::MaybeDangling;
use core::future::Future;
use core::pin::Pin;
use core::task::{Context, Poll};
use guard::DropGuard;
use guard_ref::DropGuardRef;
use pin_project_lite::pin_project;

/// A token which can be used to signal a cancellation request to one or more
/// tasks.
///
/// Tasks can call [`CancellationToken::cancelled()`] in order to
/// obtain a Future which will be resolved when cancellation is requested.
///
/// Cancellation can be requested through the [`CancellationToken::cancel`] method.
///
/// # Examples
///
/// ```no_run
/// use tokio::select;
/// use tokio_util::sync::CancellationToken;
///
/// #[tokio::main]
/// async fn main() {
///     let token = CancellationToken::new();
///     let cloned_token = token.clone();
///
///     let join_handle = tokio::spawn(async move {
///         // Wait for either cancellation or a very long time
///         select! {
///             _ = cloned_token.cancelled() => {
///                 // The token was cancelled
///                 5
///             }
///             _ = tokio::time::sleep(std::time::Duration::from_secs(9999)) => {
///                 99
///             }
///         }
///     });
///
///     tokio::spawn(async move {
///         tokio::time::sleep(std::time::Duration::from_millis(10)).await;
///         token.cancel();
///     });
///
///     assert_eq!(5, join_handle.await.unwrap());
/// }
/// ```
pub struct CancellationToken {
    // Shared node in the cancellation tree; all clones of this token point at
    // the same node.
    inner: Arc<tree_node::TreeNode>,
}

impl std::panic::UnwindSafe for CancellationToken {}
impl std::panic::RefUnwindSafe for CancellationToken {}

pin_project! {
    /// A Future that is resolved once the corresponding [`CancellationToken`]
    /// is cancelled.
    #[must_use = "futures do nothing unless polled"]
    pub struct WaitForCancellationFuture<'a> {
        cancellation_token: &'a CancellationToken,
        #[pin]
        future: tokio::sync::futures::Notified<'a>,
    }
}

pin_project! {
    /// A Future that is resolved once the corresponding [`CancellationToken`]
    /// is cancelled.
    ///
    /// This is the counterpart to [`WaitForCancellationFuture`] that takes
    /// [`CancellationToken`] by value instead of using a reference.
    #[must_use = "futures do nothing unless polled"]
    pub struct WaitForCancellationFutureOwned {
        // This field internally has a reference to the cancellation token, but camouflages
        // the relationship with `'static`. To avoid Undefined Behavior, we must ensure
        // that the reference is only used while the cancellation token is still alive. To
        // do that, we ensure that the future is the first field, so that it is dropped
        // before the cancellation token.
        //
        // We use `MaybeDangling` here because without it, the compiler could assert
        // the reference inside `future` to be valid even after the destructor of that
        // field runs. (Specifically, when the `WaitForCancellationFutureOwned` is passed
        // as an argument to a function, the reference can be asserted to be valid for the
        // rest of that function.) To avoid that, we use `MaybeDangling` which tells the
        // compiler that the reference stored inside it might not be valid.
        //
        // See <https://users.rust-lang.org/t/unsafe-code-review-semi-owning-weak-rwlock-t-guard/95706>
        // for more info.
        #[pin]
        future: MaybeDangling<tokio::sync::futures::Notified<'static>>,
        cancellation_token: CancellationToken,
    }
}

// ===== impl CancellationToken =====

impl core::fmt::Debug for CancellationToken {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("CancellationToken")
            .field("is_cancelled", &self.is_cancelled())
            .finish()
    }
}

impl Clone for CancellationToken {
    /// Creates a clone of the [`CancellationToken`] which will get cancelled
    /// whenever the current token gets cancelled, and vice versa.
    fn clone(&self) -> Self {
        // Record one more handle on the shared tree node before cloning the Arc,
        // mirroring the decrease in `Drop`.
        tree_node::increase_handle_refcount(&self.inner);
        CancellationToken {
            inner: self.inner.clone(),
        }
    }
}

impl Drop for CancellationToken {
    fn drop(&mut self) {
        // Balance the handle refcount taken in `new`/`clone`/`child_token`.
        tree_node::decrease_handle_refcount(&self.inner);
    }
}

impl Default for CancellationToken {
    fn default() -> CancellationToken {
        CancellationToken::new()
    }
}

impl CancellationToken {
    /// Creates a new [`CancellationToken`] in the non-cancelled state.
    pub fn new() -> CancellationToken {
        CancellationToken {
            inner: Arc::new(tree_node::TreeNode::new()),
        }
    }

    /// Creates a [`CancellationToken`] which will get cancelled whenever the
    /// current token gets cancelled. Unlike a cloned [`CancellationToken`],
    /// cancelling a child token does not cancel the parent token.
    ///
    /// If the current token is already cancelled, the child token will get
    /// returned in cancelled state.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use tokio::select;
    /// use tokio_util::sync::CancellationToken;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let token = CancellationToken::new();
    ///     let child_token = token.child_token();
    ///
    ///     let join_handle = tokio::spawn(async move {
    ///         // Wait for either cancellation or a very long time
    ///         select! {
    ///             _ = child_token.cancelled() => {
    ///                 // The token was cancelled
    ///                 5
    ///             }
    ///             _ = tokio::time::sleep(std::time::Duration::from_secs(9999)) => {
    ///                 99
    ///             }
    ///         }
    ///     });
    ///
    ///     tokio::spawn(async move {
    ///         tokio::time::sleep(std::time::Duration::from_millis(10)).await;
    ///         token.cancel();
    ///     });
    ///
    ///     assert_eq!(5, join_handle.await.unwrap());
    /// }
    /// ```
    pub fn child_token(&self) -> CancellationToken {
        CancellationToken {
            inner: tree_node::child_node(&self.inner),
        }
    }

    /// Cancel the [`CancellationToken`] and all child tokens which had been
    /// derived from it.
    ///
    /// This will wake up all tasks which are waiting for cancellation.
    ///
    /// Be aware that cancellation is not an atomic operation. It is possible
    /// for another thread running in parallel with a call to `cancel` to first
    /// receive `true` from `is_cancelled` on one child node, and then receive
    /// `false` from `is_cancelled` on another child node. However, once the
    /// call to `cancel` returns, all child nodes have been fully cancelled.
    pub fn cancel(&self) {
        tree_node::cancel(&self.inner);
    }

    /// Returns `true` if the `CancellationToken` is cancelled.
    pub fn is_cancelled(&self) -> bool {
        tree_node::is_cancelled(&self.inner)
    }

    /// Returns a [`Future`] that gets fulfilled when cancellation is requested.
    ///
    /// Equivalent to:
    ///
    /// ```ignore
    /// async fn cancelled(&self);
    /// ```
    ///
    /// The future will complete immediately if the token is already cancelled
    /// when this method is called.
    ///
    /// # Cancellation safety
    ///
    /// This method is cancel safe.
    pub fn cancelled(&self) -> WaitForCancellationFuture<'_> {
        WaitForCancellationFuture {
            cancellation_token: self,
            future: self.inner.notified(),
        }
    }

    /// Returns a [`Future`] that gets fulfilled when cancellation is requested.
    ///
    /// Equivalent to:
    ///
    /// ```ignore
    /// async fn cancelled_owned(self);
    /// ```
    ///
    /// The future will complete immediately if the token is already cancelled
    /// when this method is called.
    ///
    /// The function takes self by value and returns a future that owns the
    /// token.
    ///
    /// # Cancellation safety
    ///
    /// This method is cancel safe.
    pub fn cancelled_owned(self) -> WaitForCancellationFutureOwned {
        WaitForCancellationFutureOwned::new(self)
    }

    /// Creates a [`DropGuard`] for this token.
    ///
    /// Returned guard will cancel this token (and all its children) on drop
    /// unless disarmed.
    pub fn drop_guard(self) -> DropGuard {
        DropGuard { inner: Some(self) }
    }

    /// Creates a [`DropGuardRef`] for this token.
    ///
    /// Returned guard will cancel this token (and all its children) on drop
    /// unless disarmed.
    pub fn drop_guard_ref(&self) -> DropGuardRef<'_> {
        DropGuardRef { inner: Some(self) }
    }

    /// Runs a future to completion and returns its result wrapped inside of an `Option`
    /// unless the [`CancellationToken`] is cancelled. In that case the function returns
    /// `None` and the future gets dropped.
    ///
    /// # Fairness
    ///
    /// Calling this on an already-cancelled token directly returns `None`.
    /// For all subsequent polls, in case of concurrent completion and
    /// cancellation, this is biased towards the future completion.
    ///
    /// # Cancellation safety
    ///
    /// This method is only cancel safe if `fut` is cancel safe.
    pub async fn run_until_cancelled<F>(&self, fut: F) -> Option<F::Output>
    where
        F: Future,
    {
        // Fast path: an already-cancelled token never polls `fut` at all.
        if self.is_cancelled() {
            None
        } else {
            RunUntilCancelledFuture {
                cancellation: self.cancelled(),
                future: fut,
            }
            .await
        }
    }

    /// Runs a future to completion and returns its result wrapped inside of an `Option`
    /// unless the [`CancellationToken`] is cancelled. In that case the function returns
    /// `None` and the future gets dropped.
    ///
    /// The function takes self by value and returns a future that owns the token.
    ///
    /// # Fairness
    ///
    /// Calling this on an already-cancelled token directly returns `None`.
    /// For all subsequent polls, in case of concurrent completion and
    /// cancellation, this is biased towards the future completion.
    ///
    /// # Cancellation safety
    ///
    /// This method is only cancel safe if `fut` is cancel safe.
    pub async fn run_until_cancelled_owned<F>(self, fut: F) -> Option<F::Output>
    where
        F: Future,
    {
        // Delegates to the by-reference variant; `self` is kept alive by this
        // async fn's state until the returned future completes.
        self.run_until_cancelled(fut).await
    }
}

// ===== impl WaitForCancellationFuture =====

impl<'a> core::fmt::Debug for WaitForCancellationFuture<'a> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("WaitForCancellationFuture").finish()
    }
}

impl<'a> Future for WaitForCancellationFuture<'a> {
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        let mut this = self.project();
        loop {
            if this.cancellation_token.is_cancelled() {
                return Poll::Ready(());
            }

            // No wakeups can be lost here because there is always a call to
            // `is_cancelled` between the creation of the future and the call to
            // `poll`, and the code that sets the cancelled flag does so before
            // waking the `Notified`.
            if this.future.as_mut().poll(cx).is_pending() {
                return Poll::Pending;
            }

            // The `Notified` completed without the token being cancelled
            // (spurious notification); re-arm it and re-check the flag.
            this.future.set(this.cancellation_token.inner.notified());
        }
    }
}

// ===== impl WaitForCancellationFutureOwned =====

impl core::fmt::Debug for WaitForCancellationFutureOwned {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("WaitForCancellationFutureOwned").finish()
    }
}

impl WaitForCancellationFutureOwned {
    fn new(cancellation_token: CancellationToken) -> Self {
        WaitForCancellationFutureOwned {
            // cancellation_token holds a heap allocation and is guaranteed to have a
            // stable deref, thus it would be ok to move the cancellation_token while
            // the future holds a reference to it.
            //
            // # Safety
            //
            // cancellation_token is dropped after future due to the field ordering.
            future: MaybeDangling::new(unsafe { Self::new_future(&cancellation_token) }),
            cancellation_token,
        }
    }

    /// # Safety
    /// The returned future must be destroyed before the cancellation token is
    /// destroyed.
    unsafe fn new_future(
        cancellation_token: &CancellationToken,
    ) -> tokio::sync::futures::Notified<'static> {
        let inner_ptr = Arc::as_ptr(&cancellation_token.inner);
        // SAFETY: The `Arc::as_ptr` method guarantees that `inner_ptr` remains
        // valid until the strong count of the Arc drops to zero, and the caller
        // guarantees that they will drop the future before that happens.
        (*inner_ptr).notified()
    }
}

impl Future for WaitForCancellationFutureOwned {
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        let mut this = self.project();

        loop {
            if this.cancellation_token.is_cancelled() {
                return Poll::Ready(());
            }

            // No wakeups can be lost here because there is always a call to
            // `is_cancelled` between the creation of the future and the call to
            // `poll`, and the code that sets the cancelled flag does so before
            // waking the `Notified`.
            if this.future.as_mut().poll(cx).is_pending() {
                return Poll::Pending;
            }

            // # Safety
            //
            // cancellation_token is dropped after future due to the field ordering.
            this.future.set(MaybeDangling::new(unsafe {
                Self::new_future(this.cancellation_token)
            }));
        }
    }
}

pin_project! {
    /// A Future that is resolved once the corresponding [`CancellationToken`]
    /// is cancelled or a given Future gets resolved. It is biased towards the
    /// Future completion.
    #[must_use = "futures do nothing unless polled"]
    pub(crate) struct RunUntilCancelledFuture<'a, F: Future> {
        #[pin]
        cancellation: WaitForCancellationFuture<'a>,
        #[pin]
        future: F,
    }
}

impl<'a, F: Future> RunUntilCancelledFuture<'a, F> {
    pub(crate) fn new(cancellation_token: &'a CancellationToken, future: F) -> Self {
        Self {
            cancellation: cancellation_token.cancelled(),
            future,
        }
    }
}

impl<'a, F: Future> Future for RunUntilCancelledFuture<'a, F> {
    type Output = Option<F::Output>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();
        // Poll the wrapped future first so that, on simultaneous readiness,
        // completion wins over cancellation (the documented bias).
        if let Poll::Ready(res) = this.future.poll(cx) {
            Poll::Ready(Some(res))
        } else if this.cancellation.poll(cx).is_ready() {
            Poll::Ready(None)
        } else {
            Poll::Pending
        }
    }
}

pin_project! {
    /// A Future that is resolved once the corresponding [`CancellationToken`]
    /// is cancelled or a given Future gets resolved. It is biased towards the
    /// Future completion.
    #[must_use = "futures do nothing unless polled"]
    pub(crate) struct RunUntilCancelledFutureOwned<F: Future> {
        #[pin]
        cancellation: WaitForCancellationFutureOwned,
        #[pin]
        future: F,
    }
}

impl<F: Future> Future for RunUntilCancelledFutureOwned<F> {
    type Output = Option<F::Output>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();
        // Same bias as `RunUntilCancelledFuture`: the wrapped future is
        // checked before the cancellation future.
        if let Poll::Ready(res) = this.future.poll(cx) {
            Poll::Ready(Some(res))
        } else if this.cancellation.poll(cx).is_ready() {
            Poll::Ready(None)
        } else {
            Poll::Pending
        }
    }
}

impl<F: Future> RunUntilCancelledFutureOwned<F> {
    pub(crate) fn new(cancellation_token: CancellationToken, future: F) -> Self {
        Self {
            cancellation: cancellation_token.cancelled_owned(),
            future,
        }
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false
tokio-rs/tokio
https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio-util/src/sync/poll_semaphore.rs
tokio-util/src/sync/poll_semaphore.rs
use futures_core::Stream;
use std::fmt;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{ready, Context, Poll};
use tokio::sync::{AcquireError, OwnedSemaphorePermit, Semaphore, TryAcquireError};

use super::ReusableBoxFuture;

/// A wrapper around [`Semaphore`] that provides a `poll_acquire` method.
///
/// [`Semaphore`]: tokio::sync::Semaphore
pub struct PollSemaphore {
    semaphore: Arc<Semaphore>,
    // Cached in-flight acquire future, reused across polls to avoid a fresh
    // allocation per `poll_acquire*` call. `None` when no acquire is pending.
    permit_fut: Option<(
        u32, // The number of permits requested.
        ReusableBoxFuture<'static, Result<OwnedSemaphorePermit, AcquireError>>,
    )>,
}

impl PollSemaphore {
    /// Create a new `PollSemaphore`.
    pub fn new(semaphore: Arc<Semaphore>) -> Self {
        Self {
            semaphore,
            permit_fut: None,
        }
    }

    /// Closes the semaphore.
    pub fn close(&self) {
        self.semaphore.close();
    }

    /// Obtain a clone of the inner semaphore.
    pub fn clone_inner(&self) -> Arc<Semaphore> {
        self.semaphore.clone()
    }

    /// Get back the inner semaphore.
    pub fn into_inner(self) -> Arc<Semaphore> {
        self.semaphore
    }

    /// Poll to acquire a permit from the semaphore.
    ///
    /// This can return the following values:
    ///
    ///  - `Poll::Pending` if a permit is not currently available.
    ///  - `Poll::Ready(Some(permit))` if a permit was acquired.
    ///  - `Poll::Ready(None)` if the semaphore has been closed.
    ///
    /// When this method returns `Poll::Pending`, the current task is scheduled
    /// to receive a wakeup when a permit becomes available, or when the
    /// semaphore is closed. Note that on multiple calls to `poll_acquire`, only
    /// the `Waker` from the `Context` passed to the most recent call is
    /// scheduled to receive a wakeup.
    pub fn poll_acquire(&mut self, cx: &mut Context<'_>) -> Poll<Option<OwnedSemaphorePermit>> {
        self.poll_acquire_many(cx, 1)
    }

    /// Poll to acquire many permits from the semaphore.
    ///
    /// This can return the following values:
    ///
    ///  - `Poll::Pending` if a permit is not currently available.
    ///  - `Poll::Ready(Some(permit))` if a permit was acquired.
    ///  - `Poll::Ready(None)` if the semaphore has been closed.
    ///
    /// When this method returns `Poll::Pending`, the current task is scheduled
    /// to receive a wakeup when the permits become available, or when the
    /// semaphore is closed. Note that on multiple calls to `poll_acquire`, only
    /// the `Waker` from the `Context` passed to the most recent call is
    /// scheduled to receive a wakeup.
    pub fn poll_acquire_many(
        &mut self,
        cx: &mut Context<'_>,
        permits: u32,
    ) -> Poll<Option<OwnedSemaphorePermit>> {
        let permit_future = match self.permit_fut.as_mut() {
            // The cached future already requests exactly `permits`; keep polling it.
            Some((prev_permits, fut)) if *prev_permits == permits => fut,
            Some((old_permits, fut_box)) => {
                // We're requesting a different number of permits, so replace the future
                // and record the new amount.
                let fut = Arc::clone(&self.semaphore).acquire_many_owned(permits);
                fut_box.set(fut);
                *old_permits = permits;
                fut_box
            }
            None => {
                // avoid allocations completely if we can grab a permit immediately
                match Arc::clone(&self.semaphore).try_acquire_many_owned(permits) {
                    Ok(permit) => return Poll::Ready(Some(permit)),
                    Err(TryAcquireError::Closed) => return Poll::Ready(None),
                    Err(TryAcquireError::NoPermits) => {}
                }

                let next_fut = Arc::clone(&self.semaphore).acquire_many_owned(permits);
                &mut self
                    .permit_fut
                    .get_or_insert((permits, ReusableBoxFuture::new(next_fut)))
                    .1
            }
        };

        let result = ready!(permit_future.poll(cx));

        // Assume we'll request the same amount of permits in a subsequent call.
        let next_fut = Arc::clone(&self.semaphore).acquire_many_owned(permits);
        permit_future.set(next_fut);

        match result {
            Ok(permit) => Poll::Ready(Some(permit)),
            Err(_closed) => {
                // The semaphore was closed; discard the cached future.
                self.permit_fut = None;
                Poll::Ready(None)
            }
        }
    }

    /// Returns the current number of available permits.
    ///
    /// This is equivalent to the [`Semaphore::available_permits`] method on the
    /// `tokio::sync::Semaphore` type.
    ///
    /// [`Semaphore::available_permits`]: tokio::sync::Semaphore::available_permits
    pub fn available_permits(&self) -> usize {
        self.semaphore.available_permits()
    }

    /// Adds `n` new permits to the semaphore.
    ///
    /// The maximum number of permits is [`Semaphore::MAX_PERMITS`], and this function
    /// will panic if the limit is exceeded.
    ///
    /// This is equivalent to the [`Semaphore::add_permits`] method on the
    /// `tokio::sync::Semaphore` type.
    ///
    /// [`Semaphore::add_permits`]: tokio::sync::Semaphore::add_permits
    pub fn add_permits(&self, n: usize) {
        self.semaphore.add_permits(n);
    }
}

impl Stream for PollSemaphore {
    type Item = OwnedSemaphorePermit;

    // Yields one permit per `poll_next`; the stream ends (`None`) once the
    // semaphore is closed, matching `poll_acquire`'s contract.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<OwnedSemaphorePermit>> {
        Pin::into_inner(self).poll_acquire(cx)
    }
}

impl Clone for PollSemaphore {
    // The clone shares the same underlying semaphore but starts with no
    // in-flight acquire future.
    fn clone(&self) -> PollSemaphore {
        PollSemaphore::new(self.clone_inner())
    }
}

impl fmt::Debug for PollSemaphore {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("PollSemaphore")
            .field("semaphore", &self.semaphore)
            .finish()
    }
}

impl AsRef<Semaphore> for PollSemaphore {
    fn as_ref(&self) -> &Semaphore {
        &self.semaphore
    }
}
rust
MIT
41d1877689f8669902b003a6affce60bdfeb3025
2026-01-04T15:33:40.250594Z
false